aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGreg Kroah-Hartman <gregkh@suse.de>2011-12-08 18:25:56 -0500
committerGreg Kroah-Hartman <gregkh@suse.de>2011-12-08 18:25:56 -0500
commit877a0893e3751022d2e60bea54dd5ac2e4026997 (patch)
tree0ec513e5a13cda947919c14480f9ed60f5c363aa
parent03cf152646ac177c56e3100732143e92278b0630 (diff)
Staging: lttng: remove from the drivers/staging/ tree
The "proper" way to do this is to work with the existing in-kernel tracing subsystem and work to get the missing features that are in lttng into those subsystems. Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/lttng/Kconfig35
-rw-r--r--drivers/staging/lttng/LICENSE27
-rw-r--r--drivers/staging/lttng/Makefile33
-rw-r--r--drivers/staging/lttng/README48
-rw-r--r--drivers/staging/lttng/TODO131
-rw-r--r--drivers/staging/lttng/instrumentation/events/README7
-rw-r--r--drivers/staging/lttng/instrumentation/events/lttng-module/block.h626
-rw-r--r--drivers/staging/lttng/instrumentation/events/lttng-module/irq.h155
-rw-r--r--drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h312
-rw-r--r--drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h34
-rw-r--r--drivers/staging/lttng/instrumentation/events/lttng-module/sched.h400
-rw-r--r--drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h76
-rw-r--r--drivers/staging/lttng/instrumentation/events/mainline/block.h569
-rw-r--r--drivers/staging/lttng/instrumentation/events/mainline/irq.h150
-rw-r--r--drivers/staging/lttng/instrumentation/events/mainline/kvm.h312
-rw-r--r--drivers/staging/lttng/instrumentation/events/mainline/sched.h397
-rw-r--r--drivers/staging/lttng/instrumentation/events/mainline/syscalls.h75
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/3.0.4/x86-64-syscalls-3.0.4263
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6291
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/README18
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h3
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h3
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h7
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h14
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h7
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h4
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h55
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h1163
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h38
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h2232
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h17
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers.h1013
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h3
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers.h2076
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h5
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile1
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c85
-rw-r--r--drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh275
-rw-r--r--drivers/staging/lttng/lib/Makefile11
-rw-r--r--drivers/staging/lttng/lib/align.h61
-rw-r--r--drivers/staging/lttng/lib/bitfield.h400
-rw-r--r--drivers/staging/lttng/lib/bug.h29
-rw-r--r--drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c207
-rw-r--r--drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h117
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/api.h25
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/backend.h250
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/backend_internal.h449
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/backend_types.h80
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/config.h298
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/frontend.h228
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/frontend_api.h358
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/frontend_internal.h424
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/frontend_types.h176
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/iterator.h70
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/nohz.h30
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c854
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c1715
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c798
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c109
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c202
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c390
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/vatomic.h85
-rw-r--r--drivers/staging/lttng/lib/ringbuffer/vfs.h89
-rw-r--r--drivers/staging/lttng/ltt-context.c93
-rw-r--r--drivers/staging/lttng/ltt-debugfs-abi.c777
-rw-r--r--drivers/staging/lttng/ltt-debugfs-abi.h153
-rw-r--r--drivers/staging/lttng/ltt-endian.h31
-rw-r--r--drivers/staging/lttng/ltt-events.c1009
-rw-r--r--drivers/staging/lttng/ltt-events.h452
-rw-r--r--drivers/staging/lttng/ltt-probes.c164
-rw-r--r--drivers/staging/lttng/ltt-ring-buffer-client-discard.c21
-rw-r--r--drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c21
-rw-r--r--drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c21
-rw-r--r--drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c21
-rw-r--r--drivers/staging/lttng/ltt-ring-buffer-client.h569
-rw-r--r--drivers/staging/lttng/ltt-ring-buffer-metadata-client.c21
-rw-r--r--drivers/staging/lttng/ltt-ring-buffer-metadata-client.h330
-rw-r--r--drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c21
-rw-r--r--drivers/staging/lttng/ltt-tracer-core.h28
-rw-r--r--drivers/staging/lttng/ltt-tracer.h67
-rw-r--r--drivers/staging/lttng/lttng-calibrate.c30
-rw-r--r--drivers/staging/lttng/lttng-context-nice.c68
-rw-r--r--drivers/staging/lttng/lttng-context-perf-counters.c271
-rw-r--r--drivers/staging/lttng/lttng-context-pid.c68
-rw-r--r--drivers/staging/lttng/lttng-context-ppid.c71
-rw-r--r--drivers/staging/lttng/lttng-context-prio.c89
-rw-r--r--drivers/staging/lttng/lttng-context-procname.c72
-rw-r--r--drivers/staging/lttng/lttng-context-tid.c68
-rw-r--r--drivers/staging/lttng/lttng-context-vpid.c74
-rw-r--r--drivers/staging/lttng/lttng-context-vppid.c79
-rw-r--r--drivers/staging/lttng/lttng-context-vtid.c74
-rw-r--r--drivers/staging/lttng/lttng-syscalls.c438
-rw-r--r--drivers/staging/lttng/probes/Makefile37
-rw-r--r--drivers/staging/lttng/probes/define_trace.h132
-rw-r--r--drivers/staging/lttng/probes/lttng-events-reset.h84
-rw-r--r--drivers/staging/lttng/probes/lttng-events.h703
-rw-r--r--drivers/staging/lttng/probes/lttng-ftrace.c188
-rw-r--r--drivers/staging/lttng/probes/lttng-kprobes.c164
-rw-r--r--drivers/staging/lttng/probes/lttng-kretprobes.c277
-rw-r--r--drivers/staging/lttng/probes/lttng-probe-block.c31
-rw-r--r--drivers/staging/lttng/probes/lttng-probe-irq.c31
-rw-r--r--drivers/staging/lttng/probes/lttng-probe-kvm.c31
-rw-r--r--drivers/staging/lttng/probes/lttng-probe-lttng.c24
-rw-r--r--drivers/staging/lttng/probes/lttng-probe-sched.c30
-rw-r--r--drivers/staging/lttng/probes/lttng-type-list.h21
-rw-r--r--drivers/staging/lttng/probes/lttng-types.c49
-rw-r--r--drivers/staging/lttng/probes/lttng-types.h72
-rw-r--r--drivers/staging/lttng/probes/lttng.h15
-rw-r--r--drivers/staging/lttng/wrapper/ftrace.h70
-rw-r--r--drivers/staging/lttng/wrapper/inline_memcpy.h11
-rw-r--r--drivers/staging/lttng/wrapper/kallsyms.h30
-rw-r--r--drivers/staging/lttng/wrapper/perf.h32
-rw-r--r--drivers/staging/lttng/wrapper/poll.h14
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/api.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/backend.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h2
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/backend_types.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/config.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/frontend.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/iterator.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/nohz.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/vatomic.h1
-rw-r--r--drivers/staging/lttng/wrapper/ringbuffer/vfs.h1
-rw-r--r--drivers/staging/lttng/wrapper/spinlock.h26
-rw-r--r--drivers/staging/lttng/wrapper/splice.c46
-rw-r--r--drivers/staging/lttng/wrapper/splice.h23
-rw-r--r--drivers/staging/lttng/wrapper/trace-clock.h75
-rw-r--r--drivers/staging/lttng/wrapper/uuid.h29
-rw-r--r--drivers/staging/lttng/wrapper/vmalloc.h49
134 files changed, 0 insertions, 25927 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4c41de15629b..21e2f4b87f14 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,8 +66,6 @@ source "drivers/staging/phison/Kconfig"
66 66
67source "drivers/staging/line6/Kconfig" 67source "drivers/staging/line6/Kconfig"
68 68
69source "drivers/staging/lttng/Kconfig"
70
71source "drivers/gpu/drm/nouveau/Kconfig" 69source "drivers/gpu/drm/nouveau/Kconfig"
72 70
73source "drivers/staging/octeon/Kconfig" 71source "drivers/staging/octeon/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 821602f14b28..7c5808d7212d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_TRANZPORT) += frontier/
25obj-$(CONFIG_POHMELFS) += pohmelfs/ 25obj-$(CONFIG_POHMELFS) += pohmelfs/
26obj-$(CONFIG_IDE_PHISON) += phison/ 26obj-$(CONFIG_IDE_PHISON) += phison/
27obj-$(CONFIG_LINE6_USB) += line6/ 27obj-$(CONFIG_LINE6_USB) += line6/
28obj-$(CONFIG_LTTNG) += lttng/
29obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/ 28obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/
30obj-$(CONFIG_USB_SERIAL_QUATECH_USB2) += quatech_usb2/ 29obj-$(CONFIG_USB_SERIAL_QUATECH_USB2) += quatech_usb2/
31obj-$(CONFIG_OCTEON_ETHERNET) += octeon/ 30obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
diff --git a/drivers/staging/lttng/Kconfig b/drivers/staging/lttng/Kconfig
deleted file mode 100644
index 34c4a4f762ea..000000000000
--- a/drivers/staging/lttng/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
1config LTTNG
2 tristate "LTTng kernel tracer"
3 depends on TRACEPOINTS
4 help
5 The LTTng 2.0 Tracer Toolchain allows integrated kernel and
6 user-space tracing from a single user interface: the "lttng"
7 command. See http://lttng.org website for the "lttng-tools"
8 user-space tracer control tools package and the "babeltrace"
9 package for conversion of trace data to a human-readable
10 format.
11
12 LTTng features:
13 - System-wide tracing across kernel, libraries and
14 applications,
15 - Tracepoints, detailed syscall tracing (fast strace replacement),
16 Function tracer, CPU Performance Monitoring Unit (PMU) counters
17 and kprobes support,
18 - Have the ability to attach "context" information to events in the
19 trace (e.g. any PMU counter, pid, ppid, tid, comm name, etc). All
20 the extra information fields to be collected with events are
21 optional, specified on a per-tracing-session basis (except for
22 timestamp and event id, which are mandatory).
23 - Precise and fast clock sources with near cycle-level
24 timestamps,
25 - Efficient trace data transport:
26 - Compact Binary format with CTF,
27 - Per-core buffers ensures scalability,
28 - Fast-paths in caller context, amortized synchronization,
29 - Zero-copy using splice and mmap system calls, over disk,
30 network or consumed in-place,
31 - Multiple concurrent tracing sessions are supported,
32 - Designed to meet hard real-time constraints,
33 - Supports live streaming of the trace data,
34 - Produces CTF (Common Trace Format) natively (see
35 http://www.efficios.com/ctf).
diff --git a/drivers/staging/lttng/LICENSE b/drivers/staging/lttng/LICENSE
deleted file mode 100644
index bb880bf1faac..000000000000
--- a/drivers/staging/lttng/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
1LTTng modules licensing
2Mathieu Desnoyers
3June 2, 2011
4
5* LGPLv2.1/GPLv2 dual-license
6
7The files contained within this package are licensed under
8LGPLv2.1/GPLv2 dual-license (see lgpl-2.1.txt and gpl-2.0.txt for
9details), except for files identified by the following sections.
10
11* GPLv2 license
12
13These files are licensed exclusively under the GPLv2 license. See
14gpl-2.0.txt for details.
15
16lib/ringbuffer/ring_buffer_splice.c
17lib/ringbuffer/ring_buffer_mmap.c
18instrumentation/events/mainline/*.h
19instrumentation/events/lttng-modules/*.h
20
21* MIT-style license
22
23These files are licensed under an MIT-style license:
24
25lib/prio_heap/lttng_prio_heap.h
26lib/prio_heap/lttng_prio_heap.c
27lib/bitfield.h
diff --git a/drivers/staging/lttng/Makefile b/drivers/staging/lttng/Makefile
deleted file mode 100644
index 9ad4eb0e3972..000000000000
--- a/drivers/staging/lttng/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
1#
2# Makefile for the LTTng modules.
3#
4
5obj-m += ltt-ring-buffer-client-discard.o
6obj-m += ltt-ring-buffer-client-overwrite.o
7obj-m += ltt-ring-buffer-metadata-client.o
8obj-m += ltt-ring-buffer-client-mmap-discard.o
9obj-m += ltt-ring-buffer-client-mmap-overwrite.o
10obj-m += ltt-ring-buffer-metadata-mmap-client.o
11
12obj-m += ltt-relay.o
13ltt-relay-objs := ltt-events.o ltt-debugfs-abi.o \
14 ltt-probes.o ltt-context.o \
15 lttng-context-pid.o lttng-context-procname.o \
16 lttng-context-prio.o lttng-context-nice.o \
17 lttng-context-vpid.o lttng-context-tid.o \
18 lttng-context-vtid.o lttng-context-ppid.o \
19 lttng-context-vppid.o lttng-calibrate.o
20
21ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
22ltt-relay-objs += lttng-syscalls.o
23endif
24
25ifneq ($(CONFIG_PERF_EVENTS),)
26ltt-relay-objs += $(shell \
27 if [ $(VERSION) -ge 3 \
28 -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 33 \) ] ; then \
29 echo "lttng-context-perf-counters.o" ; fi;)
30endif
31
32obj-m += probes/
33obj-m += lib/
diff --git a/drivers/staging/lttng/README b/drivers/staging/lttng/README
deleted file mode 100644
index a154d6e19e97..000000000000
--- a/drivers/staging/lttng/README
+++ /dev/null
@@ -1,48 +0,0 @@
1LTTng 2.0 modules
2
3Mathieu Desnoyers
4November 1st, 2011
5
6LTTng 2.0 kernel modules is currently part of the Linux kernel staging
7tree. It features (new features since LTTng 0.x):
8
9- Produces CTF (Common Trace Format) natively,
10 (http://www.efficios.com/ctf)
11- Tracepoints, Function tracer, CPU Performance Monitoring Unit (PMU)
12 counters, kprobes, and kretprobes support,
13- Integrated interface for both kernel and userspace tracing,
14- Have the ability to attach "context" information to events in the
15 trace (e.g. any PMU counter, pid, ppid, tid, comm name, etc).
16 All the extra information fields to be collected with events are
17 optional, specified on a per-tracing-session basis (except for
18 timestamp and event id, which are mandatory).
19
20To build and install, you need to select "Staging" modules, and the
21LTTng kernel tracer.
22
23Use lttng-tools to control the tracer. LTTng tools should automatically
24load the kernel modules when needed. Use Babeltrace to print traces as a
25human-readable text log. These tools are available at the following URL:
26http://lttng.org/lttng2.0
27
28Please note that the LTTng-UST 2.0 (user-space tracing counterpart of
29LTTng 2.0) is now ready to be used, but still only available from the
30git repository.
31
32So far, it has been tested on vanilla Linux kernels 2.6.38, 2.6.39 and
333.0 (on x86 32/64-bit, and powerpc 32-bit at the moment, build tested on
34ARM). It should work fine with newer kernels and other architectures,
35but expect build issues with kernels older than 2.6.36. The clock source
36currently used is the standard gettimeofday (slower, less scalable and
37less precise than the LTTng 0.x clocks). Support for LTTng 0.x clocks
38will be added back soon into LTTng 2.0. Please note that lttng-modules
392.0 can build on a Linux kernel patched with the LTTng 0.x patchset, but
40the lttng-modules 2.0 replace the lttng-modules 0.x, so both tracers
41cannot be installed at the same time for a given kernel version.
42
43* Note about Perf PMU counters support
44
45Each PMU counter has its zero value set when it is attached to a context with
46add-context. Therefore, it is normal that the same counters attached to both the
47stream context and event context show different values for a given event; what
48matters is that they increment at the same rate.
diff --git a/drivers/staging/lttng/TODO b/drivers/staging/lttng/TODO
deleted file mode 100644
index 5e3a581d18e8..000000000000
--- a/drivers/staging/lttng/TODO
+++ /dev/null
@@ -1,131 +0,0 @@
1Please contact Mathieu Desnoyers <mathieu.desnoyers@efficios.com> for
2questions about this TODO list. The "Cleanup/Testing" section would be
3good to go through before integration into mainline. The "Features"
4section is a wish list of features to complete before releasing the
5"LTTng 2.0" final version, but are not required to have LTTng working.
6These features are mostly performance enhancements and instrumentation
7enhancements.
8
9TODO:
10
11A) Cleanup/Testing
12
13 1) Remove debugfs "lttng" file (keep only procfs "lttng" file).
14 The rationale for this is that this file is needed for
15 user-level tracing support (LTTng-UST 2.0) intended to be
16 used on production system, and therefore should be present as
17 part of a "usually mounted" filesystem rather than a debug
18 filesystem.
19
20 2) Cleanup wrappers. The drivers/staging/lttng/wrapper directory
21 contains various wrapper headers that use kallsyms lookups to
22 work around some missing EXPORT_SYMBOL_GPL() in the mainline
23 kernel. Ideally, those few symbols should become exported to
24 modules by the kernel.
25
26 3) Test lib ring buffer snapshot feature.
27 When working on the lttngtop project, Julien Desfossez
28 reported that he needed to push the consumer position
29 forward explicitely with lib_ring_buffer_put_next_subbuf.
30 This means that although the usual case of pairs of
31 lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf
32 work fine, there is probably a problem that needs to be
33 investigated in
34 lib_ring_buffer_get_subbuf/lib_ring_buffer_put_subbuf, which
35 depend on the producer to push the reader position.
36 Contact: Julien Desfossez <julien.desfossez@polymtl.ca>
37
38
39B) Features
40
41 1) Integration of the LTTng 0.x trace clocks into
42 LTTng 2.0.
43 Currently using mainline kernel monotonic clock. NMIs can
44 therefore not be traced, and this causes a significant
45 performance degradation compared to the LTTng 0.x trace
46 clocks. Imply the creation of drivers/staging/lttng/arch to
47 contain the arch-specific clock support files.
48 * Dependency: addition of clock descriptions to CTF.
49 See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
50 for the LTTng 0.x git tree.
51
52 2) Port OMAP3 LTTng trace clocks to x86 to support systems
53 without constant TSC.
54 * Dependency: (B.1)
55 See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
56 for the LTTng 0.x git tree.
57
58 3) Implement mmap operation on an anonymous file created by a
59 LTTNG_KERNEL_CLOCK ioctl to export data to export
60 synchronized kernel and user-level LTTng trace clocks:
61 with:
62 - shared per-cpu data,
63 - read seqlock.
64 The content exported by this shared memory area will be
65 arch-specific.
66 * Dependency: (B.1) && (B.2)
67 See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
68 for the LTTng 0.x git tree, which has vDSO support for
69 LTTng trace clock on the x86 architecture.
70
71 3) Integrate the "statedump" module from LTTng 0.x into LTTng
72 2.0.
73 * Dependency: addition of "dynamic enumerations" type to CTF.
74 See: http://git.lttng.org/?p=lttng-modules.git;a=shortlog;h=refs/heads/v0.19-stable
75 ltt-statedump.c
76
77 4) Generate system call TRACE_EVENT headers for all
78 architectures (currently done: x86 32/64).
79
80 5) Define "unknown" system calls into instrumentation/syscalls
81 override files / or do SYSCALL_DEFINE improvements to
82 mainline kernel to allow automatic generation of these
83 missing system call descriptions.
84
85 6) Create missing tracepoint event headers files into
86 instrumentation/events from headers located in
87 include/trace/events/. Choice: either do as currently done,
88 and copy those headers locally into the lttng driver and
89 perform the modifications locally, or push TRACE_EVENT API
90 modification into mainline headers, which would require
91 collaboration from Ftrace/Perf maintainers.
92
93 7) Poll: implement a poll and/or epoll exclusive wakeup scheme,
94 which contradicts POSIX, but protect multiple consumer
95 threads from thundering herd effect.
96
97 8) Re-integrate sample modules from libringbuffer into
98 lttng driver. Those modules can be used as example of how to
99 use libringbuffer in other contexts than LTTng, and are
100 useful to perform benchmarks of the ringbuffer library.
101 See: http://www.efficios.com/ringbuffer
102
103 9) NOHZ support for lib ring buffer. NOHZ infrastructure in the
104 Linux kernel does not support notifiers chains, which does
105 not let LTTng play nicely with low power consumption setups
106 for flight recorder (overwrite mode) live traces. One way to
107 allow integration between NOHZ and LTTng would be to add
108 support for such notifiers into NOHZ kernel infrastructure.
109
110 10) Turn drivers/staging/lttng/ltt-probes.c probe_list into a
111 hash table. Turns O(n^2) trace systems registration (cost
112 for n systems) into O(n). (O(1) per system)
113
114 11) drivers/staging/lttng/probes/lttng-ftrace.c:
115 LTTng currently uses kretprobes for per-function tracing,
116 not the function tracer. So lttng-ftrace.c should be used
117 for "all" function tracing.
118
119 12) drivers/staging/lttng/probes/lttng-types.c:
120 This is a currently unused placeholder to export entire C
121 type declarations into the trace metadata, e.g. for support
122 of describing the layout of structures/enumeration mapping
123 along with syscall entry events. The design of this support
124 will likely change though, and become integrated with the
125 TRACE_EVENT support within lttng, by adding new macros, and
126 support for generation of metadata from these macros, to
127 allow description of those compound types/enumerations.
128
129Please send patches
130To: Greg Kroah-Hartman <greg@kroah.com>
131To: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
diff --git a/drivers/staging/lttng/instrumentation/events/README b/drivers/staging/lttng/instrumentation/events/README
deleted file mode 100644
index dad2cbbd9de8..000000000000
--- a/drivers/staging/lttng/instrumentation/events/README
+++ /dev/null
@@ -1,7 +0,0 @@
1The workflow for updating patches from newer kernel:
2
3Diff mainline/ and lttng-module/ directories.
4
5Pull the new headers from mainline kernel to mainline/.
6Copy them into lttng-modules.
7Apply diff. Fix conflicts.
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/block.h b/drivers/staging/lttng/instrumentation/events/lttng-module/block.h
deleted file mode 100644
index 42184f3d1e71..000000000000
--- a/drivers/staging/lttng/instrumentation/events/lttng-module/block.h
+++ /dev/null
@@ -1,626 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM block
3
4#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BLOCK_H
6
7#include <linux/blktrace_api.h>
8#include <linux/blkdev.h>
9#include <linux/tracepoint.h>
10#include <linux/trace_seq.h>
11
12#ifndef _TRACE_BLOCK_DEF_
13#define _TRACE_BLOCK_DEF_
14
15#define __blk_dump_cmd(cmd, len) "<unknown>"
16
17enum {
18 RWBS_FLAG_WRITE = (1 << 0),
19 RWBS_FLAG_DISCARD = (1 << 1),
20 RWBS_FLAG_READ = (1 << 2),
21 RWBS_FLAG_RAHEAD = (1 << 3),
22 RWBS_FLAG_SYNC = (1 << 4),
23 RWBS_FLAG_META = (1 << 5),
24 RWBS_FLAG_SECURE = (1 << 6),
25};
26
27#endif /* _TRACE_BLOCK_DEF_ */
28
29#define __print_rwbs_flags(rwbs) \
30 __print_flags(rwbs, "", \
31 { RWBS_FLAG_WRITE, "W" }, \
32 { RWBS_FLAG_DISCARD, "D" }, \
33 { RWBS_FLAG_READ, "R" }, \
34 { RWBS_FLAG_RAHEAD, "A" }, \
35 { RWBS_FLAG_SYNC, "S" }, \
36 { RWBS_FLAG_META, "M" }, \
37 { RWBS_FLAG_SECURE, "E" })
38
39#define blk_fill_rwbs(rwbs, rw, bytes) \
40 tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
41 ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
42 ( (bytes) ? RWBS_FLAG_READ : \
43 ( 0 )))) \
44 | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
45 | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
46 | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
47 | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))
48
49DECLARE_EVENT_CLASS(block_rq_with_error,
50
51 TP_PROTO(struct request_queue *q, struct request *rq),
52
53 TP_ARGS(q, rq),
54
55 TP_STRUCT__entry(
56 __field( dev_t, dev )
57 __field( sector_t, sector )
58 __field( unsigned int, nr_sector )
59 __field( int, errors )
60 __field( unsigned int, rwbs )
61 __dynamic_array_hex( unsigned char, cmd,
62 (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
63 rq->cmd_len : 0)
64 ),
65
66 TP_fast_assign(
67 tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
68 tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
69 0 : blk_rq_pos(rq))
70 tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
71 0 : blk_rq_sectors(rq))
72 tp_assign(errors, rq->errors)
73 blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
74 tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
75 rq->cmd : NULL);
76 ),
77
78 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
79 MAJOR(__entry->dev), MINOR(__entry->dev),
80 __print_rwbs_flags(__entry->rwbs),
81 __blk_dump_cmd(__get_dynamic_array(cmd),
82 __get_dynamic_array_len(cmd)),
83 (unsigned long long)__entry->sector,
84 __entry->nr_sector, __entry->errors)
85)
86
87/**
88 * block_rq_abort - abort block operation request
89 * @q: queue containing the block operation request
90 * @rq: block IO operation request
91 *
92 * Called immediately after pending block IO operation request @rq in
93 * queue @q is aborted. The fields in the operation request @rq
94 * can be examined to determine which device and sectors the pending
95 * operation would access.
96 */
97DEFINE_EVENT(block_rq_with_error, block_rq_abort,
98
99 TP_PROTO(struct request_queue *q, struct request *rq),
100
101 TP_ARGS(q, rq)
102)
103
104/**
105 * block_rq_requeue - place block IO request back on a queue
106 * @q: queue holding operation
107 * @rq: block IO operation request
108 *
109 * The block operation request @rq is being placed back into queue
110 * @q. For some reason the request was not completed and needs to be
111 * put back in the queue.
112 */
113DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
114
115 TP_PROTO(struct request_queue *q, struct request *rq),
116
117 TP_ARGS(q, rq)
118)
119
120/**
121 * block_rq_complete - block IO operation completed by device driver
122 * @q: queue containing the block operation request
123 * @rq: block operations request
124 *
125 * The block_rq_complete tracepoint event indicates that some portion
126 * of operation request has been completed by the device driver. If
127 * the @rq->bio is %NULL, then there is absolutely no additional work to
128 * do for the request. If @rq->bio is non-NULL then there is
129 * additional work required to complete the request.
130 */
131DEFINE_EVENT(block_rq_with_error, block_rq_complete,
132
133 TP_PROTO(struct request_queue *q, struct request *rq),
134
135 TP_ARGS(q, rq)
136)
137
138DECLARE_EVENT_CLASS(block_rq,
139
140 TP_PROTO(struct request_queue *q, struct request *rq),
141
142 TP_ARGS(q, rq),
143
144 TP_STRUCT__entry(
145 __field( dev_t, dev )
146 __field( sector_t, sector )
147 __field( unsigned int, nr_sector )
148 __field( unsigned int, bytes )
149 __field( unsigned int, rwbs )
150 __array_text( char, comm, TASK_COMM_LEN )
151 __dynamic_array_hex( unsigned char, cmd,
152 (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
153 rq->cmd_len : 0)
154 ),
155
156 TP_fast_assign(
157 tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
158 tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
159 0 : blk_rq_pos(rq))
160 tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
161 0 : blk_rq_sectors(rq))
162 tp_assign(bytes, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
163 blk_rq_bytes(rq) : 0)
164 blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
165 tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
166 rq->cmd : NULL);
167 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
168 ),
169
170 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
171 MAJOR(__entry->dev), MINOR(__entry->dev),
172 __print_rwbs_flags(__entry->rwbs),
173 __entry->bytes,
174 __blk_dump_cmd(__get_dynamic_array(cmd),
175 __get_dynamic_array_len(cmd)),
176 (unsigned long long)__entry->sector,
177 __entry->nr_sector, __entry->comm)
178)
179
180/**
181 * block_rq_insert - insert block operation request into queue
182 * @q: target queue
183 * @rq: block IO operation request
184 *
185 * Called immediately before block operation request @rq is inserted
186 * into queue @q. The fields in the operation request @rq struct can
187 * be examined to determine which device and sectors the pending
188 * operation would access.
189 */
190DEFINE_EVENT(block_rq, block_rq_insert,
191
192 TP_PROTO(struct request_queue *q, struct request *rq),
193
194 TP_ARGS(q, rq)
195)
196
197/**
198 * block_rq_issue - issue pending block IO request operation to device driver
199 * @q: queue holding operation
200 * @rq: block IO operation operation request
201 *
202 * Called when block operation request @rq from queue @q is sent to a
203 * device driver for processing.
204 */
205DEFINE_EVENT(block_rq, block_rq_issue,
206
207 TP_PROTO(struct request_queue *q, struct request *rq),
208
209 TP_ARGS(q, rq)
210)
211
212/**
213 * block_bio_bounce - used bounce buffer when processing block operation
214 * @q: queue holding the block operation
215 * @bio: block operation
216 *
217 * A bounce buffer was used to handle the block operation @bio in @q.
218 * This occurs when hardware limitations prevent a direct transfer of
219 * data between the @bio data memory area and the IO device. Use of a
220 * bounce buffer requires extra copying of data and decreases
221 * performance.
222 */
223TRACE_EVENT(block_bio_bounce,
224
225 TP_PROTO(struct request_queue *q, struct bio *bio),
226
227 TP_ARGS(q, bio),
228
229 TP_STRUCT__entry(
230 __field( dev_t, dev )
231 __field( sector_t, sector )
232 __field( unsigned int, nr_sector )
233 __field( unsigned int, rwbs )
234 __array_text( char, comm, TASK_COMM_LEN )
235 ),
236
237 TP_fast_assign(
238 tp_assign(dev, bio->bi_bdev ?
239 bio->bi_bdev->bd_dev : 0)
240 tp_assign(sector, bio->bi_sector)
241 tp_assign(nr_sector, bio->bi_size >> 9)
242 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
243 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
244 ),
245
246 TP_printk("%d,%d %s %llu + %u [%s]",
247 MAJOR(__entry->dev), MINOR(__entry->dev),
248 __print_rwbs_flags(__entry->rwbs),
249 (unsigned long long)__entry->sector,
250 __entry->nr_sector, __entry->comm)
251)
252
253/**
254 * block_bio_complete - completed all work on the block operation
255 * @q: queue holding the block operation
256 * @bio: block operation completed
257 * @error: io error value
258 *
259 * This tracepoint indicates there is no further work to do on this
260 * block IO operation @bio.
261 */
262TRACE_EVENT(block_bio_complete,
263
264 TP_PROTO(struct request_queue *q, struct bio *bio, int error),
265
266 TP_ARGS(q, bio, error),
267
268 TP_STRUCT__entry(
269 __field( dev_t, dev )
270 __field( sector_t, sector )
271 __field( unsigned, nr_sector )
272 __field( int, error )
273 __field( unsigned int, rwbs )
274 ),
275
276 TP_fast_assign(
277 tp_assign(dev, bio->bi_bdev->bd_dev)
278 tp_assign(sector, bio->bi_sector)
279 tp_assign(nr_sector, bio->bi_size >> 9)
280 tp_assign(error, error)
281 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
282 ),
283
284 TP_printk("%d,%d %s %llu + %u [%d]",
285 MAJOR(__entry->dev), MINOR(__entry->dev),
286 __print_rwbs_flags(__entry->rwbs),
287 (unsigned long long)__entry->sector,
288 __entry->nr_sector, __entry->error)
289)
290
291DECLARE_EVENT_CLASS(block_bio,
292
293 TP_PROTO(struct request_queue *q, struct bio *bio),
294
295 TP_ARGS(q, bio),
296
297 TP_STRUCT__entry(
298 __field( dev_t, dev )
299 __field( sector_t, sector )
300 __field( unsigned int, nr_sector )
301 __field( unsigned int, rwbs )
302 __array_text( char, comm, TASK_COMM_LEN )
303 ),
304
305 TP_fast_assign(
306 tp_assign(dev, bio->bi_bdev->bd_dev)
307 tp_assign(sector, bio->bi_sector)
308 tp_assign(nr_sector, bio->bi_size >> 9)
309 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
310 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
311 ),
312
313 TP_printk("%d,%d %s %llu + %u [%s]",
314 MAJOR(__entry->dev), MINOR(__entry->dev),
315 __print_rwbs_flags(__entry->rwbs),
316 (unsigned long long)__entry->sector,
317 __entry->nr_sector, __entry->comm)
318)
319
320/**
321 * block_bio_backmerge - merging block operation to the end of an existing operation
322 * @q: queue holding operation
323 * @bio: new block operation to merge
324 *
325 * Merging block request @bio to the end of an existing block request
326 * in queue @q.
327 */
328DEFINE_EVENT(block_bio, block_bio_backmerge,
329
330 TP_PROTO(struct request_queue *q, struct bio *bio),
331
332 TP_ARGS(q, bio)
333)
334
335/**
336 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
337 * @q: queue holding operation
338 * @bio: new block operation to merge
339 *
340 * Merging block IO operation @bio to the beginning of an existing block
341 * operation in queue @q.
342 */
343DEFINE_EVENT(block_bio, block_bio_frontmerge,
344
345 TP_PROTO(struct request_queue *q, struct bio *bio),
346
347 TP_ARGS(q, bio)
348)
349
350/**
351 * block_bio_queue - putting new block IO operation in queue
352 * @q: queue holding operation
353 * @bio: new block operation
354 *
355 * About to place the block IO operation @bio into queue @q.
356 */
357DEFINE_EVENT(block_bio, block_bio_queue,
358
359 TP_PROTO(struct request_queue *q, struct bio *bio),
360
361 TP_ARGS(q, bio)
362)
363
364DECLARE_EVENT_CLASS(block_get_rq,
365
366 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
367
368 TP_ARGS(q, bio, rw),
369
370 TP_STRUCT__entry(
371 __field( dev_t, dev )
372 __field( sector_t, sector )
373 __field( unsigned int, nr_sector )
374 __field( unsigned int, rwbs )
375 __array_text( char, comm, TASK_COMM_LEN )
376 ),
377
378 TP_fast_assign(
379 tp_assign(dev, bio ? bio->bi_bdev->bd_dev : 0)
380 tp_assign(sector, bio ? bio->bi_sector : 0)
381 tp_assign(nr_sector, bio ? bio->bi_size >> 9 : 0)
382 blk_fill_rwbs(rwbs, bio ? bio->bi_rw : 0,
383 bio ? bio->bi_size >> 9 : 0)
384 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
385 ),
386
387 TP_printk("%d,%d %s %llu + %u [%s]",
388 MAJOR(__entry->dev), MINOR(__entry->dev),
389 __print_rwbs_flags(__entry->rwbs),
390 (unsigned long long)__entry->sector,
391 __entry->nr_sector, __entry->comm)
392)
393
394/**
395 * block_getrq - get a free request entry in queue for block IO operations
396 * @q: queue for operations
397 * @bio: pending block IO operation
398 * @rw: low bit indicates a read (%0) or a write (%1)
399 *
400 * A request struct for queue @q has been allocated to handle the
401 * block IO operation @bio.
402 */
403DEFINE_EVENT(block_get_rq, block_getrq,
404
405 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
406
407 TP_ARGS(q, bio, rw)
408)
409
410/**
411 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
412 * @q: queue for operation
413 * @bio: pending block IO operation
414 * @rw: low bit indicates a read (%0) or a write (%1)
415 *
416 * In the case where a request struct cannot be provided for queue @q
417 * the process needs to wait for an request struct to become
418 * available. This tracepoint event is generated each time the
419 * process goes to sleep waiting for request struct become available.
420 */
421DEFINE_EVENT(block_get_rq, block_sleeprq,
422
423 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
424
425 TP_ARGS(q, bio, rw)
426)
427
428/**
429 * block_plug - keep operations requests in request queue
430 * @q: request queue to plug
431 *
432 * Plug the request queue @q. Do not allow block operation requests
433 * to be sent to the device driver. Instead, accumulate requests in
434 * the queue to improve throughput performance of the block device.
435 */
436TRACE_EVENT(block_plug,
437
438 TP_PROTO(struct request_queue *q),
439
440 TP_ARGS(q),
441
442 TP_STRUCT__entry(
443 __array_text( char, comm, TASK_COMM_LEN )
444 ),
445
446 TP_fast_assign(
447 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
448 ),
449
450 TP_printk("[%s]", __entry->comm)
451)
452
453DECLARE_EVENT_CLASS(block_unplug,
454
455 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
456
457 TP_ARGS(q, depth, explicit),
458
459 TP_STRUCT__entry(
460 __field( int, nr_rq )
461 __array_text( char, comm, TASK_COMM_LEN )
462 ),
463
464 TP_fast_assign(
465 tp_assign(nr_rq, depth)
466 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
467 ),
468
469 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
470)
471
472/**
473 * block_unplug - release of operations requests in request queue
474 * @q: request queue to unplug
475 * @depth: number of requests just added to the queue
476 * @explicit: whether this was an explicit unplug, or one from schedule()
477 *
478 * Unplug request queue @q because device driver is scheduled to work
479 * on elements in the request queue.
480 */
481DEFINE_EVENT(block_unplug, block_unplug,
482
483 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
484
485 TP_ARGS(q, depth, explicit)
486)
487
488/**
489 * block_split - split a single bio struct into two bio structs
490 * @q: queue containing the bio
491 * @bio: block operation being split
492 * @new_sector: The starting sector for the new bio
493 *
494 * The bio request @bio in request queue @q needs to be split into two
495 * bio requests. The newly created @bio request starts at
496 * @new_sector. This split may be required due to hardware limitation
497 * such as operation crossing device boundaries in a RAID system.
498 */
499TRACE_EVENT(block_split,
500
501 TP_PROTO(struct request_queue *q, struct bio *bio,
502 unsigned int new_sector),
503
504 TP_ARGS(q, bio, new_sector),
505
506 TP_STRUCT__entry(
507 __field( dev_t, dev )
508 __field( sector_t, sector )
509 __field( sector_t, new_sector )
510 __field( unsigned int, rwbs )
511 __array_text( char, comm, TASK_COMM_LEN )
512 ),
513
514 TP_fast_assign(
515 tp_assign(dev, bio->bi_bdev->bd_dev)
516 tp_assign(sector, bio->bi_sector)
517 tp_assign(new_sector, new_sector)
518 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
519 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
520 ),
521
522 TP_printk("%d,%d %s %llu / %llu [%s]",
523 MAJOR(__entry->dev), MINOR(__entry->dev),
524 __print_rwbs_flags(__entry->rwbs),
525 (unsigned long long)__entry->sector,
526 (unsigned long long)__entry->new_sector,
527 __entry->comm)
528)
529
530/**
531 * block_bio_remap - map request for a logical device to the raw device
532 * @q: queue holding the operation
533 * @bio: revised operation
534 * @dev: device for the operation
535 * @from: original sector for the operation
536 *
537 * An operation for a logical device has been mapped to the
538 * raw block device.
539 */
540TRACE_EVENT(block_bio_remap,
541
542 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
543 sector_t from),
544
545 TP_ARGS(q, bio, dev, from),
546
547 TP_STRUCT__entry(
548 __field( dev_t, dev )
549 __field( sector_t, sector )
550 __field( unsigned int, nr_sector )
551 __field( dev_t, old_dev )
552 __field( sector_t, old_sector )
553 __field( unsigned int, rwbs )
554 ),
555
556 TP_fast_assign(
557 tp_assign(dev, bio->bi_bdev->bd_dev)
558 tp_assign(sector, bio->bi_sector)
559 tp_assign(nr_sector, bio->bi_size >> 9)
560 tp_assign(old_dev, dev)
561 tp_assign(old_sector, from)
562 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
563 ),
564
565 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
566 MAJOR(__entry->dev), MINOR(__entry->dev),
567 __print_rwbs_flags(__entry->rwbs),
568 (unsigned long long)__entry->sector,
569 __entry->nr_sector,
570 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
571 (unsigned long long)__entry->old_sector)
572)
573
574/**
575 * block_rq_remap - map request for a block operation request
576 * @q: queue holding the operation
577 * @rq: block IO operation request
578 * @dev: device for the operation
579 * @from: original sector for the operation
580 *
581 * The block operation request @rq in @q has been remapped. The block
582 * operation request @rq holds the current information and @from hold
583 * the original sector.
584 */
585TRACE_EVENT(block_rq_remap,
586
587 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
588 sector_t from),
589
590 TP_ARGS(q, rq, dev, from),
591
592 TP_STRUCT__entry(
593 __field( dev_t, dev )
594 __field( sector_t, sector )
595 __field( unsigned int, nr_sector )
596 __field( dev_t, old_dev )
597 __field( sector_t, old_sector )
598 __field( unsigned int, rwbs )
599 ),
600
601 TP_fast_assign(
602 tp_assign(dev, disk_devt(rq->rq_disk))
603 tp_assign(sector, blk_rq_pos(rq))
604 tp_assign(nr_sector, blk_rq_sectors(rq))
605 tp_assign(old_dev, dev)
606 tp_assign(old_sector, from)
607 blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
608 ),
609
610 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
611 MAJOR(__entry->dev), MINOR(__entry->dev),
612 __print_rwbs_flags(__entry->rwbs),
613 (unsigned long long)__entry->sector,
614 __entry->nr_sector,
615 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
616 (unsigned long long)__entry->old_sector)
617)
618
619#undef __print_rwbs_flags
620#undef blk_fill_rwbs
621
622#endif /* _TRACE_BLOCK_H */
623
624/* This part must be outside protection */
625#include "../../../probes/define_trace.h"
626
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h b/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h
deleted file mode 100644
index 344015d4654a..000000000000
--- a/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h
+++ /dev/null
@@ -1,155 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM irq
3
4#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_IRQ_H
6
7#include <linux/tracepoint.h>
8
9#ifndef _TRACE_IRQ_DEF_
10#define _TRACE_IRQ_DEF_
11
12struct irqaction;
13struct softirq_action;
14
15#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
16#define show_softirq_name(val) \
17 __print_symbolic(val, \
18 softirq_name(HI), \
19 softirq_name(TIMER), \
20 softirq_name(NET_TX), \
21 softirq_name(NET_RX), \
22 softirq_name(BLOCK), \
23 softirq_name(BLOCK_IOPOLL), \
24 softirq_name(TASKLET), \
25 softirq_name(SCHED), \
26 softirq_name(HRTIMER), \
27 softirq_name(RCU))
28
29#endif /* _TRACE_IRQ_DEF_ */
30
31/**
32 * irq_handler_entry - called immediately before the irq action handler
33 * @irq: irq number
34 * @action: pointer to struct irqaction
35 *
36 * The struct irqaction pointed to by @action contains various
37 * information about the handler, including the device name,
38 * @action->name, and the device id, @action->dev_id. When used in
39 * conjunction with the irq_handler_exit tracepoint, we can figure
40 * out irq handler latencies.
41 */
42TRACE_EVENT(irq_handler_entry,
43
44 TP_PROTO(int irq, struct irqaction *action),
45
46 TP_ARGS(irq, action),
47
48 TP_STRUCT__entry(
49 __field( int, irq )
50 __string( name, action->name )
51 ),
52
53 TP_fast_assign(
54 tp_assign(irq, irq)
55 tp_strcpy(name, action->name)
56 ),
57
58 TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
59)
60
61/**
62 * irq_handler_exit - called immediately after the irq action handler returns
63 * @irq: irq number
64 * @action: pointer to struct irqaction
65 * @ret: return value
66 *
67 * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
68 * @action->handler scuccessully handled this irq. Otherwise, the irq might be
69 * a shared irq line, or the irq was not handled successfully. Can be used in
70 * conjunction with the irq_handler_entry to understand irq handler latencies.
71 */
72TRACE_EVENT(irq_handler_exit,
73
74 TP_PROTO(int irq, struct irqaction *action, int ret),
75
76 TP_ARGS(irq, action, ret),
77
78 TP_STRUCT__entry(
79 __field( int, irq )
80 __field( int, ret )
81 ),
82
83 TP_fast_assign(
84 tp_assign(irq, irq)
85 tp_assign(ret, ret)
86 ),
87
88 TP_printk("irq=%d ret=%s",
89 __entry->irq, __entry->ret ? "handled" : "unhandled")
90)
91
92DECLARE_EVENT_CLASS(softirq,
93
94 TP_PROTO(unsigned int vec_nr),
95
96 TP_ARGS(vec_nr),
97
98 TP_STRUCT__entry(
99 __field( unsigned int, vec )
100 ),
101
102 TP_fast_assign(
103 tp_assign(vec, vec_nr)
104 ),
105
106 TP_printk("vec=%u [action=%s]", __entry->vec,
107 show_softirq_name(__entry->vec))
108)
109
110/**
111 * softirq_entry - called immediately before the softirq handler
112 * @vec_nr: softirq vector number
113 *
114 * When used in combination with the softirq_exit tracepoint
115 * we can determine the softirq handler runtine.
116 */
117DEFINE_EVENT(softirq, softirq_entry,
118
119 TP_PROTO(unsigned int vec_nr),
120
121 TP_ARGS(vec_nr)
122)
123
124/**
125 * softirq_exit - called immediately after the softirq handler returns
126 * @vec_nr: softirq vector number
127 *
128 * When used in combination with the softirq_entry tracepoint
129 * we can determine the softirq handler runtine.
130 */
131DEFINE_EVENT(softirq, softirq_exit,
132
133 TP_PROTO(unsigned int vec_nr),
134
135 TP_ARGS(vec_nr)
136)
137
138/**
139 * softirq_raise - called immediately when a softirq is raised
140 * @vec_nr: softirq vector number
141 *
142 * When used in combination with the softirq_entry tracepoint
143 * we can determine the softirq raise to run latency.
144 */
145DEFINE_EVENT(softirq, softirq_raise,
146
147 TP_PROTO(unsigned int vec_nr),
148
149 TP_ARGS(vec_nr)
150)
151
152#endif /* _TRACE_IRQ_H */
153
154/* This part must be outside protection */
155#include "../../../probes/define_trace.h"
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h b/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h
deleted file mode 100644
index e10455bc8cf8..000000000000
--- a/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h
+++ /dev/null
@@ -1,312 +0,0 @@
1#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVM_MAIN_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvm
8
9#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
10
11#define kvm_trace_exit_reason \
12 ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
13 ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
14 ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
15 ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
16 ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
17
18TRACE_EVENT(kvm_userspace_exit,
19 TP_PROTO(__u32 reason, int errno),
20 TP_ARGS(reason, errno),
21
22 TP_STRUCT__entry(
23 __field( __u32, reason )
24 __field( int, errno )
25 ),
26
27 TP_fast_assign(
28 tp_assign(reason, reason)
29 tp_assign(errno, errno)
30 ),
31
32 TP_printk("reason %s (%d)",
33 __entry->errno < 0 ?
34 (__entry->errno == -EINTR ? "restart" : "error") :
35 __print_symbolic(__entry->reason, kvm_trace_exit_reason),
36 __entry->errno < 0 ? -__entry->errno : __entry->reason)
37)
38
39#if defined(__KVM_HAVE_IOAPIC)
40TRACE_EVENT(kvm_set_irq,
41 TP_PROTO(unsigned int gsi, int level, int irq_source_id),
42 TP_ARGS(gsi, level, irq_source_id),
43
44 TP_STRUCT__entry(
45 __field( unsigned int, gsi )
46 __field( int, level )
47 __field( int, irq_source_id )
48 ),
49
50 TP_fast_assign(
51 tp_assign(gsi, gsi)
52 tp_assign(level, level)
53 tp_assign(irq_source_id, irq_source_id)
54 ),
55
56 TP_printk("gsi %u level %d source %d",
57 __entry->gsi, __entry->level, __entry->irq_source_id)
58)
59
60#define kvm_deliver_mode \
61 {0x0, "Fixed"}, \
62 {0x1, "LowPrio"}, \
63 {0x2, "SMI"}, \
64 {0x3, "Res3"}, \
65 {0x4, "NMI"}, \
66 {0x5, "INIT"}, \
67 {0x6, "SIPI"}, \
68 {0x7, "ExtINT"}
69
70TRACE_EVENT(kvm_ioapic_set_irq,
71 TP_PROTO(__u64 e, int pin, bool coalesced),
72 TP_ARGS(e, pin, coalesced),
73
74 TP_STRUCT__entry(
75 __field( __u64, e )
76 __field( int, pin )
77 __field( bool, coalesced )
78 ),
79
80 TP_fast_assign(
81 tp_assign(e, e)
82 tp_assign(pin, pin)
83 tp_assign(coalesced, coalesced)
84 ),
85
86 TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
87 __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
88 __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
89 (__entry->e & (1<<11)) ? "logical" : "physical",
90 (__entry->e & (1<<15)) ? "level" : "edge",
91 (__entry->e & (1<<16)) ? "|masked" : "",
92 __entry->coalesced ? " (coalesced)" : "")
93)
94
95TRACE_EVENT(kvm_msi_set_irq,
96 TP_PROTO(__u64 address, __u64 data),
97 TP_ARGS(address, data),
98
99 TP_STRUCT__entry(
100 __field( __u64, address )
101 __field( __u64, data )
102 ),
103
104 TP_fast_assign(
105 tp_assign(address, address)
106 tp_assign(data, data)
107 ),
108
109 TP_printk("dst %u vec %x (%s|%s|%s%s)",
110 (u8)(__entry->address >> 12), (u8)__entry->data,
111 __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
112 (__entry->address & (1<<2)) ? "logical" : "physical",
113 (__entry->data & (1<<15)) ? "level" : "edge",
114 (__entry->address & (1<<3)) ? "|rh" : "")
115)
116
117#define kvm_irqchips \
118 {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
119 {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
120 {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
121
122TRACE_EVENT(kvm_ack_irq,
123 TP_PROTO(unsigned int irqchip, unsigned int pin),
124 TP_ARGS(irqchip, pin),
125
126 TP_STRUCT__entry(
127 __field( unsigned int, irqchip )
128 __field( unsigned int, pin )
129 ),
130
131 TP_fast_assign(
132 tp_assign(irqchip, irqchip)
133 tp_assign(pin, pin)
134 ),
135
136 TP_printk("irqchip %s pin %u",
137 __print_symbolic(__entry->irqchip, kvm_irqchips),
138 __entry->pin)
139)
140
141
142
143#endif /* defined(__KVM_HAVE_IOAPIC) */
144
145#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
146#define KVM_TRACE_MMIO_READ 1
147#define KVM_TRACE_MMIO_WRITE 2
148
149#define kvm_trace_symbol_mmio \
150 { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
151 { KVM_TRACE_MMIO_READ, "read" }, \
152 { KVM_TRACE_MMIO_WRITE, "write" }
153
154TRACE_EVENT(kvm_mmio,
155 TP_PROTO(int type, int len, u64 gpa, u64 val),
156 TP_ARGS(type, len, gpa, val),
157
158 TP_STRUCT__entry(
159 __field( u32, type )
160 __field( u32, len )
161 __field( u64, gpa )
162 __field( u64, val )
163 ),
164
165 TP_fast_assign(
166 tp_assign(type, type)
167 tp_assign(len, len)
168 tp_assign(gpa, gpa)
169 tp_assign(val, val)
170 ),
171
172 TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
173 __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
174 __entry->len, __entry->gpa, __entry->val)
175)
176
177#define kvm_fpu_load_symbol \
178 {0, "unload"}, \
179 {1, "load"}
180
181TRACE_EVENT(kvm_fpu,
182 TP_PROTO(int load),
183 TP_ARGS(load),
184
185 TP_STRUCT__entry(
186 __field( u32, load )
187 ),
188
189 TP_fast_assign(
190 tp_assign(load, load)
191 ),
192
193 TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
194)
195
196TRACE_EVENT(kvm_age_page,
197 TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
198 TP_ARGS(hva, slot, ref),
199
200 TP_STRUCT__entry(
201 __field( u64, hva )
202 __field( u64, gfn )
203 __field( u8, referenced )
204 ),
205
206 TP_fast_assign(
207 tp_assign(hva, hva)
208 tp_assign(gfn,
209 slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT))
210 tp_assign(referenced, ref)
211 ),
212
213 TP_printk("hva %llx gfn %llx %s",
214 __entry->hva, __entry->gfn,
215 __entry->referenced ? "YOUNG" : "OLD")
216)
217
218#ifdef CONFIG_KVM_ASYNC_PF
219DECLARE_EVENT_CLASS(kvm_async_get_page_class,
220
221 TP_PROTO(u64 gva, u64 gfn),
222
223 TP_ARGS(gva, gfn),
224
225 TP_STRUCT__entry(
226 __field(__u64, gva)
227 __field(u64, gfn)
228 ),
229
230 TP_fast_assign(
231 tp_assign(gva, gva)
232 tp_assign(gfn, gfn)
233 ),
234
235 TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
236)
237
238DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
239
240 TP_PROTO(u64 gva, u64 gfn),
241
242 TP_ARGS(gva, gfn)
243)
244
245DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
246
247 TP_PROTO(u64 gva, u64 gfn),
248
249 TP_ARGS(gva, gfn)
250)
251
252DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
253
254 TP_PROTO(u64 token, u64 gva),
255
256 TP_ARGS(token, gva),
257
258 TP_STRUCT__entry(
259 __field(__u64, token)
260 __field(__u64, gva)
261 ),
262
263 TP_fast_assign(
264 tp_assign(token, token)
265 tp_assign(gva, gva)
266 ),
267
268 TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
269
270)
271
272DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
273
274 TP_PROTO(u64 token, u64 gva),
275
276 TP_ARGS(token, gva)
277)
278
279DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
280
281 TP_PROTO(u64 token, u64 gva),
282
283 TP_ARGS(token, gva)
284)
285
286TRACE_EVENT(
287 kvm_async_pf_completed,
288 TP_PROTO(unsigned long address, struct page *page, u64 gva),
289 TP_ARGS(address, page, gva),
290
291 TP_STRUCT__entry(
292 __field(unsigned long, address)
293 __field(pfn_t, pfn)
294 __field(u64, gva)
295 ),
296
297 TP_fast_assign(
298 tp_assign(address, address)
299 tp_assign(pfn, page ? page_to_pfn(page) : 0)
300 tp_assign(gva, gva)
301 ),
302
303 TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
304 __entry->address, __entry->pfn)
305)
306
307#endif
308
309#endif /* _TRACE_KVM_MAIN_H */
310
311/* This part must be outside protection */
312#include "../../../probes/define_trace.h"
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h b/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h
deleted file mode 100644
index 6f3d6d141215..000000000000
--- a/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h
+++ /dev/null
@@ -1,34 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM lttng
3
4#if !defined(_TRACE_LTTNG_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_LTTNG_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(lttng_metadata,
10
11 TP_PROTO(const char *str),
12
13 TP_ARGS(str),
14
15 /*
16 * Not exactly a string: more a sequence of bytes (dynamic
17 * array) without the length. This is a dummy anyway: we only
18 * use this declaration to generate an event metadata entry.
19 */
20 TP_STRUCT__entry(
21 __string( str, str )
22 ),
23
24 TP_fast_assign(
25 tp_strcpy(str, str)
26 ),
27
28 TP_printk("")
29)
30
31#endif /* _TRACE_LTTNG_H */
32
33/* This part must be outside protection */
34#include "../../../probes/define_trace.h"
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h b/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h
deleted file mode 100644
index 33f69213e424..000000000000
--- a/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h
+++ /dev/null
@@ -1,400 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM sched
3
4#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SCHED_H
6
7#include <linux/sched.h>
8#include <linux/tracepoint.h>
9
10#ifndef _TRACE_SCHED_DEF_
11#define _TRACE_SCHED_DEF_
12
13static inline long __trace_sched_switch_state(struct task_struct *p)
14{
15 long state = p->state;
16
17#ifdef CONFIG_PREEMPT
18 /*
19 * For all intents and purposes a preempted task is a running task.
20 */
21 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
22 state = TASK_RUNNING;
23#endif
24
25 return state;
26}
27
28#endif /* _TRACE_SCHED_DEF_ */
29
30/*
31 * Tracepoint for calling kthread_stop, performed to end a kthread:
32 */
33TRACE_EVENT(sched_kthread_stop,
34
35 TP_PROTO(struct task_struct *t),
36
37 TP_ARGS(t),
38
39 TP_STRUCT__entry(
40 __array_text( char, comm, TASK_COMM_LEN )
41 __field( pid_t, tid )
42 ),
43
44 TP_fast_assign(
45 tp_memcpy(comm, t->comm, TASK_COMM_LEN)
46 tp_assign(tid, t->pid)
47 ),
48
49 TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
50)
51
52/*
53 * Tracepoint for the return value of the kthread stopping:
54 */
55TRACE_EVENT(sched_kthread_stop_ret,
56
57 TP_PROTO(int ret),
58
59 TP_ARGS(ret),
60
61 TP_STRUCT__entry(
62 __field( int, ret )
63 ),
64
65 TP_fast_assign(
66 tp_assign(ret, ret)
67 ),
68
69 TP_printk("ret=%d", __entry->ret)
70)
71
72/*
73 * Tracepoint for waking up a task:
74 */
75DECLARE_EVENT_CLASS(sched_wakeup_template,
76
77 TP_PROTO(struct task_struct *p, int success),
78
79 TP_ARGS(p, success),
80
81 TP_STRUCT__entry(
82 __array_text( char, comm, TASK_COMM_LEN )
83 __field( pid_t, tid )
84 __field( int, prio )
85 __field( int, success )
86 __field( int, target_cpu )
87 ),
88
89 TP_fast_assign(
90 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
91 tp_assign(tid, p->pid)
92 tp_assign(prio, p->prio)
93 tp_assign(success, success)
94 tp_assign(target_cpu, task_cpu(p))
95 ),
96
97 TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
98 __entry->comm, __entry->tid, __entry->prio,
99 __entry->success, __entry->target_cpu)
100)
101
102DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
103 TP_PROTO(struct task_struct *p, int success),
104 TP_ARGS(p, success))
105
106/*
107 * Tracepoint for waking up a new task:
108 */
109DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
110 TP_PROTO(struct task_struct *p, int success),
111 TP_ARGS(p, success))
112
113/*
114 * Tracepoint for task switches, performed by the scheduler:
115 */
116TRACE_EVENT(sched_switch,
117
118 TP_PROTO(struct task_struct *prev,
119 struct task_struct *next),
120
121 TP_ARGS(prev, next),
122
123 TP_STRUCT__entry(
124 __array_text( char, prev_comm, TASK_COMM_LEN )
125 __field( pid_t, prev_tid )
126 __field( int, prev_prio )
127 __field( long, prev_state )
128 __array_text( char, next_comm, TASK_COMM_LEN )
129 __field( pid_t, next_tid )
130 __field( int, next_prio )
131 ),
132
133 TP_fast_assign(
134 tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
135 tp_assign(prev_tid, prev->pid)
136 tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
137 tp_assign(prev_state, __trace_sched_switch_state(prev))
138 tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
139 tp_assign(next_tid, next->pid)
140 tp_assign(next_prio, next->prio - MAX_RT_PRIO)
141 ),
142
143 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
144 __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
145 __entry->prev_state ?
146 __print_flags(__entry->prev_state, "|",
147 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
148 { 16, "Z" }, { 32, "X" }, { 64, "x" },
149 { 128, "W" }) : "R",
150 __entry->next_comm, __entry->next_tid, __entry->next_prio)
151)
152
153/*
154 * Tracepoint for a task being migrated:
155 */
156TRACE_EVENT(sched_migrate_task,
157
158 TP_PROTO(struct task_struct *p, int dest_cpu),
159
160 TP_ARGS(p, dest_cpu),
161
162 TP_STRUCT__entry(
163 __array_text( char, comm, TASK_COMM_LEN )
164 __field( pid_t, tid )
165 __field( int, prio )
166 __field( int, orig_cpu )
167 __field( int, dest_cpu )
168 ),
169
170 TP_fast_assign(
171 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
172 tp_assign(tid, p->pid)
173 tp_assign(prio, p->prio - MAX_RT_PRIO)
174 tp_assign(orig_cpu, task_cpu(p))
175 tp_assign(dest_cpu, dest_cpu)
176 ),
177
178 TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
179 __entry->comm, __entry->tid, __entry->prio,
180 __entry->orig_cpu, __entry->dest_cpu)
181)
182
183DECLARE_EVENT_CLASS(sched_process_template,
184
185 TP_PROTO(struct task_struct *p),
186
187 TP_ARGS(p),
188
189 TP_STRUCT__entry(
190 __array_text( char, comm, TASK_COMM_LEN )
191 __field( pid_t, tid )
192 __field( int, prio )
193 ),
194
195 TP_fast_assign(
196 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
197 tp_assign(tid, p->pid)
198 tp_assign(prio, p->prio - MAX_RT_PRIO)
199 ),
200
201 TP_printk("comm=%s tid=%d prio=%d",
202 __entry->comm, __entry->tid, __entry->prio)
203)
204
205/*
206 * Tracepoint for freeing a task:
207 */
208DEFINE_EVENT(sched_process_template, sched_process_free,
209 TP_PROTO(struct task_struct *p),
210 TP_ARGS(p))
211
212
213/*
214 * Tracepoint for a task exiting:
215 */
216DEFINE_EVENT(sched_process_template, sched_process_exit,
217 TP_PROTO(struct task_struct *p),
218 TP_ARGS(p))
219
220/*
221 * Tracepoint for waiting on task to unschedule:
222 */
223DEFINE_EVENT(sched_process_template, sched_wait_task,
224 TP_PROTO(struct task_struct *p),
225 TP_ARGS(p))
226
227/*
228 * Tracepoint for a waiting task:
229 */
230TRACE_EVENT(sched_process_wait,
231
232 TP_PROTO(struct pid *pid),
233
234 TP_ARGS(pid),
235
236 TP_STRUCT__entry(
237 __array_text( char, comm, TASK_COMM_LEN )
238 __field( pid_t, tid )
239 __field( int, prio )
240 ),
241
242 TP_fast_assign(
243 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
244 tp_assign(tid, pid_nr(pid))
245 tp_assign(prio, current->prio - MAX_RT_PRIO)
246 ),
247
248 TP_printk("comm=%s tid=%d prio=%d",
249 __entry->comm, __entry->tid, __entry->prio)
250)
251
252/*
253 * Tracepoint for do_fork:
254 */
255TRACE_EVENT(sched_process_fork,
256
257 TP_PROTO(struct task_struct *parent, struct task_struct *child),
258
259 TP_ARGS(parent, child),
260
261 TP_STRUCT__entry(
262 __array_text( char, parent_comm, TASK_COMM_LEN )
263 __field( pid_t, parent_tid )
264 __array_text( char, child_comm, TASK_COMM_LEN )
265 __field( pid_t, child_tid )
266 ),
267
268 TP_fast_assign(
269 tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
270 tp_assign(parent_tid, parent->pid)
271 tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
272 tp_assign(child_tid, child->pid)
273 ),
274
275 TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
276 __entry->parent_comm, __entry->parent_tid,
277 __entry->child_comm, __entry->child_tid)
278)
279
280/*
281 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
282 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
283 */
284DECLARE_EVENT_CLASS(sched_stat_template,
285
286 TP_PROTO(struct task_struct *tsk, u64 delay),
287
288 TP_ARGS(tsk, delay),
289
290 TP_STRUCT__entry(
291 __array_text( char, comm, TASK_COMM_LEN )
292 __field( pid_t, tid )
293 __field( u64, delay )
294 ),
295
296 TP_fast_assign(
297 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
298 tp_assign(tid, tsk->pid)
299 tp_assign(delay, delay)
300 )
301 TP_perf_assign(
302 __perf_count(delay)
303 ),
304
305 TP_printk("comm=%s tid=%d delay=%Lu [ns]",
306 __entry->comm, __entry->tid,
307 (unsigned long long)__entry->delay)
308)
309
310
311/*
312 * Tracepoint for accounting wait time (time the task is runnable
313 * but not actually running due to scheduler contention).
314 */
315DEFINE_EVENT(sched_stat_template, sched_stat_wait,
316 TP_PROTO(struct task_struct *tsk, u64 delay),
317 TP_ARGS(tsk, delay))
318
319/*
320 * Tracepoint for accounting sleep time (time the task is not runnable,
321 * including iowait, see below).
322 */
323DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
324 TP_PROTO(struct task_struct *tsk, u64 delay),
325 TP_ARGS(tsk, delay))
326
327/*
328 * Tracepoint for accounting iowait time (time the task is not runnable
329 * due to waiting on IO to complete).
330 */
331DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
332 TP_PROTO(struct task_struct *tsk, u64 delay),
333 TP_ARGS(tsk, delay))
334
335/*
336 * Tracepoint for accounting runtime (time the task is executing
337 * on a CPU).
338 */
339TRACE_EVENT(sched_stat_runtime,
340
341 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
342
343 TP_ARGS(tsk, runtime, vruntime),
344
345 TP_STRUCT__entry(
346 __array_text( char, comm, TASK_COMM_LEN )
347 __field( pid_t, tid )
348 __field( u64, runtime )
349 __field( u64, vruntime )
350 ),
351
352 TP_fast_assign(
353 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
354 tp_assign(tid, tsk->pid)
355 tp_assign(runtime, runtime)
356 tp_assign(vruntime, vruntime)
357 )
358 TP_perf_assign(
359 __perf_count(runtime)
360 ),
361
362 TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
363 __entry->comm, __entry->tid,
364 (unsigned long long)__entry->runtime,
365 (unsigned long long)__entry->vruntime)
366)
367
368/*
369 * Tracepoint for showing priority inheritance modifying a tasks
370 * priority.
371 */
372TRACE_EVENT(sched_pi_setprio,
373
374 TP_PROTO(struct task_struct *tsk, int newprio),
375
376 TP_ARGS(tsk, newprio),
377
378 TP_STRUCT__entry(
379 __array_text( char, comm, TASK_COMM_LEN )
380 __field( pid_t, tid )
381 __field( int, oldprio )
382 __field( int, newprio )
383 ),
384
385 TP_fast_assign(
386 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
387 tp_assign(tid, tsk->pid)
388 tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
389 tp_assign(newprio, newprio - MAX_RT_PRIO)
390 ),
391
392 TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
393 __entry->comm, __entry->tid,
394 __entry->oldprio, __entry->newprio)
395)
396
397#endif /* _TRACE_SCHED_H */
398
399/* This part must be outside protection */
400#include "../../../probes/define_trace.h"
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h b/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h
deleted file mode 100644
index a2bb95635633..000000000000
--- a/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h
+++ /dev/null
@@ -1,76 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM raw_syscalls
3#define TRACE_INCLUDE_FILE syscalls
4
5#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_EVENTS_SYSCALLS_H
7
8#include <linux/tracepoint.h>
9
10#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
11
12#ifndef _TRACE_SYSCALLS_DEF_
13#define _TRACE_SYSCALLS_DEF_
14
15#include <asm/ptrace.h>
16#include <asm/syscall.h>
17
18#endif /* _TRACE_SYSCALLS_DEF_ */
19
20TRACE_EVENT(sys_enter,
21
22 TP_PROTO(struct pt_regs *regs, long id),
23
24 TP_ARGS(regs, id),
25
26 TP_STRUCT__entry(
27 __field( long, id )
28 __array( unsigned long, args, 6 )
29 ),
30
31 TP_fast_assign(
32 tp_assign(id, id)
33 {
34 tp_memcpy(args,
35 ({
36 unsigned long args_copy[6];
37 syscall_get_arguments(current, regs,
38 0, 6, args_copy);
39 args_copy;
40 }), 6 * sizeof(unsigned long));
41 }
42 ),
43
44 TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
45 __entry->id,
46 __entry->args[0], __entry->args[1], __entry->args[2],
47 __entry->args[3], __entry->args[4], __entry->args[5])
48)
49
50TRACE_EVENT(sys_exit,
51
52 TP_PROTO(struct pt_regs *regs, long ret),
53
54 TP_ARGS(regs, ret),
55
56 TP_STRUCT__entry(
57 __field( long, id )
58 __field( long, ret )
59 ),
60
61 TP_fast_assign(
62 tp_assign(id, syscall_get_nr(current, regs))
63 tp_assign(ret, ret)
64 ),
65
66 TP_printk("NR %ld = %ld",
67 __entry->id, __entry->ret)
68)
69
70#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
71
72#endif /* _TRACE_EVENTS_SYSCALLS_H */
73
74/* This part must be outside protection */
75#include "../../../probes/define_trace.h"
76
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/block.h b/drivers/staging/lttng/instrumentation/events/mainline/block.h
deleted file mode 100644
index bf366547da25..000000000000
--- a/drivers/staging/lttng/instrumentation/events/mainline/block.h
+++ /dev/null
@@ -1,569 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM block
3
4#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BLOCK_H
6
7#include <linux/blktrace_api.h>
8#include <linux/blkdev.h>
9#include <linux/tracepoint.h>
10
11DECLARE_EVENT_CLASS(block_rq_with_error,
12
13 TP_PROTO(struct request_queue *q, struct request *rq),
14
15 TP_ARGS(q, rq),
16
17 TP_STRUCT__entry(
18 __field( dev_t, dev )
19 __field( sector_t, sector )
20 __field( unsigned int, nr_sector )
21 __field( int, errors )
22 __array( char, rwbs, 6 )
23 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
24 ),
25
26 TP_fast_assign(
27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
28 __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
29 0 : blk_rq_pos(rq);
30 __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
31 0 : blk_rq_sectors(rq);
32 __entry->errors = rq->errors;
33
34 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
35 blk_dump_cmd(__get_str(cmd), rq);
36 ),
37
38 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
39 MAJOR(__entry->dev), MINOR(__entry->dev),
40 __entry->rwbs, __get_str(cmd),
41 (unsigned long long)__entry->sector,
42 __entry->nr_sector, __entry->errors)
43);
44
45/**
46 * block_rq_abort - abort block operation request
47 * @q: queue containing the block operation request
48 * @rq: block IO operation request
49 *
50 * Called immediately after pending block IO operation request @rq in
51 * queue @q is aborted. The fields in the operation request @rq
52 * can be examined to determine which device and sectors the pending
53 * operation would access.
54 */
55DEFINE_EVENT(block_rq_with_error, block_rq_abort,
56
57 TP_PROTO(struct request_queue *q, struct request *rq),
58
59 TP_ARGS(q, rq)
60);
61
62/**
63 * block_rq_requeue - place block IO request back on a queue
64 * @q: queue holding operation
65 * @rq: block IO operation request
66 *
67 * The block operation request @rq is being placed back into queue
68 * @q. For some reason the request was not completed and needs to be
69 * put back in the queue.
70 */
71DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
72
73 TP_PROTO(struct request_queue *q, struct request *rq),
74
75 TP_ARGS(q, rq)
76);
77
78/**
79 * block_rq_complete - block IO operation completed by device driver
80 * @q: queue containing the block operation request
81 * @rq: block operations request
82 *
83 * The block_rq_complete tracepoint event indicates that some portion
84 * of operation request has been completed by the device driver. If
85 * the @rq->bio is %NULL, then there is absolutely no additional work to
86 * do for the request. If @rq->bio is non-NULL then there is
87 * additional work required to complete the request.
88 */
89DEFINE_EVENT(block_rq_with_error, block_rq_complete,
90
91 TP_PROTO(struct request_queue *q, struct request *rq),
92
93 TP_ARGS(q, rq)
94);
95
96DECLARE_EVENT_CLASS(block_rq,
97
98 TP_PROTO(struct request_queue *q, struct request *rq),
99
100 TP_ARGS(q, rq),
101
102 TP_STRUCT__entry(
103 __field( dev_t, dev )
104 __field( sector_t, sector )
105 __field( unsigned int, nr_sector )
106 __field( unsigned int, bytes )
107 __array( char, rwbs, 6 )
108 __array( char, comm, TASK_COMM_LEN )
109 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
110 ),
111
112 TP_fast_assign(
113 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
114 __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
115 0 : blk_rq_pos(rq);
116 __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
117 0 : blk_rq_sectors(rq);
118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
119 blk_rq_bytes(rq) : 0;
120
121 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
122 blk_dump_cmd(__get_str(cmd), rq);
123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
124 ),
125
126 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
127 MAJOR(__entry->dev), MINOR(__entry->dev),
128 __entry->rwbs, __entry->bytes, __get_str(cmd),
129 (unsigned long long)__entry->sector,
130 __entry->nr_sector, __entry->comm)
131);
132
133/**
134 * block_rq_insert - insert block operation request into queue
135 * @q: target queue
136 * @rq: block IO operation request
137 *
138 * Called immediately before block operation request @rq is inserted
139 * into queue @q. The fields in the operation request @rq struct can
140 * be examined to determine which device and sectors the pending
141 * operation would access.
142 */
143DEFINE_EVENT(block_rq, block_rq_insert,
144
145 TP_PROTO(struct request_queue *q, struct request *rq),
146
147 TP_ARGS(q, rq)
148);
149
150/**
151 * block_rq_issue - issue pending block IO request operation to device driver
152 * @q: queue holding operation
153 * @rq: block IO operation operation request
154 *
155 * Called when block operation request @rq from queue @q is sent to a
156 * device driver for processing.
157 */
158DEFINE_EVENT(block_rq, block_rq_issue,
159
160 TP_PROTO(struct request_queue *q, struct request *rq),
161
162 TP_ARGS(q, rq)
163);
164
165/**
166 * block_bio_bounce - used bounce buffer when processing block operation
167 * @q: queue holding the block operation
168 * @bio: block operation
169 *
170 * A bounce buffer was used to handle the block operation @bio in @q.
171 * This occurs when hardware limitations prevent a direct transfer of
172 * data between the @bio data memory area and the IO device. Use of a
173 * bounce buffer requires extra copying of data and decreases
174 * performance.
175 */
176TRACE_EVENT(block_bio_bounce,
177
178 TP_PROTO(struct request_queue *q, struct bio *bio),
179
180 TP_ARGS(q, bio),
181
182 TP_STRUCT__entry(
183 __field( dev_t, dev )
184 __field( sector_t, sector )
185 __field( unsigned int, nr_sector )
186 __array( char, rwbs, 6 )
187 __array( char, comm, TASK_COMM_LEN )
188 ),
189
190 TP_fast_assign(
191 __entry->dev = bio->bi_bdev ?
192 bio->bi_bdev->bd_dev : 0;
193 __entry->sector = bio->bi_sector;
194 __entry->nr_sector = bio->bi_size >> 9;
195 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
196 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
197 ),
198
199 TP_printk("%d,%d %s %llu + %u [%s]",
200 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
201 (unsigned long long)__entry->sector,
202 __entry->nr_sector, __entry->comm)
203);
204
205/**
206 * block_bio_complete - completed all work on the block operation
207 * @q: queue holding the block operation
208 * @bio: block operation completed
209 * @error: io error value
210 *
211 * This tracepoint indicates there is no further work to do on this
212 * block IO operation @bio.
213 */
214TRACE_EVENT(block_bio_complete,
215
216 TP_PROTO(struct request_queue *q, struct bio *bio, int error),
217
218 TP_ARGS(q, bio, error),
219
220 TP_STRUCT__entry(
221 __field( dev_t, dev )
222 __field( sector_t, sector )
223 __field( unsigned, nr_sector )
224 __field( int, error )
225 __array( char, rwbs, 6 )
226 ),
227
228 TP_fast_assign(
229 __entry->dev = bio->bi_bdev->bd_dev;
230 __entry->sector = bio->bi_sector;
231 __entry->nr_sector = bio->bi_size >> 9;
232 __entry->error = error;
233 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
234 ),
235
236 TP_printk("%d,%d %s %llu + %u [%d]",
237 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
238 (unsigned long long)__entry->sector,
239 __entry->nr_sector, __entry->error)
240);
241
242DECLARE_EVENT_CLASS(block_bio,
243
244 TP_PROTO(struct request_queue *q, struct bio *bio),
245
246 TP_ARGS(q, bio),
247
248 TP_STRUCT__entry(
249 __field( dev_t, dev )
250 __field( sector_t, sector )
251 __field( unsigned int, nr_sector )
252 __array( char, rwbs, 6 )
253 __array( char, comm, TASK_COMM_LEN )
254 ),
255
256 TP_fast_assign(
257 __entry->dev = bio->bi_bdev->bd_dev;
258 __entry->sector = bio->bi_sector;
259 __entry->nr_sector = bio->bi_size >> 9;
260 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
261 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
262 ),
263
264 TP_printk("%d,%d %s %llu + %u [%s]",
265 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
266 (unsigned long long)__entry->sector,
267 __entry->nr_sector, __entry->comm)
268);
269
270/**
271 * block_bio_backmerge - merging block operation to the end of an existing operation
272 * @q: queue holding operation
273 * @bio: new block operation to merge
274 *
275 * Merging block request @bio to the end of an existing block request
276 * in queue @q.
277 */
278DEFINE_EVENT(block_bio, block_bio_backmerge,
279
280 TP_PROTO(struct request_queue *q, struct bio *bio),
281
282 TP_ARGS(q, bio)
283);
284
285/**
286 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
287 * @q: queue holding operation
288 * @bio: new block operation to merge
289 *
290 * Merging block IO operation @bio to the beginning of an existing block
291 * operation in queue @q.
292 */
293DEFINE_EVENT(block_bio, block_bio_frontmerge,
294
295 TP_PROTO(struct request_queue *q, struct bio *bio),
296
297 TP_ARGS(q, bio)
298);
299
300/**
301 * block_bio_queue - putting new block IO operation in queue
302 * @q: queue holding operation
303 * @bio: new block operation
304 *
305 * About to place the block IO operation @bio into queue @q.
306 */
307DEFINE_EVENT(block_bio, block_bio_queue,
308
309 TP_PROTO(struct request_queue *q, struct bio *bio),
310
311 TP_ARGS(q, bio)
312);
313
314DECLARE_EVENT_CLASS(block_get_rq,
315
316 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
317
318 TP_ARGS(q, bio, rw),
319
320 TP_STRUCT__entry(
321 __field( dev_t, dev )
322 __field( sector_t, sector )
323 __field( unsigned int, nr_sector )
324 __array( char, rwbs, 6 )
325 __array( char, comm, TASK_COMM_LEN )
326 ),
327
328 TP_fast_assign(
329 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
330 __entry->sector = bio ? bio->bi_sector : 0;
331 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
332 blk_fill_rwbs(__entry->rwbs,
333 bio ? bio->bi_rw : 0, __entry->nr_sector);
334 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
335 ),
336
337 TP_printk("%d,%d %s %llu + %u [%s]",
338 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
339 (unsigned long long)__entry->sector,
340 __entry->nr_sector, __entry->comm)
341);
342
343/**
344 * block_getrq - get a free request entry in queue for block IO operations
345 * @q: queue for operations
346 * @bio: pending block IO operation
347 * @rw: low bit indicates a read (%0) or a write (%1)
348 *
349 * A request struct for queue @q has been allocated to handle the
350 * block IO operation @bio.
351 */
352DEFINE_EVENT(block_get_rq, block_getrq,
353
354 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
355
356 TP_ARGS(q, bio, rw)
357);
358
359/**
360 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
361 * @q: queue for operation
362 * @bio: pending block IO operation
363 * @rw: low bit indicates a read (%0) or a write (%1)
364 *
365 * In the case where a request struct cannot be provided for queue @q
366 * the process needs to wait for an request struct to become
367 * available. This tracepoint event is generated each time the
368 * process goes to sleep waiting for request struct become available.
369 */
370DEFINE_EVENT(block_get_rq, block_sleeprq,
371
372 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
373
374 TP_ARGS(q, bio, rw)
375);
376
377/**
378 * block_plug - keep operations requests in request queue
379 * @q: request queue to plug
380 *
381 * Plug the request queue @q. Do not allow block operation requests
382 * to be sent to the device driver. Instead, accumulate requests in
383 * the queue to improve throughput performance of the block device.
384 */
385TRACE_EVENT(block_plug,
386
387 TP_PROTO(struct request_queue *q),
388
389 TP_ARGS(q),
390
391 TP_STRUCT__entry(
392 __array( char, comm, TASK_COMM_LEN )
393 ),
394
395 TP_fast_assign(
396 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
397 ),
398
399 TP_printk("[%s]", __entry->comm)
400);
401
402DECLARE_EVENT_CLASS(block_unplug,
403
404 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
405
406 TP_ARGS(q, depth, explicit),
407
408 TP_STRUCT__entry(
409 __field( int, nr_rq )
410 __array( char, comm, TASK_COMM_LEN )
411 ),
412
413 TP_fast_assign(
414 __entry->nr_rq = depth;
415 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
416 ),
417
418 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
419);
420
421/**
422 * block_unplug - release of operations requests in request queue
423 * @q: request queue to unplug
424 * @depth: number of requests just added to the queue
425 * @explicit: whether this was an explicit unplug, or one from schedule()
426 *
427 * Unplug request queue @q because device driver is scheduled to work
428 * on elements in the request queue.
429 */
430DEFINE_EVENT(block_unplug, block_unplug,
431
432 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
433
434 TP_ARGS(q, depth, explicit)
435);
436
437/**
438 * block_split - split a single bio struct into two bio structs
439 * @q: queue containing the bio
440 * @bio: block operation being split
441 * @new_sector: The starting sector for the new bio
442 *
443 * The bio request @bio in request queue @q needs to be split into two
444 * bio requests. The newly created @bio request starts at
445 * @new_sector. This split may be required due to hardware limitation
446 * such as operation crossing device boundaries in a RAID system.
447 */
448TRACE_EVENT(block_split,
449
450 TP_PROTO(struct request_queue *q, struct bio *bio,
451 unsigned int new_sector),
452
453 TP_ARGS(q, bio, new_sector),
454
455 TP_STRUCT__entry(
456 __field( dev_t, dev )
457 __field( sector_t, sector )
458 __field( sector_t, new_sector )
459 __array( char, rwbs, 6 )
460 __array( char, comm, TASK_COMM_LEN )
461 ),
462
463 TP_fast_assign(
464 __entry->dev = bio->bi_bdev->bd_dev;
465 __entry->sector = bio->bi_sector;
466 __entry->new_sector = new_sector;
467 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
468 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
469 ),
470
471 TP_printk("%d,%d %s %llu / %llu [%s]",
472 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
473 (unsigned long long)__entry->sector,
474 (unsigned long long)__entry->new_sector,
475 __entry->comm)
476);
477
478/**
479 * block_bio_remap - map request for a logical device to the raw device
480 * @q: queue holding the operation
481 * @bio: revised operation
482 * @dev: device for the operation
483 * @from: original sector for the operation
484 *
485 * An operation for a logical device has been mapped to the
486 * raw block device.
487 */
488TRACE_EVENT(block_bio_remap,
489
490 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
491 sector_t from),
492
493 TP_ARGS(q, bio, dev, from),
494
495 TP_STRUCT__entry(
496 __field( dev_t, dev )
497 __field( sector_t, sector )
498 __field( unsigned int, nr_sector )
499 __field( dev_t, old_dev )
500 __field( sector_t, old_sector )
501 __array( char, rwbs, 6 )
502 ),
503
504 TP_fast_assign(
505 __entry->dev = bio->bi_bdev->bd_dev;
506 __entry->sector = bio->bi_sector;
507 __entry->nr_sector = bio->bi_size >> 9;
508 __entry->old_dev = dev;
509 __entry->old_sector = from;
510 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
511 ),
512
513 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
514 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
515 (unsigned long long)__entry->sector,
516 __entry->nr_sector,
517 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
518 (unsigned long long)__entry->old_sector)
519);
520
521/**
522 * block_rq_remap - map request for a block operation request
523 * @q: queue holding the operation
524 * @rq: block IO operation request
525 * @dev: device for the operation
526 * @from: original sector for the operation
527 *
528 * The block operation request @rq in @q has been remapped. The block
529 * operation request @rq holds the current information and @from hold
530 * the original sector.
531 */
532TRACE_EVENT(block_rq_remap,
533
534 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
535 sector_t from),
536
537 TP_ARGS(q, rq, dev, from),
538
539 TP_STRUCT__entry(
540 __field( dev_t, dev )
541 __field( sector_t, sector )
542 __field( unsigned int, nr_sector )
543 __field( dev_t, old_dev )
544 __field( sector_t, old_sector )
545 __array( char, rwbs, 6 )
546 ),
547
548 TP_fast_assign(
549 __entry->dev = disk_devt(rq->rq_disk);
550 __entry->sector = blk_rq_pos(rq);
551 __entry->nr_sector = blk_rq_sectors(rq);
552 __entry->old_dev = dev;
553 __entry->old_sector = from;
554 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
555 ),
556
557 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
558 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
559 (unsigned long long)__entry->sector,
560 __entry->nr_sector,
561 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
562 (unsigned long long)__entry->old_sector)
563);
564
565#endif /* _TRACE_BLOCK_H */
566
567/* This part must be outside protection */
568#include <trace/define_trace.h>
569
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/irq.h b/drivers/staging/lttng/instrumentation/events/mainline/irq.h
deleted file mode 100644
index 1c09820df585..000000000000
--- a/drivers/staging/lttng/instrumentation/events/mainline/irq.h
+++ /dev/null
@@ -1,150 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM irq
3
4#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_IRQ_H
6
7#include <linux/tracepoint.h>
8
9struct irqaction;
10struct softirq_action;
11
12#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
13#define show_softirq_name(val) \
14 __print_symbolic(val, \
15 softirq_name(HI), \
16 softirq_name(TIMER), \
17 softirq_name(NET_TX), \
18 softirq_name(NET_RX), \
19 softirq_name(BLOCK), \
20 softirq_name(BLOCK_IOPOLL), \
21 softirq_name(TASKLET), \
22 softirq_name(SCHED), \
23 softirq_name(HRTIMER), \
24 softirq_name(RCU))
25
26/**
27 * irq_handler_entry - called immediately before the irq action handler
28 * @irq: irq number
29 * @action: pointer to struct irqaction
30 *
31 * The struct irqaction pointed to by @action contains various
32 * information about the handler, including the device name,
33 * @action->name, and the device id, @action->dev_id. When used in
34 * conjunction with the irq_handler_exit tracepoint, we can figure
35 * out irq handler latencies.
36 */
37TRACE_EVENT(irq_handler_entry,
38
39 TP_PROTO(int irq, struct irqaction *action),
40
41 TP_ARGS(irq, action),
42
43 TP_STRUCT__entry(
44 __field( int, irq )
45 __string( name, action->name )
46 ),
47
48 TP_fast_assign(
49 __entry->irq = irq;
50 __assign_str(name, action->name);
51 ),
52
53 TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
54);
55
56/**
57 * irq_handler_exit - called immediately after the irq action handler returns
58 * @irq: irq number
59 * @action: pointer to struct irqaction
60 * @ret: return value
61 *
62 * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
63 * @action->handler scuccessully handled this irq. Otherwise, the irq might be
64 * a shared irq line, or the irq was not handled successfully. Can be used in
65 * conjunction with the irq_handler_entry to understand irq handler latencies.
66 */
67TRACE_EVENT(irq_handler_exit,
68
69 TP_PROTO(int irq, struct irqaction *action, int ret),
70
71 TP_ARGS(irq, action, ret),
72
73 TP_STRUCT__entry(
74 __field( int, irq )
75 __field( int, ret )
76 ),
77
78 TP_fast_assign(
79 __entry->irq = irq;
80 __entry->ret = ret;
81 ),
82
83 TP_printk("irq=%d ret=%s",
84 __entry->irq, __entry->ret ? "handled" : "unhandled")
85);
86
87DECLARE_EVENT_CLASS(softirq,
88
89 TP_PROTO(unsigned int vec_nr),
90
91 TP_ARGS(vec_nr),
92
93 TP_STRUCT__entry(
94 __field( unsigned int, vec )
95 ),
96
97 TP_fast_assign(
98 __entry->vec = vec_nr;
99 ),
100
101 TP_printk("vec=%u [action=%s]", __entry->vec,
102 show_softirq_name(__entry->vec))
103);
104
105/**
106 * softirq_entry - called immediately before the softirq handler
107 * @vec_nr: softirq vector number
108 *
109 * When used in combination with the softirq_exit tracepoint
110 * we can determine the softirq handler runtine.
111 */
112DEFINE_EVENT(softirq, softirq_entry,
113
114 TP_PROTO(unsigned int vec_nr),
115
116 TP_ARGS(vec_nr)
117);
118
119/**
120 * softirq_exit - called immediately after the softirq handler returns
121 * @vec_nr: softirq vector number
122 *
123 * When used in combination with the softirq_entry tracepoint
124 * we can determine the softirq handler runtine.
125 */
126DEFINE_EVENT(softirq, softirq_exit,
127
128 TP_PROTO(unsigned int vec_nr),
129
130 TP_ARGS(vec_nr)
131);
132
133/**
134 * softirq_raise - called immediately when a softirq is raised
135 * @vec_nr: softirq vector number
136 *
137 * When used in combination with the softirq_entry tracepoint
138 * we can determine the softirq raise to run latency.
139 */
140DEFINE_EVENT(softirq, softirq_raise,
141
142 TP_PROTO(unsigned int vec_nr),
143
144 TP_ARGS(vec_nr)
145);
146
147#endif /* _TRACE_IRQ_H */
148
149/* This part must be outside protection */
150#include <trace/define_trace.h>
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/kvm.h b/drivers/staging/lttng/instrumentation/events/mainline/kvm.h
deleted file mode 100644
index 46e3cd8e197a..000000000000
--- a/drivers/staging/lttng/instrumentation/events/mainline/kvm.h
+++ /dev/null
@@ -1,312 +0,0 @@
1#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVM_MAIN_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvm
8
9#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
10
11#define kvm_trace_exit_reason \
12 ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
13 ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
14 ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
15 ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
16 ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
17
18TRACE_EVENT(kvm_userspace_exit,
19 TP_PROTO(__u32 reason, int errno),
20 TP_ARGS(reason, errno),
21
22 TP_STRUCT__entry(
23 __field( __u32, reason )
24 __field( int, errno )
25 ),
26
27 TP_fast_assign(
28 __entry->reason = reason;
29 __entry->errno = errno;
30 ),
31
32 TP_printk("reason %s (%d)",
33 __entry->errno < 0 ?
34 (__entry->errno == -EINTR ? "restart" : "error") :
35 __print_symbolic(__entry->reason, kvm_trace_exit_reason),
36 __entry->errno < 0 ? -__entry->errno : __entry->reason)
37);
38
39#if defined(__KVM_HAVE_IOAPIC)
40TRACE_EVENT(kvm_set_irq,
41 TP_PROTO(unsigned int gsi, int level, int irq_source_id),
42 TP_ARGS(gsi, level, irq_source_id),
43
44 TP_STRUCT__entry(
45 __field( unsigned int, gsi )
46 __field( int, level )
47 __field( int, irq_source_id )
48 ),
49
50 TP_fast_assign(
51 __entry->gsi = gsi;
52 __entry->level = level;
53 __entry->irq_source_id = irq_source_id;
54 ),
55
56 TP_printk("gsi %u level %d source %d",
57 __entry->gsi, __entry->level, __entry->irq_source_id)
58);
59
60#define kvm_deliver_mode \
61 {0x0, "Fixed"}, \
62 {0x1, "LowPrio"}, \
63 {0x2, "SMI"}, \
64 {0x3, "Res3"}, \
65 {0x4, "NMI"}, \
66 {0x5, "INIT"}, \
67 {0x6, "SIPI"}, \
68 {0x7, "ExtINT"}
69
70TRACE_EVENT(kvm_ioapic_set_irq,
71 TP_PROTO(__u64 e, int pin, bool coalesced),
72 TP_ARGS(e, pin, coalesced),
73
74 TP_STRUCT__entry(
75 __field( __u64, e )
76 __field( int, pin )
77 __field( bool, coalesced )
78 ),
79
80 TP_fast_assign(
81 __entry->e = e;
82 __entry->pin = pin;
83 __entry->coalesced = coalesced;
84 ),
85
86 TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
87 __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
88 __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
89 (__entry->e & (1<<11)) ? "logical" : "physical",
90 (__entry->e & (1<<15)) ? "level" : "edge",
91 (__entry->e & (1<<16)) ? "|masked" : "",
92 __entry->coalesced ? " (coalesced)" : "")
93);
94
95TRACE_EVENT(kvm_msi_set_irq,
96 TP_PROTO(__u64 address, __u64 data),
97 TP_ARGS(address, data),
98
99 TP_STRUCT__entry(
100 __field( __u64, address )
101 __field( __u64, data )
102 ),
103
104 TP_fast_assign(
105 __entry->address = address;
106 __entry->data = data;
107 ),
108
109 TP_printk("dst %u vec %x (%s|%s|%s%s)",
110 (u8)(__entry->address >> 12), (u8)__entry->data,
111 __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
112 (__entry->address & (1<<2)) ? "logical" : "physical",
113 (__entry->data & (1<<15)) ? "level" : "edge",
114 (__entry->address & (1<<3)) ? "|rh" : "")
115);
116
117#define kvm_irqchips \
118 {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
119 {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
120 {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
121
122TRACE_EVENT(kvm_ack_irq,
123 TP_PROTO(unsigned int irqchip, unsigned int pin),
124 TP_ARGS(irqchip, pin),
125
126 TP_STRUCT__entry(
127 __field( unsigned int, irqchip )
128 __field( unsigned int, pin )
129 ),
130
131 TP_fast_assign(
132 __entry->irqchip = irqchip;
133 __entry->pin = pin;
134 ),
135
136 TP_printk("irqchip %s pin %u",
137 __print_symbolic(__entry->irqchip, kvm_irqchips),
138 __entry->pin)
139);
140
141
142
143#endif /* defined(__KVM_HAVE_IOAPIC) */
144
145#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
146#define KVM_TRACE_MMIO_READ 1
147#define KVM_TRACE_MMIO_WRITE 2
148
149#define kvm_trace_symbol_mmio \
150 { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
151 { KVM_TRACE_MMIO_READ, "read" }, \
152 { KVM_TRACE_MMIO_WRITE, "write" }
153
154TRACE_EVENT(kvm_mmio,
155 TP_PROTO(int type, int len, u64 gpa, u64 val),
156 TP_ARGS(type, len, gpa, val),
157
158 TP_STRUCT__entry(
159 __field( u32, type )
160 __field( u32, len )
161 __field( u64, gpa )
162 __field( u64, val )
163 ),
164
165 TP_fast_assign(
166 __entry->type = type;
167 __entry->len = len;
168 __entry->gpa = gpa;
169 __entry->val = val;
170 ),
171
172 TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
173 __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
174 __entry->len, __entry->gpa, __entry->val)
175);
176
177#define kvm_fpu_load_symbol \
178 {0, "unload"}, \
179 {1, "load"}
180
181TRACE_EVENT(kvm_fpu,
182 TP_PROTO(int load),
183 TP_ARGS(load),
184
185 TP_STRUCT__entry(
186 __field( u32, load )
187 ),
188
189 TP_fast_assign(
190 __entry->load = load;
191 ),
192
193 TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
194);
195
196TRACE_EVENT(kvm_age_page,
197 TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
198 TP_ARGS(hva, slot, ref),
199
200 TP_STRUCT__entry(
201 __field( u64, hva )
202 __field( u64, gfn )
203 __field( u8, referenced )
204 ),
205
206 TP_fast_assign(
207 __entry->hva = hva;
208 __entry->gfn =
209 slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
210 __entry->referenced = ref;
211 ),
212
213 TP_printk("hva %llx gfn %llx %s",
214 __entry->hva, __entry->gfn,
215 __entry->referenced ? "YOUNG" : "OLD")
216);
217
218#ifdef CONFIG_KVM_ASYNC_PF
219DECLARE_EVENT_CLASS(kvm_async_get_page_class,
220
221 TP_PROTO(u64 gva, u64 gfn),
222
223 TP_ARGS(gva, gfn),
224
225 TP_STRUCT__entry(
226 __field(__u64, gva)
227 __field(u64, gfn)
228 ),
229
230 TP_fast_assign(
231 __entry->gva = gva;
232 __entry->gfn = gfn;
233 ),
234
235 TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
236);
237
238DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
239
240 TP_PROTO(u64 gva, u64 gfn),
241
242 TP_ARGS(gva, gfn)
243);
244
245DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
246
247 TP_PROTO(u64 gva, u64 gfn),
248
249 TP_ARGS(gva, gfn)
250);
251
252DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
253
254 TP_PROTO(u64 token, u64 gva),
255
256 TP_ARGS(token, gva),
257
258 TP_STRUCT__entry(
259 __field(__u64, token)
260 __field(__u64, gva)
261 ),
262
263 TP_fast_assign(
264 __entry->token = token;
265 __entry->gva = gva;
266 ),
267
268 TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
269
270);
271
272DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
273
274 TP_PROTO(u64 token, u64 gva),
275
276 TP_ARGS(token, gva)
277);
278
279DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
280
281 TP_PROTO(u64 token, u64 gva),
282
283 TP_ARGS(token, gva)
284);
285
286TRACE_EVENT(
287 kvm_async_pf_completed,
288 TP_PROTO(unsigned long address, struct page *page, u64 gva),
289 TP_ARGS(address, page, gva),
290
291 TP_STRUCT__entry(
292 __field(unsigned long, address)
293 __field(pfn_t, pfn)
294 __field(u64, gva)
295 ),
296
297 TP_fast_assign(
298 __entry->address = address;
299 __entry->pfn = page ? page_to_pfn(page) : 0;
300 __entry->gva = gva;
301 ),
302
303 TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
304 __entry->address, __entry->pfn)
305);
306
307#endif
308
309#endif /* _TRACE_KVM_MAIN_H */
310
311/* This part must be outside protection */
312#include <trace/define_trace.h>
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/sched.h b/drivers/staging/lttng/instrumentation/events/mainline/sched.h
deleted file mode 100644
index f6334782a593..000000000000
--- a/drivers/staging/lttng/instrumentation/events/mainline/sched.h
+++ /dev/null
@@ -1,397 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM sched
3
4#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SCHED_H
6
7#include <linux/sched.h>
8#include <linux/tracepoint.h>
9
10/*
11 * Tracepoint for calling kthread_stop, performed to end a kthread:
12 */
13TRACE_EVENT(sched_kthread_stop,
14
15 TP_PROTO(struct task_struct *t),
16
17 TP_ARGS(t),
18
19 TP_STRUCT__entry(
20 __array( char, comm, TASK_COMM_LEN )
21 __field( pid_t, pid )
22 ),
23
24 TP_fast_assign(
25 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
26 __entry->pid = t->pid;
27 ),
28
29 TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
30);
31
32/*
33 * Tracepoint for the return value of the kthread stopping:
34 */
35TRACE_EVENT(sched_kthread_stop_ret,
36
37 TP_PROTO(int ret),
38
39 TP_ARGS(ret),
40
41 TP_STRUCT__entry(
42 __field( int, ret )
43 ),
44
45 TP_fast_assign(
46 __entry->ret = ret;
47 ),
48
49 TP_printk("ret=%d", __entry->ret)
50);
51
52/*
53 * Tracepoint for waking up a task:
54 */
55DECLARE_EVENT_CLASS(sched_wakeup_template,
56
57 TP_PROTO(struct task_struct *p, int success),
58
59 TP_ARGS(p, success),
60
61 TP_STRUCT__entry(
62 __array( char, comm, TASK_COMM_LEN )
63 __field( pid_t, pid )
64 __field( int, prio )
65 __field( int, success )
66 __field( int, target_cpu )
67 ),
68
69 TP_fast_assign(
70 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
71 __entry->pid = p->pid;
72 __entry->prio = p->prio;
73 __entry->success = success;
74 __entry->target_cpu = task_cpu(p);
75 ),
76
77 TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
78 __entry->comm, __entry->pid, __entry->prio,
79 __entry->success, __entry->target_cpu)
80);
81
82DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
83 TP_PROTO(struct task_struct *p, int success),
84 TP_ARGS(p, success));
85
86/*
87 * Tracepoint for waking up a new task:
88 */
89DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
90 TP_PROTO(struct task_struct *p, int success),
91 TP_ARGS(p, success));
92
93#ifdef CREATE_TRACE_POINTS
94static inline long __trace_sched_switch_state(struct task_struct *p)
95{
96 long state = p->state;
97
98#ifdef CONFIG_PREEMPT
99 /*
100 * For all intents and purposes a preempted task is a running task.
101 */
102 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
103 state = TASK_RUNNING;
104#endif
105
106 return state;
107}
108#endif
109
110/*
111 * Tracepoint for task switches, performed by the scheduler:
112 */
113TRACE_EVENT(sched_switch,
114
115 TP_PROTO(struct task_struct *prev,
116 struct task_struct *next),
117
118 TP_ARGS(prev, next),
119
120 TP_STRUCT__entry(
121 __array( char, prev_comm, TASK_COMM_LEN )
122 __field( pid_t, prev_pid )
123 __field( int, prev_prio )
124 __field( long, prev_state )
125 __array( char, next_comm, TASK_COMM_LEN )
126 __field( pid_t, next_pid )
127 __field( int, next_prio )
128 ),
129
130 TP_fast_assign(
131 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
132 __entry->prev_pid = prev->pid;
133 __entry->prev_prio = prev->prio;
134 __entry->prev_state = __trace_sched_switch_state(prev);
135 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
136 __entry->next_pid = next->pid;
137 __entry->next_prio = next->prio;
138 ),
139
140 TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
141 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
142 __entry->prev_state ?
143 __print_flags(__entry->prev_state, "|",
144 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
145 { 16, "Z" }, { 32, "X" }, { 64, "x" },
146 { 128, "W" }) : "R",
147 __entry->next_comm, __entry->next_pid, __entry->next_prio)
148);
149
150/*
151 * Tracepoint for a task being migrated:
152 */
153TRACE_EVENT(sched_migrate_task,
154
155 TP_PROTO(struct task_struct *p, int dest_cpu),
156
157 TP_ARGS(p, dest_cpu),
158
159 TP_STRUCT__entry(
160 __array( char, comm, TASK_COMM_LEN )
161 __field( pid_t, pid )
162 __field( int, prio )
163 __field( int, orig_cpu )
164 __field( int, dest_cpu )
165 ),
166
167 TP_fast_assign(
168 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
169 __entry->pid = p->pid;
170 __entry->prio = p->prio;
171 __entry->orig_cpu = task_cpu(p);
172 __entry->dest_cpu = dest_cpu;
173 ),
174
175 TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
176 __entry->comm, __entry->pid, __entry->prio,
177 __entry->orig_cpu, __entry->dest_cpu)
178);
179
180DECLARE_EVENT_CLASS(sched_process_template,
181
182 TP_PROTO(struct task_struct *p),
183
184 TP_ARGS(p),
185
186 TP_STRUCT__entry(
187 __array( char, comm, TASK_COMM_LEN )
188 __field( pid_t, pid )
189 __field( int, prio )
190 ),
191
192 TP_fast_assign(
193 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
194 __entry->pid = p->pid;
195 __entry->prio = p->prio;
196 ),
197
198 TP_printk("comm=%s pid=%d prio=%d",
199 __entry->comm, __entry->pid, __entry->prio)
200);
201
202/*
203 * Tracepoint for freeing a task:
204 */
205DEFINE_EVENT(sched_process_template, sched_process_free,
206 TP_PROTO(struct task_struct *p),
207 TP_ARGS(p));
208
209
210/*
211 * Tracepoint for a task exiting:
212 */
213DEFINE_EVENT(sched_process_template, sched_process_exit,
214 TP_PROTO(struct task_struct *p),
215 TP_ARGS(p));
216
217/*
218 * Tracepoint for waiting on task to unschedule:
219 */
220DEFINE_EVENT(sched_process_template, sched_wait_task,
221 TP_PROTO(struct task_struct *p),
222 TP_ARGS(p));
223
224/*
225 * Tracepoint for a waiting task:
226 */
227TRACE_EVENT(sched_process_wait,
228
229 TP_PROTO(struct pid *pid),
230
231 TP_ARGS(pid),
232
233 TP_STRUCT__entry(
234 __array( char, comm, TASK_COMM_LEN )
235 __field( pid_t, pid )
236 __field( int, prio )
237 ),
238
239 TP_fast_assign(
240 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
241 __entry->pid = pid_nr(pid);
242 __entry->prio = current->prio;
243 ),
244
245 TP_printk("comm=%s pid=%d prio=%d",
246 __entry->comm, __entry->pid, __entry->prio)
247);
248
249/*
250 * Tracepoint for do_fork:
251 */
252TRACE_EVENT(sched_process_fork,
253
254 TP_PROTO(struct task_struct *parent, struct task_struct *child),
255
256 TP_ARGS(parent, child),
257
258 TP_STRUCT__entry(
259 __array( char, parent_comm, TASK_COMM_LEN )
260 __field( pid_t, parent_pid )
261 __array( char, child_comm, TASK_COMM_LEN )
262 __field( pid_t, child_pid )
263 ),
264
265 TP_fast_assign(
266 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
267 __entry->parent_pid = parent->pid;
268 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
269 __entry->child_pid = child->pid;
270 ),
271
272 TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
273 __entry->parent_comm, __entry->parent_pid,
274 __entry->child_comm, __entry->child_pid)
275);
276
277/*
278 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
279 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
280 */
281DECLARE_EVENT_CLASS(sched_stat_template,
282
283 TP_PROTO(struct task_struct *tsk, u64 delay),
284
285 TP_ARGS(tsk, delay),
286
287 TP_STRUCT__entry(
288 __array( char, comm, TASK_COMM_LEN )
289 __field( pid_t, pid )
290 __field( u64, delay )
291 ),
292
293 TP_fast_assign(
294 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
295 __entry->pid = tsk->pid;
296 __entry->delay = delay;
297 )
298 TP_perf_assign(
299 __perf_count(delay);
300 ),
301
302 TP_printk("comm=%s pid=%d delay=%Lu [ns]",
303 __entry->comm, __entry->pid,
304 (unsigned long long)__entry->delay)
305);
306
307
308/*
309 * Tracepoint for accounting wait time (time the task is runnable
310 * but not actually running due to scheduler contention).
311 */
312DEFINE_EVENT(sched_stat_template, sched_stat_wait,
313 TP_PROTO(struct task_struct *tsk, u64 delay),
314 TP_ARGS(tsk, delay));
315
316/*
317 * Tracepoint for accounting sleep time (time the task is not runnable,
318 * including iowait, see below).
319 */
320DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
321 TP_PROTO(struct task_struct *tsk, u64 delay),
322 TP_ARGS(tsk, delay));
323
324/*
325 * Tracepoint for accounting iowait time (time the task is not runnable
326 * due to waiting on IO to complete).
327 */
328DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
329 TP_PROTO(struct task_struct *tsk, u64 delay),
330 TP_ARGS(tsk, delay));
331
332/*
333 * Tracepoint for accounting runtime (time the task is executing
334 * on a CPU).
335 */
336TRACE_EVENT(sched_stat_runtime,
337
338 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
339
340 TP_ARGS(tsk, runtime, vruntime),
341
342 TP_STRUCT__entry(
343 __array( char, comm, TASK_COMM_LEN )
344 __field( pid_t, pid )
345 __field( u64, runtime )
346 __field( u64, vruntime )
347 ),
348
349 TP_fast_assign(
350 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
351 __entry->pid = tsk->pid;
352 __entry->runtime = runtime;
353 __entry->vruntime = vruntime;
354 )
355 TP_perf_assign(
356 __perf_count(runtime);
357 ),
358
359 TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
360 __entry->comm, __entry->pid,
361 (unsigned long long)__entry->runtime,
362 (unsigned long long)__entry->vruntime)
363);
364
365/*
366 * Tracepoint for showing priority inheritance modifying a tasks
367 * priority.
368 */
369TRACE_EVENT(sched_pi_setprio,
370
371 TP_PROTO(struct task_struct *tsk, int newprio),
372
373 TP_ARGS(tsk, newprio),
374
375 TP_STRUCT__entry(
376 __array( char, comm, TASK_COMM_LEN )
377 __field( pid_t, pid )
378 __field( int, oldprio )
379 __field( int, newprio )
380 ),
381
382 TP_fast_assign(
383 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
384 __entry->pid = tsk->pid;
385 __entry->oldprio = tsk->prio;
386 __entry->newprio = newprio;
387 ),
388
389 TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
390 __entry->comm, __entry->pid,
391 __entry->oldprio, __entry->newprio)
392);
393
394#endif /* _TRACE_SCHED_H */
395
396/* This part must be outside protection */
397#include <trace/define_trace.h>
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h b/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h
deleted file mode 100644
index 5a4c04a75b3d..000000000000
--- a/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h
+++ /dev/null
@@ -1,75 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM raw_syscalls
3#define TRACE_INCLUDE_FILE syscalls
4
5#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_EVENTS_SYSCALLS_H
7
8#include <linux/tracepoint.h>
9
10#include <asm/ptrace.h>
11#include <asm/syscall.h>
12
13
14#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
15
16extern void syscall_regfunc(void);
17extern void syscall_unregfunc(void);
18
19TRACE_EVENT_FN(sys_enter,
20
21 TP_PROTO(struct pt_regs *regs, long id),
22
23 TP_ARGS(regs, id),
24
25 TP_STRUCT__entry(
26 __field( long, id )
27 __array( unsigned long, args, 6 )
28 ),
29
30 TP_fast_assign(
31 __entry->id = id;
32 syscall_get_arguments(current, regs, 0, 6, __entry->args);
33 ),
34
35 TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
36 __entry->id,
37 __entry->args[0], __entry->args[1], __entry->args[2],
38 __entry->args[3], __entry->args[4], __entry->args[5]),
39
40 syscall_regfunc, syscall_unregfunc
41);
42
43TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
44
45TRACE_EVENT_FN(sys_exit,
46
47 TP_PROTO(struct pt_regs *regs, long ret),
48
49 TP_ARGS(regs, ret),
50
51 TP_STRUCT__entry(
52 __field( long, id )
53 __field( long, ret )
54 ),
55
56 TP_fast_assign(
57 __entry->id = syscall_get_nr(current, regs);
58 __entry->ret = ret;
59 ),
60
61 TP_printk("NR %ld = %ld",
62 __entry->id, __entry->ret),
63
64 syscall_regfunc, syscall_unregfunc
65);
66
67TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
68
69#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
70
71#endif /* _TRACE_EVENTS_SYSCALLS_H */
72
73/* This part must be outside protection */
74#include <trace/define_trace.h>
75
diff --git a/drivers/staging/lttng/instrumentation/syscalls/3.0.4/x86-64-syscalls-3.0.4 b/drivers/staging/lttng/instrumentation/syscalls/3.0.4/x86-64-syscalls-3.0.4
deleted file mode 100644
index b2294725feeb..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/3.0.4/x86-64-syscalls-3.0.4
+++ /dev/null
@@ -1,263 +0,0 @@
1syscall sys_read nr 0 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
2syscall sys_write nr 1 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
3syscall sys_open nr 2 nbargs 3 types: (const char *, int, int) args: (filename, flags, mode)
4syscall sys_close nr 3 nbargs 1 types: (unsigned int) args: (fd)
5syscall sys_newstat nr 4 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
6syscall sys_newfstat nr 5 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
7syscall sys_newlstat nr 6 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
8syscall sys_poll nr 7 nbargs 3 types: (struct pollfd *, unsigned int, long) args: (ufds, nfds, timeout_msecs)
9syscall sys_lseek nr 8 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
10syscall sys_mmap nr 9 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, off)
11syscall sys_mprotect nr 10 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
12syscall sys_munmap nr 11 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
13syscall sys_brk nr 12 nbargs 1 types: (unsigned long) args: (brk)
14syscall sys_rt_sigaction nr 13 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
15syscall sys_rt_sigprocmask nr 14 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
16syscall sys_ioctl nr 16 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
17syscall sys_readv nr 19 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
18syscall sys_writev nr 20 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
19syscall sys_access nr 21 nbargs 2 types: (const char *, int) args: (filename, mode)
20syscall sys_pipe nr 22 nbargs 1 types: (int *) args: (fildes)
21syscall sys_select nr 23 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
22syscall sys_sched_yield nr 24 nbargs 0 types: () args: ()
23syscall sys_mremap nr 25 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
24syscall sys_msync nr 26 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
25syscall sys_mincore nr 27 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
26syscall sys_madvise nr 28 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
27syscall sys_shmget nr 29 nbargs 3 types: (key_t, size_t, int) args: (key, size, shmflg)
28syscall sys_shmat nr 30 nbargs 3 types: (int, char *, int) args: (shmid, shmaddr, shmflg)
29syscall sys_shmctl nr 31 nbargs 3 types: (int, int, struct shmid_ds *) args: (shmid, cmd, buf)
30syscall sys_dup nr 32 nbargs 1 types: (unsigned int) args: (fildes)
31syscall sys_dup2 nr 33 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
32syscall sys_pause nr 34 nbargs 0 types: () args: ()
33syscall sys_nanosleep nr 35 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
34syscall sys_getitimer nr 36 nbargs 2 types: (int, struct itimerval *) args: (which, value)
35syscall sys_alarm nr 37 nbargs 1 types: (unsigned int) args: (seconds)
36syscall sys_setitimer nr 38 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
37syscall sys_getpid nr 39 nbargs 0 types: () args: ()
38syscall sys_sendfile64 nr 40 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
39syscall sys_socket nr 41 nbargs 3 types: (int, int, int) args: (family, type, protocol)
40syscall sys_connect nr 42 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, uservaddr, addrlen)
41syscall sys_accept nr 43 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, upeer_sockaddr, upeer_addrlen)
42syscall sys_sendto nr 44 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int) args: (fd, buff, len, flags, addr, addr_len)
43syscall sys_recvfrom nr 45 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int *) args: (fd, ubuf, size, flags, addr, addr_len)
44syscall sys_sendmsg nr 46 nbargs 3 types: (int, struct msghdr *, unsigned) args: (fd, msg, flags)
45syscall sys_recvmsg nr 47 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
46syscall sys_shutdown nr 48 nbargs 2 types: (int, int) args: (fd, how)
47syscall sys_bind nr 49 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, umyaddr, addrlen)
48syscall sys_listen nr 50 nbargs 2 types: (int, int) args: (fd, backlog)
49syscall sys_getsockname nr 51 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
50syscall sys_getpeername nr 52 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
51syscall sys_socketpair nr 53 nbargs 4 types: (int, int, int, int *) args: (family, type, protocol, usockvec)
52syscall sys_setsockopt nr 54 nbargs 5 types: (int, int, int, char *, int) args: (fd, level, optname, optval, optlen)
53syscall sys_getsockopt nr 55 nbargs 5 types: (int, int, int, char *, int *) args: (fd, level, optname, optval, optlen)
54syscall sys_exit nr 60 nbargs 1 types: (int) args: (error_code)
55syscall sys_wait4 nr 61 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
56syscall sys_kill nr 62 nbargs 2 types: (pid_t, int) args: (pid, sig)
57syscall sys_newuname nr 63 nbargs 1 types: (struct new_utsname *) args: (name)
58syscall sys_semget nr 64 nbargs 3 types: (key_t, int, int) args: (key, nsems, semflg)
59syscall sys_semop nr 65 nbargs 3 types: (int, struct sembuf *, unsigned) args: (semid, tsops, nsops)
60syscall sys_shmdt nr 67 nbargs 1 types: (char *) args: (shmaddr)
61syscall sys_msgget nr 68 nbargs 2 types: (key_t, int) args: (key, msgflg)
62syscall sys_msgsnd nr 69 nbargs 4 types: (int, struct msgbuf *, size_t, int) args: (msqid, msgp, msgsz, msgflg)
63syscall sys_msgrcv nr 70 nbargs 5 types: (int, struct msgbuf *, size_t, long, int) args: (msqid, msgp, msgsz, msgtyp, msgflg)
64syscall sys_msgctl nr 71 nbargs 3 types: (int, int, struct msqid_ds *) args: (msqid, cmd, buf)
65syscall sys_fcntl nr 72 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
66syscall sys_flock nr 73 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
67syscall sys_fsync nr 74 nbargs 1 types: (unsigned int) args: (fd)
68syscall sys_fdatasync nr 75 nbargs 1 types: (unsigned int) args: (fd)
69syscall sys_truncate nr 76 nbargs 2 types: (const char *, long) args: (path, length)
70syscall sys_ftruncate nr 77 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
71syscall sys_getdents nr 78 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
72syscall sys_getcwd nr 79 nbargs 2 types: (char *, unsigned long) args: (buf, size)
73syscall sys_chdir nr 80 nbargs 1 types: (const char *) args: (filename)
74syscall sys_fchdir nr 81 nbargs 1 types: (unsigned int) args: (fd)
75syscall sys_rename nr 82 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
76syscall sys_mkdir nr 83 nbargs 2 types: (const char *, int) args: (pathname, mode)
77syscall sys_rmdir nr 84 nbargs 1 types: (const char *) args: (pathname)
78syscall sys_creat nr 85 nbargs 2 types: (const char *, int) args: (pathname, mode)
79syscall sys_link nr 86 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
80syscall sys_unlink nr 87 nbargs 1 types: (const char *) args: (pathname)
81syscall sys_symlink nr 88 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
82syscall sys_readlink nr 89 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
83syscall sys_chmod nr 90 nbargs 2 types: (const char *, mode_t) args: (filename, mode)
84syscall sys_fchmod nr 91 nbargs 2 types: (unsigned int, mode_t) args: (fd, mode)
85syscall sys_chown nr 92 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
86syscall sys_fchown nr 93 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
87syscall sys_lchown nr 94 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
88syscall sys_umask nr 95 nbargs 1 types: (int) args: (mask)
89syscall sys_gettimeofday nr 96 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
90syscall sys_getrlimit nr 97 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
91syscall sys_getrusage nr 98 nbargs 2 types: (int, struct rusage *) args: (who, ru)
92syscall sys_sysinfo nr 99 nbargs 1 types: (struct sysinfo *) args: (info)
93syscall sys_times nr 100 nbargs 1 types: (struct tms *) args: (tbuf)
94syscall sys_ptrace nr 101 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
95syscall sys_getuid nr 102 nbargs 0 types: () args: ()
96syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
97syscall sys_getgid nr 104 nbargs 0 types: () args: ()
98syscall sys_setuid nr 105 nbargs 1 types: (uid_t) args: (uid)
99syscall sys_setgid nr 106 nbargs 1 types: (gid_t) args: (gid)
100syscall sys_geteuid nr 107 nbargs 0 types: () args: ()
101syscall sys_getegid nr 108 nbargs 0 types: () args: ()
102syscall sys_setpgid nr 109 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
103syscall sys_getppid nr 110 nbargs 0 types: () args: ()
104syscall sys_getpgrp nr 111 nbargs 0 types: () args: ()
105syscall sys_setsid nr 112 nbargs 0 types: () args: ()
106syscall sys_setreuid nr 113 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
107syscall sys_setregid nr 114 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
108syscall sys_getgroups nr 115 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
109syscall sys_setgroups nr 116 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
110syscall sys_setresuid nr 117 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
111syscall sys_getresuid nr 118 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
112syscall sys_setresgid nr 119 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
113syscall sys_getresgid nr 120 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
114syscall sys_getpgid nr 121 nbargs 1 types: (pid_t) args: (pid)
115syscall sys_setfsuid nr 122 nbargs 1 types: (uid_t) args: (uid)
116syscall sys_setfsgid nr 123 nbargs 1 types: (gid_t) args: (gid)
117syscall sys_getsid nr 124 nbargs 1 types: (pid_t) args: (pid)
118syscall sys_capget nr 125 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
119syscall sys_capset nr 126 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
120syscall sys_rt_sigpending nr 127 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
121syscall sys_rt_sigtimedwait nr 128 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
122syscall sys_rt_sigqueueinfo nr 129 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
123syscall sys_rt_sigsuspend nr 130 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
124syscall sys_utime nr 132 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
125syscall sys_mknod nr 133 nbargs 3 types: (const char *, int, unsigned) args: (filename, mode, dev)
126syscall sys_personality nr 135 nbargs 1 types: (unsigned int) args: (personality)
127syscall sys_ustat nr 136 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
128syscall sys_statfs nr 137 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
129syscall sys_fstatfs nr 138 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
130syscall sys_sysfs nr 139 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
131syscall sys_getpriority nr 140 nbargs 2 types: (int, int) args: (which, who)
132syscall sys_setpriority nr 141 nbargs 3 types: (int, int, int) args: (which, who, niceval)
133syscall sys_sched_setparam nr 142 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
134syscall sys_sched_getparam nr 143 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
135syscall sys_sched_setscheduler nr 144 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
136syscall sys_sched_getscheduler nr 145 nbargs 1 types: (pid_t) args: (pid)
137syscall sys_sched_get_priority_max nr 146 nbargs 1 types: (int) args: (policy)
138syscall sys_sched_get_priority_min nr 147 nbargs 1 types: (int) args: (policy)
139syscall sys_sched_rr_get_interval nr 148 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
140syscall sys_mlock nr 149 nbargs 2 types: (unsigned long, size_t) args: (start, len)
141syscall sys_munlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
142syscall sys_mlockall nr 151 nbargs 1 types: (int) args: (flags)
143syscall sys_munlockall nr 152 nbargs 0 types: () args: ()
144syscall sys_vhangup nr 153 nbargs 0 types: () args: ()
145syscall sys_pivot_root nr 155 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
146syscall sys_sysctl nr 156 nbargs 1 types: (struct __sysctl_args *) args: (args)
147syscall sys_prctl nr 157 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
148syscall sys_adjtimex nr 159 nbargs 1 types: (struct timex *) args: (txc_p)
149syscall sys_setrlimit nr 160 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
150syscall sys_chroot nr 161 nbargs 1 types: (const char *) args: (filename)
151syscall sys_sync nr 162 nbargs 0 types: () args: ()
152syscall sys_settimeofday nr 164 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
153syscall sys_mount nr 165 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
154syscall sys_umount nr 166 nbargs 2 types: (char *, int) args: (name, flags)
155syscall sys_swapon nr 167 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
156syscall sys_swapoff nr 168 nbargs 1 types: (const char *) args: (specialfile)
157syscall sys_reboot nr 169 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
158syscall sys_sethostname nr 170 nbargs 2 types: (char *, int) args: (name, len)
159syscall sys_setdomainname nr 171 nbargs 2 types: (char *, int) args: (name, len)
160syscall sys_init_module nr 175 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
161syscall sys_delete_module nr 176 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
162syscall sys_nfsservctl nr 180 nbargs 3 types: (int, struct nfsctl_arg *, void *) args: (cmd, arg, res)
163syscall sys_gettid nr 186 nbargs 0 types: () args: ()
164syscall sys_setxattr nr 188 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
165syscall sys_lsetxattr nr 189 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
166syscall sys_fsetxattr nr 190 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
167syscall sys_getxattr nr 191 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
168syscall sys_lgetxattr nr 192 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
169syscall sys_fgetxattr nr 193 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
170syscall sys_listxattr nr 194 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
171syscall sys_llistxattr nr 195 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
172syscall sys_flistxattr nr 196 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
173syscall sys_removexattr nr 197 nbargs 2 types: (const char *, const char *) args: (pathname, name)
174syscall sys_lremovexattr nr 198 nbargs 2 types: (const char *, const char *) args: (pathname, name)
175syscall sys_fremovexattr nr 199 nbargs 2 types: (int, const char *) args: (fd, name)
176syscall sys_tkill nr 200 nbargs 2 types: (pid_t, int) args: (pid, sig)
177syscall sys_time nr 201 nbargs 1 types: (time_t *) args: (tloc)
178syscall sys_futex nr 202 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
179syscall sys_sched_setaffinity nr 203 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
180syscall sys_sched_getaffinity nr 204 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
181syscall sys_io_setup nr 206 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
182syscall sys_io_destroy nr 207 nbargs 1 types: (aio_context_t) args: (ctx)
183syscall sys_io_getevents nr 208 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
184syscall sys_io_submit nr 209 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
185syscall sys_io_cancel nr 210 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
186syscall sys_epoll_create nr 213 nbargs 1 types: (int) args: (size)
187syscall sys_remap_file_pages nr 216 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
188syscall sys_getdents64 nr 217 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
189syscall sys_set_tid_address nr 218 nbargs 1 types: (int *) args: (tidptr)
190syscall sys_restart_syscall nr 219 nbargs 0 types: () args: ()
191syscall sys_semtimedop nr 220 nbargs 4 types: (int, struct sembuf *, unsigned, const struct timespec *) args: (semid, tsops, nsops, timeout)
192syscall sys_timer_create nr 222 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
193syscall sys_timer_settime nr 223 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
194syscall sys_timer_gettime nr 224 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
195syscall sys_timer_getoverrun nr 225 nbargs 1 types: (timer_t) args: (timer_id)
196syscall sys_timer_delete nr 226 nbargs 1 types: (timer_t) args: (timer_id)
197syscall sys_clock_settime nr 227 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
198syscall sys_clock_gettime nr 228 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
199syscall sys_clock_getres nr 229 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
200syscall sys_clock_nanosleep nr 230 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
201syscall sys_exit_group nr 231 nbargs 1 types: (int) args: (error_code)
202syscall sys_epoll_wait nr 232 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
203syscall sys_epoll_ctl nr 233 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
204syscall sys_tgkill nr 234 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
205syscall sys_utimes nr 235 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
206syscall sys_mq_open nr 240 nbargs 4 types: (const char *, int, mode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
207syscall sys_mq_unlink nr 241 nbargs 1 types: (const char *) args: (u_name)
208syscall sys_mq_timedsend nr 242 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
209syscall sys_mq_timedreceive nr 243 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
210syscall sys_mq_notify nr 244 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
211syscall sys_mq_getsetattr nr 245 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
212syscall sys_kexec_load nr 246 nbargs 4 types: (unsigned long, unsigned long, struct kexec_segment *, unsigned long) args: (entry, nr_segments, segments, flags)
213syscall sys_waitid nr 247 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
214syscall sys_ioprio_set nr 251 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
215syscall sys_ioprio_get nr 252 nbargs 2 types: (int, int) args: (which, who)
216syscall sys_inotify_init nr 253 nbargs 0 types: () args: ()
217syscall sys_inotify_add_watch nr 254 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
218syscall sys_inotify_rm_watch nr 255 nbargs 2 types: (int, __s32) args: (fd, wd)
219syscall sys_openat nr 257 nbargs 4 types: (int, const char *, int, int) args: (dfd, filename, flags, mode)
220syscall sys_mkdirat nr 258 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, mode)
221syscall sys_mknodat nr 259 nbargs 4 types: (int, const char *, int, unsigned) args: (dfd, filename, mode, dev)
222syscall sys_fchownat nr 260 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
223syscall sys_futimesat nr 261 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
224syscall sys_newfstatat nr 262 nbargs 4 types: (int, const char *, struct stat *, int) args: (dfd, filename, statbuf, flag)
225syscall sys_unlinkat nr 263 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
226syscall sys_renameat nr 264 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
227syscall sys_linkat nr 265 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
228syscall sys_symlinkat nr 266 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
229syscall sys_readlinkat nr 267 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
230syscall sys_fchmodat nr 268 nbargs 3 types: (int, const char *, mode_t) args: (dfd, filename, mode)
231syscall sys_faccessat nr 269 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
232syscall sys_pselect6 nr 270 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
233syscall sys_ppoll nr 271 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
234syscall sys_unshare nr 272 nbargs 1 types: (unsigned long) args: (unshare_flags)
235syscall sys_set_robust_list nr 273 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
236syscall sys_get_robust_list nr 274 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
237syscall sys_splice nr 275 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
238syscall sys_tee nr 276 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
239syscall sys_vmsplice nr 278 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
240syscall sys_utimensat nr 280 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
241syscall sys_epoll_pwait nr 281 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
242syscall sys_signalfd nr 282 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
243syscall sys_timerfd_create nr 283 nbargs 2 types: (int, int) args: (clockid, flags)
244syscall sys_eventfd nr 284 nbargs 1 types: (unsigned int) args: (count)
245syscall sys_timerfd_settime nr 286 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
246syscall sys_timerfd_gettime nr 287 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
247syscall sys_accept4 nr 288 nbargs 4 types: (int, struct sockaddr *, int *, int) args: (fd, upeer_sockaddr, upeer_addrlen, flags)
248syscall sys_signalfd4 nr 289 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
249syscall sys_eventfd2 nr 290 nbargs 2 types: (unsigned int, int) args: (count, flags)
250syscall sys_epoll_create1 nr 291 nbargs 1 types: (int) args: (flags)
251syscall sys_dup3 nr 292 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
252syscall sys_pipe2 nr 293 nbargs 2 types: (int *, int) args: (fildes, flags)
253syscall sys_inotify_init1 nr 294 nbargs 1 types: (int) args: (flags)
254syscall sys_preadv nr 295 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
255syscall sys_pwritev nr 296 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
256syscall sys_rt_tgsigqueueinfo nr 297 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
257syscall sys_perf_event_open nr 298 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
258syscall sys_recvmmsg nr 299 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
259syscall sys_prlimit64 nr 302 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
260syscall sys_clock_adjtime nr 305 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
261syscall sys_syncfs nr 306 nbargs 1 types: (int) args: (fd)
262syscall sys_sendmmsg nr 307 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
263syscall sys_setns nr 308 nbargs 2 types: (int, int) args: (fd, nstype)
diff --git a/drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6 b/drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6
deleted file mode 100644
index 130c1e3ee44e..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6
+++ /dev/null
@@ -1,291 +0,0 @@
1syscall sys_restart_syscall nr 0 nbargs 0 types: () args: ()
2syscall sys_exit nr 1 nbargs 1 types: (int) args: (error_code)
3syscall sys_read nr 3 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
4syscall sys_write nr 4 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
5syscall sys_open nr 5 nbargs 3 types: (const char *, int, int) args: (filename, flags, mode)
6syscall sys_close nr 6 nbargs 1 types: (unsigned int) args: (fd)
7syscall sys_waitpid nr 7 nbargs 3 types: (pid_t, int *, int) args: (pid, stat_addr, options)
8syscall sys_creat nr 8 nbargs 2 types: (const char *, int) args: (pathname, mode)
9syscall sys_link nr 9 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
10syscall sys_unlink nr 10 nbargs 1 types: (const char *) args: (pathname)
11syscall sys_chdir nr 12 nbargs 1 types: (const char *) args: (filename)
12syscall sys_time nr 13 nbargs 1 types: (time_t *) args: (tloc)
13syscall sys_mknod nr 14 nbargs 3 types: (const char *, int, unsigned) args: (filename, mode, dev)
14syscall sys_chmod nr 15 nbargs 2 types: (const char *, mode_t) args: (filename, mode)
15syscall sys_lchown16 nr 16 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
16syscall sys_stat nr 18 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
17syscall sys_lseek nr 19 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
18syscall sys_getpid nr 20 nbargs 0 types: () args: ()
19syscall sys_mount nr 21 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
20syscall sys_oldumount nr 22 nbargs 1 types: (char *) args: (name)
21syscall sys_setuid16 nr 23 nbargs 1 types: (old_uid_t) args: (uid)
22syscall sys_getuid16 nr 24 nbargs 0 types: () args: ()
23syscall sys_stime nr 25 nbargs 1 types: (time_t *) args: (tptr)
24syscall sys_ptrace nr 26 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
25syscall sys_alarm nr 27 nbargs 1 types: (unsigned int) args: (seconds)
26syscall sys_fstat nr 28 nbargs 2 types: (unsigned int, struct __old_kernel_stat *) args: (fd, statbuf)
27syscall sys_pause nr 29 nbargs 0 types: () args: ()
28syscall sys_utime nr 30 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
29syscall sys_access nr 33 nbargs 2 types: (const char *, int) args: (filename, mode)
30syscall sys_nice nr 34 nbargs 1 types: (int) args: (increment)
31syscall sys_sync nr 36 nbargs 0 types: () args: ()
32syscall sys_kill nr 37 nbargs 2 types: (pid_t, int) args: (pid, sig)
33syscall sys_rename nr 38 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
34syscall sys_mkdir nr 39 nbargs 2 types: (const char *, int) args: (pathname, mode)
35syscall sys_rmdir nr 40 nbargs 1 types: (const char *) args: (pathname)
36syscall sys_dup nr 41 nbargs 1 types: (unsigned int) args: (fildes)
37syscall sys_pipe nr 42 nbargs 1 types: (int *) args: (fildes)
38syscall sys_times nr 43 nbargs 1 types: (struct tms *) args: (tbuf)
39syscall sys_brk nr 45 nbargs 1 types: (unsigned long) args: (brk)
40syscall sys_setgid16 nr 46 nbargs 1 types: (old_gid_t) args: (gid)
41syscall sys_getgid16 nr 47 nbargs 0 types: () args: ()
42syscall sys_signal nr 48 nbargs 2 types: (int, __sighandler_t) args: (sig, handler)
43syscall sys_geteuid16 nr 49 nbargs 0 types: () args: ()
44syscall sys_getegid16 nr 50 nbargs 0 types: () args: ()
45syscall sys_acct nr 51 nbargs 1 types: (const char *) args: (name)
46syscall sys_umount nr 52 nbargs 2 types: (char *, int) args: (name, flags)
47syscall sys_ioctl nr 54 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
48syscall sys_fcntl nr 55 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
49syscall sys_setpgid nr 57 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
50syscall sys_olduname nr 59 nbargs 1 types: (struct oldold_utsname *) args: (name)
51syscall sys_umask nr 60 nbargs 1 types: (int) args: (mask)
52syscall sys_chroot nr 61 nbargs 1 types: (const char *) args: (filename)
53syscall sys_ustat nr 62 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
54syscall sys_dup2 nr 63 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
55syscall sys_getppid nr 64 nbargs 0 types: () args: ()
56syscall sys_getpgrp nr 65 nbargs 0 types: () args: ()
57syscall sys_setsid nr 66 nbargs 0 types: () args: ()
58syscall sys_sgetmask nr 68 nbargs 0 types: () args: ()
59syscall sys_ssetmask nr 69 nbargs 1 types: (int) args: (newmask)
60syscall sys_setreuid16 nr 70 nbargs 2 types: (old_uid_t, old_uid_t) args: (ruid, euid)
61syscall sys_setregid16 nr 71 nbargs 2 types: (old_gid_t, old_gid_t) args: (rgid, egid)
62syscall sys_sigpending nr 73 nbargs 1 types: (old_sigset_t *) args: (set)
63syscall sys_sethostname nr 74 nbargs 2 types: (char *, int) args: (name, len)
64syscall sys_setrlimit nr 75 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
65syscall sys_old_getrlimit nr 76 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
66syscall sys_getrusage nr 77 nbargs 2 types: (int, struct rusage *) args: (who, ru)
67syscall sys_gettimeofday nr 78 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
68syscall sys_settimeofday nr 79 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
69syscall sys_getgroups16 nr 80 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
70syscall sys_setgroups16 nr 81 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
71syscall sys_old_select nr 82 nbargs 1 types: (struct sel_arg_struct *) args: (arg)
72syscall sys_symlink nr 83 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
73syscall sys_lstat nr 84 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
74syscall sys_readlink nr 85 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
75syscall sys_uselib nr 86 nbargs 1 types: (const char *) args: (library)
76syscall sys_swapon nr 87 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
77syscall sys_reboot nr 88 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
78syscall sys_old_readdir nr 89 nbargs 3 types: (unsigned int, struct old_linux_dirent *, unsigned int) args: (fd, dirent, count)
79syscall sys_old_mmap nr 90 nbargs 1 types: (struct mmap_arg_struct *) args: (arg)
80syscall sys_munmap nr 91 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
81syscall sys_truncate nr 92 nbargs 2 types: (const char *, long) args: (path, length)
82syscall sys_ftruncate nr 93 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
83syscall sys_fchmod nr 94 nbargs 2 types: (unsigned int, mode_t) args: (fd, mode)
84syscall sys_fchown16 nr 95 nbargs 3 types: (unsigned int, old_uid_t, old_gid_t) args: (fd, user, group)
85syscall sys_getpriority nr 96 nbargs 2 types: (int, int) args: (which, who)
86syscall sys_setpriority nr 97 nbargs 3 types: (int, int, int) args: (which, who, niceval)
87syscall sys_statfs nr 99 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
88syscall sys_fstatfs nr 100 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
89syscall sys_socketcall nr 102 nbargs 2 types: (int, unsigned long *) args: (call, args)
90syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
91syscall sys_setitimer nr 104 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
92syscall sys_getitimer nr 105 nbargs 2 types: (int, struct itimerval *) args: (which, value)
93syscall sys_newstat nr 106 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
94syscall sys_newlstat nr 107 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
95syscall sys_newfstat nr 108 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
96syscall sys_uname nr 109 nbargs 1 types: (struct old_utsname *) args: (name)
97syscall sys_vhangup nr 111 nbargs 0 types: () args: ()
98syscall sys_wait4 nr 114 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
99syscall sys_swapoff nr 115 nbargs 1 types: (const char *) args: (specialfile)
100syscall sys_sysinfo nr 116 nbargs 1 types: (struct sysinfo *) args: (info)
101syscall sys_ipc nr 117 nbargs 6 types: (unsigned int, int, unsigned long, unsigned long, void *, long) args: (call, first, second, third, ptr, fifth)
102syscall sys_fsync nr 118 nbargs 1 types: (unsigned int) args: (fd)
103syscall sys_setdomainname nr 121 nbargs 2 types: (char *, int) args: (name, len)
104syscall sys_newuname nr 122 nbargs 1 types: (struct new_utsname *) args: (name)
105syscall sys_adjtimex nr 124 nbargs 1 types: (struct timex *) args: (txc_p)
106syscall sys_mprotect nr 125 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
107syscall sys_sigprocmask nr 126 nbargs 3 types: (int, old_sigset_t *, old_sigset_t *) args: (how, nset, oset)
108syscall sys_init_module nr 128 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
109syscall sys_delete_module nr 129 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
110syscall sys_quotactl nr 131 nbargs 4 types: (unsigned int, const char *, qid_t, void *) args: (cmd, special, id, addr)
111syscall sys_getpgid nr 132 nbargs 1 types: (pid_t) args: (pid)
112syscall sys_fchdir nr 133 nbargs 1 types: (unsigned int) args: (fd)
113syscall sys_bdflush nr 134 nbargs 2 types: (int, long) args: (func, data)
114syscall sys_sysfs nr 135 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
115syscall sys_personality nr 136 nbargs 1 types: (unsigned int) args: (personality)
116syscall sys_setfsuid16 nr 138 nbargs 1 types: (old_uid_t) args: (uid)
117syscall sys_setfsgid16 nr 139 nbargs 1 types: (old_gid_t) args: (gid)
118syscall sys_llseek nr 140 nbargs 5 types: (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) args: (fd, offset_high, offset_low, result, origin)
119syscall sys_getdents nr 141 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
120syscall sys_select nr 142 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
121syscall sys_flock nr 143 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
122syscall sys_msync nr 144 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
123syscall sys_readv nr 145 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
124syscall sys_writev nr 146 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
125syscall sys_getsid nr 147 nbargs 1 types: (pid_t) args: (pid)
126syscall sys_fdatasync nr 148 nbargs 1 types: (unsigned int) args: (fd)
127syscall sys_sysctl nr 149 nbargs 1 types: (struct __sysctl_args *) args: (args)
128syscall sys_mlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
129syscall sys_munlock nr 151 nbargs 2 types: (unsigned long, size_t) args: (start, len)
130syscall sys_mlockall nr 152 nbargs 1 types: (int) args: (flags)
131syscall sys_munlockall nr 153 nbargs 0 types: () args: ()
132syscall sys_sched_setparam nr 154 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
133syscall sys_sched_getparam nr 155 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
134syscall sys_sched_setscheduler nr 156 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
135syscall sys_sched_getscheduler nr 157 nbargs 1 types: (pid_t) args: (pid)
136syscall sys_sched_yield nr 158 nbargs 0 types: () args: ()
137syscall sys_sched_get_priority_max nr 159 nbargs 1 types: (int) args: (policy)
138syscall sys_sched_get_priority_min nr 160 nbargs 1 types: (int) args: (policy)
139syscall sys_sched_rr_get_interval nr 161 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
140syscall sys_nanosleep nr 162 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
141syscall sys_mremap nr 163 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
142syscall sys_setresuid16 nr 164 nbargs 3 types: (old_uid_t, old_uid_t, old_uid_t) args: (ruid, euid, suid)
143syscall sys_getresuid16 nr 165 nbargs 3 types: (old_uid_t *, old_uid_t *, old_uid_t *) args: (ruid, euid, suid)
144syscall sys_poll nr 168 nbargs 3 types: (struct pollfd *, unsigned int, long) args: (ufds, nfds, timeout_msecs)
145syscall sys_setresgid16 nr 170 nbargs 3 types: (old_gid_t, old_gid_t, old_gid_t) args: (rgid, egid, sgid)
146syscall sys_getresgid16 nr 171 nbargs 3 types: (old_gid_t *, old_gid_t *, old_gid_t *) args: (rgid, egid, sgid)
147syscall sys_prctl nr 172 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
148syscall sys_rt_sigaction nr 174 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
149syscall sys_rt_sigprocmask nr 175 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
150syscall sys_rt_sigpending nr 176 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
151syscall sys_rt_sigtimedwait nr 177 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
152syscall sys_rt_sigqueueinfo nr 178 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
153syscall sys_rt_sigsuspend nr 179 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
154syscall sys_chown16 nr 182 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
155syscall sys_getcwd nr 183 nbargs 2 types: (char *, unsigned long) args: (buf, size)
156syscall sys_capget nr 184 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
157syscall sys_capset nr 185 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
158syscall sys_sendfile nr 187 nbargs 4 types: (int, int, off_t *, size_t) args: (out_fd, in_fd, offset, count)
159syscall sys_getrlimit nr 191 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
160syscall sys_mmap_pgoff nr 192 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, pgoff)
161syscall sys_stat64 nr 195 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
162syscall sys_lstat64 nr 196 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
163syscall sys_fstat64 nr 197 nbargs 2 types: (unsigned long, struct stat64 *) args: (fd, statbuf)
164syscall sys_lchown nr 198 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
165syscall sys_getuid nr 199 nbargs 0 types: () args: ()
166syscall sys_getgid nr 200 nbargs 0 types: () args: ()
167syscall sys_geteuid nr 201 nbargs 0 types: () args: ()
168syscall sys_getegid nr 202 nbargs 0 types: () args: ()
169syscall sys_setreuid nr 203 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
170syscall sys_setregid nr 204 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
171syscall sys_getgroups nr 205 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
172syscall sys_setgroups nr 206 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
173syscall sys_fchown nr 207 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
174syscall sys_setresuid nr 208 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
175syscall sys_getresuid nr 209 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
176syscall sys_setresgid nr 210 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
177syscall sys_getresgid nr 211 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
178syscall sys_chown nr 212 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
179syscall sys_setuid nr 213 nbargs 1 types: (uid_t) args: (uid)
180syscall sys_setgid nr 214 nbargs 1 types: (gid_t) args: (gid)
181syscall sys_setfsuid nr 215 nbargs 1 types: (uid_t) args: (uid)
182syscall sys_setfsgid nr 216 nbargs 1 types: (gid_t) args: (gid)
183syscall sys_pivot_root nr 217 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
184syscall sys_mincore nr 218 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
185syscall sys_madvise nr 219 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
186syscall sys_getdents64 nr 220 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
187syscall sys_fcntl64 nr 221 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
188syscall sys_gettid nr 224 nbargs 0 types: () args: ()
189syscall sys_setxattr nr 226 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
190syscall sys_lsetxattr nr 227 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
191syscall sys_fsetxattr nr 228 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
192syscall sys_getxattr nr 229 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
193syscall sys_lgetxattr nr 230 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
194syscall sys_fgetxattr nr 231 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
195syscall sys_listxattr nr 232 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
196syscall sys_llistxattr nr 233 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
197syscall sys_flistxattr nr 234 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
198syscall sys_removexattr nr 235 nbargs 2 types: (const char *, const char *) args: (pathname, name)
199syscall sys_lremovexattr nr 236 nbargs 2 types: (const char *, const char *) args: (pathname, name)
200syscall sys_fremovexattr nr 237 nbargs 2 types: (int, const char *) args: (fd, name)
201syscall sys_tkill nr 238 nbargs 2 types: (pid_t, int) args: (pid, sig)
202syscall sys_sendfile64 nr 239 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
203syscall sys_futex nr 240 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
204syscall sys_sched_setaffinity nr 241 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
205syscall sys_sched_getaffinity nr 242 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
206syscall sys_io_setup nr 245 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
207syscall sys_io_destroy nr 246 nbargs 1 types: (aio_context_t) args: (ctx)
208syscall sys_io_getevents nr 247 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
209syscall sys_io_submit nr 248 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
210syscall sys_io_cancel nr 249 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
211syscall sys_exit_group nr 252 nbargs 1 types: (int) args: (error_code)
212syscall sys_epoll_create nr 254 nbargs 1 types: (int) args: (size)
213syscall sys_epoll_ctl nr 255 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
214syscall sys_epoll_wait nr 256 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
215syscall sys_remap_file_pages nr 257 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
216syscall sys_set_tid_address nr 258 nbargs 1 types: (int *) args: (tidptr)
217syscall sys_timer_create nr 259 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
218syscall sys_timer_settime nr 260 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
219syscall sys_timer_gettime nr 261 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
220syscall sys_timer_getoverrun nr 262 nbargs 1 types: (timer_t) args: (timer_id)
221syscall sys_timer_delete nr 263 nbargs 1 types: (timer_t) args: (timer_id)
222syscall sys_clock_settime nr 264 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
223syscall sys_clock_gettime nr 265 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
224syscall sys_clock_getres nr 266 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
225syscall sys_clock_nanosleep nr 267 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
226syscall sys_statfs64 nr 268 nbargs 3 types: (const char *, size_t, struct statfs64 *) args: (pathname, sz, buf)
227syscall sys_fstatfs64 nr 269 nbargs 3 types: (unsigned int, size_t, struct statfs64 *) args: (fd, sz, buf)
228syscall sys_tgkill nr 270 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
229syscall sys_utimes nr 271 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
230syscall sys_mq_open nr 277 nbargs 4 types: (const char *, int, mode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
231syscall sys_mq_unlink nr 278 nbargs 1 types: (const char *) args: (u_name)
232syscall sys_mq_timedsend nr 279 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
233syscall sys_mq_timedreceive nr 280 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
234syscall sys_mq_notify nr 281 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
235syscall sys_mq_getsetattr nr 282 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
236syscall sys_kexec_load nr 283 nbargs 4 types: (unsigned long, unsigned long, struct kexec_segment *, unsigned long) args: (entry, nr_segments, segments, flags)
237syscall sys_waitid nr 284 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
238syscall sys_add_key nr 286 nbargs 5 types: (const char *, const char *, const void *, size_t, key_serial_t) args: (_type, _description, _payload, plen, ringid)
239syscall sys_request_key nr 287 nbargs 4 types: (const char *, const char *, const char *, key_serial_t) args: (_type, _description, _callout_info, destringid)
240syscall sys_keyctl nr 288 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
241syscall sys_ioprio_set nr 289 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
242syscall sys_ioprio_get nr 290 nbargs 2 types: (int, int) args: (which, who)
243syscall sys_inotify_init nr 291 nbargs 0 types: () args: ()
244syscall sys_inotify_add_watch nr 292 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
245syscall sys_inotify_rm_watch nr 293 nbargs 2 types: (int, __s32) args: (fd, wd)
246syscall sys_openat nr 295 nbargs 4 types: (int, const char *, int, int) args: (dfd, filename, flags, mode)
247syscall sys_mkdirat nr 296 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, mode)
248syscall sys_mknodat nr 297 nbargs 4 types: (int, const char *, int, unsigned) args: (dfd, filename, mode, dev)
249syscall sys_fchownat nr 298 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
250syscall sys_futimesat nr 299 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
251syscall sys_fstatat64 nr 300 nbargs 4 types: (int, const char *, struct stat64 *, int) args: (dfd, filename, statbuf, flag)
252syscall sys_unlinkat nr 301 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
253syscall sys_renameat nr 302 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
254syscall sys_linkat nr 303 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
255syscall sys_symlinkat nr 304 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
256syscall sys_readlinkat nr 305 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
257syscall sys_fchmodat nr 306 nbargs 3 types: (int, const char *, mode_t) args: (dfd, filename, mode)
258syscall sys_faccessat nr 307 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
259syscall sys_pselect6 nr 308 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
260syscall sys_ppoll nr 309 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
261syscall sys_unshare nr 310 nbargs 1 types: (unsigned long) args: (unshare_flags)
262syscall sys_set_robust_list nr 311 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
263syscall sys_get_robust_list nr 312 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
264syscall sys_splice nr 313 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
265syscall sys_tee nr 315 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
266syscall sys_vmsplice nr 316 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
267syscall sys_getcpu nr 318 nbargs 3 types: (unsigned *, unsigned *, struct getcpu_cache *) args: (cpup, nodep, unused)
268syscall sys_epoll_pwait nr 319 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
269syscall sys_utimensat nr 320 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
270syscall sys_signalfd nr 321 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
271syscall sys_timerfd_create nr 322 nbargs 2 types: (int, int) args: (clockid, flags)
272syscall sys_eventfd nr 323 nbargs 1 types: (unsigned int) args: (count)
273syscall sys_timerfd_settime nr 325 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
274syscall sys_timerfd_gettime nr 326 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
275syscall sys_signalfd4 nr 327 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
276syscall sys_eventfd2 nr 328 nbargs 2 types: (unsigned int, int) args: (count, flags)
277syscall sys_epoll_create1 nr 329 nbargs 1 types: (int) args: (flags)
278syscall sys_dup3 nr 330 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
279syscall sys_pipe2 nr 331 nbargs 2 types: (int *, int) args: (fildes, flags)
280syscall sys_inotify_init1 nr 332 nbargs 1 types: (int) args: (flags)
281syscall sys_preadv nr 333 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
282syscall sys_pwritev nr 334 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
283syscall sys_rt_tgsigqueueinfo nr 335 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
284syscall sys_perf_event_open nr 336 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
285syscall sys_recvmmsg nr 337 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
286syscall sys_fanotify_init nr 338 nbargs 2 types: (unsigned int, unsigned int) args: (flags, event_f_flags)
287syscall sys_prlimit64 nr 340 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
288syscall sys_clock_adjtime nr 343 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
289syscall sys_syncfs nr 344 nbargs 1 types: (int) args: (fd)
290syscall sys_sendmmsg nr 345 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
291syscall sys_setns nr 346 nbargs 2 types: (int, int) args: (fd, nstype)
diff --git a/drivers/staging/lttng/instrumentation/syscalls/README b/drivers/staging/lttng/instrumentation/syscalls/README
deleted file mode 100644
index 6c235e150d46..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/README
+++ /dev/null
@@ -1,18 +0,0 @@
1LTTng system call tracing
2
31) lttng-syscall-extractor
4
5You need to build a kernel with CONFIG_FTRACE_SYSCALLS=y and
6CONFIG_KALLSYMS_ALL=y for extraction. Apply the linker patch to get your
7kernel to keep the system call metadata after boot. Then build and load
8the LTTng syscall extractor module. The module will fail to load (this
9is expected). See the dmesg output for system call metadata.
10
112) Generate system call TRACE_EVENT().
12
13Take the dmesg metadata and feed it to lttng-syscalls-generate-headers.sh, e.g.,
14from the instrumentation/syscalls directory. See the script header for
15usage example.
16
17After these are created, we just need to follow the new system call additions,
18no need to regenerate the whole thing, since system calls are only appended to.
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h
deleted file mode 100644
index dabc4bf731e5..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h
+++ /dev/null
@@ -1,3 +0,0 @@
1#ifdef CONFIG_X86_64
2#include "x86-32-syscalls-3.1.0-rc6_integers.h"
3#endif
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h
deleted file mode 100644
index a84423c7e5da..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h
+++ /dev/null
@@ -1,3 +0,0 @@
1#ifdef CONFIG_X86_64
2#include "x86-32-syscalls-3.1.0-rc6_pointers.h"
3#endif
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h
deleted file mode 100644
index 41db916163ce..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifdef CONFIG_X86_64
2#include "x86-64-syscalls-3.0.4_integers.h"
3#endif
4
5#ifdef CONFIG_X86_32
6#include "x86-32-syscalls-3.1.0-rc6_integers.h"
7#endif
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h
deleted file mode 100644
index 276d9a60adc9..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#define OVERRIDE_32_sys_mmap
2#define OVERRIDE_64_sys_mmap
3
4#ifndef CREATE_SYSCALL_TABLE
5
6SC_TRACE_EVENT(sys_mmap,
7 TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off),
8 TP_ARGS(addr, len, prot, flags, fd, off),
9 TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len) __field(int, prot) __field(int, flags) __field(int, fd) __field(off_t, offset)),
10 TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(offset, off)),
11 TP_printk()
12)
13
14#endif /* CREATE_SYSCALL_TABLE */
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h
deleted file mode 100644
index 32238902187d..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifdef CONFIG_X86_64
2#include "x86-64-syscalls-3.0.4_pointers.h"
3#endif
4
5#ifdef CONFIG_X86_32
6#include "x86-32-syscalls-3.1.0-rc6_pointers.h"
7#endif
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h
deleted file mode 100644
index e464a4ee4e7a..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h
+++ /dev/null
@@ -1,4 +0,0 @@
1/*
2 * This is a place-holder for override defines for system calls with
3 * pointers (all architectures).
4 */
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h
deleted file mode 100644
index 4582d03d731f..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#if !defined(_TRACE_SYSCALLS_UNKNOWN_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_SYSCALLS_UNKNOWN_H
3
4#include <linux/tracepoint.h>
5#include <linux/syscalls.h>
6
7#define UNKNOWN_SYSCALL_NRARGS 6
8
9TRACE_EVENT(sys_unknown,
10 TP_PROTO(unsigned int id, unsigned long *args),
11 TP_ARGS(id, args),
12 TP_STRUCT__entry(
13 __field(unsigned int, id)
14 __array(unsigned long, args, UNKNOWN_SYSCALL_NRARGS)
15 ),
16 TP_fast_assign(
17 tp_assign(id, id)
18 tp_memcpy(args, args, UNKNOWN_SYSCALL_NRARGS * sizeof(*args))
19 ),
20 TP_printk()
21)
22TRACE_EVENT(compat_sys_unknown,
23 TP_PROTO(unsigned int id, unsigned long *args),
24 TP_ARGS(id, args),
25 TP_STRUCT__entry(
26 __field(unsigned int, id)
27 __array(unsigned long, args, UNKNOWN_SYSCALL_NRARGS)
28 ),
29 TP_fast_assign(
30 tp_assign(id, id)
31 tp_memcpy(args, args, UNKNOWN_SYSCALL_NRARGS * sizeof(*args))
32 ),
33 TP_printk()
34)
35/*
36 * This is going to hook on sys_exit in the kernel.
37 * We change the name so we don't clash with the sys_exit syscall entry
38 * event.
39 */
40TRACE_EVENT(exit_syscall,
41 TP_PROTO(struct pt_regs *regs, long ret),
42 TP_ARGS(regs, ret),
43 TP_STRUCT__entry(
44 __field(long, ret)
45 ),
46 TP_fast_assign(
47 tp_assign(ret, ret)
48 ),
49 TP_printk()
50)
51
52#endif /* _TRACE_SYSCALLS_UNKNOWN_H */
53
54/* This part must be outside protection */
55#include "../../../probes/define_trace.h"
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h
deleted file mode 100644
index f4ee16c36511..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h
+++ /dev/null
@@ -1,1163 +0,0 @@
1/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
2#ifndef CREATE_SYSCALL_TABLE
3
4#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SYSCALLS_INTEGERS_H
6
7#include <linux/tracepoint.h>
8#include <linux/syscalls.h>
9#include "x86-32-syscalls-3.1.0-rc6_integers_override.h"
10#include "syscalls_integers_override.h"
11
12SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
13 TP_STRUCT__entry(),
14 TP_fast_assign(),
15 TP_printk()
16)
17#ifndef OVERRIDE_32_sys_restart_syscall
18SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
19#endif
20#ifndef OVERRIDE_32_sys_getpid
21SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
22#endif
23#ifndef OVERRIDE_32_sys_getuid16
24SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid16)
25#endif
26#ifndef OVERRIDE_32_sys_pause
27SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
28#endif
29#ifndef OVERRIDE_32_sys_sync
30SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
31#endif
32#ifndef OVERRIDE_32_sys_getgid16
33SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid16)
34#endif
35#ifndef OVERRIDE_32_sys_geteuid16
36SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid16)
37#endif
38#ifndef OVERRIDE_32_sys_getegid16
39SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid16)
40#endif
41#ifndef OVERRIDE_32_sys_getppid
42SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
43#endif
44#ifndef OVERRIDE_32_sys_getpgrp
45SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
46#endif
47#ifndef OVERRIDE_32_sys_setsid
48SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
49#endif
50#ifndef OVERRIDE_32_sys_sgetmask
51SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sgetmask)
52#endif
53#ifndef OVERRIDE_32_sys_vhangup
54SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
55#endif
56#ifndef OVERRIDE_32_sys_munlockall
57SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
58#endif
59#ifndef OVERRIDE_32_sys_sched_yield
60SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
61#endif
62#ifndef OVERRIDE_32_sys_getuid
63SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
64#endif
65#ifndef OVERRIDE_32_sys_getgid
66SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
67#endif
68#ifndef OVERRIDE_32_sys_geteuid
69SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
70#endif
71#ifndef OVERRIDE_32_sys_getegid
72SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
73#endif
74#ifndef OVERRIDE_32_sys_gettid
75SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
76#endif
77#ifndef OVERRIDE_32_sys_inotify_init
78SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
79#endif
80#ifndef OVERRIDE_32_sys_exit
81SC_TRACE_EVENT(sys_exit,
82 TP_PROTO(int error_code),
83 TP_ARGS(error_code),
84 TP_STRUCT__entry(__field(int, error_code)),
85 TP_fast_assign(tp_assign(error_code, error_code)),
86 TP_printk()
87)
88#endif
89#ifndef OVERRIDE_32_sys_close
90SC_TRACE_EVENT(sys_close,
91 TP_PROTO(unsigned int fd),
92 TP_ARGS(fd),
93 TP_STRUCT__entry(__field(unsigned int, fd)),
94 TP_fast_assign(tp_assign(fd, fd)),
95 TP_printk()
96)
97#endif
98#ifndef OVERRIDE_32_sys_setuid16
99SC_TRACE_EVENT(sys_setuid16,
100 TP_PROTO(old_uid_t uid),
101 TP_ARGS(uid),
102 TP_STRUCT__entry(__field(old_uid_t, uid)),
103 TP_fast_assign(tp_assign(uid, uid)),
104 TP_printk()
105)
106#endif
107#ifndef OVERRIDE_32_sys_alarm
108SC_TRACE_EVENT(sys_alarm,
109 TP_PROTO(unsigned int seconds),
110 TP_ARGS(seconds),
111 TP_STRUCT__entry(__field(unsigned int, seconds)),
112 TP_fast_assign(tp_assign(seconds, seconds)),
113 TP_printk()
114)
115#endif
116#ifndef OVERRIDE_32_sys_nice
117SC_TRACE_EVENT(sys_nice,
118 TP_PROTO(int increment),
119 TP_ARGS(increment),
120 TP_STRUCT__entry(__field(int, increment)),
121 TP_fast_assign(tp_assign(increment, increment)),
122 TP_printk()
123)
124#endif
125#ifndef OVERRIDE_32_sys_dup
126SC_TRACE_EVENT(sys_dup,
127 TP_PROTO(unsigned int fildes),
128 TP_ARGS(fildes),
129 TP_STRUCT__entry(__field(unsigned int, fildes)),
130 TP_fast_assign(tp_assign(fildes, fildes)),
131 TP_printk()
132)
133#endif
134#ifndef OVERRIDE_32_sys_brk
135SC_TRACE_EVENT(sys_brk,
136 TP_PROTO(unsigned long brk),
137 TP_ARGS(brk),
138 TP_STRUCT__entry(__field(unsigned long, brk)),
139 TP_fast_assign(tp_assign(brk, brk)),
140 TP_printk()
141)
142#endif
143#ifndef OVERRIDE_32_sys_setgid16
144SC_TRACE_EVENT(sys_setgid16,
145 TP_PROTO(old_gid_t gid),
146 TP_ARGS(gid),
147 TP_STRUCT__entry(__field(old_gid_t, gid)),
148 TP_fast_assign(tp_assign(gid, gid)),
149 TP_printk()
150)
151#endif
152#ifndef OVERRIDE_32_sys_umask
153SC_TRACE_EVENT(sys_umask,
154 TP_PROTO(int mask),
155 TP_ARGS(mask),
156 TP_STRUCT__entry(__field(int, mask)),
157 TP_fast_assign(tp_assign(mask, mask)),
158 TP_printk()
159)
160#endif
161#ifndef OVERRIDE_32_sys_ssetmask
162SC_TRACE_EVENT(sys_ssetmask,
163 TP_PROTO(int newmask),
164 TP_ARGS(newmask),
165 TP_STRUCT__entry(__field(int, newmask)),
166 TP_fast_assign(tp_assign(newmask, newmask)),
167 TP_printk()
168)
169#endif
170#ifndef OVERRIDE_32_sys_fsync
171SC_TRACE_EVENT(sys_fsync,
172 TP_PROTO(unsigned int fd),
173 TP_ARGS(fd),
174 TP_STRUCT__entry(__field(unsigned int, fd)),
175 TP_fast_assign(tp_assign(fd, fd)),
176 TP_printk()
177)
178#endif
179#ifndef OVERRIDE_32_sys_getpgid
180SC_TRACE_EVENT(sys_getpgid,
181 TP_PROTO(pid_t pid),
182 TP_ARGS(pid),
183 TP_STRUCT__entry(__field(pid_t, pid)),
184 TP_fast_assign(tp_assign(pid, pid)),
185 TP_printk()
186)
187#endif
188#ifndef OVERRIDE_32_sys_fchdir
189SC_TRACE_EVENT(sys_fchdir,
190 TP_PROTO(unsigned int fd),
191 TP_ARGS(fd),
192 TP_STRUCT__entry(__field(unsigned int, fd)),
193 TP_fast_assign(tp_assign(fd, fd)),
194 TP_printk()
195)
196#endif
197#ifndef OVERRIDE_32_sys_personality
198SC_TRACE_EVENT(sys_personality,
199 TP_PROTO(unsigned int personality),
200 TP_ARGS(personality),
201 TP_STRUCT__entry(__field(unsigned int, personality)),
202 TP_fast_assign(tp_assign(personality, personality)),
203 TP_printk()
204)
205#endif
206#ifndef OVERRIDE_32_sys_setfsuid16
207SC_TRACE_EVENT(sys_setfsuid16,
208 TP_PROTO(old_uid_t uid),
209 TP_ARGS(uid),
210 TP_STRUCT__entry(__field(old_uid_t, uid)),
211 TP_fast_assign(tp_assign(uid, uid)),
212 TP_printk()
213)
214#endif
215#ifndef OVERRIDE_32_sys_setfsgid16
216SC_TRACE_EVENT(sys_setfsgid16,
217 TP_PROTO(old_gid_t gid),
218 TP_ARGS(gid),
219 TP_STRUCT__entry(__field(old_gid_t, gid)),
220 TP_fast_assign(tp_assign(gid, gid)),
221 TP_printk()
222)
223#endif
224#ifndef OVERRIDE_32_sys_getsid
225SC_TRACE_EVENT(sys_getsid,
226 TP_PROTO(pid_t pid),
227 TP_ARGS(pid),
228 TP_STRUCT__entry(__field(pid_t, pid)),
229 TP_fast_assign(tp_assign(pid, pid)),
230 TP_printk()
231)
232#endif
233#ifndef OVERRIDE_32_sys_fdatasync
234SC_TRACE_EVENT(sys_fdatasync,
235 TP_PROTO(unsigned int fd),
236 TP_ARGS(fd),
237 TP_STRUCT__entry(__field(unsigned int, fd)),
238 TP_fast_assign(tp_assign(fd, fd)),
239 TP_printk()
240)
241#endif
242#ifndef OVERRIDE_32_sys_mlockall
243SC_TRACE_EVENT(sys_mlockall,
244 TP_PROTO(int flags),
245 TP_ARGS(flags),
246 TP_STRUCT__entry(__field(int, flags)),
247 TP_fast_assign(tp_assign(flags, flags)),
248 TP_printk()
249)
250#endif
251#ifndef OVERRIDE_32_sys_sched_getscheduler
252SC_TRACE_EVENT(sys_sched_getscheduler,
253 TP_PROTO(pid_t pid),
254 TP_ARGS(pid),
255 TP_STRUCT__entry(__field(pid_t, pid)),
256 TP_fast_assign(tp_assign(pid, pid)),
257 TP_printk()
258)
259#endif
260#ifndef OVERRIDE_32_sys_sched_get_priority_max
261SC_TRACE_EVENT(sys_sched_get_priority_max,
262 TP_PROTO(int policy),
263 TP_ARGS(policy),
264 TP_STRUCT__entry(__field(int, policy)),
265 TP_fast_assign(tp_assign(policy, policy)),
266 TP_printk()
267)
268#endif
269#ifndef OVERRIDE_32_sys_sched_get_priority_min
270SC_TRACE_EVENT(sys_sched_get_priority_min,
271 TP_PROTO(int policy),
272 TP_ARGS(policy),
273 TP_STRUCT__entry(__field(int, policy)),
274 TP_fast_assign(tp_assign(policy, policy)),
275 TP_printk()
276)
277#endif
278#ifndef OVERRIDE_32_sys_setuid
279SC_TRACE_EVENT(sys_setuid,
280 TP_PROTO(uid_t uid),
281 TP_ARGS(uid),
282 TP_STRUCT__entry(__field(uid_t, uid)),
283 TP_fast_assign(tp_assign(uid, uid)),
284 TP_printk()
285)
286#endif
287#ifndef OVERRIDE_32_sys_setgid
288SC_TRACE_EVENT(sys_setgid,
289 TP_PROTO(gid_t gid),
290 TP_ARGS(gid),
291 TP_STRUCT__entry(__field(gid_t, gid)),
292 TP_fast_assign(tp_assign(gid, gid)),
293 TP_printk()
294)
295#endif
296#ifndef OVERRIDE_32_sys_setfsuid
297SC_TRACE_EVENT(sys_setfsuid,
298 TP_PROTO(uid_t uid),
299 TP_ARGS(uid),
300 TP_STRUCT__entry(__field(uid_t, uid)),
301 TP_fast_assign(tp_assign(uid, uid)),
302 TP_printk()
303)
304#endif
305#ifndef OVERRIDE_32_sys_setfsgid
306SC_TRACE_EVENT(sys_setfsgid,
307 TP_PROTO(gid_t gid),
308 TP_ARGS(gid),
309 TP_STRUCT__entry(__field(gid_t, gid)),
310 TP_fast_assign(tp_assign(gid, gid)),
311 TP_printk()
312)
313#endif
314#ifndef OVERRIDE_32_sys_io_destroy
315SC_TRACE_EVENT(sys_io_destroy,
316 TP_PROTO(aio_context_t ctx),
317 TP_ARGS(ctx),
318 TP_STRUCT__entry(__field(aio_context_t, ctx)),
319 TP_fast_assign(tp_assign(ctx, ctx)),
320 TP_printk()
321)
322#endif
323#ifndef OVERRIDE_32_sys_exit_group
324SC_TRACE_EVENT(sys_exit_group,
325 TP_PROTO(int error_code),
326 TP_ARGS(error_code),
327 TP_STRUCT__entry(__field(int, error_code)),
328 TP_fast_assign(tp_assign(error_code, error_code)),
329 TP_printk()
330)
331#endif
332#ifndef OVERRIDE_32_sys_epoll_create
333SC_TRACE_EVENT(sys_epoll_create,
334 TP_PROTO(int size),
335 TP_ARGS(size),
336 TP_STRUCT__entry(__field(int, size)),
337 TP_fast_assign(tp_assign(size, size)),
338 TP_printk()
339)
340#endif
341#ifndef OVERRIDE_32_sys_timer_getoverrun
342SC_TRACE_EVENT(sys_timer_getoverrun,
343 TP_PROTO(timer_t timer_id),
344 TP_ARGS(timer_id),
345 TP_STRUCT__entry(__field(timer_t, timer_id)),
346 TP_fast_assign(tp_assign(timer_id, timer_id)),
347 TP_printk()
348)
349#endif
350#ifndef OVERRIDE_32_sys_timer_delete
351SC_TRACE_EVENT(sys_timer_delete,
352 TP_PROTO(timer_t timer_id),
353 TP_ARGS(timer_id),
354 TP_STRUCT__entry(__field(timer_t, timer_id)),
355 TP_fast_assign(tp_assign(timer_id, timer_id)),
356 TP_printk()
357)
358#endif
359#ifndef OVERRIDE_32_sys_unshare
360SC_TRACE_EVENT(sys_unshare,
361 TP_PROTO(unsigned long unshare_flags),
362 TP_ARGS(unshare_flags),
363 TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
364 TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
365 TP_printk()
366)
367#endif
368#ifndef OVERRIDE_32_sys_eventfd
369SC_TRACE_EVENT(sys_eventfd,
370 TP_PROTO(unsigned int count),
371 TP_ARGS(count),
372 TP_STRUCT__entry(__field(unsigned int, count)),
373 TP_fast_assign(tp_assign(count, count)),
374 TP_printk()
375)
376#endif
377#ifndef OVERRIDE_32_sys_epoll_create1
378SC_TRACE_EVENT(sys_epoll_create1,
379 TP_PROTO(int flags),
380 TP_ARGS(flags),
381 TP_STRUCT__entry(__field(int, flags)),
382 TP_fast_assign(tp_assign(flags, flags)),
383 TP_printk()
384)
385#endif
386#ifndef OVERRIDE_32_sys_inotify_init1
387SC_TRACE_EVENT(sys_inotify_init1,
388 TP_PROTO(int flags),
389 TP_ARGS(flags),
390 TP_STRUCT__entry(__field(int, flags)),
391 TP_fast_assign(tp_assign(flags, flags)),
392 TP_printk()
393)
394#endif
395#ifndef OVERRIDE_32_sys_syncfs
396SC_TRACE_EVENT(sys_syncfs,
397 TP_PROTO(int fd),
398 TP_ARGS(fd),
399 TP_STRUCT__entry(__field(int, fd)),
400 TP_fast_assign(tp_assign(fd, fd)),
401 TP_printk()
402)
403#endif
404#ifndef OVERRIDE_32_sys_kill
405SC_TRACE_EVENT(sys_kill,
406 TP_PROTO(pid_t pid, int sig),
407 TP_ARGS(pid, sig),
408 TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
409 TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
410 TP_printk()
411)
412#endif
413#ifndef OVERRIDE_32_sys_signal
414SC_TRACE_EVENT(sys_signal,
415 TP_PROTO(int sig, __sighandler_t handler),
416 TP_ARGS(sig, handler),
417 TP_STRUCT__entry(__field(int, sig) __field(__sighandler_t, handler)),
418 TP_fast_assign(tp_assign(sig, sig) tp_assign(handler, handler)),
419 TP_printk()
420)
421#endif
422#ifndef OVERRIDE_32_sys_setpgid
423SC_TRACE_EVENT(sys_setpgid,
424 TP_PROTO(pid_t pid, pid_t pgid),
425 TP_ARGS(pid, pgid),
426 TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
427 TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
428 TP_printk()
429)
430#endif
431#ifndef OVERRIDE_32_sys_dup2
432SC_TRACE_EVENT(sys_dup2,
433 TP_PROTO(unsigned int oldfd, unsigned int newfd),
434 TP_ARGS(oldfd, newfd),
435 TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
436 TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
437 TP_printk()
438)
439#endif
440#ifndef OVERRIDE_32_sys_setreuid16
441SC_TRACE_EVENT(sys_setreuid16,
442 TP_PROTO(old_uid_t ruid, old_uid_t euid),
443 TP_ARGS(ruid, euid),
444 TP_STRUCT__entry(__field(old_uid_t, ruid) __field(old_uid_t, euid)),
445 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
446 TP_printk()
447)
448#endif
449#ifndef OVERRIDE_32_sys_setregid16
450SC_TRACE_EVENT(sys_setregid16,
451 TP_PROTO(old_gid_t rgid, old_gid_t egid),
452 TP_ARGS(rgid, egid),
453 TP_STRUCT__entry(__field(old_gid_t, rgid) __field(old_gid_t, egid)),
454 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
455 TP_printk()
456)
457#endif
458#ifndef OVERRIDE_32_sys_munmap
459SC_TRACE_EVENT(sys_munmap,
460 TP_PROTO(unsigned long addr, size_t len),
461 TP_ARGS(addr, len),
462 TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
463 TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
464 TP_printk()
465)
466#endif
467#ifndef OVERRIDE_32_sys_ftruncate
468SC_TRACE_EVENT(sys_ftruncate,
469 TP_PROTO(unsigned int fd, unsigned long length),
470 TP_ARGS(fd, length),
471 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
472 TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
473 TP_printk()
474)
475#endif
476#ifndef OVERRIDE_32_sys_fchmod
477SC_TRACE_EVENT(sys_fchmod,
478 TP_PROTO(unsigned int fd, mode_t mode),
479 TP_ARGS(fd, mode),
480 TP_STRUCT__entry(__field(unsigned int, fd) __field(mode_t, mode)),
481 TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
482 TP_printk()
483)
484#endif
485#ifndef OVERRIDE_32_sys_getpriority
486SC_TRACE_EVENT(sys_getpriority,
487 TP_PROTO(int which, int who),
488 TP_ARGS(which, who),
489 TP_STRUCT__entry(__field(int, which) __field(int, who)),
490 TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
491 TP_printk()
492)
493#endif
494#ifndef OVERRIDE_32_sys_bdflush
495SC_TRACE_EVENT(sys_bdflush,
496 TP_PROTO(int func, long data),
497 TP_ARGS(func, data),
498 TP_STRUCT__entry(__field(int, func) __field(long, data)),
499 TP_fast_assign(tp_assign(func, func) tp_assign(data, data)),
500 TP_printk()
501)
502#endif
503#ifndef OVERRIDE_32_sys_flock
504SC_TRACE_EVENT(sys_flock,
505 TP_PROTO(unsigned int fd, unsigned int cmd),
506 TP_ARGS(fd, cmd),
507 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
508 TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
509 TP_printk()
510)
511#endif
512#ifndef OVERRIDE_32_sys_mlock
513SC_TRACE_EVENT(sys_mlock,
514 TP_PROTO(unsigned long start, size_t len),
515 TP_ARGS(start, len),
516 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
517 TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
518 TP_printk()
519)
520#endif
521#ifndef OVERRIDE_32_sys_munlock
522SC_TRACE_EVENT(sys_munlock,
523 TP_PROTO(unsigned long start, size_t len),
524 TP_ARGS(start, len),
525 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
526 TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
527 TP_printk()
528)
529#endif
530#ifndef OVERRIDE_32_sys_setreuid
531SC_TRACE_EVENT(sys_setreuid,
532 TP_PROTO(uid_t ruid, uid_t euid),
533 TP_ARGS(ruid, euid),
534 TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
535 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
536 TP_printk()
537)
538#endif
539#ifndef OVERRIDE_32_sys_setregid
540SC_TRACE_EVENT(sys_setregid,
541 TP_PROTO(gid_t rgid, gid_t egid),
542 TP_ARGS(rgid, egid),
543 TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
544 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
545 TP_printk()
546)
547#endif
548#ifndef OVERRIDE_32_sys_tkill
549SC_TRACE_EVENT(sys_tkill,
550 TP_PROTO(pid_t pid, int sig),
551 TP_ARGS(pid, sig),
552 TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
553 TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
554 TP_printk()
555)
556#endif
557#ifndef OVERRIDE_32_sys_ioprio_get
558SC_TRACE_EVENT(sys_ioprio_get,
559 TP_PROTO(int which, int who),
560 TP_ARGS(which, who),
561 TP_STRUCT__entry(__field(int, which) __field(int, who)),
562 TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
563 TP_printk()
564)
565#endif
566#ifndef OVERRIDE_32_sys_inotify_rm_watch
567SC_TRACE_EVENT(sys_inotify_rm_watch,
568 TP_PROTO(int fd, __s32 wd),
569 TP_ARGS(fd, wd),
570 TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
571 TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
572 TP_printk()
573)
574#endif
575#ifndef OVERRIDE_32_sys_timerfd_create
576SC_TRACE_EVENT(sys_timerfd_create,
577 TP_PROTO(int clockid, int flags),
578 TP_ARGS(clockid, flags),
579 TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
580 TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
581 TP_printk()
582)
583#endif
584#ifndef OVERRIDE_32_sys_eventfd2
585SC_TRACE_EVENT(sys_eventfd2,
586 TP_PROTO(unsigned int count, int flags),
587 TP_ARGS(count, flags),
588 TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
589 TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
590 TP_printk()
591)
592#endif
593#ifndef OVERRIDE_32_sys_fanotify_init
594SC_TRACE_EVENT(sys_fanotify_init,
595 TP_PROTO(unsigned int flags, unsigned int event_f_flags),
596 TP_ARGS(flags, event_f_flags),
597 TP_STRUCT__entry(__field(unsigned int, flags) __field(unsigned int, event_f_flags)),
598 TP_fast_assign(tp_assign(flags, flags) tp_assign(event_f_flags, event_f_flags)),
599 TP_printk()
600)
601#endif
602#ifndef OVERRIDE_32_sys_setns
603SC_TRACE_EVENT(sys_setns,
604 TP_PROTO(int fd, int nstype),
605 TP_ARGS(fd, nstype),
606 TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
607 TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
608 TP_printk()
609)
610#endif
611#ifndef OVERRIDE_32_sys_lseek
612SC_TRACE_EVENT(sys_lseek,
613 TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
614 TP_ARGS(fd, offset, origin),
615 TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
616 TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
617 TP_printk()
618)
619#endif
620#ifndef OVERRIDE_32_sys_ioctl
621SC_TRACE_EVENT(sys_ioctl,
622 TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
623 TP_ARGS(fd, cmd, arg),
624 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
625 TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
626 TP_printk()
627)
628#endif
629#ifndef OVERRIDE_32_sys_fcntl
630SC_TRACE_EVENT(sys_fcntl,
631 TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
632 TP_ARGS(fd, cmd, arg),
633 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
634 TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
635 TP_printk()
636)
637#endif
638#ifndef OVERRIDE_32_sys_fchown16
639SC_TRACE_EVENT(sys_fchown16,
640 TP_PROTO(unsigned int fd, old_uid_t user, old_gid_t group),
641 TP_ARGS(fd, user, group),
642 TP_STRUCT__entry(__field(unsigned int, fd) __field(old_uid_t, user) __field(old_gid_t, group)),
643 TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
644 TP_printk()
645)
646#endif
647#ifndef OVERRIDE_32_sys_setpriority
648SC_TRACE_EVENT(sys_setpriority,
649 TP_PROTO(int which, int who, int niceval),
650 TP_ARGS(which, who, niceval),
651 TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
652 TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
653 TP_printk()
654)
655#endif
656#ifndef OVERRIDE_32_sys_mprotect
657SC_TRACE_EVENT(sys_mprotect,
658 TP_PROTO(unsigned long start, size_t len, unsigned long prot),
659 TP_ARGS(start, len, prot),
660 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
661 TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
662 TP_printk()
663)
664#endif
665#ifndef OVERRIDE_32_sys_sysfs
666SC_TRACE_EVENT(sys_sysfs,
667 TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
668 TP_ARGS(option, arg1, arg2),
669 TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
670 TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
671 TP_printk()
672)
673#endif
674#ifndef OVERRIDE_32_sys_msync
675SC_TRACE_EVENT(sys_msync,
676 TP_PROTO(unsigned long start, size_t len, int flags),
677 TP_ARGS(start, len, flags),
678 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
679 TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
680 TP_printk()
681)
682#endif
683#ifndef OVERRIDE_32_sys_setresuid16
684SC_TRACE_EVENT(sys_setresuid16,
685 TP_PROTO(old_uid_t ruid, old_uid_t euid, old_uid_t suid),
686 TP_ARGS(ruid, euid, suid),
687 TP_STRUCT__entry(__field(old_uid_t, ruid) __field(old_uid_t, euid) __field(old_uid_t, suid)),
688 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
689 TP_printk()
690)
691#endif
692#ifndef OVERRIDE_32_sys_setresgid16
693SC_TRACE_EVENT(sys_setresgid16,
694 TP_PROTO(old_gid_t rgid, old_gid_t egid, old_gid_t sgid),
695 TP_ARGS(rgid, egid, sgid),
696 TP_STRUCT__entry(__field(old_gid_t, rgid) __field(old_gid_t, egid) __field(old_gid_t, sgid)),
697 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
698 TP_printk()
699)
700#endif
701#ifndef OVERRIDE_32_sys_fchown
702SC_TRACE_EVENT(sys_fchown,
703 TP_PROTO(unsigned int fd, uid_t user, gid_t group),
704 TP_ARGS(fd, user, group),
705 TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
706 TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
707 TP_printk()
708)
709#endif
710#ifndef OVERRIDE_32_sys_setresuid
711SC_TRACE_EVENT(sys_setresuid,
712 TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
713 TP_ARGS(ruid, euid, suid),
714 TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
715 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
716 TP_printk()
717)
718#endif
719#ifndef OVERRIDE_32_sys_setresgid
720SC_TRACE_EVENT(sys_setresgid,
721 TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
722 TP_ARGS(rgid, egid, sgid),
723 TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
724 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
725 TP_printk()
726)
727#endif
728#ifndef OVERRIDE_32_sys_madvise
729SC_TRACE_EVENT(sys_madvise,
730 TP_PROTO(unsigned long start, size_t len_in, int behavior),
731 TP_ARGS(start, len_in, behavior),
732 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
733 TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
734 TP_printk()
735)
736#endif
737#ifndef OVERRIDE_32_sys_fcntl64
738SC_TRACE_EVENT(sys_fcntl64,
739 TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
740 TP_ARGS(fd, cmd, arg),
741 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
742 TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
743 TP_printk()
744)
745#endif
746#ifndef OVERRIDE_32_sys_tgkill
747SC_TRACE_EVENT(sys_tgkill,
748 TP_PROTO(pid_t tgid, pid_t pid, int sig),
749 TP_ARGS(tgid, pid, sig),
750 TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
751 TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
752 TP_printk()
753)
754#endif
755#ifndef OVERRIDE_32_sys_ioprio_set
756SC_TRACE_EVENT(sys_ioprio_set,
757 TP_PROTO(int which, int who, int ioprio),
758 TP_ARGS(which, who, ioprio),
759 TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
760 TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
761 TP_printk()
762)
763#endif
764#ifndef OVERRIDE_32_sys_dup3
765SC_TRACE_EVENT(sys_dup3,
766 TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
767 TP_ARGS(oldfd, newfd, flags),
768 TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
769 TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
770 TP_printk()
771)
772#endif
773#ifndef OVERRIDE_32_sys_ptrace
774SC_TRACE_EVENT(sys_ptrace,
775 TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
776 TP_ARGS(request, pid, addr, data),
777 TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
778 TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
779 TP_printk()
780)
781#endif
782#ifndef OVERRIDE_32_sys_tee
783SC_TRACE_EVENT(sys_tee,
784 TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
785 TP_ARGS(fdin, fdout, len, flags),
786 TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
787 TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
788 TP_printk()
789)
790#endif
791#ifndef OVERRIDE_32_sys_mremap
792SC_TRACE_EVENT(sys_mremap,
793 TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
794 TP_ARGS(addr, old_len, new_len, flags, new_addr),
795 TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
796 TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
797 TP_printk()
798)
799#endif
800#ifndef OVERRIDE_32_sys_prctl
801SC_TRACE_EVENT(sys_prctl,
802 TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
803 TP_ARGS(option, arg2, arg3, arg4, arg5),
804 TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
805 TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
806 TP_printk()
807)
808#endif
809#ifndef OVERRIDE_32_sys_remap_file_pages
810SC_TRACE_EVENT(sys_remap_file_pages,
811 TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
812 TP_ARGS(start, size, prot, pgoff, flags),
813 TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
814 TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
815 TP_printk()
816)
817#endif
818#ifndef OVERRIDE_32_sys_keyctl
819SC_TRACE_EVENT(sys_keyctl,
820 TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
821 TP_ARGS(option, arg2, arg3, arg4, arg5),
822 TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
823 TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
824 TP_printk()
825)
826#endif
827#ifndef OVERRIDE_32_sys_mmap_pgoff
828SC_TRACE_EVENT(sys_mmap_pgoff,
829 TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff),
830 TP_ARGS(addr, len, prot, flags, fd, pgoff),
831 TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(unsigned long, pgoff)),
832 TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(pgoff, pgoff)),
833 TP_printk()
834)
835#endif
836
837#endif /* _TRACE_SYSCALLS_INTEGERS_H */
838
839/* This part must be outside protection */
840#include "../../../probes/define_trace.h"
841
842#else /* CREATE_SYSCALL_TABLE */
843
844#include "x86-32-syscalls-3.1.0-rc6_integers_override.h"
845#include "syscalls_integers_override.h"
846
847#ifndef OVERRIDE_TABLE_32_sys_restart_syscall
848TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 0, 0)
849#endif
850#ifndef OVERRIDE_TABLE_32_sys_getpid
851TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 20, 0)
852#endif
853#ifndef OVERRIDE_TABLE_32_sys_getuid16
854TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid16, 24, 0)
855#endif
856#ifndef OVERRIDE_TABLE_32_sys_pause
857TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 29, 0)
858#endif
859#ifndef OVERRIDE_TABLE_32_sys_sync
860TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 36, 0)
861#endif
862#ifndef OVERRIDE_TABLE_32_sys_getgid16
863TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid16, 47, 0)
864#endif
865#ifndef OVERRIDE_TABLE_32_sys_geteuid16
866TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid16, 49, 0)
867#endif
868#ifndef OVERRIDE_TABLE_32_sys_getegid16
869TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid16, 50, 0)
870#endif
871#ifndef OVERRIDE_TABLE_32_sys_getppid
872TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 64, 0)
873#endif
874#ifndef OVERRIDE_TABLE_32_sys_getpgrp
875TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 65, 0)
876#endif
877#ifndef OVERRIDE_TABLE_32_sys_setsid
878TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 66, 0)
879#endif
880#ifndef OVERRIDE_TABLE_32_sys_sgetmask
881TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sgetmask, 68, 0)
882#endif
883#ifndef OVERRIDE_TABLE_32_sys_vhangup
884TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 111, 0)
885#endif
886#ifndef OVERRIDE_TABLE_32_sys_munlockall
887TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 153, 0)
888#endif
889#ifndef OVERRIDE_TABLE_32_sys_sched_yield
890TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 158, 0)
891#endif
892#ifndef OVERRIDE_TABLE_32_sys_getuid
893TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 199, 0)
894#endif
895#ifndef OVERRIDE_TABLE_32_sys_getgid
896TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 200, 0)
897#endif
898#ifndef OVERRIDE_TABLE_32_sys_geteuid
899TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 201, 0)
900#endif
901#ifndef OVERRIDE_TABLE_32_sys_getegid
902TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 202, 0)
903#endif
904#ifndef OVERRIDE_TABLE_32_sys_gettid
905TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 224, 0)
906#endif
907#ifndef OVERRIDE_TABLE_32_sys_inotify_init
908TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 291, 0)
909#endif
910#ifndef OVERRIDE_TABLE_32_sys_exit
911TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 1, 1)
912#endif
913#ifndef OVERRIDE_TABLE_32_sys_close
914TRACE_SYSCALL_TABLE(sys_close, sys_close, 6, 1)
915#endif
916#ifndef OVERRIDE_TABLE_32_sys_lseek
917TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 19, 3)
918#endif
919#ifndef OVERRIDE_TABLE_32_sys_setuid16
920TRACE_SYSCALL_TABLE(sys_setuid16, sys_setuid16, 23, 1)
921#endif
922#ifndef OVERRIDE_TABLE_32_sys_ptrace
923TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 26, 4)
924#endif
925#ifndef OVERRIDE_TABLE_32_sys_alarm
926TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 27, 1)
927#endif
928#ifndef OVERRIDE_TABLE_32_sys_nice
929TRACE_SYSCALL_TABLE(sys_nice, sys_nice, 34, 1)
930#endif
931#ifndef OVERRIDE_TABLE_32_sys_kill
932TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 37, 2)
933#endif
934#ifndef OVERRIDE_TABLE_32_sys_dup
935TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 41, 1)
936#endif
937#ifndef OVERRIDE_TABLE_32_sys_brk
938TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 45, 1)
939#endif
940#ifndef OVERRIDE_TABLE_32_sys_setgid16
941TRACE_SYSCALL_TABLE(sys_setgid16, sys_setgid16, 46, 1)
942#endif
943#ifndef OVERRIDE_TABLE_32_sys_signal
944TRACE_SYSCALL_TABLE(sys_signal, sys_signal, 48, 2)
945#endif
946#ifndef OVERRIDE_TABLE_32_sys_ioctl
947TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 54, 3)
948#endif
949#ifndef OVERRIDE_TABLE_32_sys_fcntl
950TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 55, 3)
951#endif
952#ifndef OVERRIDE_TABLE_32_sys_setpgid
953TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 57, 2)
954#endif
955#ifndef OVERRIDE_TABLE_32_sys_umask
956TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 60, 1)
957#endif
958#ifndef OVERRIDE_TABLE_32_sys_dup2
959TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 63, 2)
960#endif
961#ifndef OVERRIDE_TABLE_32_sys_ssetmask
962TRACE_SYSCALL_TABLE(sys_ssetmask, sys_ssetmask, 69, 1)
963#endif
964#ifndef OVERRIDE_TABLE_32_sys_setreuid16
965TRACE_SYSCALL_TABLE(sys_setreuid16, sys_setreuid16, 70, 2)
966#endif
967#ifndef OVERRIDE_TABLE_32_sys_setregid16
968TRACE_SYSCALL_TABLE(sys_setregid16, sys_setregid16, 71, 2)
969#endif
970#ifndef OVERRIDE_TABLE_32_sys_munmap
971TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 91, 2)
972#endif
973#ifndef OVERRIDE_TABLE_32_sys_ftruncate
974TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 93, 2)
975#endif
976#ifndef OVERRIDE_TABLE_32_sys_fchmod
977TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 94, 2)
978#endif
979#ifndef OVERRIDE_TABLE_32_sys_fchown16
980TRACE_SYSCALL_TABLE(sys_fchown16, sys_fchown16, 95, 3)
981#endif
982#ifndef OVERRIDE_TABLE_32_sys_getpriority
983TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 96, 2)
984#endif
985#ifndef OVERRIDE_TABLE_32_sys_setpriority
986TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 97, 3)
987#endif
988#ifndef OVERRIDE_TABLE_32_sys_fsync
989TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 118, 1)
990#endif
991#ifndef OVERRIDE_TABLE_32_sys_mprotect
992TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 125, 3)
993#endif
994#ifndef OVERRIDE_TABLE_32_sys_getpgid
995TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 132, 1)
996#endif
997#ifndef OVERRIDE_TABLE_32_sys_fchdir
998TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 133, 1)
999#endif
1000#ifndef OVERRIDE_TABLE_32_sys_bdflush
1001TRACE_SYSCALL_TABLE(sys_bdflush, sys_bdflush, 134, 2)
1002#endif
1003#ifndef OVERRIDE_TABLE_32_sys_sysfs
1004TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 135, 3)
1005#endif
1006#ifndef OVERRIDE_TABLE_32_sys_personality
1007TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 136, 1)
1008#endif
1009#ifndef OVERRIDE_TABLE_32_sys_setfsuid16
1010TRACE_SYSCALL_TABLE(sys_setfsuid16, sys_setfsuid16, 138, 1)
1011#endif
1012#ifndef OVERRIDE_TABLE_32_sys_setfsgid16
1013TRACE_SYSCALL_TABLE(sys_setfsgid16, sys_setfsgid16, 139, 1)
1014#endif
1015#ifndef OVERRIDE_TABLE_32_sys_flock
1016TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 143, 2)
1017#endif
1018#ifndef OVERRIDE_TABLE_32_sys_msync
1019TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 144, 3)
1020#endif
1021#ifndef OVERRIDE_TABLE_32_sys_getsid
1022TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 147, 1)
1023#endif
1024#ifndef OVERRIDE_TABLE_32_sys_fdatasync
1025TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 148, 1)
1026#endif
1027#ifndef OVERRIDE_TABLE_32_sys_mlock
1028TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 150, 2)
1029#endif
1030#ifndef OVERRIDE_TABLE_32_sys_munlock
1031TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 151, 2)
1032#endif
1033#ifndef OVERRIDE_TABLE_32_sys_mlockall
1034TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 152, 1)
1035#endif
1036#ifndef OVERRIDE_TABLE_32_sys_sched_getscheduler
1037TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 157, 1)
1038#endif
1039#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_max
1040TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 159, 1)
1041#endif
1042#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_min
1043TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 160, 1)
1044#endif
1045#ifndef OVERRIDE_TABLE_32_sys_mremap
1046TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 163, 5)
1047#endif
1048#ifndef OVERRIDE_TABLE_32_sys_setresuid16
1049TRACE_SYSCALL_TABLE(sys_setresuid16, sys_setresuid16, 164, 3)
1050#endif
1051#ifndef OVERRIDE_TABLE_32_sys_setresgid16
1052TRACE_SYSCALL_TABLE(sys_setresgid16, sys_setresgid16, 170, 3)
1053#endif
1054#ifndef OVERRIDE_TABLE_32_sys_prctl
1055TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 172, 5)
1056#endif
1057#ifndef OVERRIDE_TABLE_32_sys_mmap_pgoff
1058TRACE_SYSCALL_TABLE(sys_mmap_pgoff, sys_mmap_pgoff, 192, 6)
1059#endif
1060#ifndef OVERRIDE_TABLE_32_sys_setreuid
1061TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 203, 2)
1062#endif
1063#ifndef OVERRIDE_TABLE_32_sys_setregid
1064TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 204, 2)
1065#endif
1066#ifndef OVERRIDE_TABLE_32_sys_fchown
1067TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 207, 3)
1068#endif
1069#ifndef OVERRIDE_TABLE_32_sys_setresuid
1070TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 208, 3)
1071#endif
1072#ifndef OVERRIDE_TABLE_32_sys_setresgid
1073TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 210, 3)
1074#endif
1075#ifndef OVERRIDE_TABLE_32_sys_setuid
1076TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 213, 1)
1077#endif
1078#ifndef OVERRIDE_TABLE_32_sys_setgid
1079TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 214, 1)
1080#endif
1081#ifndef OVERRIDE_TABLE_32_sys_setfsuid
1082TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 215, 1)
1083#endif
1084#ifndef OVERRIDE_TABLE_32_sys_setfsgid
1085TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 216, 1)
1086#endif
1087#ifndef OVERRIDE_TABLE_32_sys_madvise
1088TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 219, 3)
1089#endif
1090#ifndef OVERRIDE_TABLE_32_sys_fcntl64
1091TRACE_SYSCALL_TABLE(sys_fcntl64, sys_fcntl64, 221, 3)
1092#endif
1093#ifndef OVERRIDE_TABLE_32_sys_tkill
1094TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 238, 2)
1095#endif
1096#ifndef OVERRIDE_TABLE_32_sys_io_destroy
1097TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 246, 1)
1098#endif
1099#ifndef OVERRIDE_TABLE_32_sys_exit_group
1100TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 252, 1)
1101#endif
1102#ifndef OVERRIDE_TABLE_32_sys_epoll_create
1103TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 254, 1)
1104#endif
1105#ifndef OVERRIDE_TABLE_32_sys_remap_file_pages
1106TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 257, 5)
1107#endif
1108#ifndef OVERRIDE_TABLE_32_sys_timer_getoverrun
1109TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 262, 1)
1110#endif
1111#ifndef OVERRIDE_TABLE_32_sys_timer_delete
1112TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 263, 1)
1113#endif
1114#ifndef OVERRIDE_TABLE_32_sys_tgkill
1115TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 270, 3)
1116#endif
1117#ifndef OVERRIDE_TABLE_32_sys_keyctl
1118TRACE_SYSCALL_TABLE(sys_keyctl, sys_keyctl, 288, 5)
1119#endif
1120#ifndef OVERRIDE_TABLE_32_sys_ioprio_set
1121TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 289, 3)
1122#endif
1123#ifndef OVERRIDE_TABLE_32_sys_ioprio_get
1124TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 290, 2)
1125#endif
1126#ifndef OVERRIDE_TABLE_32_sys_inotify_rm_watch
1127TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 293, 2)
1128#endif
1129#ifndef OVERRIDE_TABLE_32_sys_unshare
1130TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 310, 1)
1131#endif
1132#ifndef OVERRIDE_TABLE_32_sys_tee
1133TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 315, 4)
1134#endif
1135#ifndef OVERRIDE_TABLE_32_sys_timerfd_create
1136TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 322, 2)
1137#endif
1138#ifndef OVERRIDE_TABLE_32_sys_eventfd
1139TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 323, 1)
1140#endif
1141#ifndef OVERRIDE_TABLE_32_sys_eventfd2
1142TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 328, 2)
1143#endif
1144#ifndef OVERRIDE_TABLE_32_sys_epoll_create1
1145TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 329, 1)
1146#endif
1147#ifndef OVERRIDE_TABLE_32_sys_dup3
1148TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 330, 3)
1149#endif
1150#ifndef OVERRIDE_TABLE_32_sys_inotify_init1
1151TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 332, 1)
1152#endif
1153#ifndef OVERRIDE_TABLE_32_sys_fanotify_init
1154TRACE_SYSCALL_TABLE(sys_fanotify_init, sys_fanotify_init, 338, 2)
1155#endif
1156#ifndef OVERRIDE_TABLE_32_sys_syncfs
1157TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 344, 1)
1158#endif
1159#ifndef OVERRIDE_TABLE_32_sys_setns
1160TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 346, 2)
1161#endif
1162
1163#endif /* CREATE_SYSCALL_TABLE */
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h
deleted file mode 100644
index ed2cf1fe4fee..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h
+++ /dev/null
@@ -1,38 +0,0 @@
1#ifndef CONFIG_UID16
2
3#define OVERRIDE_32_sys_getuid16
4#define OVERRIDE_32_sys_getgid16
5#define OVERRIDE_32_sys_geteuid16
6#define OVERRIDE_32_sys_getegid16
7#define OVERRIDE_32_sys_setuid16
8#define OVERRIDE_32_sys_setgid16
9#define OVERRIDE_32_sys_setfsuid16
10#define OVERRIDE_32_sys_setfsgid16
11#define OVERRIDE_32_sys_setreuid16
12#define OVERRIDE_32_sys_setregid16
13#define OVERRIDE_32_sys_fchown16
14#define OVERRIDE_32_sys_setresuid16
15#define OVERRIDE_32_sys_setresgid16
16
17#define OVERRIDE_TABLE_32_sys_getuid16
18#define OVERRIDE_TABLE_32_sys_getgid16
19#define OVERRIDE_TABLE_32_sys_geteuid16
20#define OVERRIDE_TABLE_32_sys_getegid16
21#define OVERRIDE_TABLE_32_sys_setuid16
22#define OVERRIDE_TABLE_32_sys_setgid16
23#define OVERRIDE_TABLE_32_sys_setreuid16
24#define OVERRIDE_TABLE_32_sys_setregid16
25#define OVERRIDE_TABLE_32_sys_fchown16
26#define OVERRIDE_TABLE_32_sys_setfsuid16
27#define OVERRIDE_TABLE_32_sys_setfsgid16
28#define OVERRIDE_TABLE_32_sys_setresuid16
29#define OVERRIDE_TABLE_32_sys_setresgid16
30
31#endif
32
33#ifdef CREATE_SYSCALL_TABLE
34
35#define OVERRIDE_TABLE_32_sys_mmap
36TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 90, 6)
37
38#endif
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h
deleted file mode 100644
index ec5b3016e913..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h
+++ /dev/null
@@ -1,2232 +0,0 @@
1/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
2#ifndef CREATE_SYSCALL_TABLE
3
4#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SYSCALLS_POINTERS_H
6
7#include <linux/tracepoint.h>
8#include <linux/syscalls.h>
9#include "x86-32-syscalls-3.1.0-rc6_pointers_override.h"
10#include "syscalls_pointers_override.h"
11
12#ifndef OVERRIDE_32_sys_unlink
13SC_TRACE_EVENT(sys_unlink,
14 TP_PROTO(const char * pathname),
15 TP_ARGS(pathname),
16 TP_STRUCT__entry(__string_from_user(pathname, pathname)),
17 TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
18 TP_printk()
19)
20#endif
21#ifndef OVERRIDE_32_sys_chdir
22SC_TRACE_EVENT(sys_chdir,
23 TP_PROTO(const char * filename),
24 TP_ARGS(filename),
25 TP_STRUCT__entry(__string_from_user(filename, filename)),
26 TP_fast_assign(tp_copy_string_from_user(filename, filename)),
27 TP_printk()
28)
29#endif
30#ifndef OVERRIDE_32_sys_time
31SC_TRACE_EVENT(sys_time,
32 TP_PROTO(time_t * tloc),
33 TP_ARGS(tloc),
34 TP_STRUCT__entry(__field_hex(time_t *, tloc)),
35 TP_fast_assign(tp_assign(tloc, tloc)),
36 TP_printk()
37)
38#endif
39#ifndef OVERRIDE_32_sys_oldumount
40SC_TRACE_EVENT(sys_oldumount,
41 TP_PROTO(char * name),
42 TP_ARGS(name),
43 TP_STRUCT__entry(__string_from_user(name, name)),
44 TP_fast_assign(tp_copy_string_from_user(name, name)),
45 TP_printk()
46)
47#endif
48#ifndef OVERRIDE_32_sys_stime
49SC_TRACE_EVENT(sys_stime,
50 TP_PROTO(time_t * tptr),
51 TP_ARGS(tptr),
52 TP_STRUCT__entry(__field_hex(time_t *, tptr)),
53 TP_fast_assign(tp_assign(tptr, tptr)),
54 TP_printk()
55)
56#endif
57#ifndef OVERRIDE_32_sys_rmdir
58SC_TRACE_EVENT(sys_rmdir,
59 TP_PROTO(const char * pathname),
60 TP_ARGS(pathname),
61 TP_STRUCT__entry(__string_from_user(pathname, pathname)),
62 TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
63 TP_printk()
64)
65#endif
66#ifndef OVERRIDE_32_sys_pipe
67SC_TRACE_EVENT(sys_pipe,
68 TP_PROTO(int * fildes),
69 TP_ARGS(fildes),
70 TP_STRUCT__entry(__field_hex(int *, fildes)),
71 TP_fast_assign(tp_assign(fildes, fildes)),
72 TP_printk()
73)
74#endif
75#ifndef OVERRIDE_32_sys_times
76SC_TRACE_EVENT(sys_times,
77 TP_PROTO(struct tms * tbuf),
78 TP_ARGS(tbuf),
79 TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
80 TP_fast_assign(tp_assign(tbuf, tbuf)),
81 TP_printk()
82)
83#endif
84#ifndef OVERRIDE_32_sys_acct
85SC_TRACE_EVENT(sys_acct,
86 TP_PROTO(const char * name),
87 TP_ARGS(name),
88 TP_STRUCT__entry(__string_from_user(name, name)),
89 TP_fast_assign(tp_copy_string_from_user(name, name)),
90 TP_printk()
91)
92#endif
93#ifndef OVERRIDE_32_sys_olduname
94SC_TRACE_EVENT(sys_olduname,
95 TP_PROTO(struct oldold_utsname * name),
96 TP_ARGS(name),
97 TP_STRUCT__entry(__field_hex(struct oldold_utsname *, name)),
98 TP_fast_assign(tp_assign(name, name)),
99 TP_printk()
100)
101#endif
102#ifndef OVERRIDE_32_sys_chroot
103SC_TRACE_EVENT(sys_chroot,
104 TP_PROTO(const char * filename),
105 TP_ARGS(filename),
106 TP_STRUCT__entry(__string_from_user(filename, filename)),
107 TP_fast_assign(tp_copy_string_from_user(filename, filename)),
108 TP_printk()
109)
110#endif
111#ifndef OVERRIDE_32_sys_sigpending
112SC_TRACE_EVENT(sys_sigpending,
113 TP_PROTO(old_sigset_t * set),
114 TP_ARGS(set),
115 TP_STRUCT__entry(__field_hex(old_sigset_t *, set)),
116 TP_fast_assign(tp_assign(set, set)),
117 TP_printk()
118)
119#endif
120#ifndef OVERRIDE_32_sys_old_select
121SC_TRACE_EVENT(sys_old_select,
122 TP_PROTO(struct sel_arg_struct * arg),
123 TP_ARGS(arg),
124 TP_STRUCT__entry(__field_hex(struct sel_arg_struct *, arg)),
125 TP_fast_assign(tp_assign(arg, arg)),
126 TP_printk()
127)
128#endif
129#ifndef OVERRIDE_32_sys_uselib
130SC_TRACE_EVENT(sys_uselib,
131 TP_PROTO(const char * library),
132 TP_ARGS(library),
133 TP_STRUCT__entry(__field_hex(const char *, library)),
134 TP_fast_assign(tp_assign(library, library)),
135 TP_printk()
136)
137#endif
138#ifndef OVERRIDE_32_sys_old_mmap
139SC_TRACE_EVENT(sys_old_mmap,
140 TP_PROTO(struct mmap_arg_struct * arg),
141 TP_ARGS(arg),
142 TP_STRUCT__entry(__field_hex(struct mmap_arg_struct *, arg)),
143 TP_fast_assign(tp_assign(arg, arg)),
144 TP_printk()
145)
146#endif
147#ifndef OVERRIDE_32_sys_uname
148SC_TRACE_EVENT(sys_uname,
149 TP_PROTO(struct old_utsname * name),
150 TP_ARGS(name),
151 TP_STRUCT__entry(__field_hex(struct old_utsname *, name)),
152 TP_fast_assign(tp_assign(name, name)),
153 TP_printk()
154)
155#endif
156#ifndef OVERRIDE_32_sys_swapoff
157SC_TRACE_EVENT(sys_swapoff,
158 TP_PROTO(const char * specialfile),
159 TP_ARGS(specialfile),
160 TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
161 TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
162 TP_printk()
163)
164#endif
165#ifndef OVERRIDE_32_sys_sysinfo
166SC_TRACE_EVENT(sys_sysinfo,
167 TP_PROTO(struct sysinfo * info),
168 TP_ARGS(info),
169 TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
170 TP_fast_assign(tp_assign(info, info)),
171 TP_printk()
172)
173#endif
174#ifndef OVERRIDE_32_sys_newuname
175SC_TRACE_EVENT(sys_newuname,
176 TP_PROTO(struct new_utsname * name),
177 TP_ARGS(name),
178 TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
179 TP_fast_assign(tp_assign(name, name)),
180 TP_printk()
181)
182#endif
183#ifndef OVERRIDE_32_sys_adjtimex
184SC_TRACE_EVENT(sys_adjtimex,
185 TP_PROTO(struct timex * txc_p),
186 TP_ARGS(txc_p),
187 TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
188 TP_fast_assign(tp_assign(txc_p, txc_p)),
189 TP_printk()
190)
191#endif
192#ifndef OVERRIDE_32_sys_sysctl
193SC_TRACE_EVENT(sys_sysctl,
194 TP_PROTO(struct __sysctl_args * args),
195 TP_ARGS(args),
196 TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
197 TP_fast_assign(tp_assign(args, args)),
198 TP_printk()
199)
200#endif
201#ifndef OVERRIDE_32_sys_set_tid_address
202SC_TRACE_EVENT(sys_set_tid_address,
203 TP_PROTO(int * tidptr),
204 TP_ARGS(tidptr),
205 TP_STRUCT__entry(__field_hex(int *, tidptr)),
206 TP_fast_assign(tp_assign(tidptr, tidptr)),
207 TP_printk()
208)
209#endif
210#ifndef OVERRIDE_32_sys_mq_unlink
211SC_TRACE_EVENT(sys_mq_unlink,
212 TP_PROTO(const char * u_name),
213 TP_ARGS(u_name),
214 TP_STRUCT__entry(__string_from_user(u_name, u_name)),
215 TP_fast_assign(tp_copy_string_from_user(u_name, u_name)),
216 TP_printk()
217)
218#endif
219#ifndef OVERRIDE_32_sys_creat
220SC_TRACE_EVENT(sys_creat,
221 TP_PROTO(const char * pathname, int mode),
222 TP_ARGS(pathname, mode),
223 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
224 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
225 TP_printk()
226)
227#endif
228#ifndef OVERRIDE_32_sys_link
229SC_TRACE_EVENT(sys_link,
230 TP_PROTO(const char * oldname, const char * newname),
231 TP_ARGS(oldname, newname),
232 TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
233 TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
234 TP_printk()
235)
236#endif
237#ifndef OVERRIDE_32_sys_chmod
238SC_TRACE_EVENT(sys_chmod,
239 TP_PROTO(const char * filename, mode_t mode),
240 TP_ARGS(filename, mode),
241 TP_STRUCT__entry(__string_from_user(filename, filename) __field(mode_t, mode)),
242 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
243 TP_printk()
244)
245#endif
246#ifndef OVERRIDE_32_sys_stat
247SC_TRACE_EVENT(sys_stat,
248 TP_PROTO(const char * filename, struct __old_kernel_stat * statbuf),
249 TP_ARGS(filename, statbuf),
250 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct __old_kernel_stat *, statbuf)),
251 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
252 TP_printk()
253)
254#endif
255#ifndef OVERRIDE_32_sys_fstat
256SC_TRACE_EVENT(sys_fstat,
257 TP_PROTO(unsigned int fd, struct __old_kernel_stat * statbuf),
258 TP_ARGS(fd, statbuf),
259 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct __old_kernel_stat *, statbuf)),
260 TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
261 TP_printk()
262)
263#endif
264#ifndef OVERRIDE_32_sys_utime
265SC_TRACE_EVENT(sys_utime,
266 TP_PROTO(char * filename, struct utimbuf * times),
267 TP_ARGS(filename, times),
268 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
269 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
270 TP_printk()
271)
272#endif
273#ifndef OVERRIDE_32_sys_access
274SC_TRACE_EVENT(sys_access,
275 TP_PROTO(const char * filename, int mode),
276 TP_ARGS(filename, mode),
277 TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
278 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
279 TP_printk()
280)
281#endif
282#ifndef OVERRIDE_32_sys_rename
283SC_TRACE_EVENT(sys_rename,
284 TP_PROTO(const char * oldname, const char * newname),
285 TP_ARGS(oldname, newname),
286 TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
287 TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
288 TP_printk()
289)
290#endif
291#ifndef OVERRIDE_32_sys_mkdir
292SC_TRACE_EVENT(sys_mkdir,
293 TP_PROTO(const char * pathname, int mode),
294 TP_ARGS(pathname, mode),
295 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
296 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
297 TP_printk()
298)
299#endif
300#ifndef OVERRIDE_32_sys_umount
301SC_TRACE_EVENT(sys_umount,
302 TP_PROTO(char * name, int flags),
303 TP_ARGS(name, flags),
304 TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
305 TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
306 TP_printk()
307)
308#endif
309#ifndef OVERRIDE_32_sys_ustat
310SC_TRACE_EVENT(sys_ustat,
311 TP_PROTO(unsigned dev, struct ustat * ubuf),
312 TP_ARGS(dev, ubuf),
313 TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
314 TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
315 TP_printk()
316)
317#endif
318#ifndef OVERRIDE_32_sys_sethostname
319SC_TRACE_EVENT(sys_sethostname,
320 TP_PROTO(char * name, int len),
321 TP_ARGS(name, len),
322 TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
323 TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
324 TP_printk()
325)
326#endif
327#ifndef OVERRIDE_32_sys_setrlimit
328SC_TRACE_EVENT(sys_setrlimit,
329 TP_PROTO(unsigned int resource, struct rlimit * rlim),
330 TP_ARGS(resource, rlim),
331 TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
332 TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
333 TP_printk()
334)
335#endif
336#ifndef OVERRIDE_32_sys_old_getrlimit
337SC_TRACE_EVENT(sys_old_getrlimit,
338 TP_PROTO(unsigned int resource, struct rlimit * rlim),
339 TP_ARGS(resource, rlim),
340 TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
341 TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
342 TP_printk()
343)
344#endif
345#ifndef OVERRIDE_32_sys_getrusage
346SC_TRACE_EVENT(sys_getrusage,
347 TP_PROTO(int who, struct rusage * ru),
348 TP_ARGS(who, ru),
349 TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
350 TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
351 TP_printk()
352)
353#endif
354#ifndef OVERRIDE_32_sys_gettimeofday
355SC_TRACE_EVENT(sys_gettimeofday,
356 TP_PROTO(struct timeval * tv, struct timezone * tz),
357 TP_ARGS(tv, tz),
358 TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
359 TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
360 TP_printk()
361)
362#endif
363#ifndef OVERRIDE_32_sys_settimeofday
364SC_TRACE_EVENT(sys_settimeofday,
365 TP_PROTO(struct timeval * tv, struct timezone * tz),
366 TP_ARGS(tv, tz),
367 TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
368 TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
369 TP_printk()
370)
371#endif
372#ifndef OVERRIDE_32_sys_getgroups16
373SC_TRACE_EVENT(sys_getgroups16,
374 TP_PROTO(int gidsetsize, old_gid_t * grouplist),
375 TP_ARGS(gidsetsize, grouplist),
376 TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(old_gid_t *, grouplist)),
377 TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
378 TP_printk()
379)
380#endif
381#ifndef OVERRIDE_32_sys_setgroups16
382SC_TRACE_EVENT(sys_setgroups16,
383 TP_PROTO(int gidsetsize, old_gid_t * grouplist),
384 TP_ARGS(gidsetsize, grouplist),
385 TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(old_gid_t *, grouplist)),
386 TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
387 TP_printk()
388)
389#endif
390#ifndef OVERRIDE_32_sys_symlink
391SC_TRACE_EVENT(sys_symlink,
392 TP_PROTO(const char * oldname, const char * newname),
393 TP_ARGS(oldname, newname),
394 TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
395 TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
396 TP_printk()
397)
398#endif
399#ifndef OVERRIDE_32_sys_lstat
400SC_TRACE_EVENT(sys_lstat,
401 TP_PROTO(const char * filename, struct __old_kernel_stat * statbuf),
402 TP_ARGS(filename, statbuf),
403 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct __old_kernel_stat *, statbuf)),
404 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
405 TP_printk()
406)
407#endif
408#ifndef OVERRIDE_32_sys_swapon
409SC_TRACE_EVENT(sys_swapon,
410 TP_PROTO(const char * specialfile, int swap_flags),
411 TP_ARGS(specialfile, swap_flags),
412 TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
413 TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
414 TP_printk()
415)
416#endif
417#ifndef OVERRIDE_32_sys_truncate
418SC_TRACE_EVENT(sys_truncate,
419 TP_PROTO(const char * path, long length),
420 TP_ARGS(path, length),
421 TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
422 TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
423 TP_printk()
424)
425#endif
426#ifndef OVERRIDE_32_sys_statfs
427SC_TRACE_EVENT(sys_statfs,
428 TP_PROTO(const char * pathname, struct statfs * buf),
429 TP_ARGS(pathname, buf),
430 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
431 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
432 TP_printk()
433)
434#endif
435#ifndef OVERRIDE_32_sys_fstatfs
436SC_TRACE_EVENT(sys_fstatfs,
437 TP_PROTO(unsigned int fd, struct statfs * buf),
438 TP_ARGS(fd, buf),
439 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
440 TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
441 TP_printk()
442)
443#endif
444#ifndef OVERRIDE_32_sys_socketcall
445SC_TRACE_EVENT(sys_socketcall,
446 TP_PROTO(int call, unsigned long * args),
447 TP_ARGS(call, args),
448 TP_STRUCT__entry(__field(int, call) __field_hex(unsigned long *, args)),
449 TP_fast_assign(tp_assign(call, call) tp_assign(args, args)),
450 TP_printk()
451)
452#endif
453#ifndef OVERRIDE_32_sys_getitimer
454SC_TRACE_EVENT(sys_getitimer,
455 TP_PROTO(int which, struct itimerval * value),
456 TP_ARGS(which, value),
457 TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
458 TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
459 TP_printk()
460)
461#endif
462#ifndef OVERRIDE_32_sys_newstat
463SC_TRACE_EVENT(sys_newstat,
464 TP_PROTO(const char * filename, struct stat * statbuf),
465 TP_ARGS(filename, statbuf),
466 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
467 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
468 TP_printk()
469)
470#endif
471#ifndef OVERRIDE_32_sys_newlstat
472SC_TRACE_EVENT(sys_newlstat,
473 TP_PROTO(const char * filename, struct stat * statbuf),
474 TP_ARGS(filename, statbuf),
475 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
476 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
477 TP_printk()
478)
479#endif
480#ifndef OVERRIDE_32_sys_newfstat
481SC_TRACE_EVENT(sys_newfstat,
482 TP_PROTO(unsigned int fd, struct stat * statbuf),
483 TP_ARGS(fd, statbuf),
484 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
485 TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
486 TP_printk()
487)
488#endif
489#ifndef OVERRIDE_32_sys_setdomainname
490SC_TRACE_EVENT(sys_setdomainname,
491 TP_PROTO(char * name, int len),
492 TP_ARGS(name, len),
493 TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
494 TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
495 TP_printk()
496)
497#endif
498#ifndef OVERRIDE_32_sys_delete_module
499SC_TRACE_EVENT(sys_delete_module,
500 TP_PROTO(const char * name_user, unsigned int flags),
501 TP_ARGS(name_user, flags),
502 TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
503 TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
504 TP_printk()
505)
506#endif
507#ifndef OVERRIDE_32_sys_sched_setparam
508SC_TRACE_EVENT(sys_sched_setparam,
509 TP_PROTO(pid_t pid, struct sched_param * param),
510 TP_ARGS(pid, param),
511 TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
512 TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
513 TP_printk()
514)
515#endif
516#ifndef OVERRIDE_32_sys_sched_getparam
517SC_TRACE_EVENT(sys_sched_getparam,
518 TP_PROTO(pid_t pid, struct sched_param * param),
519 TP_ARGS(pid, param),
520 TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
521 TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
522 TP_printk()
523)
524#endif
525#ifndef OVERRIDE_32_sys_sched_rr_get_interval
526SC_TRACE_EVENT(sys_sched_rr_get_interval,
527 TP_PROTO(pid_t pid, struct timespec * interval),
528 TP_ARGS(pid, interval),
529 TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
530 TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
531 TP_printk()
532)
533#endif
534#ifndef OVERRIDE_32_sys_nanosleep
535SC_TRACE_EVENT(sys_nanosleep,
536 TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
537 TP_ARGS(rqtp, rmtp),
538 TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
539 TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
540 TP_printk()
541)
542#endif
543#ifndef OVERRIDE_32_sys_rt_sigpending
544SC_TRACE_EVENT(sys_rt_sigpending,
545 TP_PROTO(sigset_t * set, size_t sigsetsize),
546 TP_ARGS(set, sigsetsize),
547 TP_STRUCT__entry(__field_hex(sigset_t *, set) __field(size_t, sigsetsize)),
548 TP_fast_assign(tp_assign(set, set) tp_assign(sigsetsize, sigsetsize)),
549 TP_printk()
550)
551#endif
552#ifndef OVERRIDE_32_sys_rt_sigsuspend
553SC_TRACE_EVENT(sys_rt_sigsuspend,
554 TP_PROTO(sigset_t * unewset, size_t sigsetsize),
555 TP_ARGS(unewset, sigsetsize),
556 TP_STRUCT__entry(__field_hex(sigset_t *, unewset) __field(size_t, sigsetsize)),
557 TP_fast_assign(tp_assign(unewset, unewset) tp_assign(sigsetsize, sigsetsize)),
558 TP_printk()
559)
560#endif
561#ifndef OVERRIDE_32_sys_getcwd
562SC_TRACE_EVENT(sys_getcwd,
563 TP_PROTO(char * buf, unsigned long size),
564 TP_ARGS(buf, size),
565 TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
566 TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
567 TP_printk()
568)
569#endif
570#ifndef OVERRIDE_32_sys_getrlimit
571SC_TRACE_EVENT(sys_getrlimit,
572 TP_PROTO(unsigned int resource, struct rlimit * rlim),
573 TP_ARGS(resource, rlim),
574 TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
575 TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
576 TP_printk()
577)
578#endif
579#ifndef OVERRIDE_32_sys_stat64
580SC_TRACE_EVENT(sys_stat64,
581 TP_PROTO(const char * filename, struct stat64 * statbuf),
582 TP_ARGS(filename, statbuf),
583 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
584 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
585 TP_printk()
586)
587#endif
588#ifndef OVERRIDE_32_sys_lstat64
589SC_TRACE_EVENT(sys_lstat64,
590 TP_PROTO(const char * filename, struct stat64 * statbuf),
591 TP_ARGS(filename, statbuf),
592 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
593 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
594 TP_printk()
595)
596#endif
597#ifndef OVERRIDE_32_sys_fstat64
598SC_TRACE_EVENT(sys_fstat64,
599 TP_PROTO(unsigned long fd, struct stat64 * statbuf),
600 TP_ARGS(fd, statbuf),
601 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(struct stat64 *, statbuf)),
602 TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
603 TP_printk()
604)
605#endif
606#ifndef OVERRIDE_32_sys_getgroups
607SC_TRACE_EVENT(sys_getgroups,
608 TP_PROTO(int gidsetsize, gid_t * grouplist),
609 TP_ARGS(gidsetsize, grouplist),
610 TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
611 TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
612 TP_printk()
613)
614#endif
615#ifndef OVERRIDE_32_sys_setgroups
616SC_TRACE_EVENT(sys_setgroups,
617 TP_PROTO(int gidsetsize, gid_t * grouplist),
618 TP_ARGS(gidsetsize, grouplist),
619 TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
620 TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
621 TP_printk()
622)
623#endif
624#ifndef OVERRIDE_32_sys_pivot_root
625SC_TRACE_EVENT(sys_pivot_root,
626 TP_PROTO(const char * new_root, const char * put_old),
627 TP_ARGS(new_root, put_old),
628 TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
629 TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
630 TP_printk()
631)
632#endif
633#ifndef OVERRIDE_32_sys_removexattr
634SC_TRACE_EVENT(sys_removexattr,
635 TP_PROTO(const char * pathname, const char * name),
636 TP_ARGS(pathname, name),
637 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
638 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
639 TP_printk()
640)
641#endif
642#ifndef OVERRIDE_32_sys_lremovexattr
643SC_TRACE_EVENT(sys_lremovexattr,
644 TP_PROTO(const char * pathname, const char * name),
645 TP_ARGS(pathname, name),
646 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
647 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
648 TP_printk()
649)
650#endif
651#ifndef OVERRIDE_32_sys_fremovexattr
652SC_TRACE_EVENT(sys_fremovexattr,
653 TP_PROTO(int fd, const char * name),
654 TP_ARGS(fd, name),
655 TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
656 TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
657 TP_printk()
658)
659#endif
660#ifndef OVERRIDE_32_sys_io_setup
661SC_TRACE_EVENT(sys_io_setup,
662 TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
663 TP_ARGS(nr_events, ctxp),
664 TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
665 TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
666 TP_printk()
667)
668#endif
669#ifndef OVERRIDE_32_sys_timer_gettime
670SC_TRACE_EVENT(sys_timer_gettime,
671 TP_PROTO(timer_t timer_id, struct itimerspec * setting),
672 TP_ARGS(timer_id, setting),
673 TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
674 TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
675 TP_printk()
676)
677#endif
678#ifndef OVERRIDE_32_sys_clock_settime
679SC_TRACE_EVENT(sys_clock_settime,
680 TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
681 TP_ARGS(which_clock, tp),
682 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
683 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
684 TP_printk()
685)
686#endif
687#ifndef OVERRIDE_32_sys_clock_gettime
688SC_TRACE_EVENT(sys_clock_gettime,
689 TP_PROTO(const clockid_t which_clock, struct timespec * tp),
690 TP_ARGS(which_clock, tp),
691 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
692 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
693 TP_printk()
694)
695#endif
696#ifndef OVERRIDE_32_sys_clock_getres
697SC_TRACE_EVENT(sys_clock_getres,
698 TP_PROTO(const clockid_t which_clock, struct timespec * tp),
699 TP_ARGS(which_clock, tp),
700 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
701 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
702 TP_printk()
703)
704#endif
705#ifndef OVERRIDE_32_sys_utimes
706SC_TRACE_EVENT(sys_utimes,
707 TP_PROTO(char * filename, struct timeval * utimes),
708 TP_ARGS(filename, utimes),
709 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
710 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
711 TP_printk()
712)
713#endif
714#ifndef OVERRIDE_32_sys_mq_notify
715SC_TRACE_EVENT(sys_mq_notify,
716 TP_PROTO(mqd_t mqdes, const struct sigevent * u_notification),
717 TP_ARGS(mqdes, u_notification),
718 TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct sigevent *, u_notification)),
719 TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_notification, u_notification)),
720 TP_printk()
721)
722#endif
723#ifndef OVERRIDE_32_sys_set_robust_list
724SC_TRACE_EVENT(sys_set_robust_list,
725 TP_PROTO(struct robust_list_head * head, size_t len),
726 TP_ARGS(head, len),
727 TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
728 TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
729 TP_printk()
730)
731#endif
732#ifndef OVERRIDE_32_sys_timerfd_gettime
733SC_TRACE_EVENT(sys_timerfd_gettime,
734 TP_PROTO(int ufd, struct itimerspec * otmr),
735 TP_ARGS(ufd, otmr),
736 TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
737 TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
738 TP_printk()
739)
740#endif
741#ifndef OVERRIDE_32_sys_pipe2
742SC_TRACE_EVENT(sys_pipe2,
743 TP_PROTO(int * fildes, int flags),
744 TP_ARGS(fildes, flags),
745 TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
746 TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
747 TP_printk()
748)
749#endif
750#ifndef OVERRIDE_32_sys_clock_adjtime
751SC_TRACE_EVENT(sys_clock_adjtime,
752 TP_PROTO(const clockid_t which_clock, struct timex * utx),
753 TP_ARGS(which_clock, utx),
754 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
755 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
756 TP_printk()
757)
758#endif
759#ifndef OVERRIDE_32_sys_read
760SC_TRACE_EVENT(sys_read,
761 TP_PROTO(unsigned int fd, char * buf, size_t count),
762 TP_ARGS(fd, buf, count),
763 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
764 TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
765 TP_printk()
766)
767#endif
768#ifndef OVERRIDE_32_sys_write
769SC_TRACE_EVENT(sys_write,
770 TP_PROTO(unsigned int fd, const char * buf, size_t count),
771 TP_ARGS(fd, buf, count),
772 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
773 TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
774 TP_printk()
775)
776#endif
777#ifndef OVERRIDE_32_sys_open
778SC_TRACE_EVENT(sys_open,
779 TP_PROTO(const char * filename, int flags, int mode),
780 TP_ARGS(filename, flags, mode),
781 TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
782 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
783 TP_printk()
784)
785#endif
786#ifndef OVERRIDE_32_sys_waitpid
787SC_TRACE_EVENT(sys_waitpid,
788 TP_PROTO(pid_t pid, int * stat_addr, int options),
789 TP_ARGS(pid, stat_addr, options),
790 TP_STRUCT__entry(__field(pid_t, pid) __field_hex(int *, stat_addr) __field(int, options)),
791 TP_fast_assign(tp_assign(pid, pid) tp_assign(stat_addr, stat_addr) tp_assign(options, options)),
792 TP_printk()
793)
794#endif
795#ifndef OVERRIDE_32_sys_mknod
796SC_TRACE_EVENT(sys_mknod,
797 TP_PROTO(const char * filename, int mode, unsigned dev),
798 TP_ARGS(filename, mode, dev),
799 TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
800 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
801 TP_printk()
802)
803#endif
804#ifndef OVERRIDE_32_sys_lchown16
805SC_TRACE_EVENT(sys_lchown16,
806 TP_PROTO(const char * filename, old_uid_t user, old_gid_t group),
807 TP_ARGS(filename, user, group),
808 TP_STRUCT__entry(__string_from_user(filename, filename) __field(old_uid_t, user) __field(old_gid_t, group)),
809 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
810 TP_printk()
811)
812#endif
813#ifndef OVERRIDE_32_sys_readlink
814SC_TRACE_EVENT(sys_readlink,
815 TP_PROTO(const char * path, char * buf, int bufsiz),
816 TP_ARGS(path, buf, bufsiz),
817 TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
818 TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
819 TP_printk()
820)
821#endif
822#ifndef OVERRIDE_32_sys_old_readdir
823SC_TRACE_EVENT(sys_old_readdir,
824 TP_PROTO(unsigned int fd, struct old_linux_dirent * dirent, unsigned int count),
825 TP_ARGS(fd, dirent, count),
826 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct old_linux_dirent *, dirent) __field(unsigned int, count)),
827 TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
828 TP_printk()
829)
830#endif
831#ifndef OVERRIDE_32_sys_syslog
832SC_TRACE_EVENT(sys_syslog,
833 TP_PROTO(int type, char * buf, int len),
834 TP_ARGS(type, buf, len),
835 TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
836 TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
837 TP_printk()
838)
839#endif
840#ifndef OVERRIDE_32_sys_setitimer
841SC_TRACE_EVENT(sys_setitimer,
842 TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
843 TP_ARGS(which, value, ovalue),
844 TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
845 TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
846 TP_printk()
847)
848#endif
849#ifndef OVERRIDE_32_sys_sigprocmask
850SC_TRACE_EVENT(sys_sigprocmask,
851 TP_PROTO(int how, old_sigset_t * nset, old_sigset_t * oset),
852 TP_ARGS(how, nset, oset),
853 TP_STRUCT__entry(__field(int, how) __field_hex(old_sigset_t *, nset) __field_hex(old_sigset_t *, oset)),
854 TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset)),
855 TP_printk()
856)
857#endif
858#ifndef OVERRIDE_32_sys_init_module
859SC_TRACE_EVENT(sys_init_module,
860 TP_PROTO(void * umod, unsigned long len, const char * uargs),
861 TP_ARGS(umod, len, uargs),
862 TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
863 TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
864 TP_printk()
865)
866#endif
867#ifndef OVERRIDE_32_sys_getdents
868SC_TRACE_EVENT(sys_getdents,
869 TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
870 TP_ARGS(fd, dirent, count),
871 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
872 TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
873 TP_printk()
874)
875#endif
876#ifndef OVERRIDE_32_sys_readv
877SC_TRACE_EVENT(sys_readv,
878 TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
879 TP_ARGS(fd, vec, vlen),
880 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
881 TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
882 TP_printk()
883)
884#endif
885#ifndef OVERRIDE_32_sys_writev
886SC_TRACE_EVENT(sys_writev,
887 TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
888 TP_ARGS(fd, vec, vlen),
889 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
890 TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
891 TP_printk()
892)
893#endif
894#ifndef OVERRIDE_32_sys_sched_setscheduler
895SC_TRACE_EVENT(sys_sched_setscheduler,
896 TP_PROTO(pid_t pid, int policy, struct sched_param * param),
897 TP_ARGS(pid, policy, param),
898 TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
899 TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
900 TP_printk()
901)
902#endif
903#ifndef OVERRIDE_32_sys_getresuid16
904SC_TRACE_EVENT(sys_getresuid16,
905 TP_PROTO(old_uid_t * ruid, old_uid_t * euid, old_uid_t * suid),
906 TP_ARGS(ruid, euid, suid),
907 TP_STRUCT__entry(__field_hex(old_uid_t *, ruid) __field_hex(old_uid_t *, euid) __field_hex(old_uid_t *, suid)),
908 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
909 TP_printk()
910)
911#endif
912#ifndef OVERRIDE_32_sys_poll
913SC_TRACE_EVENT(sys_poll,
914 TP_PROTO(struct pollfd * ufds, unsigned int nfds, long timeout_msecs),
915 TP_ARGS(ufds, nfds, timeout_msecs),
916 TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(long, timeout_msecs)),
917 TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
918 TP_printk()
919)
920#endif
921#ifndef OVERRIDE_32_sys_getresgid16
922SC_TRACE_EVENT(sys_getresgid16,
923 TP_PROTO(old_gid_t * rgid, old_gid_t * egid, old_gid_t * sgid),
924 TP_ARGS(rgid, egid, sgid),
925 TP_STRUCT__entry(__field_hex(old_gid_t *, rgid) __field_hex(old_gid_t *, egid) __field_hex(old_gid_t *, sgid)),
926 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
927 TP_printk()
928)
929#endif
930#ifndef OVERRIDE_32_sys_rt_sigqueueinfo
931SC_TRACE_EVENT(sys_rt_sigqueueinfo,
932 TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
933 TP_ARGS(pid, sig, uinfo),
934 TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
935 TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
936 TP_printk()
937)
938#endif
939#ifndef OVERRIDE_32_sys_chown16
940SC_TRACE_EVENT(sys_chown16,
941 TP_PROTO(const char * filename, old_uid_t user, old_gid_t group),
942 TP_ARGS(filename, user, group),
943 TP_STRUCT__entry(__string_from_user(filename, filename) __field(old_uid_t, user) __field(old_gid_t, group)),
944 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
945 TP_printk()
946)
947#endif
948#ifndef OVERRIDE_32_sys_lchown
949SC_TRACE_EVENT(sys_lchown,
950 TP_PROTO(const char * filename, uid_t user, gid_t group),
951 TP_ARGS(filename, user, group),
952 TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
953 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
954 TP_printk()
955)
956#endif
957#ifndef OVERRIDE_32_sys_getresuid
958SC_TRACE_EVENT(sys_getresuid,
959 TP_PROTO(uid_t * ruid, uid_t * euid, uid_t * suid),
960 TP_ARGS(ruid, euid, suid),
961 TP_STRUCT__entry(__field_hex(uid_t *, ruid) __field_hex(uid_t *, euid) __field_hex(uid_t *, suid)),
962 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
963 TP_printk()
964)
965#endif
966#ifndef OVERRIDE_32_sys_getresgid
967SC_TRACE_EVENT(sys_getresgid,
968 TP_PROTO(gid_t * rgid, gid_t * egid, gid_t * sgid),
969 TP_ARGS(rgid, egid, sgid),
970 TP_STRUCT__entry(__field_hex(gid_t *, rgid) __field_hex(gid_t *, egid) __field_hex(gid_t *, sgid)),
971 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
972 TP_printk()
973)
974#endif
975#ifndef OVERRIDE_32_sys_chown
976SC_TRACE_EVENT(sys_chown,
977 TP_PROTO(const char * filename, uid_t user, gid_t group),
978 TP_ARGS(filename, user, group),
979 TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
980 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
981 TP_printk()
982)
983#endif
984#ifndef OVERRIDE_32_sys_mincore
985SC_TRACE_EVENT(sys_mincore,
986 TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
987 TP_ARGS(start, len, vec),
988 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
989 TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
990 TP_printk()
991)
992#endif
993#ifndef OVERRIDE_32_sys_getdents64
994SC_TRACE_EVENT(sys_getdents64,
995 TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
996 TP_ARGS(fd, dirent, count),
997 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
998 TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
999 TP_printk()
1000)
1001#endif
1002#ifndef OVERRIDE_32_sys_listxattr
1003SC_TRACE_EVENT(sys_listxattr,
1004 TP_PROTO(const char * pathname, char * list, size_t size),
1005 TP_ARGS(pathname, list, size),
1006 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
1007 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
1008 TP_printk()
1009)
1010#endif
1011#ifndef OVERRIDE_32_sys_llistxattr
1012SC_TRACE_EVENT(sys_llistxattr,
1013 TP_PROTO(const char * pathname, char * list, size_t size),
1014 TP_ARGS(pathname, list, size),
1015 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
1016 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
1017 TP_printk()
1018)
1019#endif
1020#ifndef OVERRIDE_32_sys_flistxattr
1021SC_TRACE_EVENT(sys_flistxattr,
1022 TP_PROTO(int fd, char * list, size_t size),
1023 TP_ARGS(fd, list, size),
1024 TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
1025 TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
1026 TP_printk()
1027)
1028#endif
1029#ifndef OVERRIDE_32_sys_sched_setaffinity
1030SC_TRACE_EVENT(sys_sched_setaffinity,
1031 TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
1032 TP_ARGS(pid, len, user_mask_ptr),
1033 TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
1034 TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
1035 TP_printk()
1036)
1037#endif
1038#ifndef OVERRIDE_32_sys_sched_getaffinity
1039SC_TRACE_EVENT(sys_sched_getaffinity,
1040 TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
1041 TP_ARGS(pid, len, user_mask_ptr),
1042 TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
1043 TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
1044 TP_printk()
1045)
1046#endif
/*
 * Auto-generated LTTng syscall tracepoint definitions: 32-bit syscalls
 * taking three arguments, at least one of which is a pointer.
 *
 * Pattern for every unit below:
 *   - The OVERRIDE_32_<name> guard lets a hand-written probe replace the
 *     generated one; the generated definition is compiled only when no
 *     override macro is defined.
 *   - TP_PROTO/TP_ARGS mirror the kernel syscall signature exactly.
 *   - TP_STRUCT__entry records scalars with __field(), user-space buffer
 *     addresses with __field_hex() (pointer value only, not the pointed-to
 *     data), and NUL-terminated user strings with __string_from_user().
 *   - TP_fast_assign copies each value with tp_assign(), except strings,
 *     which are fetched from user space via tp_copy_string_from_user().
 *   - TP_printk() is intentionally empty: LTTng emits binary events, not
 *     printk-formatted text.
 *
 * NOTE(review): the leading decimal on each line (1047, 1048, ...) is a
 * line-number artifact of the diff view this chunk was captured from, not
 * part of the original source; preserved untouched here.
 */
1047#ifndef OVERRIDE_32_sys_io_submit
1048SC_TRACE_EVENT(sys_io_submit,
1049 TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
1050 TP_ARGS(ctx_id, nr, iocbpp),
1051 TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
1052 TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
1053 TP_printk()
1054)
1055#endif
1056#ifndef OVERRIDE_32_sys_io_cancel
1057SC_TRACE_EVENT(sys_io_cancel,
1058 TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
1059 TP_ARGS(ctx_id, iocb, result),
1060 TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
1061 TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
1062 TP_printk()
1063)
1064#endif
1065#ifndef OVERRIDE_32_sys_timer_create
1066SC_TRACE_EVENT(sys_timer_create,
1067 TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
1068 TP_ARGS(which_clock, timer_event_spec, created_timer_id),
1069 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
1070 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
1071 TP_printk()
1072)
1073#endif
1074#ifndef OVERRIDE_32_sys_statfs64
1075SC_TRACE_EVENT(sys_statfs64,
1076 TP_PROTO(const char * pathname, size_t sz, struct statfs64 * buf),
1077 TP_ARGS(pathname, sz, buf),
1078 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(size_t, sz) __field_hex(struct statfs64 *, buf)),
1079 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(sz, sz) tp_assign(buf, buf)),
1080 TP_printk()
1081)
1082#endif
1083#ifndef OVERRIDE_32_sys_fstatfs64
1084SC_TRACE_EVENT(sys_fstatfs64,
1085 TP_PROTO(unsigned int fd, size_t sz, struct statfs64 * buf),
1086 TP_ARGS(fd, sz, buf),
1087 TP_STRUCT__entry(__field(unsigned int, fd) __field(size_t, sz) __field_hex(struct statfs64 *, buf)),
1088 TP_fast_assign(tp_assign(fd, fd) tp_assign(sz, sz) tp_assign(buf, buf)),
1089 TP_printk()
1090)
1091#endif
1092#ifndef OVERRIDE_32_sys_mq_getsetattr
1093SC_TRACE_EVENT(sys_mq_getsetattr,
1094 TP_PROTO(mqd_t mqdes, const struct mq_attr * u_mqstat, struct mq_attr * u_omqstat),
1095 TP_ARGS(mqdes, u_mqstat, u_omqstat),
1096 TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct mq_attr *, u_mqstat) __field_hex(struct mq_attr *, u_omqstat)),
1097 TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_mqstat, u_mqstat) tp_assign(u_omqstat, u_omqstat)),
1098 TP_printk()
1099)
1100#endif
1101#ifndef OVERRIDE_32_sys_inotify_add_watch
1102SC_TRACE_EVENT(sys_inotify_add_watch,
1103 TP_PROTO(int fd, const char * pathname, u32 mask),
1104 TP_ARGS(fd, pathname, mask),
1105 TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
1106 TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
1107 TP_printk()
1108)
1109#endif
1110#ifndef OVERRIDE_32_sys_mkdirat
1111SC_TRACE_EVENT(sys_mkdirat,
1112 TP_PROTO(int dfd, const char * pathname, int mode),
1113 TP_ARGS(dfd, pathname, mode),
1114 TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, mode)),
1115 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
1116 TP_printk()
1117)
1118#endif
1119#ifndef OVERRIDE_32_sys_futimesat
1120SC_TRACE_EVENT(sys_futimesat,
1121 TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
1122 TP_ARGS(dfd, filename, utimes),
1123 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
1124 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
1125 TP_printk()
1126)
1127#endif
1128#ifndef OVERRIDE_32_sys_unlinkat
1129SC_TRACE_EVENT(sys_unlinkat,
1130 TP_PROTO(int dfd, const char * pathname, int flag),
1131 TP_ARGS(dfd, pathname, flag),
1132 TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
1133 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
1134 TP_printk()
1135)
1136#endif
1137#ifndef OVERRIDE_32_sys_symlinkat
1138SC_TRACE_EVENT(sys_symlinkat,
1139 TP_PROTO(const char * oldname, int newdfd, const char * newname),
1140 TP_ARGS(oldname, newdfd, newname),
1141 TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
1142 TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
1143 TP_printk()
1144)
1145#endif
1146#ifndef OVERRIDE_32_sys_fchmodat
1147SC_TRACE_EVENT(sys_fchmodat,
1148 TP_PROTO(int dfd, const char * filename, mode_t mode),
1149 TP_ARGS(dfd, filename, mode),
1150 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(mode_t, mode)),
1151 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
1152 TP_printk()
1153)
1154#endif
1155#ifndef OVERRIDE_32_sys_faccessat
1156SC_TRACE_EVENT(sys_faccessat,
1157 TP_PROTO(int dfd, const char * filename, int mode),
1158 TP_ARGS(dfd, filename, mode),
1159 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
1160 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
1161 TP_printk()
1162)
1163#endif
1164#ifndef OVERRIDE_32_sys_get_robust_list
1165SC_TRACE_EVENT(sys_get_robust_list,
1166 TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
1167 TP_ARGS(pid, head_ptr, len_ptr),
1168 TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
1169 TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
1170 TP_printk()
1171)
1172#endif
1173#ifndef OVERRIDE_32_sys_getcpu
1174SC_TRACE_EVENT(sys_getcpu,
1175 TP_PROTO(unsigned * cpup, unsigned * nodep, struct getcpu_cache * unused),
1176 TP_ARGS(cpup, nodep, unused),
1177 TP_STRUCT__entry(__field_hex(unsigned *, cpup) __field_hex(unsigned *, nodep) __field_hex(struct getcpu_cache *, unused)),
1178 TP_fast_assign(tp_assign(cpup, cpup) tp_assign(nodep, nodep) tp_assign(unused, unused)),
1179 TP_printk()
1180)
1181#endif
1182#ifndef OVERRIDE_32_sys_signalfd
1183SC_TRACE_EVENT(sys_signalfd,
1184 TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
1185 TP_ARGS(ufd, user_mask, sizemask),
1186 TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
1187 TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
1188 TP_printk()
1189)
1190#endif
/*
 * Auto-generated LTTng syscall tracepoint definitions: 32-bit syscalls
 * taking four arguments, at least one of which is a pointer.  Same
 * conventions as the rest of this generated file: OVERRIDE_32_<name>
 * suppresses the generated probe, __field() records scalars,
 * __field_hex() records raw user pointer values, and
 * __string_from_user()/tp_copy_string_from_user() pull NUL-terminated
 * strings across the user/kernel boundary.  TP_printk() is empty because
 * LTTng serializes events in binary form.
 */
1191#ifndef OVERRIDE_32_sys_reboot
1192SC_TRACE_EVENT(sys_reboot,
1193 TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
1194 TP_ARGS(magic1, magic2, cmd, arg),
1195 TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
1196 TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
1197 TP_printk()
1198)
1199#endif
1200#ifndef OVERRIDE_32_sys_wait4
1201SC_TRACE_EVENT(sys_wait4,
1202 TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
1203 TP_ARGS(upid, stat_addr, options, ru),
1204 TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
1205 TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
1206 TP_printk()
1207)
1208#endif
1209#ifndef OVERRIDE_32_sys_quotactl
1210SC_TRACE_EVENT(sys_quotactl,
1211 TP_PROTO(unsigned int cmd, const char * special, qid_t id, void * addr),
1212 TP_ARGS(cmd, special, id, addr),
1213 TP_STRUCT__entry(__field(unsigned int, cmd) __field_hex(const char *, special) __field(qid_t, id) __field_hex(void *, addr)),
1214 TP_fast_assign(tp_assign(cmd, cmd) tp_assign(special, special) tp_assign(id, id) tp_assign(addr, addr)),
1215 TP_printk()
1216)
1217#endif
1218#ifndef OVERRIDE_32_sys_rt_sigaction
1219SC_TRACE_EVENT(sys_rt_sigaction,
1220 TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
1221 TP_ARGS(sig, act, oact, sigsetsize),
1222 TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
1223 TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
1224 TP_printk()
1225)
1226#endif
1227#ifndef OVERRIDE_32_sys_rt_sigprocmask
1228SC_TRACE_EVENT(sys_rt_sigprocmask,
1229 TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
1230 TP_ARGS(how, nset, oset, sigsetsize),
1231 TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
1232 TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
1233 TP_printk()
1234)
1235#endif
1236#ifndef OVERRIDE_32_sys_rt_sigtimedwait
1237SC_TRACE_EVENT(sys_rt_sigtimedwait,
1238 TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
1239 TP_ARGS(uthese, uinfo, uts, sigsetsize),
1240 TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
1241 TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
1242 TP_printk()
1243)
1244#endif
1245#ifndef OVERRIDE_32_sys_sendfile
1246SC_TRACE_EVENT(sys_sendfile,
1247 TP_PROTO(int out_fd, int in_fd, off_t * offset, size_t count),
1248 TP_ARGS(out_fd, in_fd, offset, count),
1249 TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(off_t *, offset) __field(size_t, count)),
1250 TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
1251 TP_printk()
1252)
1253#endif
1254#ifndef OVERRIDE_32_sys_getxattr
1255SC_TRACE_EVENT(sys_getxattr,
1256 TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
1257 TP_ARGS(pathname, name, value, size),
1258 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
1259 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
1260 TP_printk()
1261)
1262#endif
1263#ifndef OVERRIDE_32_sys_lgetxattr
1264SC_TRACE_EVENT(sys_lgetxattr,
1265 TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
1266 TP_ARGS(pathname, name, value, size),
1267 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
1268 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
1269 TP_printk()
1270)
1271#endif
1272#ifndef OVERRIDE_32_sys_fgetxattr
1273SC_TRACE_EVENT(sys_fgetxattr,
1274 TP_PROTO(int fd, const char * name, void * value, size_t size),
1275 TP_ARGS(fd, name, value, size),
1276 TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
1277 TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
1278 TP_printk()
1279)
1280#endif
1281#ifndef OVERRIDE_32_sys_sendfile64
1282SC_TRACE_EVENT(sys_sendfile64,
1283 TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
1284 TP_ARGS(out_fd, in_fd, offset, count),
1285 TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
1286 TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
1287 TP_printk()
1288)
1289#endif
1290#ifndef OVERRIDE_32_sys_epoll_ctl
1291SC_TRACE_EVENT(sys_epoll_ctl,
1292 TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
1293 TP_ARGS(epfd, op, fd, event),
1294 TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
1295 TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
1296 TP_printk()
1297)
1298#endif
1299#ifndef OVERRIDE_32_sys_epoll_wait
1300SC_TRACE_EVENT(sys_epoll_wait,
1301 TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
1302 TP_ARGS(epfd, events, maxevents, timeout),
1303 TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
1304 TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
1305 TP_printk()
1306)
1307#endif
1308#ifndef OVERRIDE_32_sys_timer_settime
1309SC_TRACE_EVENT(sys_timer_settime,
1310 TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
1311 TP_ARGS(timer_id, flags, new_setting, old_setting),
1312 TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
1313 TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
1314 TP_printk()
1315)
1316#endif
1317#ifndef OVERRIDE_32_sys_clock_nanosleep
1318SC_TRACE_EVENT(sys_clock_nanosleep,
1319 TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
1320 TP_ARGS(which_clock, flags, rqtp, rmtp),
1321 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
1322 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
1323 TP_printk()
1324)
1325#endif
1326#ifndef OVERRIDE_32_sys_mq_open
1327SC_TRACE_EVENT(sys_mq_open,
1328 TP_PROTO(const char * u_name, int oflag, mode_t mode, struct mq_attr * u_attr),
1329 TP_ARGS(u_name, oflag, mode, u_attr),
1330 TP_STRUCT__entry(__string_from_user(u_name, u_name) __field(int, oflag) __field(mode_t, mode) __field_hex(struct mq_attr *, u_attr)),
1331 TP_fast_assign(tp_copy_string_from_user(u_name, u_name) tp_assign(oflag, oflag) tp_assign(mode, mode) tp_assign(u_attr, u_attr)),
1332 TP_printk()
1333)
1334#endif
1335#ifndef OVERRIDE_32_sys_kexec_load
1336SC_TRACE_EVENT(sys_kexec_load,
1337 TP_PROTO(unsigned long entry, unsigned long nr_segments, struct kexec_segment * segments, unsigned long flags),
1338 TP_ARGS(entry, nr_segments, segments, flags),
1339 TP_STRUCT__entry(__field(unsigned long, entry) __field(unsigned long, nr_segments) __field_hex(struct kexec_segment *, segments) __field(unsigned long, flags)),
1340 TP_fast_assign(tp_assign(entry, entry) tp_assign(nr_segments, nr_segments) tp_assign(segments, segments) tp_assign(flags, flags)),
1341 TP_printk()
1342)
1343#endif
/*
 * NOTE(review): unlike _type, the _description and _callout_info user
 * strings below are recorded as raw pointers (__field_hex), not copied.
 * Presumably a deliberate generator choice to bound copy cost — confirm
 * against the lttng syscall instrumentation generator before "fixing".
 */
1344#ifndef OVERRIDE_32_sys_request_key
1345SC_TRACE_EVENT(sys_request_key,
1346 TP_PROTO(const char * _type, const char * _description, const char * _callout_info, key_serial_t destringid),
1347 TP_ARGS(_type, _description, _callout_info, destringid),
1348 TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const char *, _callout_info) __field(key_serial_t, destringid)),
1349 TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_callout_info, _callout_info) tp_assign(destringid, destringid)),
1350 TP_printk()
1351)
1352#endif
1353#ifndef OVERRIDE_32_sys_openat
1354SC_TRACE_EVENT(sys_openat,
1355 TP_PROTO(int dfd, const char * filename, int flags, int mode),
1356 TP_ARGS(dfd, filename, flags, mode),
1357 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
1358 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
1359 TP_printk()
1360)
1361#endif
1362#ifndef OVERRIDE_32_sys_mknodat
1363SC_TRACE_EVENT(sys_mknodat,
1364 TP_PROTO(int dfd, const char * filename, int mode, unsigned dev),
1365 TP_ARGS(dfd, filename, mode, dev),
1366 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
1367 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
1368 TP_printk()
1369)
1370#endif
1371#ifndef OVERRIDE_32_sys_fstatat64
1372SC_TRACE_EVENT(sys_fstatat64,
1373 TP_PROTO(int dfd, const char * filename, struct stat64 * statbuf, int flag),
1374 TP_ARGS(dfd, filename, statbuf, flag),
1375 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf) __field(int, flag)),
1376 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
1377 TP_printk()
1378)
1379#endif
1380#ifndef OVERRIDE_32_sys_renameat
1381SC_TRACE_EVENT(sys_renameat,
1382 TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
1383 TP_ARGS(olddfd, oldname, newdfd, newname),
1384 TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
1385 TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
1386 TP_printk()
1387)
1388#endif
1389#ifndef OVERRIDE_32_sys_readlinkat
1390SC_TRACE_EVENT(sys_readlinkat,
1391 TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
1392 TP_ARGS(dfd, pathname, buf, bufsiz),
1393 TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
1394 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
1395 TP_printk()
1396)
1397#endif
1398#ifndef OVERRIDE_32_sys_vmsplice
1399SC_TRACE_EVENT(sys_vmsplice,
1400 TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
1401 TP_ARGS(fd, iov, nr_segs, flags),
1402 TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
1403 TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
1404 TP_printk()
1405)
1406#endif
1407#ifndef OVERRIDE_32_sys_utimensat
1408SC_TRACE_EVENT(sys_utimensat,
1409 TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
1410 TP_ARGS(dfd, filename, utimes, flags),
1411 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
1412 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
1413 TP_printk()
1414)
1415#endif
1416#ifndef OVERRIDE_32_sys_timerfd_settime
1417SC_TRACE_EVENT(sys_timerfd_settime,
1418 TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
1419 TP_ARGS(ufd, flags, utmr, otmr),
1420 TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
1421 TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
1422 TP_printk()
1423)
1424#endif
1425#ifndef OVERRIDE_32_sys_signalfd4
1426SC_TRACE_EVENT(sys_signalfd4,
1427 TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
1428 TP_ARGS(ufd, user_mask, sizemask, flags),
1429 TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
1430 TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
1431 TP_printk()
1432)
1433#endif
1434#ifndef OVERRIDE_32_sys_rt_tgsigqueueinfo
1435SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
1436 TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
1437 TP_ARGS(tgid, pid, sig, uinfo),
1438 TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
1439 TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
1440 TP_printk()
1441)
1442#endif
1443#ifndef OVERRIDE_32_sys_prlimit64
1444SC_TRACE_EVENT(sys_prlimit64,
1445 TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
1446 TP_ARGS(pid, resource, new_rlim, old_rlim),
1447 TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
1448 TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
1449 TP_printk()
1450)
1451#endif
1452#ifndef OVERRIDE_32_sys_sendmmsg
1453SC_TRACE_EVENT(sys_sendmmsg,
1454 TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
1455 TP_ARGS(fd, mmsg, vlen, flags),
1456 TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
1457 TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
1458 TP_printk()
1459)
1460#endif
/*
 * Auto-generated LTTng syscall tracepoint definitions: 32-bit syscalls
 * taking five arguments, at least one of which is a pointer.  Same
 * conventions as the rest of this generated file (see the 3-argument
 * section): OVERRIDE_32_<name> guards, __field()/__field_hex() for
 * scalars and raw pointers, __string_from_user() for copied strings,
 * and an empty TP_printk() since LTTng emits binary events.
 */
1461#ifndef OVERRIDE_32_sys_mount
1462SC_TRACE_EVENT(sys_mount,
1463 TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
1464 TP_ARGS(dev_name, dir_name, type, flags, data),
1465 TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
1466 TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
1467 TP_printk()
1468)
1469#endif
1470#ifndef OVERRIDE_32_sys_llseek
1471SC_TRACE_EVENT(sys_llseek,
1472 TP_PROTO(unsigned int fd, unsigned long offset_high, unsigned long offset_low, loff_t * result, unsigned int origin),
1473 TP_ARGS(fd, offset_high, offset_low, result, origin),
1474 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, offset_high) __field(unsigned long, offset_low) __field_hex(loff_t *, result) __field(unsigned int, origin)),
1475 TP_fast_assign(tp_assign(fd, fd) tp_assign(offset_high, offset_high) tp_assign(offset_low, offset_low) tp_assign(result, result) tp_assign(origin, origin)),
1476 TP_printk()
1477)
1478#endif
1479#ifndef OVERRIDE_32_sys_select
1480SC_TRACE_EVENT(sys_select,
1481 TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
1482 TP_ARGS(n, inp, outp, exp, tvp),
1483 TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
1484 TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
1485 TP_printk()
1486)
1487#endif
1488#ifndef OVERRIDE_32_sys_setxattr
1489SC_TRACE_EVENT(sys_setxattr,
1490 TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
1491 TP_ARGS(pathname, name, value, size, flags),
1492 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
1493 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
1494 TP_printk()
1495)
1496#endif
1497#ifndef OVERRIDE_32_sys_lsetxattr
1498SC_TRACE_EVENT(sys_lsetxattr,
1499 TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
1500 TP_ARGS(pathname, name, value, size, flags),
1501 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
1502 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
1503 TP_printk()
1504)
1505#endif
1506#ifndef OVERRIDE_32_sys_fsetxattr
1507SC_TRACE_EVENT(sys_fsetxattr,
1508 TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
1509 TP_ARGS(fd, name, value, size, flags),
1510 TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
1511 TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
1512 TP_printk()
1513)
1514#endif
1515#ifndef OVERRIDE_32_sys_io_getevents
1516SC_TRACE_EVENT(sys_io_getevents,
1517 TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
1518 TP_ARGS(ctx_id, min_nr, nr, events, timeout),
1519 TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
1520 TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
1521 TP_printk()
1522)
1523#endif
1524#ifndef OVERRIDE_32_sys_mq_timedsend
1525SC_TRACE_EVENT(sys_mq_timedsend,
1526 TP_PROTO(mqd_t mqdes, const char * u_msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec * u_abs_timeout),
1527 TP_ARGS(mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout),
1528 TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const char *, u_msg_ptr) __field(size_t, msg_len) __field(unsigned int, msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
1529 TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(msg_prio, msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
1530 TP_printk()
1531)
1532#endif
1533#ifndef OVERRIDE_32_sys_mq_timedreceive
1534SC_TRACE_EVENT(sys_mq_timedreceive,
1535 TP_PROTO(mqd_t mqdes, char * u_msg_ptr, size_t msg_len, unsigned int * u_msg_prio, const struct timespec * u_abs_timeout),
1536 TP_ARGS(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout),
1537 TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(char *, u_msg_ptr) __field(size_t, msg_len) __field_hex(unsigned int *, u_msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
1538 TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(u_msg_prio, u_msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
1539 TP_printk()
1540)
1541#endif
1542#ifndef OVERRIDE_32_sys_waitid
1543SC_TRACE_EVENT(sys_waitid,
1544 TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
1545 TP_ARGS(which, upid, infop, options, ru),
1546 TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
1547 TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
1548 TP_printk()
1549)
1550#endif
/*
 * NOTE(review): as with sys_request_key, _description and _payload are
 * recorded as raw user pointers rather than copied — presumably the
 * generator bounds copy cost (and _payload may be binary, not a string).
 * Confirm against the generator before changing.
 */
1551#ifndef OVERRIDE_32_sys_add_key
1552SC_TRACE_EVENT(sys_add_key,
1553 TP_PROTO(const char * _type, const char * _description, const void * _payload, size_t plen, key_serial_t ringid),
1554 TP_ARGS(_type, _description, _payload, plen, ringid),
1555 TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const void *, _payload) __field(size_t, plen) __field(key_serial_t, ringid)),
1556 TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_payload, _payload) tp_assign(plen, plen) tp_assign(ringid, ringid)),
1557 TP_printk()
1558)
1559#endif
1560#ifndef OVERRIDE_32_sys_fchownat
1561SC_TRACE_EVENT(sys_fchownat,
1562 TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
1563 TP_ARGS(dfd, filename, user, group, flag),
1564 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
1565 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
1566 TP_printk()
1567)
1568#endif
1569#ifndef OVERRIDE_32_sys_linkat
1570SC_TRACE_EVENT(sys_linkat,
1571 TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
1572 TP_ARGS(olddfd, oldname, newdfd, newname, flags),
1573 TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
1574 TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
1575 TP_printk()
1576)
1577#endif
1578#ifndef OVERRIDE_32_sys_ppoll
1579SC_TRACE_EVENT(sys_ppoll,
1580 TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
1581 TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
1582 TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
1583 TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
1584 TP_printk()
1585)
1586#endif
1587#ifndef OVERRIDE_32_sys_preadv
1588SC_TRACE_EVENT(sys_preadv,
1589 TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
1590 TP_ARGS(fd, vec, vlen, pos_l, pos_h),
1591 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
1592 TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
1593 TP_printk()
1594)
1595#endif
1596#ifndef OVERRIDE_32_sys_pwritev
1597SC_TRACE_EVENT(sys_pwritev,
1598 TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
1599 TP_ARGS(fd, vec, vlen, pos_l, pos_h),
1600 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
1601 TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
1602 TP_printk()
1603)
1604#endif
1605#ifndef OVERRIDE_32_sys_perf_event_open
1606SC_TRACE_EVENT(sys_perf_event_open,
1607 TP_PROTO(struct perf_event_attr * attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags),
1608 TP_ARGS(attr_uptr, pid, cpu, group_fd, flags),
1609 TP_STRUCT__entry(__field_hex(struct perf_event_attr *, attr_uptr) __field(pid_t, pid) __field(int, cpu) __field(int, group_fd) __field(unsigned long, flags)),
1610 TP_fast_assign(tp_assign(attr_uptr, attr_uptr) tp_assign(pid, pid) tp_assign(cpu, cpu) tp_assign(group_fd, group_fd) tp_assign(flags, flags)),
1611 TP_printk()
1612)
1613#endif
1614#ifndef OVERRIDE_32_sys_recvmmsg
1615SC_TRACE_EVENT(sys_recvmmsg,
1616 TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
1617 TP_ARGS(fd, mmsg, vlen, flags, timeout),
1618 TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
1619 TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
1620 TP_printk()
1621)
1622#endif
/*
 * Auto-generated LTTng syscall tracepoint definitions: 32-bit syscalls
 * taking six arguments, at least one of which is a pointer.  Same
 * conventions as the rest of this generated file: OVERRIDE_32_<name>
 * guards, __field() for scalars, __field_hex() for raw user pointer
 * values, empty TP_printk() (binary event serialization).
 */
1623#ifndef OVERRIDE_32_sys_ipc
1624SC_TRACE_EVENT(sys_ipc,
1625 TP_PROTO(unsigned int call, int first, unsigned long second, unsigned long third, void * ptr, long fifth),
1626 TP_ARGS(call, first, second, third, ptr, fifth),
1627 TP_STRUCT__entry(__field(unsigned int, call) __field(int, first) __field(unsigned long, second) __field(unsigned long, third) __field_hex(void *, ptr) __field(long, fifth)),
1628 TP_fast_assign(tp_assign(call, call) tp_assign(first, first) tp_assign(second, second) tp_assign(third, third) tp_assign(ptr, ptr) tp_assign(fifth, fifth)),
1629 TP_printk()
1630)
1631#endif
1632#ifndef OVERRIDE_32_sys_futex
1633SC_TRACE_EVENT(sys_futex,
1634 TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
1635 TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
1636 TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
1637 TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
1638 TP_printk()
1639)
1640#endif
1641#ifndef OVERRIDE_32_sys_pselect6
1642SC_TRACE_EVENT(sys_pselect6,
1643 TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
1644 TP_ARGS(n, inp, outp, exp, tsp, sig),
1645 TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
1646 TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
1647 TP_printk()
1648)
1649#endif
1650#ifndef OVERRIDE_32_sys_splice
1651SC_TRACE_EVENT(sys_splice,
1652 TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
1653 TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
1654 TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
1655 TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
1656 TP_printk()
1657)
1658#endif
1659#ifndef OVERRIDE_32_sys_epoll_pwait
1660SC_TRACE_EVENT(sys_epoll_pwait,
1661 TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
1662 TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
1663 TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
1664 TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
1665 TP_printk()
1666)
1667#endif
1668
1669#endif /* _TRACE_SYSCALLS_POINTERS_H */
1670
1671/* This part must be outside protection */
1672#include "../../../probes/define_trace.h"
1673
1674#else /* CREATE_SYSCALL_TABLE */
1675
1676#include "x86-32-syscalls-3.1.0-rc6_pointers_override.h"
1677#include "syscalls_pointers_override.h"
1678
1679#ifndef OVERRIDE_TABLE_32_sys_read
1680TRACE_SYSCALL_TABLE(sys_read, sys_read, 3, 3)
1681#endif
1682#ifndef OVERRIDE_TABLE_32_sys_write
1683TRACE_SYSCALL_TABLE(sys_write, sys_write, 4, 3)
1684#endif
1685#ifndef OVERRIDE_TABLE_32_sys_open
1686TRACE_SYSCALL_TABLE(sys_open, sys_open, 5, 3)
1687#endif
1688#ifndef OVERRIDE_TABLE_32_sys_waitpid
1689TRACE_SYSCALL_TABLE(sys_waitpid, sys_waitpid, 7, 3)
1690#endif
1691#ifndef OVERRIDE_TABLE_32_sys_creat
1692TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 8, 2)
1693#endif
1694#ifndef OVERRIDE_TABLE_32_sys_link
1695TRACE_SYSCALL_TABLE(sys_link, sys_link, 9, 2)
1696#endif
1697#ifndef OVERRIDE_TABLE_32_sys_unlink
1698TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 10, 1)
1699#endif
1700#ifndef OVERRIDE_TABLE_32_sys_chdir
1701TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 12, 1)
1702#endif
1703#ifndef OVERRIDE_TABLE_32_sys_time
1704TRACE_SYSCALL_TABLE(sys_time, sys_time, 13, 1)
1705#endif
1706#ifndef OVERRIDE_TABLE_32_sys_mknod
1707TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 14, 3)
1708#endif
1709#ifndef OVERRIDE_TABLE_32_sys_chmod
1710TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 15, 2)
1711#endif
1712#ifndef OVERRIDE_TABLE_32_sys_lchown16
1713TRACE_SYSCALL_TABLE(sys_lchown16, sys_lchown16, 16, 3)
1714#endif
1715#ifndef OVERRIDE_TABLE_32_sys_stat
1716TRACE_SYSCALL_TABLE(sys_stat, sys_stat, 18, 2)
1717#endif
1718#ifndef OVERRIDE_TABLE_32_sys_mount
1719TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 21, 5)
1720#endif
1721#ifndef OVERRIDE_TABLE_32_sys_oldumount
1722TRACE_SYSCALL_TABLE(sys_oldumount, sys_oldumount, 22, 1)
1723#endif
1724#ifndef OVERRIDE_TABLE_32_sys_stime
1725TRACE_SYSCALL_TABLE(sys_stime, sys_stime, 25, 1)
1726#endif
1727#ifndef OVERRIDE_TABLE_32_sys_fstat
1728TRACE_SYSCALL_TABLE(sys_fstat, sys_fstat, 28, 2)
1729#endif
1730#ifndef OVERRIDE_TABLE_32_sys_utime
1731TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 30, 2)
1732#endif
1733#ifndef OVERRIDE_TABLE_32_sys_access
1734TRACE_SYSCALL_TABLE(sys_access, sys_access, 33, 2)
1735#endif
1736#ifndef OVERRIDE_TABLE_32_sys_rename
1737TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 38, 2)
1738#endif
1739#ifndef OVERRIDE_TABLE_32_sys_mkdir
1740TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 39, 2)
1741#endif
1742#ifndef OVERRIDE_TABLE_32_sys_rmdir
1743TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 40, 1)
1744#endif
1745#ifndef OVERRIDE_TABLE_32_sys_pipe
1746TRACE_SYSCALL_TABLE(sys_pipe, sys_pipe, 42, 1)
1747#endif
1748#ifndef OVERRIDE_TABLE_32_sys_times
1749TRACE_SYSCALL_TABLE(sys_times, sys_times, 43, 1)
1750#endif
1751#ifndef OVERRIDE_TABLE_32_sys_acct
1752TRACE_SYSCALL_TABLE(sys_acct, sys_acct, 51, 1)
1753#endif
1754#ifndef OVERRIDE_TABLE_32_sys_umount
1755TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 52, 2)
1756#endif
1757#ifndef OVERRIDE_TABLE_32_sys_olduname
1758TRACE_SYSCALL_TABLE(sys_olduname, sys_olduname, 59, 1)
1759#endif
1760#ifndef OVERRIDE_TABLE_32_sys_chroot
1761TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 61, 1)
1762#endif
1763#ifndef OVERRIDE_TABLE_32_sys_ustat
1764TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 62, 2)
1765#endif
1766#ifndef OVERRIDE_TABLE_32_sys_sigpending
1767TRACE_SYSCALL_TABLE(sys_sigpending, sys_sigpending, 73, 1)
1768#endif
1769#ifndef OVERRIDE_TABLE_32_sys_sethostname
1770TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 74, 2)
1771#endif
1772#ifndef OVERRIDE_TABLE_32_sys_setrlimit
1773TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 75, 2)
1774#endif
1775#ifndef OVERRIDE_TABLE_32_sys_old_getrlimit
1776TRACE_SYSCALL_TABLE(sys_old_getrlimit, sys_old_getrlimit, 76, 2)
1777#endif
1778#ifndef OVERRIDE_TABLE_32_sys_getrusage
1779TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 77, 2)
1780#endif
1781#ifndef OVERRIDE_TABLE_32_sys_gettimeofday
1782TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 78, 2)
1783#endif
1784#ifndef OVERRIDE_TABLE_32_sys_settimeofday
1785TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 79, 2)
1786#endif
1787#ifndef OVERRIDE_TABLE_32_sys_getgroups16
1788TRACE_SYSCALL_TABLE(sys_getgroups16, sys_getgroups16, 80, 2)
1789#endif
1790#ifndef OVERRIDE_TABLE_32_sys_setgroups16
1791TRACE_SYSCALL_TABLE(sys_setgroups16, sys_setgroups16, 81, 2)
1792#endif
1793#ifndef OVERRIDE_TABLE_32_sys_old_select
1794TRACE_SYSCALL_TABLE(sys_old_select, sys_old_select, 82, 1)
1795#endif
1796#ifndef OVERRIDE_TABLE_32_sys_symlink
1797TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 83, 2)
1798#endif
1799#ifndef OVERRIDE_TABLE_32_sys_lstat
1800TRACE_SYSCALL_TABLE(sys_lstat, sys_lstat, 84, 2)
1801#endif
1802#ifndef OVERRIDE_TABLE_32_sys_readlink
1803TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 85, 3)
1804#endif
1805#ifndef OVERRIDE_TABLE_32_sys_uselib
1806TRACE_SYSCALL_TABLE(sys_uselib, sys_uselib, 86, 1)
1807#endif
1808#ifndef OVERRIDE_TABLE_32_sys_swapon
1809TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 87, 2)
1810#endif
1811#ifndef OVERRIDE_TABLE_32_sys_reboot
1812TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 88, 4)
1813#endif
1814#ifndef OVERRIDE_TABLE_32_sys_old_readdir
1815TRACE_SYSCALL_TABLE(sys_old_readdir, sys_old_readdir, 89, 3)
1816#endif
1817#ifndef OVERRIDE_TABLE_32_sys_old_mmap
1818TRACE_SYSCALL_TABLE(sys_old_mmap, sys_old_mmap, 90, 1)
1819#endif
1820#ifndef OVERRIDE_TABLE_32_sys_truncate
1821TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 92, 2)
1822#endif
1823#ifndef OVERRIDE_TABLE_32_sys_statfs
1824TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 99, 2)
1825#endif
1826#ifndef OVERRIDE_TABLE_32_sys_fstatfs
1827TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 100, 2)
1828#endif
1829#ifndef OVERRIDE_TABLE_32_sys_socketcall
1830TRACE_SYSCALL_TABLE(sys_socketcall, sys_socketcall, 102, 2)
1831#endif
1832#ifndef OVERRIDE_TABLE_32_sys_syslog
1833TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 103, 3)
1834#endif
1835#ifndef OVERRIDE_TABLE_32_sys_setitimer
1836TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 104, 3)
1837#endif
1838#ifndef OVERRIDE_TABLE_32_sys_getitimer
1839TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 105, 2)
1840#endif
1841#ifndef OVERRIDE_TABLE_32_sys_newstat
1842TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 106, 2)
1843#endif
1844#ifndef OVERRIDE_TABLE_32_sys_newlstat
1845TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 107, 2)
1846#endif
1847#ifndef OVERRIDE_TABLE_32_sys_newfstat
1848TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 108, 2)
1849#endif
1850#ifndef OVERRIDE_TABLE_32_sys_uname
1851TRACE_SYSCALL_TABLE(sys_uname, sys_uname, 109, 1)
1852#endif
1853#ifndef OVERRIDE_TABLE_32_sys_wait4
1854TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 114, 4)
1855#endif
1856#ifndef OVERRIDE_TABLE_32_sys_swapoff
1857TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 115, 1)
1858#endif
1859#ifndef OVERRIDE_TABLE_32_sys_sysinfo
1860TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 116, 1)
1861#endif
1862#ifndef OVERRIDE_TABLE_32_sys_ipc
1863TRACE_SYSCALL_TABLE(sys_ipc, sys_ipc, 117, 6)
1864#endif
1865#ifndef OVERRIDE_TABLE_32_sys_setdomainname
1866TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 121, 2)
1867#endif
1868#ifndef OVERRIDE_TABLE_32_sys_newuname
1869TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 122, 1)
1870#endif
1871#ifndef OVERRIDE_TABLE_32_sys_adjtimex
1872TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 124, 1)
1873#endif
1874#ifndef OVERRIDE_TABLE_32_sys_sigprocmask
1875TRACE_SYSCALL_TABLE(sys_sigprocmask, sys_sigprocmask, 126, 3)
1876#endif
1877#ifndef OVERRIDE_TABLE_32_sys_init_module
1878TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 128, 3)
1879#endif
1880#ifndef OVERRIDE_TABLE_32_sys_delete_module
1881TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 129, 2)
1882#endif
1883#ifndef OVERRIDE_TABLE_32_sys_quotactl
1884TRACE_SYSCALL_TABLE(sys_quotactl, sys_quotactl, 131, 4)
1885#endif
1886#ifndef OVERRIDE_TABLE_32_sys_llseek
1887TRACE_SYSCALL_TABLE(sys_llseek, sys_llseek, 140, 5)
1888#endif
1889#ifndef OVERRIDE_TABLE_32_sys_getdents
1890TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 141, 3)
1891#endif
1892#ifndef OVERRIDE_TABLE_32_sys_select
1893TRACE_SYSCALL_TABLE(sys_select, sys_select, 142, 5)
1894#endif
1895#ifndef OVERRIDE_TABLE_32_sys_readv
1896TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 145, 3)
1897#endif
1898#ifndef OVERRIDE_TABLE_32_sys_writev
1899TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 146, 3)
1900#endif
1901#ifndef OVERRIDE_TABLE_32_sys_sysctl
1902TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 149, 1)
1903#endif
1904#ifndef OVERRIDE_TABLE_32_sys_sched_setparam
1905TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 154, 2)
1906#endif
1907#ifndef OVERRIDE_TABLE_32_sys_sched_getparam
1908TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 155, 2)
1909#endif
1910#ifndef OVERRIDE_TABLE_32_sys_sched_setscheduler
1911TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 156, 3)
1912#endif
1913#ifndef OVERRIDE_TABLE_32_sys_sched_rr_get_interval
1914TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 161, 2)
1915#endif
1916#ifndef OVERRIDE_TABLE_32_sys_nanosleep
1917TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 162, 2)
1918#endif
1919#ifndef OVERRIDE_TABLE_32_sys_getresuid16
1920TRACE_SYSCALL_TABLE(sys_getresuid16, sys_getresuid16, 165, 3)
1921#endif
1922#ifndef OVERRIDE_TABLE_32_sys_poll
1923TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 168, 3)
1924#endif
1925#ifndef OVERRIDE_TABLE_32_sys_getresgid16
1926TRACE_SYSCALL_TABLE(sys_getresgid16, sys_getresgid16, 171, 3)
1927#endif
1928#ifndef OVERRIDE_TABLE_32_sys_rt_sigaction
1929TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 174, 4)
1930#endif
1931#ifndef OVERRIDE_TABLE_32_sys_rt_sigprocmask
1932TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 175, 4)
1933#endif
1934#ifndef OVERRIDE_TABLE_32_sys_rt_sigpending
1935TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 176, 2)
1936#endif
1937#ifndef OVERRIDE_TABLE_32_sys_rt_sigtimedwait
1938TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 177, 4)
1939#endif
1940#ifndef OVERRIDE_TABLE_32_sys_rt_sigqueueinfo
1941TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 178, 3)
1942#endif
1943#ifndef OVERRIDE_TABLE_32_sys_rt_sigsuspend
1944TRACE_SYSCALL_TABLE(sys_rt_sigsuspend, sys_rt_sigsuspend, 179, 2)
1945#endif
1946#ifndef OVERRIDE_TABLE_32_sys_chown16
1947TRACE_SYSCALL_TABLE(sys_chown16, sys_chown16, 182, 3)
1948#endif
1949#ifndef OVERRIDE_TABLE_32_sys_getcwd
1950TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 183, 2)
1951#endif
1952#ifndef OVERRIDE_TABLE_32_sys_sendfile
1953TRACE_SYSCALL_TABLE(sys_sendfile, sys_sendfile, 187, 4)
1954#endif
1955#ifndef OVERRIDE_TABLE_32_sys_getrlimit
1956TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 191, 2)
1957#endif
1958#ifndef OVERRIDE_TABLE_32_sys_stat64
1959TRACE_SYSCALL_TABLE(sys_stat64, sys_stat64, 195, 2)
1960#endif
1961#ifndef OVERRIDE_TABLE_32_sys_lstat64
1962TRACE_SYSCALL_TABLE(sys_lstat64, sys_lstat64, 196, 2)
1963#endif
1964#ifndef OVERRIDE_TABLE_32_sys_fstat64
1965TRACE_SYSCALL_TABLE(sys_fstat64, sys_fstat64, 197, 2)
1966#endif
1967#ifndef OVERRIDE_TABLE_32_sys_lchown
1968TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 198, 3)
1969#endif
1970#ifndef OVERRIDE_TABLE_32_sys_getgroups
1971TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 205, 2)
1972#endif
1973#ifndef OVERRIDE_TABLE_32_sys_setgroups
1974TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 206, 2)
1975#endif
1976#ifndef OVERRIDE_TABLE_32_sys_getresuid
1977TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 209, 3)
1978#endif
1979#ifndef OVERRIDE_TABLE_32_sys_getresgid
1980TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 211, 3)
1981#endif
1982#ifndef OVERRIDE_TABLE_32_sys_chown
1983TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 212, 3)
1984#endif
1985#ifndef OVERRIDE_TABLE_32_sys_pivot_root
1986TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 217, 2)
1987#endif
1988#ifndef OVERRIDE_TABLE_32_sys_mincore
1989TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 218, 3)
1990#endif
1991#ifndef OVERRIDE_TABLE_32_sys_getdents64
1992TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 220, 3)
1993#endif
1994#ifndef OVERRIDE_TABLE_32_sys_setxattr
1995TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 226, 5)
1996#endif
1997#ifndef OVERRIDE_TABLE_32_sys_lsetxattr
1998TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 227, 5)
1999#endif
2000#ifndef OVERRIDE_TABLE_32_sys_fsetxattr
2001TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 228, 5)
2002#endif
2003#ifndef OVERRIDE_TABLE_32_sys_getxattr
2004TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 229, 4)
2005#endif
2006#ifndef OVERRIDE_TABLE_32_sys_lgetxattr
2007TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 230, 4)
2008#endif
2009#ifndef OVERRIDE_TABLE_32_sys_fgetxattr
2010TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 231, 4)
2011#endif
2012#ifndef OVERRIDE_TABLE_32_sys_listxattr
2013TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 232, 3)
2014#endif
2015#ifndef OVERRIDE_TABLE_32_sys_llistxattr
2016TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 233, 3)
2017#endif
2018#ifndef OVERRIDE_TABLE_32_sys_flistxattr
2019TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 234, 3)
2020#endif
2021#ifndef OVERRIDE_TABLE_32_sys_removexattr
2022TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 235, 2)
2023#endif
2024#ifndef OVERRIDE_TABLE_32_sys_lremovexattr
2025TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 236, 2)
2026#endif
2027#ifndef OVERRIDE_TABLE_32_sys_fremovexattr
2028TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 237, 2)
2029#endif
2030#ifndef OVERRIDE_TABLE_32_sys_sendfile64
2031TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 239, 4)
2032#endif
2033#ifndef OVERRIDE_TABLE_32_sys_futex
2034TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 240, 6)
2035#endif
2036#ifndef OVERRIDE_TABLE_32_sys_sched_setaffinity
2037TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 241, 3)
2038#endif
2039#ifndef OVERRIDE_TABLE_32_sys_sched_getaffinity
2040TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 242, 3)
2041#endif
2042#ifndef OVERRIDE_TABLE_32_sys_io_setup
2043TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 245, 2)
2044#endif
2045#ifndef OVERRIDE_TABLE_32_sys_io_getevents
2046TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 247, 5)
2047#endif
2048#ifndef OVERRIDE_TABLE_32_sys_io_submit
2049TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 248, 3)
2050#endif
2051#ifndef OVERRIDE_TABLE_32_sys_io_cancel
2052TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 249, 3)
2053#endif
2054#ifndef OVERRIDE_TABLE_32_sys_epoll_ctl
2055TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 255, 4)
2056#endif
2057#ifndef OVERRIDE_TABLE_32_sys_epoll_wait
2058TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 256, 4)
2059#endif
2060#ifndef OVERRIDE_TABLE_32_sys_set_tid_address
2061TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 258, 1)
2062#endif
2063#ifndef OVERRIDE_TABLE_32_sys_timer_create
2064TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 259, 3)
2065#endif
2066#ifndef OVERRIDE_TABLE_32_sys_timer_settime
2067TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 260, 4)
2068#endif
2069#ifndef OVERRIDE_TABLE_32_sys_timer_gettime
2070TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 261, 2)
2071#endif
2072#ifndef OVERRIDE_TABLE_32_sys_clock_settime
2073TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 264, 2)
2074#endif
2075#ifndef OVERRIDE_TABLE_32_sys_clock_gettime
2076TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 265, 2)
2077#endif
2078#ifndef OVERRIDE_TABLE_32_sys_clock_getres
2079TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 266, 2)
2080#endif
2081#ifndef OVERRIDE_TABLE_32_sys_clock_nanosleep
2082TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 267, 4)
2083#endif
2084#ifndef OVERRIDE_TABLE_32_sys_statfs64
2085TRACE_SYSCALL_TABLE(sys_statfs64, sys_statfs64, 268, 3)
2086#endif
2087#ifndef OVERRIDE_TABLE_32_sys_fstatfs64
2088TRACE_SYSCALL_TABLE(sys_fstatfs64, sys_fstatfs64, 269, 3)
2089#endif
2090#ifndef OVERRIDE_TABLE_32_sys_utimes
2091TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 271, 2)
2092#endif
2093#ifndef OVERRIDE_TABLE_32_sys_mq_open
2094TRACE_SYSCALL_TABLE(sys_mq_open, sys_mq_open, 277, 4)
2095#endif
2096#ifndef OVERRIDE_TABLE_32_sys_mq_unlink
2097TRACE_SYSCALL_TABLE(sys_mq_unlink, sys_mq_unlink, 278, 1)
2098#endif
2099#ifndef OVERRIDE_TABLE_32_sys_mq_timedsend
2100TRACE_SYSCALL_TABLE(sys_mq_timedsend, sys_mq_timedsend, 279, 5)
2101#endif
2102#ifndef OVERRIDE_TABLE_32_sys_mq_timedreceive
2103TRACE_SYSCALL_TABLE(sys_mq_timedreceive, sys_mq_timedreceive, 280, 5)
2104#endif
2105#ifndef OVERRIDE_TABLE_32_sys_mq_notify
2106TRACE_SYSCALL_TABLE(sys_mq_notify, sys_mq_notify, 281, 2)
2107#endif
2108#ifndef OVERRIDE_TABLE_32_sys_mq_getsetattr
2109TRACE_SYSCALL_TABLE(sys_mq_getsetattr, sys_mq_getsetattr, 282, 3)
2110#endif
2111#ifndef OVERRIDE_TABLE_32_sys_kexec_load
2112TRACE_SYSCALL_TABLE(sys_kexec_load, sys_kexec_load, 283, 4)
2113#endif
2114#ifndef OVERRIDE_TABLE_32_sys_waitid
2115TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 284, 5)
2116#endif
2117#ifndef OVERRIDE_TABLE_32_sys_add_key
2118TRACE_SYSCALL_TABLE(sys_add_key, sys_add_key, 286, 5)
2119#endif
2120#ifndef OVERRIDE_TABLE_32_sys_request_key
2121TRACE_SYSCALL_TABLE(sys_request_key, sys_request_key, 287, 4)
2122#endif
2123#ifndef OVERRIDE_TABLE_32_sys_inotify_add_watch
2124TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 292, 3)
2125#endif
2126#ifndef OVERRIDE_TABLE_32_sys_openat
2127TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 295, 4)
2128#endif
2129#ifndef OVERRIDE_TABLE_32_sys_mkdirat
2130TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 296, 3)
2131#endif
2132#ifndef OVERRIDE_TABLE_32_sys_mknodat
2133TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 297, 4)
2134#endif
2135#ifndef OVERRIDE_TABLE_32_sys_fchownat
2136TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 298, 5)
2137#endif
2138#ifndef OVERRIDE_TABLE_32_sys_futimesat
2139TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 299, 3)
2140#endif
2141#ifndef OVERRIDE_TABLE_32_sys_fstatat64
2142TRACE_SYSCALL_TABLE(sys_fstatat64, sys_fstatat64, 300, 4)
2143#endif
2144#ifndef OVERRIDE_TABLE_32_sys_unlinkat
2145TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 301, 3)
2146#endif
2147#ifndef OVERRIDE_TABLE_32_sys_renameat
2148TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 302, 4)
2149#endif
2150#ifndef OVERRIDE_TABLE_32_sys_linkat
2151TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 303, 5)
2152#endif
2153#ifndef OVERRIDE_TABLE_32_sys_symlinkat
2154TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 304, 3)
2155#endif
2156#ifndef OVERRIDE_TABLE_32_sys_readlinkat
2157TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 305, 4)
2158#endif
2159#ifndef OVERRIDE_TABLE_32_sys_fchmodat
2160TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 306, 3)
2161#endif
2162#ifndef OVERRIDE_TABLE_32_sys_faccessat
2163TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 307, 3)
2164#endif
2165#ifndef OVERRIDE_TABLE_32_sys_pselect6
2166TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 308, 6)
2167#endif
2168#ifndef OVERRIDE_TABLE_32_sys_ppoll
2169TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 309, 5)
2170#endif
2171#ifndef OVERRIDE_TABLE_32_sys_set_robust_list
2172TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 311, 2)
2173#endif
2174#ifndef OVERRIDE_TABLE_32_sys_get_robust_list
2175TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 312, 3)
2176#endif
2177#ifndef OVERRIDE_TABLE_32_sys_splice
2178TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 313, 6)
2179#endif
2180#ifndef OVERRIDE_TABLE_32_sys_vmsplice
2181TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 316, 4)
2182#endif
2183#ifndef OVERRIDE_TABLE_32_sys_getcpu
2184TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 318, 3)
2185#endif
2186#ifndef OVERRIDE_TABLE_32_sys_epoll_pwait
2187TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 319, 6)
2188#endif
2189#ifndef OVERRIDE_TABLE_32_sys_utimensat
2190TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 320, 4)
2191#endif
2192#ifndef OVERRIDE_TABLE_32_sys_signalfd
2193TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 321, 3)
2194#endif
2195#ifndef OVERRIDE_TABLE_32_sys_timerfd_settime
2196TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 325, 4)
2197#endif
2198#ifndef OVERRIDE_TABLE_32_sys_timerfd_gettime
2199TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 326, 2)
2200#endif
2201#ifndef OVERRIDE_TABLE_32_sys_signalfd4
2202TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 327, 4)
2203#endif
2204#ifndef OVERRIDE_TABLE_32_sys_pipe2
2205TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 331, 2)
2206#endif
2207#ifndef OVERRIDE_TABLE_32_sys_preadv
2208TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 333, 5)
2209#endif
2210#ifndef OVERRIDE_TABLE_32_sys_pwritev
2211TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 334, 5)
2212#endif
2213#ifndef OVERRIDE_TABLE_32_sys_rt_tgsigqueueinfo
2214TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 335, 4)
2215#endif
2216#ifndef OVERRIDE_TABLE_32_sys_perf_event_open
2217TRACE_SYSCALL_TABLE(sys_perf_event_open, sys_perf_event_open, 336, 5)
2218#endif
2219#ifndef OVERRIDE_TABLE_32_sys_recvmmsg
2220TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 337, 5)
2221#endif
2222#ifndef OVERRIDE_TABLE_32_sys_prlimit64
2223TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 340, 4)
2224#endif
2225#ifndef OVERRIDE_TABLE_32_sys_clock_adjtime
2226TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 343, 2)
2227#endif
2228#ifndef OVERRIDE_TABLE_32_sys_sendmmsg
2229TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 345, 4)
2230#endif
2231
2232#endif /* CREATE_SYSCALL_TABLE */
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h
deleted file mode 100644
index d35657c8e0ae..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef CONFIG_UID16
2
3#define OVERRIDE_32_sys_getgroups16
4#define OVERRIDE_32_sys_setgroups16
5#define OVERRIDE_32_sys_lchown16
6#define OVERRIDE_32_sys_getresuid16
7#define OVERRIDE_32_sys_getresgid16
8#define OVERRIDE_32_sys_chown16
9
10#define OVERRIDE_TABLE_32_sys_getgroups16
11#define OVERRIDE_TABLE_32_sys_setgroups16
12#define OVERRIDE_TABLE_32_sys_lchown16
13#define OVERRIDE_TABLE_32_sys_getresuid16
14#define OVERRIDE_TABLE_32_sys_getresgid16
15#define OVERRIDE_TABLE_32_sys_chown16
16
17#endif
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers.h
deleted file mode 100644
index 6d0dbb90807f..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers.h
+++ /dev/null
@@ -1,1013 +0,0 @@
1/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
2#ifndef CREATE_SYSCALL_TABLE
3
4#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SYSCALLS_INTEGERS_H
6
7#include <linux/tracepoint.h>
8#include <linux/syscalls.h>
9#include "x86-64-syscalls-3.0.4_integers_override.h"
10#include "syscalls_integers_override.h"
11
12SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
13 TP_STRUCT__entry(),
14 TP_fast_assign(),
15 TP_printk()
16)
17#ifndef OVERRIDE_64_sys_sched_yield
18SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
19#endif
20#ifndef OVERRIDE_64_sys_pause
21SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
22#endif
23#ifndef OVERRIDE_64_sys_getpid
24SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
25#endif
26#ifndef OVERRIDE_64_sys_getuid
27SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
28#endif
29#ifndef OVERRIDE_64_sys_getgid
30SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
31#endif
32#ifndef OVERRIDE_64_sys_geteuid
33SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
34#endif
35#ifndef OVERRIDE_64_sys_getegid
36SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
37#endif
38#ifndef OVERRIDE_64_sys_getppid
39SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
40#endif
41#ifndef OVERRIDE_64_sys_getpgrp
42SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
43#endif
44#ifndef OVERRIDE_64_sys_setsid
45SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
46#endif
47#ifndef OVERRIDE_64_sys_munlockall
48SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
49#endif
50#ifndef OVERRIDE_64_sys_vhangup
51SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
52#endif
53#ifndef OVERRIDE_64_sys_sync
54SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
55#endif
56#ifndef OVERRIDE_64_sys_gettid
57SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
58#endif
59#ifndef OVERRIDE_64_sys_restart_syscall
60SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
61#endif
62#ifndef OVERRIDE_64_sys_inotify_init
63SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
64#endif
65#ifndef OVERRIDE_64_sys_close
66SC_TRACE_EVENT(sys_close,
67 TP_PROTO(unsigned int fd),
68 TP_ARGS(fd),
69 TP_STRUCT__entry(__field(unsigned int, fd)),
70 TP_fast_assign(tp_assign(fd, fd)),
71 TP_printk()
72)
73#endif
74#ifndef OVERRIDE_64_sys_brk
75SC_TRACE_EVENT(sys_brk,
76 TP_PROTO(unsigned long brk),
77 TP_ARGS(brk),
78 TP_STRUCT__entry(__field(unsigned long, brk)),
79 TP_fast_assign(tp_assign(brk, brk)),
80 TP_printk()
81)
82#endif
83#ifndef OVERRIDE_64_sys_dup
84SC_TRACE_EVENT(sys_dup,
85 TP_PROTO(unsigned int fildes),
86 TP_ARGS(fildes),
87 TP_STRUCT__entry(__field(unsigned int, fildes)),
88 TP_fast_assign(tp_assign(fildes, fildes)),
89 TP_printk()
90)
91#endif
92#ifndef OVERRIDE_64_sys_alarm
93SC_TRACE_EVENT(sys_alarm,
94 TP_PROTO(unsigned int seconds),
95 TP_ARGS(seconds),
96 TP_STRUCT__entry(__field(unsigned int, seconds)),
97 TP_fast_assign(tp_assign(seconds, seconds)),
98 TP_printk()
99)
100#endif
101#ifndef OVERRIDE_64_sys_exit
102SC_TRACE_EVENT(sys_exit,
103 TP_PROTO(int error_code),
104 TP_ARGS(error_code),
105 TP_STRUCT__entry(__field(int, error_code)),
106 TP_fast_assign(tp_assign(error_code, error_code)),
107 TP_printk()
108)
109#endif
110#ifndef OVERRIDE_64_sys_fsync
111SC_TRACE_EVENT(sys_fsync,
112 TP_PROTO(unsigned int fd),
113 TP_ARGS(fd),
114 TP_STRUCT__entry(__field(unsigned int, fd)),
115 TP_fast_assign(tp_assign(fd, fd)),
116 TP_printk()
117)
118#endif
119#ifndef OVERRIDE_64_sys_fdatasync
120SC_TRACE_EVENT(sys_fdatasync,
121 TP_PROTO(unsigned int fd),
122 TP_ARGS(fd),
123 TP_STRUCT__entry(__field(unsigned int, fd)),
124 TP_fast_assign(tp_assign(fd, fd)),
125 TP_printk()
126)
127#endif
128#ifndef OVERRIDE_64_sys_fchdir
129SC_TRACE_EVENT(sys_fchdir,
130 TP_PROTO(unsigned int fd),
131 TP_ARGS(fd),
132 TP_STRUCT__entry(__field(unsigned int, fd)),
133 TP_fast_assign(tp_assign(fd, fd)),
134 TP_printk()
135)
136#endif
137#ifndef OVERRIDE_64_sys_umask
138SC_TRACE_EVENT(sys_umask,
139 TP_PROTO(int mask),
140 TP_ARGS(mask),
141 TP_STRUCT__entry(__field(int, mask)),
142 TP_fast_assign(tp_assign(mask, mask)),
143 TP_printk()
144)
145#endif
146#ifndef OVERRIDE_64_sys_setuid
147SC_TRACE_EVENT(sys_setuid,
148 TP_PROTO(uid_t uid),
149 TP_ARGS(uid),
150 TP_STRUCT__entry(__field(uid_t, uid)),
151 TP_fast_assign(tp_assign(uid, uid)),
152 TP_printk()
153)
154#endif
155#ifndef OVERRIDE_64_sys_setgid
156SC_TRACE_EVENT(sys_setgid,
157 TP_PROTO(gid_t gid),
158 TP_ARGS(gid),
159 TP_STRUCT__entry(__field(gid_t, gid)),
160 TP_fast_assign(tp_assign(gid, gid)),
161 TP_printk()
162)
163#endif
164#ifndef OVERRIDE_64_sys_getpgid
165SC_TRACE_EVENT(sys_getpgid,
166 TP_PROTO(pid_t pid),
167 TP_ARGS(pid),
168 TP_STRUCT__entry(__field(pid_t, pid)),
169 TP_fast_assign(tp_assign(pid, pid)),
170 TP_printk()
171)
172#endif
173#ifndef OVERRIDE_64_sys_setfsuid
174SC_TRACE_EVENT(sys_setfsuid,
175 TP_PROTO(uid_t uid),
176 TP_ARGS(uid),
177 TP_STRUCT__entry(__field(uid_t, uid)),
178 TP_fast_assign(tp_assign(uid, uid)),
179 TP_printk()
180)
181#endif
182#ifndef OVERRIDE_64_sys_setfsgid
183SC_TRACE_EVENT(sys_setfsgid,
184 TP_PROTO(gid_t gid),
185 TP_ARGS(gid),
186 TP_STRUCT__entry(__field(gid_t, gid)),
187 TP_fast_assign(tp_assign(gid, gid)),
188 TP_printk()
189)
190#endif
191#ifndef OVERRIDE_64_sys_getsid
192SC_TRACE_EVENT(sys_getsid,
193 TP_PROTO(pid_t pid),
194 TP_ARGS(pid),
195 TP_STRUCT__entry(__field(pid_t, pid)),
196 TP_fast_assign(tp_assign(pid, pid)),
197 TP_printk()
198)
199#endif
200#ifndef OVERRIDE_64_sys_personality
201SC_TRACE_EVENT(sys_personality,
202 TP_PROTO(unsigned int personality),
203 TP_ARGS(personality),
204 TP_STRUCT__entry(__field(unsigned int, personality)),
205 TP_fast_assign(tp_assign(personality, personality)),
206 TP_printk()
207)
208#endif
209#ifndef OVERRIDE_64_sys_sched_getscheduler
210SC_TRACE_EVENT(sys_sched_getscheduler,
211 TP_PROTO(pid_t pid),
212 TP_ARGS(pid),
213 TP_STRUCT__entry(__field(pid_t, pid)),
214 TP_fast_assign(tp_assign(pid, pid)),
215 TP_printk()
216)
217#endif
218#ifndef OVERRIDE_64_sys_sched_get_priority_max
219SC_TRACE_EVENT(sys_sched_get_priority_max,
220 TP_PROTO(int policy),
221 TP_ARGS(policy),
222 TP_STRUCT__entry(__field(int, policy)),
223 TP_fast_assign(tp_assign(policy, policy)),
224 TP_printk()
225)
226#endif
227#ifndef OVERRIDE_64_sys_sched_get_priority_min
228SC_TRACE_EVENT(sys_sched_get_priority_min,
229 TP_PROTO(int policy),
230 TP_ARGS(policy),
231 TP_STRUCT__entry(__field(int, policy)),
232 TP_fast_assign(tp_assign(policy, policy)),
233 TP_printk()
234)
235#endif
236#ifndef OVERRIDE_64_sys_mlockall
237SC_TRACE_EVENT(sys_mlockall,
238 TP_PROTO(int flags),
239 TP_ARGS(flags),
240 TP_STRUCT__entry(__field(int, flags)),
241 TP_fast_assign(tp_assign(flags, flags)),
242 TP_printk()
243)
244#endif
245#ifndef OVERRIDE_64_sys_io_destroy
246SC_TRACE_EVENT(sys_io_destroy,
247 TP_PROTO(aio_context_t ctx),
248 TP_ARGS(ctx),
249 TP_STRUCT__entry(__field(aio_context_t, ctx)),
250 TP_fast_assign(tp_assign(ctx, ctx)),
251 TP_printk()
252)
253#endif
254#ifndef OVERRIDE_64_sys_epoll_create
255SC_TRACE_EVENT(sys_epoll_create,
256 TP_PROTO(int size),
257 TP_ARGS(size),
258 TP_STRUCT__entry(__field(int, size)),
259 TP_fast_assign(tp_assign(size, size)),
260 TP_printk()
261)
262#endif
263#ifndef OVERRIDE_64_sys_timer_getoverrun
264SC_TRACE_EVENT(sys_timer_getoverrun,
265 TP_PROTO(timer_t timer_id),
266 TP_ARGS(timer_id),
267 TP_STRUCT__entry(__field(timer_t, timer_id)),
268 TP_fast_assign(tp_assign(timer_id, timer_id)),
269 TP_printk()
270)
271#endif
272#ifndef OVERRIDE_64_sys_timer_delete
273SC_TRACE_EVENT(sys_timer_delete,
274 TP_PROTO(timer_t timer_id),
275 TP_ARGS(timer_id),
276 TP_STRUCT__entry(__field(timer_t, timer_id)),
277 TP_fast_assign(tp_assign(timer_id, timer_id)),
278 TP_printk()
279)
280#endif
281#ifndef OVERRIDE_64_sys_exit_group
282SC_TRACE_EVENT(sys_exit_group,
283 TP_PROTO(int error_code),
284 TP_ARGS(error_code),
285 TP_STRUCT__entry(__field(int, error_code)),
286 TP_fast_assign(tp_assign(error_code, error_code)),
287 TP_printk()
288)
289#endif
290#ifndef OVERRIDE_64_sys_unshare
291SC_TRACE_EVENT(sys_unshare,
292 TP_PROTO(unsigned long unshare_flags),
293 TP_ARGS(unshare_flags),
294 TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
295 TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
296 TP_printk()
297)
298#endif
299#ifndef OVERRIDE_64_sys_eventfd
300SC_TRACE_EVENT(sys_eventfd,
301 TP_PROTO(unsigned int count),
302 TP_ARGS(count),
303 TP_STRUCT__entry(__field(unsigned int, count)),
304 TP_fast_assign(tp_assign(count, count)),
305 TP_printk()
306)
307#endif
308#ifndef OVERRIDE_64_sys_epoll_create1
309SC_TRACE_EVENT(sys_epoll_create1,
310 TP_PROTO(int flags),
311 TP_ARGS(flags),
312 TP_STRUCT__entry(__field(int, flags)),
313 TP_fast_assign(tp_assign(flags, flags)),
314 TP_printk()
315)
316#endif
317#ifndef OVERRIDE_64_sys_inotify_init1
318SC_TRACE_EVENT(sys_inotify_init1,
319 TP_PROTO(int flags),
320 TP_ARGS(flags),
321 TP_STRUCT__entry(__field(int, flags)),
322 TP_fast_assign(tp_assign(flags, flags)),
323 TP_printk()
324)
325#endif
326#ifndef OVERRIDE_64_sys_syncfs
327SC_TRACE_EVENT(sys_syncfs,
328 TP_PROTO(int fd),
329 TP_ARGS(fd),
330 TP_STRUCT__entry(__field(int, fd)),
331 TP_fast_assign(tp_assign(fd, fd)),
332 TP_printk()
333)
334#endif
335#ifndef OVERRIDE_64_sys_munmap
336SC_TRACE_EVENT(sys_munmap,
337 TP_PROTO(unsigned long addr, size_t len),
338 TP_ARGS(addr, len),
339 TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
340 TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
341 TP_printk()
342)
343#endif
344#ifndef OVERRIDE_64_sys_dup2
345SC_TRACE_EVENT(sys_dup2,
346 TP_PROTO(unsigned int oldfd, unsigned int newfd),
347 TP_ARGS(oldfd, newfd),
348 TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
349 TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
350 TP_printk()
351)
352#endif
353#ifndef OVERRIDE_64_sys_shutdown
354SC_TRACE_EVENT(sys_shutdown,
355 TP_PROTO(int fd, int how),
356 TP_ARGS(fd, how),
357 TP_STRUCT__entry(__field(int, fd) __field(int, how)),
358 TP_fast_assign(tp_assign(fd, fd) tp_assign(how, how)),
359 TP_printk()
360)
361#endif
362#ifndef OVERRIDE_64_sys_listen
363SC_TRACE_EVENT(sys_listen,
364 TP_PROTO(int fd, int backlog),
365 TP_ARGS(fd, backlog),
366 TP_STRUCT__entry(__field(int, fd) __field(int, backlog)),
367 TP_fast_assign(tp_assign(fd, fd) tp_assign(backlog, backlog)),
368 TP_printk()
369)
370#endif
371#ifndef OVERRIDE_64_sys_kill
372SC_TRACE_EVENT(sys_kill,
373 TP_PROTO(pid_t pid, int sig),
374 TP_ARGS(pid, sig),
375 TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
376 TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
377 TP_printk()
378)
379#endif
380#ifndef OVERRIDE_64_sys_msgget
381SC_TRACE_EVENT(sys_msgget,
382 TP_PROTO(key_t key, int msgflg),
383 TP_ARGS(key, msgflg),
384 TP_STRUCT__entry(__field(key_t, key) __field(int, msgflg)),
385 TP_fast_assign(tp_assign(key, key) tp_assign(msgflg, msgflg)),
386 TP_printk()
387)
388#endif
389#ifndef OVERRIDE_64_sys_flock
390SC_TRACE_EVENT(sys_flock,
391 TP_PROTO(unsigned int fd, unsigned int cmd),
392 TP_ARGS(fd, cmd),
393 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
394 TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
395 TP_printk()
396)
397#endif
398#ifndef OVERRIDE_64_sys_ftruncate
399SC_TRACE_EVENT(sys_ftruncate,
400 TP_PROTO(unsigned int fd, unsigned long length),
401 TP_ARGS(fd, length),
402 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
403 TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
404 TP_printk()
405)
406#endif
407#ifndef OVERRIDE_64_sys_fchmod
408SC_TRACE_EVENT(sys_fchmod,
409 TP_PROTO(unsigned int fd, mode_t mode),
410 TP_ARGS(fd, mode),
411 TP_STRUCT__entry(__field(unsigned int, fd) __field(mode_t, mode)),
412 TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
413 TP_printk()
414)
415#endif
416#ifndef OVERRIDE_64_sys_setpgid
417SC_TRACE_EVENT(sys_setpgid,
418 TP_PROTO(pid_t pid, pid_t pgid),
419 TP_ARGS(pid, pgid),
420 TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
421 TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
422 TP_printk()
423)
424#endif
425#ifndef OVERRIDE_64_sys_setreuid
426SC_TRACE_EVENT(sys_setreuid,
427 TP_PROTO(uid_t ruid, uid_t euid),
428 TP_ARGS(ruid, euid),
429 TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
430 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
431 TP_printk()
432)
433#endif
434#ifndef OVERRIDE_64_sys_setregid
435SC_TRACE_EVENT(sys_setregid,
436 TP_PROTO(gid_t rgid, gid_t egid),
437 TP_ARGS(rgid, egid),
438 TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
439 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
440 TP_printk()
441)
442#endif
443#ifndef OVERRIDE_64_sys_getpriority
444SC_TRACE_EVENT(sys_getpriority,
445 TP_PROTO(int which, int who),
446 TP_ARGS(which, who),
447 TP_STRUCT__entry(__field(int, which) __field(int, who)),
448 TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
449 TP_printk()
450)
451#endif
452#ifndef OVERRIDE_64_sys_mlock
453SC_TRACE_EVENT(sys_mlock,
454 TP_PROTO(unsigned long start, size_t len),
455 TP_ARGS(start, len),
456 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
457 TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
458 TP_printk()
459)
460#endif
461#ifndef OVERRIDE_64_sys_munlock
462SC_TRACE_EVENT(sys_munlock,
463 TP_PROTO(unsigned long start, size_t len),
464 TP_ARGS(start, len),
465 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
466 TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
467 TP_printk()
468)
469#endif
470#ifndef OVERRIDE_64_sys_tkill
471SC_TRACE_EVENT(sys_tkill,
472 TP_PROTO(pid_t pid, int sig),
473 TP_ARGS(pid, sig),
474 TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
475 TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
476 TP_printk()
477)
478#endif
479#ifndef OVERRIDE_64_sys_ioprio_get
480SC_TRACE_EVENT(sys_ioprio_get,
481 TP_PROTO(int which, int who),
482 TP_ARGS(which, who),
483 TP_STRUCT__entry(__field(int, which) __field(int, who)),
484 TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
485 TP_printk()
486)
487#endif
488#ifndef OVERRIDE_64_sys_inotify_rm_watch
489SC_TRACE_EVENT(sys_inotify_rm_watch,
490 TP_PROTO(int fd, __s32 wd),
491 TP_ARGS(fd, wd),
492 TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
493 TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
494 TP_printk()
495)
496#endif
497#ifndef OVERRIDE_64_sys_timerfd_create
498SC_TRACE_EVENT(sys_timerfd_create,
499 TP_PROTO(int clockid, int flags),
500 TP_ARGS(clockid, flags),
501 TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
502 TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
503 TP_printk()
504)
505#endif
506#ifndef OVERRIDE_64_sys_eventfd2
507SC_TRACE_EVENT(sys_eventfd2,
508 TP_PROTO(unsigned int count, int flags),
509 TP_ARGS(count, flags),
510 TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
511 TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
512 TP_printk()
513)
514#endif
515#ifndef OVERRIDE_64_sys_setns
516SC_TRACE_EVENT(sys_setns,
517 TP_PROTO(int fd, int nstype),
518 TP_ARGS(fd, nstype),
519 TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
520 TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
521 TP_printk()
522)
523#endif
524#ifndef OVERRIDE_64_sys_lseek
525SC_TRACE_EVENT(sys_lseek,
526 TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
527 TP_ARGS(fd, offset, origin),
528 TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
529 TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
530 TP_printk()
531)
532#endif
533#ifndef OVERRIDE_64_sys_mprotect
534SC_TRACE_EVENT(sys_mprotect,
535 TP_PROTO(unsigned long start, size_t len, unsigned long prot),
536 TP_ARGS(start, len, prot),
537 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
538 TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
539 TP_printk()
540)
541#endif
542#ifndef OVERRIDE_64_sys_ioctl
543SC_TRACE_EVENT(sys_ioctl,
544 TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
545 TP_ARGS(fd, cmd, arg),
546 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
547 TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
548 TP_printk()
549)
550#endif
551#ifndef OVERRIDE_64_sys_msync
552SC_TRACE_EVENT(sys_msync,
553 TP_PROTO(unsigned long start, size_t len, int flags),
554 TP_ARGS(start, len, flags),
555 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
556 TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
557 TP_printk()
558)
559#endif
560#ifndef OVERRIDE_64_sys_madvise
561SC_TRACE_EVENT(sys_madvise,
562 TP_PROTO(unsigned long start, size_t len_in, int behavior),
563 TP_ARGS(start, len_in, behavior),
564 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
565 TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
566 TP_printk()
567)
568#endif
569#ifndef OVERRIDE_64_sys_shmget
570SC_TRACE_EVENT(sys_shmget,
571 TP_PROTO(key_t key, size_t size, int shmflg),
572 TP_ARGS(key, size, shmflg),
573 TP_STRUCT__entry(__field(key_t, key) __field(size_t, size) __field(int, shmflg)),
574 TP_fast_assign(tp_assign(key, key) tp_assign(size, size) tp_assign(shmflg, shmflg)),
575 TP_printk()
576)
577#endif
578#ifndef OVERRIDE_64_sys_socket
579SC_TRACE_EVENT(sys_socket,
580 TP_PROTO(int family, int type, int protocol),
581 TP_ARGS(family, type, protocol),
582 TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol)),
583 TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol)),
584 TP_printk()
585)
586#endif
587#ifndef OVERRIDE_64_sys_semget
588SC_TRACE_EVENT(sys_semget,
589 TP_PROTO(key_t key, int nsems, int semflg),
590 TP_ARGS(key, nsems, semflg),
591 TP_STRUCT__entry(__field(key_t, key) __field(int, nsems) __field(int, semflg)),
592 TP_fast_assign(tp_assign(key, key) tp_assign(nsems, nsems) tp_assign(semflg, semflg)),
593 TP_printk()
594)
595#endif
596#ifndef OVERRIDE_64_sys_fcntl
597SC_TRACE_EVENT(sys_fcntl,
598 TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
599 TP_ARGS(fd, cmd, arg),
600 TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
601 TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
602 TP_printk()
603)
604#endif
605#ifndef OVERRIDE_64_sys_fchown
606SC_TRACE_EVENT(sys_fchown,
607 TP_PROTO(unsigned int fd, uid_t user, gid_t group),
608 TP_ARGS(fd, user, group),
609 TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
610 TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
611 TP_printk()
612)
613#endif
614#ifndef OVERRIDE_64_sys_setresuid
615SC_TRACE_EVENT(sys_setresuid,
616 TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
617 TP_ARGS(ruid, euid, suid),
618 TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
619 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
620 TP_printk()
621)
622#endif
623#ifndef OVERRIDE_64_sys_setresgid
624SC_TRACE_EVENT(sys_setresgid,
625 TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
626 TP_ARGS(rgid, egid, sgid),
627 TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
628 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
629 TP_printk()
630)
631#endif
632#ifndef OVERRIDE_64_sys_sysfs
633SC_TRACE_EVENT(sys_sysfs,
634 TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
635 TP_ARGS(option, arg1, arg2),
636 TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
637 TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
638 TP_printk()
639)
640#endif
641#ifndef OVERRIDE_64_sys_setpriority
642SC_TRACE_EVENT(sys_setpriority,
643 TP_PROTO(int which, int who, int niceval),
644 TP_ARGS(which, who, niceval),
645 TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
646 TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
647 TP_printk()
648)
649#endif
650#ifndef OVERRIDE_64_sys_tgkill
651SC_TRACE_EVENT(sys_tgkill,
652 TP_PROTO(pid_t tgid, pid_t pid, int sig),
653 TP_ARGS(tgid, pid, sig),
654 TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
655 TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
656 TP_printk()
657)
658#endif
659#ifndef OVERRIDE_64_sys_ioprio_set
660SC_TRACE_EVENT(sys_ioprio_set,
661 TP_PROTO(int which, int who, int ioprio),
662 TP_ARGS(which, who, ioprio),
663 TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
664 TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
665 TP_printk()
666)
667#endif
668#ifndef OVERRIDE_64_sys_dup3
669SC_TRACE_EVENT(sys_dup3,
670 TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
671 TP_ARGS(oldfd, newfd, flags),
672 TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
673 TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
674 TP_printk()
675)
676#endif
677#ifndef OVERRIDE_64_sys_ptrace
678SC_TRACE_EVENT(sys_ptrace,
679 TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
680 TP_ARGS(request, pid, addr, data),
681 TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
682 TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
683 TP_printk()
684)
685#endif
686#ifndef OVERRIDE_64_sys_tee
687SC_TRACE_EVENT(sys_tee,
688 TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
689 TP_ARGS(fdin, fdout, len, flags),
690 TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
691 TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
692 TP_printk()
693)
694#endif
695#ifndef OVERRIDE_64_sys_mremap
696SC_TRACE_EVENT(sys_mremap,
697 TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
698 TP_ARGS(addr, old_len, new_len, flags, new_addr),
699 TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
700 TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
701 TP_printk()
702)
703#endif
704#ifndef OVERRIDE_64_sys_prctl
705SC_TRACE_EVENT(sys_prctl,
706 TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
707 TP_ARGS(option, arg2, arg3, arg4, arg5),
708 TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
709 TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
710 TP_printk()
711)
712#endif
713#ifndef OVERRIDE_64_sys_remap_file_pages
714SC_TRACE_EVENT(sys_remap_file_pages,
715 TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
716 TP_ARGS(start, size, prot, pgoff, flags),
717 TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
718 TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
719 TP_printk()
720)
721#endif
722#ifndef OVERRIDE_64_sys_mmap
723SC_TRACE_EVENT(sys_mmap,
724 TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off),
725 TP_ARGS(addr, len, prot, flags, fd, off),
726 TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(unsigned long, off)),
727 TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(off, off)),
728 TP_printk()
729)
730#endif
731
732#endif /* _TRACE_SYSCALLS_INTEGERS_H */
733
734/* This part must be outside protection */
735#include "../../../probes/define_trace.h"
736
737#else /* CREATE_SYSCALL_TABLE */
738
739#include "x86-64-syscalls-3.0.4_integers_override.h"
740#include "syscalls_integers_override.h"
741
742#ifndef OVERRIDE_TABLE_64_sys_sched_yield
743TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 24, 0)
744#endif
745#ifndef OVERRIDE_TABLE_64_sys_pause
746TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 34, 0)
747#endif
748#ifndef OVERRIDE_TABLE_64_sys_getpid
749TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 39, 0)
750#endif
751#ifndef OVERRIDE_TABLE_64_sys_getuid
752TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 102, 0)
753#endif
754#ifndef OVERRIDE_TABLE_64_sys_getgid
755TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 104, 0)
756#endif
757#ifndef OVERRIDE_TABLE_64_sys_geteuid
758TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 107, 0)
759#endif
760#ifndef OVERRIDE_TABLE_64_sys_getegid
761TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 108, 0)
762#endif
763#ifndef OVERRIDE_TABLE_64_sys_getppid
764TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 110, 0)
765#endif
766#ifndef OVERRIDE_TABLE_64_sys_getpgrp
767TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 111, 0)
768#endif
769#ifndef OVERRIDE_TABLE_64_sys_setsid
770TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 112, 0)
771#endif
772#ifndef OVERRIDE_TABLE_64_sys_munlockall
773TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 152, 0)
774#endif
775#ifndef OVERRIDE_TABLE_64_sys_vhangup
776TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 153, 0)
777#endif
778#ifndef OVERRIDE_TABLE_64_sys_sync
779TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 162, 0)
780#endif
781#ifndef OVERRIDE_TABLE_64_sys_gettid
782TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 186, 0)
783#endif
784#ifndef OVERRIDE_TABLE_64_sys_restart_syscall
785TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 219, 0)
786#endif
787#ifndef OVERRIDE_TABLE_64_sys_inotify_init
788TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 253, 0)
789#endif
790#ifndef OVERRIDE_TABLE_64_sys_close
791TRACE_SYSCALL_TABLE(sys_close, sys_close, 3, 1)
792#endif
793#ifndef OVERRIDE_TABLE_64_sys_lseek
794TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 8, 3)
795#endif
796#ifndef OVERRIDE_TABLE_64_sys_mmap
797TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 9, 6)
798#endif
799#ifndef OVERRIDE_TABLE_64_sys_mprotect
800TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 10, 3)
801#endif
802#ifndef OVERRIDE_TABLE_64_sys_munmap
803TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 11, 2)
804#endif
805#ifndef OVERRIDE_TABLE_64_sys_brk
806TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 12, 1)
807#endif
808#ifndef OVERRIDE_TABLE_64_sys_ioctl
809TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 16, 3)
810#endif
811#ifndef OVERRIDE_TABLE_64_sys_mremap
812TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 25, 5)
813#endif
814#ifndef OVERRIDE_TABLE_64_sys_msync
815TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 26, 3)
816#endif
817#ifndef OVERRIDE_TABLE_64_sys_madvise
818TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 28, 3)
819#endif
820#ifndef OVERRIDE_TABLE_64_sys_shmget
821TRACE_SYSCALL_TABLE(sys_shmget, sys_shmget, 29, 3)
822#endif
823#ifndef OVERRIDE_TABLE_64_sys_dup
824TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 32, 1)
825#endif
826#ifndef OVERRIDE_TABLE_64_sys_dup2
827TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 33, 2)
828#endif
829#ifndef OVERRIDE_TABLE_64_sys_alarm
830TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 37, 1)
831#endif
832#ifndef OVERRIDE_TABLE_64_sys_socket
833TRACE_SYSCALL_TABLE(sys_socket, sys_socket, 41, 3)
834#endif
835#ifndef OVERRIDE_TABLE_64_sys_shutdown
836TRACE_SYSCALL_TABLE(sys_shutdown, sys_shutdown, 48, 2)
837#endif
838#ifndef OVERRIDE_TABLE_64_sys_listen
839TRACE_SYSCALL_TABLE(sys_listen, sys_listen, 50, 2)
840#endif
841#ifndef OVERRIDE_TABLE_64_sys_exit
842TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 60, 1)
843#endif
844#ifndef OVERRIDE_TABLE_64_sys_kill
845TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 62, 2)
846#endif
847#ifndef OVERRIDE_TABLE_64_sys_semget
848TRACE_SYSCALL_TABLE(sys_semget, sys_semget, 64, 3)
849#endif
850#ifndef OVERRIDE_TABLE_64_sys_msgget
851TRACE_SYSCALL_TABLE(sys_msgget, sys_msgget, 68, 2)
852#endif
853#ifndef OVERRIDE_TABLE_64_sys_fcntl
854TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 72, 3)
855#endif
856#ifndef OVERRIDE_TABLE_64_sys_flock
857TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 73, 2)
858#endif
859#ifndef OVERRIDE_TABLE_64_sys_fsync
860TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 74, 1)
861#endif
862#ifndef OVERRIDE_TABLE_64_sys_fdatasync
863TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 75, 1)
864#endif
865#ifndef OVERRIDE_TABLE_64_sys_ftruncate
866TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 77, 2)
867#endif
868#ifndef OVERRIDE_TABLE_64_sys_fchdir
869TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 81, 1)
870#endif
871#ifndef OVERRIDE_TABLE_64_sys_fchmod
872TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 91, 2)
873#endif
874#ifndef OVERRIDE_TABLE_64_sys_fchown
875TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 93, 3)
876#endif
877#ifndef OVERRIDE_TABLE_64_sys_umask
878TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 95, 1)
879#endif
880#ifndef OVERRIDE_TABLE_64_sys_ptrace
881TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 101, 4)
882#endif
883#ifndef OVERRIDE_TABLE_64_sys_setuid
884TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 105, 1)
885#endif
886#ifndef OVERRIDE_TABLE_64_sys_setgid
887TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 106, 1)
888#endif
889#ifndef OVERRIDE_TABLE_64_sys_setpgid
890TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 109, 2)
891#endif
892#ifndef OVERRIDE_TABLE_64_sys_setreuid
893TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 113, 2)
894#endif
895#ifndef OVERRIDE_TABLE_64_sys_setregid
896TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 114, 2)
897#endif
898#ifndef OVERRIDE_TABLE_64_sys_setresuid
899TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 117, 3)
900#endif
901#ifndef OVERRIDE_TABLE_64_sys_setresgid
902TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 119, 3)
903#endif
904#ifndef OVERRIDE_TABLE_64_sys_getpgid
905TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 121, 1)
906#endif
907#ifndef OVERRIDE_TABLE_64_sys_setfsuid
908TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 122, 1)
909#endif
910#ifndef OVERRIDE_TABLE_64_sys_setfsgid
911TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 123, 1)
912#endif
913#ifndef OVERRIDE_TABLE_64_sys_getsid
914TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 124, 1)
915#endif
916#ifndef OVERRIDE_TABLE_64_sys_personality
917TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 135, 1)
918#endif
919#ifndef OVERRIDE_TABLE_64_sys_sysfs
920TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 139, 3)
921#endif
922#ifndef OVERRIDE_TABLE_64_sys_getpriority
923TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 140, 2)
924#endif
925#ifndef OVERRIDE_TABLE_64_sys_setpriority
926TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 141, 3)
927#endif
928#ifndef OVERRIDE_TABLE_64_sys_sched_getscheduler
929TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 145, 1)
930#endif
931#ifndef OVERRIDE_TABLE_64_sys_sched_get_priority_max
932TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 146, 1)
933#endif
934#ifndef OVERRIDE_TABLE_64_sys_sched_get_priority_min
935TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 147, 1)
936#endif
937#ifndef OVERRIDE_TABLE_64_sys_mlock
938TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 149, 2)
939#endif
940#ifndef OVERRIDE_TABLE_64_sys_munlock
941TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 150, 2)
942#endif
943#ifndef OVERRIDE_TABLE_64_sys_mlockall
944TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 151, 1)
945#endif
946#ifndef OVERRIDE_TABLE_64_sys_prctl
947TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 157, 5)
948#endif
949#ifndef OVERRIDE_TABLE_64_sys_tkill
950TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 200, 2)
951#endif
952#ifndef OVERRIDE_TABLE_64_sys_io_destroy
953TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 207, 1)
954#endif
955#ifndef OVERRIDE_TABLE_64_sys_epoll_create
956TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 213, 1)
957#endif
958#ifndef OVERRIDE_TABLE_64_sys_remap_file_pages
959TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 216, 5)
960#endif
961#ifndef OVERRIDE_TABLE_64_sys_timer_getoverrun
962TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 225, 1)
963#endif
964#ifndef OVERRIDE_TABLE_64_sys_timer_delete
965TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 226, 1)
966#endif
967#ifndef OVERRIDE_TABLE_64_sys_exit_group
968TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 231, 1)
969#endif
970#ifndef OVERRIDE_TABLE_64_sys_tgkill
971TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 234, 3)
972#endif
973#ifndef OVERRIDE_TABLE_64_sys_ioprio_set
974TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 251, 3)
975#endif
976#ifndef OVERRIDE_TABLE_64_sys_ioprio_get
977TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 252, 2)
978#endif
979#ifndef OVERRIDE_TABLE_64_sys_inotify_rm_watch
980TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 255, 2)
981#endif
982#ifndef OVERRIDE_TABLE_64_sys_unshare
983TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 272, 1)
984#endif
985#ifndef OVERRIDE_TABLE_64_sys_tee
986TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 276, 4)
987#endif
988#ifndef OVERRIDE_TABLE_64_sys_timerfd_create
989TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 283, 2)
990#endif
991#ifndef OVERRIDE_TABLE_64_sys_eventfd
992TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 284, 1)
993#endif
994#ifndef OVERRIDE_TABLE_64_sys_eventfd2
995TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 290, 2)
996#endif
997#ifndef OVERRIDE_TABLE_64_sys_epoll_create1
998TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 291, 1)
999#endif
1000#ifndef OVERRIDE_TABLE_64_sys_dup3
1001TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 292, 3)
1002#endif
1003#ifndef OVERRIDE_TABLE_64_sys_inotify_init1
1004TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 294, 1)
1005#endif
1006#ifndef OVERRIDE_TABLE_64_sys_syncfs
1007TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 306, 1)
1008#endif
1009#ifndef OVERRIDE_TABLE_64_sys_setns
1010TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 308, 2)
1011#endif
1012
1013#endif /* CREATE_SYSCALL_TABLE */
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h
deleted file mode 100644
index 3d400f7e5022..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h
+++ /dev/null
@@ -1,3 +0,0 @@
1/*
2 * this is a place-holder for x86_64 interger syscall definition override.
3 */
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers.h
deleted file mode 100644
index e926a60abf01..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers.h
+++ /dev/null
@@ -1,2076 +0,0 @@
1/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
2#ifndef CREATE_SYSCALL_TABLE
3
4#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SYSCALLS_POINTERS_H
6
7#include <linux/tracepoint.h>
8#include <linux/syscalls.h>
9#include "x86-64-syscalls-3.0.4_pointers_override.h"
10#include "syscalls_pointers_override.h"
11
12#ifndef OVERRIDE_64_sys_pipe
13SC_TRACE_EVENT(sys_pipe,
14 TP_PROTO(int * fildes),
15 TP_ARGS(fildes),
16 TP_STRUCT__entry(__field_hex(int *, fildes)),
17 TP_fast_assign(tp_assign(fildes, fildes)),
18 TP_printk()
19)
20#endif
21#ifndef OVERRIDE_64_sys_newuname
22SC_TRACE_EVENT(sys_newuname,
23 TP_PROTO(struct new_utsname * name),
24 TP_ARGS(name),
25 TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
26 TP_fast_assign(tp_assign(name, name)),
27 TP_printk()
28)
29#endif
30#ifndef OVERRIDE_64_sys_shmdt
31SC_TRACE_EVENT(sys_shmdt,
32 TP_PROTO(char * shmaddr),
33 TP_ARGS(shmaddr),
34 TP_STRUCT__entry(__field_hex(char *, shmaddr)),
35 TP_fast_assign(tp_assign(shmaddr, shmaddr)),
36 TP_printk()
37)
38#endif
39#ifndef OVERRIDE_64_sys_chdir
40SC_TRACE_EVENT(sys_chdir,
41 TP_PROTO(const char * filename),
42 TP_ARGS(filename),
43 TP_STRUCT__entry(__string_from_user(filename, filename)),
44 TP_fast_assign(tp_copy_string_from_user(filename, filename)),
45 TP_printk()
46)
47#endif
48#ifndef OVERRIDE_64_sys_rmdir
49SC_TRACE_EVENT(sys_rmdir,
50 TP_PROTO(const char * pathname),
51 TP_ARGS(pathname),
52 TP_STRUCT__entry(__string_from_user(pathname, pathname)),
53 TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
54 TP_printk()
55)
56#endif
57#ifndef OVERRIDE_64_sys_unlink
58SC_TRACE_EVENT(sys_unlink,
59 TP_PROTO(const char * pathname),
60 TP_ARGS(pathname),
61 TP_STRUCT__entry(__string_from_user(pathname, pathname)),
62 TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
63 TP_printk()
64)
65#endif
66#ifndef OVERRIDE_64_sys_sysinfo
67SC_TRACE_EVENT(sys_sysinfo,
68 TP_PROTO(struct sysinfo * info),
69 TP_ARGS(info),
70 TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
71 TP_fast_assign(tp_assign(info, info)),
72 TP_printk()
73)
74#endif
75#ifndef OVERRIDE_64_sys_times
76SC_TRACE_EVENT(sys_times,
77 TP_PROTO(struct tms * tbuf),
78 TP_ARGS(tbuf),
79 TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
80 TP_fast_assign(tp_assign(tbuf, tbuf)),
81 TP_printk()
82)
83#endif
84#ifndef OVERRIDE_64_sys_sysctl
85SC_TRACE_EVENT(sys_sysctl,
86 TP_PROTO(struct __sysctl_args * args),
87 TP_ARGS(args),
88 TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
89 TP_fast_assign(tp_assign(args, args)),
90 TP_printk()
91)
92#endif
93#ifndef OVERRIDE_64_sys_adjtimex
94SC_TRACE_EVENT(sys_adjtimex,
95 TP_PROTO(struct timex * txc_p),
96 TP_ARGS(txc_p),
97 TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
98 TP_fast_assign(tp_assign(txc_p, txc_p)),
99 TP_printk()
100)
101#endif
102#ifndef OVERRIDE_64_sys_chroot
103SC_TRACE_EVENT(sys_chroot,
104 TP_PROTO(const char * filename),
105 TP_ARGS(filename),
106 TP_STRUCT__entry(__string_from_user(filename, filename)),
107 TP_fast_assign(tp_copy_string_from_user(filename, filename)),
108 TP_printk()
109)
110#endif
111#ifndef OVERRIDE_64_sys_swapoff
112SC_TRACE_EVENT(sys_swapoff,
113 TP_PROTO(const char * specialfile),
114 TP_ARGS(specialfile),
115 TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
116 TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
117 TP_printk()
118)
119#endif
120#ifndef OVERRIDE_64_sys_time
121SC_TRACE_EVENT(sys_time,
122 TP_PROTO(time_t * tloc),
123 TP_ARGS(tloc),
124 TP_STRUCT__entry(__field_hex(time_t *, tloc)),
125 TP_fast_assign(tp_assign(tloc, tloc)),
126 TP_printk()
127)
128#endif
129#ifndef OVERRIDE_64_sys_set_tid_address
130SC_TRACE_EVENT(sys_set_tid_address,
131 TP_PROTO(int * tidptr),
132 TP_ARGS(tidptr),
133 TP_STRUCT__entry(__field_hex(int *, tidptr)),
134 TP_fast_assign(tp_assign(tidptr, tidptr)),
135 TP_printk()
136)
137#endif
138#ifndef OVERRIDE_64_sys_mq_unlink
139SC_TRACE_EVENT(sys_mq_unlink,
140 TP_PROTO(const char * u_name),
141 TP_ARGS(u_name),
142 TP_STRUCT__entry(__string_from_user(u_name, u_name)),
143 TP_fast_assign(tp_copy_string_from_user(u_name, u_name)),
144 TP_printk()
145)
146#endif
147#ifndef OVERRIDE_64_sys_newstat
148SC_TRACE_EVENT(sys_newstat,
149 TP_PROTO(const char * filename, struct stat * statbuf),
150 TP_ARGS(filename, statbuf),
151 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
152 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
153 TP_printk()
154)
155#endif
156#ifndef OVERRIDE_64_sys_newfstat
157SC_TRACE_EVENT(sys_newfstat,
158 TP_PROTO(unsigned int fd, struct stat * statbuf),
159 TP_ARGS(fd, statbuf),
160 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
161 TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
162 TP_printk()
163)
164#endif
165#ifndef OVERRIDE_64_sys_newlstat
166SC_TRACE_EVENT(sys_newlstat,
167 TP_PROTO(const char * filename, struct stat * statbuf),
168 TP_ARGS(filename, statbuf),
169 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
170 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
171 TP_printk()
172)
173#endif
174#ifndef OVERRIDE_64_sys_access
175SC_TRACE_EVENT(sys_access,
176 TP_PROTO(const char * filename, int mode),
177 TP_ARGS(filename, mode),
178 TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
179 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
180 TP_printk()
181)
182#endif
183#ifndef OVERRIDE_64_sys_nanosleep
184SC_TRACE_EVENT(sys_nanosleep,
185 TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
186 TP_ARGS(rqtp, rmtp),
187 TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
188 TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
189 TP_printk()
190)
191#endif
192#ifndef OVERRIDE_64_sys_getitimer
193SC_TRACE_EVENT(sys_getitimer,
194 TP_PROTO(int which, struct itimerval * value),
195 TP_ARGS(which, value),
196 TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
197 TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
198 TP_printk()
199)
200#endif
201#ifndef OVERRIDE_64_sys_truncate
202SC_TRACE_EVENT(sys_truncate,
203 TP_PROTO(const char * path, long length),
204 TP_ARGS(path, length),
205 TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
206 TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
207 TP_printk()
208)
209#endif
210#ifndef OVERRIDE_64_sys_getcwd
211SC_TRACE_EVENT(sys_getcwd,
212 TP_PROTO(char * buf, unsigned long size),
213 TP_ARGS(buf, size),
214 TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
215 TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
216 TP_printk()
217)
218#endif
219#ifndef OVERRIDE_64_sys_rename
220SC_TRACE_EVENT(sys_rename,
221 TP_PROTO(const char * oldname, const char * newname),
222 TP_ARGS(oldname, newname),
223 TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
224 TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
225 TP_printk()
226)
227#endif
228#ifndef OVERRIDE_64_sys_mkdir
229SC_TRACE_EVENT(sys_mkdir,
230 TP_PROTO(const char * pathname, int mode),
231 TP_ARGS(pathname, mode),
232 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
233 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
234 TP_printk()
235)
236#endif
237#ifndef OVERRIDE_64_sys_creat
238SC_TRACE_EVENT(sys_creat,
239 TP_PROTO(const char * pathname, int mode),
240 TP_ARGS(pathname, mode),
241 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
242 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
243 TP_printk()
244)
245#endif
246#ifndef OVERRIDE_64_sys_link
247SC_TRACE_EVENT(sys_link,
248 TP_PROTO(const char * oldname, const char * newname),
249 TP_ARGS(oldname, newname),
250 TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
251 TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
252 TP_printk()
253)
254#endif
255#ifndef OVERRIDE_64_sys_symlink
256SC_TRACE_EVENT(sys_symlink,
257 TP_PROTO(const char * oldname, const char * newname),
258 TP_ARGS(oldname, newname),
259 TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
260 TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
261 TP_printk()
262)
263#endif
264#ifndef OVERRIDE_64_sys_chmod
265SC_TRACE_EVENT(sys_chmod,
266 TP_PROTO(const char * filename, mode_t mode),
267 TP_ARGS(filename, mode),
268 TP_STRUCT__entry(__string_from_user(filename, filename) __field(mode_t, mode)),
269 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
270 TP_printk()
271)
272#endif
273#ifndef OVERRIDE_64_sys_gettimeofday
274SC_TRACE_EVENT(sys_gettimeofday,
275 TP_PROTO(struct timeval * tv, struct timezone * tz),
276 TP_ARGS(tv, tz),
277 TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
278 TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
279 TP_printk()
280)
281#endif
282#ifndef OVERRIDE_64_sys_getrlimit
283SC_TRACE_EVENT(sys_getrlimit,
284 TP_PROTO(unsigned int resource, struct rlimit * rlim),
285 TP_ARGS(resource, rlim),
286 TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
287 TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
288 TP_printk()
289)
290#endif
291#ifndef OVERRIDE_64_sys_getrusage
292SC_TRACE_EVENT(sys_getrusage,
293 TP_PROTO(int who, struct rusage * ru),
294 TP_ARGS(who, ru),
295 TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
296 TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
297 TP_printk()
298)
299#endif
300#ifndef OVERRIDE_64_sys_getgroups
301SC_TRACE_EVENT(sys_getgroups,
302 TP_PROTO(int gidsetsize, gid_t * grouplist),
303 TP_ARGS(gidsetsize, grouplist),
304 TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
305 TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
306 TP_printk()
307)
308#endif
309#ifndef OVERRIDE_64_sys_setgroups
310SC_TRACE_EVENT(sys_setgroups,
311 TP_PROTO(int gidsetsize, gid_t * grouplist),
312 TP_ARGS(gidsetsize, grouplist),
313 TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
314 TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
315 TP_printk()
316)
317#endif
318#ifndef OVERRIDE_64_sys_rt_sigpending
319SC_TRACE_EVENT(sys_rt_sigpending,
320 TP_PROTO(sigset_t * set, size_t sigsetsize),
321 TP_ARGS(set, sigsetsize),
322 TP_STRUCT__entry(__field_hex(sigset_t *, set) __field(size_t, sigsetsize)),
323 TP_fast_assign(tp_assign(set, set) tp_assign(sigsetsize, sigsetsize)),
324 TP_printk()
325)
326#endif
327#ifndef OVERRIDE_64_sys_rt_sigsuspend
328SC_TRACE_EVENT(sys_rt_sigsuspend,
329 TP_PROTO(sigset_t * unewset, size_t sigsetsize),
330 TP_ARGS(unewset, sigsetsize),
331 TP_STRUCT__entry(__field_hex(sigset_t *, unewset) __field(size_t, sigsetsize)),
332 TP_fast_assign(tp_assign(unewset, unewset) tp_assign(sigsetsize, sigsetsize)),
333 TP_printk()
334)
335#endif
336#ifndef OVERRIDE_64_sys_utime
337SC_TRACE_EVENT(sys_utime,
338 TP_PROTO(char * filename, struct utimbuf * times),
339 TP_ARGS(filename, times),
340 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
341 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
342 TP_printk()
343)
344#endif
345#ifndef OVERRIDE_64_sys_ustat
346SC_TRACE_EVENT(sys_ustat,
347 TP_PROTO(unsigned dev, struct ustat * ubuf),
348 TP_ARGS(dev, ubuf),
349 TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
350 TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
351 TP_printk()
352)
353#endif
354#ifndef OVERRIDE_64_sys_statfs
355SC_TRACE_EVENT(sys_statfs,
356 TP_PROTO(const char * pathname, struct statfs * buf),
357 TP_ARGS(pathname, buf),
358 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
359 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
360 TP_printk()
361)
362#endif
363#ifndef OVERRIDE_64_sys_fstatfs
364SC_TRACE_EVENT(sys_fstatfs,
365 TP_PROTO(unsigned int fd, struct statfs * buf),
366 TP_ARGS(fd, buf),
367 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
368 TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
369 TP_printk()
370)
371#endif
372#ifndef OVERRIDE_64_sys_sched_setparam
373SC_TRACE_EVENT(sys_sched_setparam,
374 TP_PROTO(pid_t pid, struct sched_param * param),
375 TP_ARGS(pid, param),
376 TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
377 TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
378 TP_printk()
379)
380#endif
381#ifndef OVERRIDE_64_sys_sched_getparam
382SC_TRACE_EVENT(sys_sched_getparam,
383 TP_PROTO(pid_t pid, struct sched_param * param),
384 TP_ARGS(pid, param),
385 TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
386 TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
387 TP_printk()
388)
389#endif
390#ifndef OVERRIDE_64_sys_sched_rr_get_interval
391SC_TRACE_EVENT(sys_sched_rr_get_interval,
392 TP_PROTO(pid_t pid, struct timespec * interval),
393 TP_ARGS(pid, interval),
394 TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
395 TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
396 TP_printk()
397)
398#endif
399#ifndef OVERRIDE_64_sys_pivot_root
400SC_TRACE_EVENT(sys_pivot_root,
401 TP_PROTO(const char * new_root, const char * put_old),
402 TP_ARGS(new_root, put_old),
403 TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
404 TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
405 TP_printk()
406)
407#endif
408#ifndef OVERRIDE_64_sys_setrlimit
409SC_TRACE_EVENT(sys_setrlimit,
410 TP_PROTO(unsigned int resource, struct rlimit * rlim),
411 TP_ARGS(resource, rlim),
412 TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
413 TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
414 TP_printk()
415)
416#endif
417#ifndef OVERRIDE_64_sys_settimeofday
418SC_TRACE_EVENT(sys_settimeofday,
419 TP_PROTO(struct timeval * tv, struct timezone * tz),
420 TP_ARGS(tv, tz),
421 TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
422 TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
423 TP_printk()
424)
425#endif
426#ifndef OVERRIDE_64_sys_umount
427SC_TRACE_EVENT(sys_umount,
428 TP_PROTO(char * name, int flags),
429 TP_ARGS(name, flags),
430 TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
431 TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
432 TP_printk()
433)
434#endif
435#ifndef OVERRIDE_64_sys_swapon
436SC_TRACE_EVENT(sys_swapon,
437 TP_PROTO(const char * specialfile, int swap_flags),
438 TP_ARGS(specialfile, swap_flags),
439 TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
440 TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
441 TP_printk()
442)
443#endif
444#ifndef OVERRIDE_64_sys_sethostname
445SC_TRACE_EVENT(sys_sethostname,
446 TP_PROTO(char * name, int len),
447 TP_ARGS(name, len),
448 TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
449 TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
450 TP_printk()
451)
452#endif
453#ifndef OVERRIDE_64_sys_setdomainname
454SC_TRACE_EVENT(sys_setdomainname,
455 TP_PROTO(char * name, int len),
456 TP_ARGS(name, len),
457 TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
458 TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
459 TP_printk()
460)
461#endif
462#ifndef OVERRIDE_64_sys_delete_module
463SC_TRACE_EVENT(sys_delete_module,
464 TP_PROTO(const char * name_user, unsigned int flags),
465 TP_ARGS(name_user, flags),
466 TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
467 TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
468 TP_printk()
469)
470#endif
471#ifndef OVERRIDE_64_sys_removexattr
472SC_TRACE_EVENT(sys_removexattr,
473 TP_PROTO(const char * pathname, const char * name),
474 TP_ARGS(pathname, name),
475 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
476 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
477 TP_printk()
478)
479#endif
480#ifndef OVERRIDE_64_sys_lremovexattr
481SC_TRACE_EVENT(sys_lremovexattr,
482 TP_PROTO(const char * pathname, const char * name),
483 TP_ARGS(pathname, name),
484 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
485 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
486 TP_printk()
487)
488#endif
489#ifndef OVERRIDE_64_sys_fremovexattr
490SC_TRACE_EVENT(sys_fremovexattr,
491 TP_PROTO(int fd, const char * name),
492 TP_ARGS(fd, name),
493 TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
494 TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
495 TP_printk()
496)
497#endif
498#ifndef OVERRIDE_64_sys_io_setup
499SC_TRACE_EVENT(sys_io_setup,
500 TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
501 TP_ARGS(nr_events, ctxp),
502 TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
503 TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
504 TP_printk()
505)
506#endif
507#ifndef OVERRIDE_64_sys_timer_gettime
508SC_TRACE_EVENT(sys_timer_gettime,
509 TP_PROTO(timer_t timer_id, struct itimerspec * setting),
510 TP_ARGS(timer_id, setting),
511 TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
512 TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
513 TP_printk()
514)
515#endif
516#ifndef OVERRIDE_64_sys_clock_settime
517SC_TRACE_EVENT(sys_clock_settime,
518 TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
519 TP_ARGS(which_clock, tp),
520 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
521 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
522 TP_printk()
523)
524#endif
525#ifndef OVERRIDE_64_sys_clock_gettime
526SC_TRACE_EVENT(sys_clock_gettime,
527 TP_PROTO(const clockid_t which_clock, struct timespec * tp),
528 TP_ARGS(which_clock, tp),
529 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
530 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
531 TP_printk()
532)
533#endif
534#ifndef OVERRIDE_64_sys_clock_getres
535SC_TRACE_EVENT(sys_clock_getres,
536 TP_PROTO(const clockid_t which_clock, struct timespec * tp),
537 TP_ARGS(which_clock, tp),
538 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
539 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
540 TP_printk()
541)
542#endif
543#ifndef OVERRIDE_64_sys_utimes
544SC_TRACE_EVENT(sys_utimes,
545 TP_PROTO(char * filename, struct timeval * utimes),
546 TP_ARGS(filename, utimes),
547 TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
548 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
549 TP_printk()
550)
551#endif
552#ifndef OVERRIDE_64_sys_mq_notify
553SC_TRACE_EVENT(sys_mq_notify,
554 TP_PROTO(mqd_t mqdes, const struct sigevent * u_notification),
555 TP_ARGS(mqdes, u_notification),
556 TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct sigevent *, u_notification)),
557 TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_notification, u_notification)),
558 TP_printk()
559)
560#endif
561#ifndef OVERRIDE_64_sys_set_robust_list
562SC_TRACE_EVENT(sys_set_robust_list,
563 TP_PROTO(struct robust_list_head * head, size_t len),
564 TP_ARGS(head, len),
565 TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
566 TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
567 TP_printk()
568)
569#endif
570#ifndef OVERRIDE_64_sys_timerfd_gettime
571SC_TRACE_EVENT(sys_timerfd_gettime,
572 TP_PROTO(int ufd, struct itimerspec * otmr),
573 TP_ARGS(ufd, otmr),
574 TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
575 TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
576 TP_printk()
577)
578#endif
579#ifndef OVERRIDE_64_sys_pipe2
580SC_TRACE_EVENT(sys_pipe2,
581 TP_PROTO(int * fildes, int flags),
582 TP_ARGS(fildes, flags),
583 TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
584 TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
585 TP_printk()
586)
587#endif
588#ifndef OVERRIDE_64_sys_clock_adjtime
589SC_TRACE_EVENT(sys_clock_adjtime,
590 TP_PROTO(const clockid_t which_clock, struct timex * utx),
591 TP_ARGS(which_clock, utx),
592 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
593 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
594 TP_printk()
595)
596#endif
597#ifndef OVERRIDE_64_sys_read
598SC_TRACE_EVENT(sys_read,
599 TP_PROTO(unsigned int fd, char * buf, size_t count),
600 TP_ARGS(fd, buf, count),
601 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
602 TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
603 TP_printk()
604)
605#endif
606#ifndef OVERRIDE_64_sys_write
607SC_TRACE_EVENT(sys_write,
608 TP_PROTO(unsigned int fd, const char * buf, size_t count),
609 TP_ARGS(fd, buf, count),
610 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
611 TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
612 TP_printk()
613)
614#endif
615#ifndef OVERRIDE_64_sys_open
616SC_TRACE_EVENT(sys_open,
617 TP_PROTO(const char * filename, int flags, int mode),
618 TP_ARGS(filename, flags, mode),
619 TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
620 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
621 TP_printk()
622)
623#endif
624#ifndef OVERRIDE_64_sys_poll
625SC_TRACE_EVENT(sys_poll,
626 TP_PROTO(struct pollfd * ufds, unsigned int nfds, long timeout_msecs),
627 TP_ARGS(ufds, nfds, timeout_msecs),
628 TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(long, timeout_msecs)),
629 TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
630 TP_printk()
631)
632#endif
633#ifndef OVERRIDE_64_sys_readv
634SC_TRACE_EVENT(sys_readv,
635 TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
636 TP_ARGS(fd, vec, vlen),
637 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
638 TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
639 TP_printk()
640)
641#endif
642#ifndef OVERRIDE_64_sys_writev
643SC_TRACE_EVENT(sys_writev,
644 TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
645 TP_ARGS(fd, vec, vlen),
646 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
647 TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
648 TP_printk()
649)
650#endif
651#ifndef OVERRIDE_64_sys_mincore
652SC_TRACE_EVENT(sys_mincore,
653 TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
654 TP_ARGS(start, len, vec),
655 TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
656 TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
657 TP_printk()
658)
659#endif
660#ifndef OVERRIDE_64_sys_shmat
661SC_TRACE_EVENT(sys_shmat,
662 TP_PROTO(int shmid, char * shmaddr, int shmflg),
663 TP_ARGS(shmid, shmaddr, shmflg),
664 TP_STRUCT__entry(__field(int, shmid) __field_hex(char *, shmaddr) __field(int, shmflg)),
665 TP_fast_assign(tp_assign(shmid, shmid) tp_assign(shmaddr, shmaddr) tp_assign(shmflg, shmflg)),
666 TP_printk()
667)
668#endif
669#ifndef OVERRIDE_64_sys_shmctl
670SC_TRACE_EVENT(sys_shmctl,
671 TP_PROTO(int shmid, int cmd, struct shmid_ds * buf),
672 TP_ARGS(shmid, cmd, buf),
673 TP_STRUCT__entry(__field(int, shmid) __field(int, cmd) __field_hex(struct shmid_ds *, buf)),
674 TP_fast_assign(tp_assign(shmid, shmid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
675 TP_printk()
676)
677#endif
678#ifndef OVERRIDE_64_sys_setitimer
679SC_TRACE_EVENT(sys_setitimer,
680 TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
681 TP_ARGS(which, value, ovalue),
682 TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
683 TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
684 TP_printk()
685)
686#endif
687#ifndef OVERRIDE_64_sys_connect
688SC_TRACE_EVENT(sys_connect,
689 TP_PROTO(int fd, struct sockaddr * uservaddr, int addrlen),
690 TP_ARGS(fd, uservaddr, addrlen),
691 TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, uservaddr) __field_hex(int, addrlen)),
692 TP_fast_assign(tp_assign(fd, fd) tp_assign(uservaddr, uservaddr) tp_assign(addrlen, addrlen)),
693 TP_printk()
694)
695#endif
696#ifndef OVERRIDE_64_sys_accept
697SC_TRACE_EVENT(sys_accept,
698 TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen),
699 TP_ARGS(fd, upeer_sockaddr, upeer_addrlen),
700 TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen)),
701 TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen)),
702 TP_printk()
703)
704#endif
705#ifndef OVERRIDE_64_sys_sendmsg
706SC_TRACE_EVENT(sys_sendmsg,
707 TP_PROTO(int fd, struct msghdr * msg, unsigned flags),
708 TP_ARGS(fd, msg, flags),
709 TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned, flags)),
710 TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
711 TP_printk()
712)
713#endif
714#ifndef OVERRIDE_64_sys_recvmsg
715SC_TRACE_EVENT(sys_recvmsg,
716 TP_PROTO(int fd, struct msghdr * msg, unsigned int flags),
717 TP_ARGS(fd, msg, flags),
718 TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned int, flags)),
719 TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
720 TP_printk()
721)
722#endif
723#ifndef OVERRIDE_64_sys_bind
724SC_TRACE_EVENT(sys_bind,
725 TP_PROTO(int fd, struct sockaddr * umyaddr, int addrlen),
726 TP_ARGS(fd, umyaddr, addrlen),
727 TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, umyaddr) __field_hex(int, addrlen)),
728 TP_fast_assign(tp_assign(fd, fd) tp_assign(umyaddr, umyaddr) tp_assign(addrlen, addrlen)),
729 TP_printk()
730)
731#endif
732#ifndef OVERRIDE_64_sys_getsockname
733SC_TRACE_EVENT(sys_getsockname,
734 TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
735 TP_ARGS(fd, usockaddr, usockaddr_len),
736 TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
737 TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
738 TP_printk()
739)
740#endif
741#ifndef OVERRIDE_64_sys_getpeername
742SC_TRACE_EVENT(sys_getpeername,
743 TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
744 TP_ARGS(fd, usockaddr, usockaddr_len),
745 TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
746 TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
747 TP_printk()
748)
749#endif
750#ifndef OVERRIDE_64_sys_semop
751SC_TRACE_EVENT(sys_semop,
752 TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops),
753 TP_ARGS(semid, tsops, nsops),
754 TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops)),
755 TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops)),
756 TP_printk()
757)
758#endif
759#ifndef OVERRIDE_64_sys_msgctl
760SC_TRACE_EVENT(sys_msgctl,
761 TP_PROTO(int msqid, int cmd, struct msqid_ds * buf),
762 TP_ARGS(msqid, cmd, buf),
763 TP_STRUCT__entry(__field(int, msqid) __field(int, cmd) __field_hex(struct msqid_ds *, buf)),
764 TP_fast_assign(tp_assign(msqid, msqid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
765 TP_printk()
766)
767#endif
768#ifndef OVERRIDE_64_sys_getdents
769SC_TRACE_EVENT(sys_getdents,
770 TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
771 TP_ARGS(fd, dirent, count),
772 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
773 TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
774 TP_printk()
775)
776#endif
777#ifndef OVERRIDE_64_sys_readlink
778SC_TRACE_EVENT(sys_readlink,
779 TP_PROTO(const char * path, char * buf, int bufsiz),
780 TP_ARGS(path, buf, bufsiz),
781 TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
782 TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
783 TP_printk()
784)
785#endif
786#ifndef OVERRIDE_64_sys_chown
787SC_TRACE_EVENT(sys_chown,
788 TP_PROTO(const char * filename, uid_t user, gid_t group),
789 TP_ARGS(filename, user, group),
790 TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
791 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
792 TP_printk()
793)
794#endif
795#ifndef OVERRIDE_64_sys_lchown
796SC_TRACE_EVENT(sys_lchown,
797 TP_PROTO(const char * filename, uid_t user, gid_t group),
798 TP_ARGS(filename, user, group),
799 TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
800 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
801 TP_printk()
802)
803#endif
804#ifndef OVERRIDE_64_sys_syslog
805SC_TRACE_EVENT(sys_syslog,
806 TP_PROTO(int type, char * buf, int len),
807 TP_ARGS(type, buf, len),
808 TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
809 TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
810 TP_printk()
811)
812#endif
813#ifndef OVERRIDE_64_sys_getresuid
814SC_TRACE_EVENT(sys_getresuid,
815 TP_PROTO(uid_t * ruid, uid_t * euid, uid_t * suid),
816 TP_ARGS(ruid, euid, suid),
817 TP_STRUCT__entry(__field_hex(uid_t *, ruid) __field_hex(uid_t *, euid) __field_hex(uid_t *, suid)),
818 TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
819 TP_printk()
820)
821#endif
822#ifndef OVERRIDE_64_sys_getresgid
823SC_TRACE_EVENT(sys_getresgid,
824 TP_PROTO(gid_t * rgid, gid_t * egid, gid_t * sgid),
825 TP_ARGS(rgid, egid, sgid),
826 TP_STRUCT__entry(__field_hex(gid_t *, rgid) __field_hex(gid_t *, egid) __field_hex(gid_t *, sgid)),
827 TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
828 TP_printk()
829)
830#endif
831#ifndef OVERRIDE_64_sys_rt_sigqueueinfo
832SC_TRACE_EVENT(sys_rt_sigqueueinfo,
833 TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
834 TP_ARGS(pid, sig, uinfo),
835 TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
836 TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
837 TP_printk()
838)
839#endif
840#ifndef OVERRIDE_64_sys_mknod
841SC_TRACE_EVENT(sys_mknod,
842 TP_PROTO(const char * filename, int mode, unsigned dev),
843 TP_ARGS(filename, mode, dev),
844 TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
845 TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
846 TP_printk()
847)
848#endif
849#ifndef OVERRIDE_64_sys_sched_setscheduler
850SC_TRACE_EVENT(sys_sched_setscheduler,
851 TP_PROTO(pid_t pid, int policy, struct sched_param * param),
852 TP_ARGS(pid, policy, param),
853 TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
854 TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
855 TP_printk()
856)
857#endif
858#ifndef OVERRIDE_64_sys_init_module
859SC_TRACE_EVENT(sys_init_module,
860 TP_PROTO(void * umod, unsigned long len, const char * uargs),
861 TP_ARGS(umod, len, uargs),
862 TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
863 TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
864 TP_printk()
865)
866#endif
867#ifndef OVERRIDE_64_sys_nfsservctl
868SC_TRACE_EVENT(sys_nfsservctl,
869 TP_PROTO(int cmd, struct nfsctl_arg * arg, void * res),
870 TP_ARGS(cmd, arg, res),
871 TP_STRUCT__entry(__field(int, cmd) __field_hex(struct nfsctl_arg *, arg) __field_hex(void *, res)),
872 TP_fast_assign(tp_assign(cmd, cmd) tp_assign(arg, arg) tp_assign(res, res)),
873 TP_printk()
874)
875#endif
876#ifndef OVERRIDE_64_sys_listxattr
877SC_TRACE_EVENT(sys_listxattr,
878 TP_PROTO(const char * pathname, char * list, size_t size),
879 TP_ARGS(pathname, list, size),
880 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
881 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
882 TP_printk()
883)
884#endif
885#ifndef OVERRIDE_64_sys_llistxattr
886SC_TRACE_EVENT(sys_llistxattr,
887 TP_PROTO(const char * pathname, char * list, size_t size),
888 TP_ARGS(pathname, list, size),
889 TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
890 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
891 TP_printk()
892)
893#endif
894#ifndef OVERRIDE_64_sys_flistxattr
895SC_TRACE_EVENT(sys_flistxattr,
896 TP_PROTO(int fd, char * list, size_t size),
897 TP_ARGS(fd, list, size),
898 TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
899 TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
900 TP_printk()
901)
902#endif
903#ifndef OVERRIDE_64_sys_sched_setaffinity
904SC_TRACE_EVENT(sys_sched_setaffinity,
905 TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
906 TP_ARGS(pid, len, user_mask_ptr),
907 TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
908 TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
909 TP_printk()
910)
911#endif
912#ifndef OVERRIDE_64_sys_sched_getaffinity
913SC_TRACE_EVENT(sys_sched_getaffinity,
914 TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
915 TP_ARGS(pid, len, user_mask_ptr),
916 TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
917 TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
918 TP_printk()
919)
920#endif
921#ifndef OVERRIDE_64_sys_io_submit
922SC_TRACE_EVENT(sys_io_submit,
923 TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
924 TP_ARGS(ctx_id, nr, iocbpp),
925 TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
926 TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
927 TP_printk()
928)
929#endif
930#ifndef OVERRIDE_64_sys_io_cancel
931SC_TRACE_EVENT(sys_io_cancel,
932 TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
933 TP_ARGS(ctx_id, iocb, result),
934 TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
935 TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
936 TP_printk()
937)
938#endif
939#ifndef OVERRIDE_64_sys_getdents64
940SC_TRACE_EVENT(sys_getdents64,
941 TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
942 TP_ARGS(fd, dirent, count),
943 TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
944 TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
945 TP_printk()
946)
947#endif
948#ifndef OVERRIDE_64_sys_timer_create
949SC_TRACE_EVENT(sys_timer_create,
950 TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
951 TP_ARGS(which_clock, timer_event_spec, created_timer_id),
952 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
953 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
954 TP_printk()
955)
956#endif
957#ifndef OVERRIDE_64_sys_mq_getsetattr
958SC_TRACE_EVENT(sys_mq_getsetattr,
959 TP_PROTO(mqd_t mqdes, const struct mq_attr * u_mqstat, struct mq_attr * u_omqstat),
960 TP_ARGS(mqdes, u_mqstat, u_omqstat),
961 TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct mq_attr *, u_mqstat) __field_hex(struct mq_attr *, u_omqstat)),
962 TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_mqstat, u_mqstat) tp_assign(u_omqstat, u_omqstat)),
963 TP_printk()
964)
965#endif
966#ifndef OVERRIDE_64_sys_inotify_add_watch
967SC_TRACE_EVENT(sys_inotify_add_watch,
968 TP_PROTO(int fd, const char * pathname, u32 mask),
969 TP_ARGS(fd, pathname, mask),
970 TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
971 TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
972 TP_printk()
973)
974#endif
975#ifndef OVERRIDE_64_sys_mkdirat
976SC_TRACE_EVENT(sys_mkdirat,
977 TP_PROTO(int dfd, const char * pathname, int mode),
978 TP_ARGS(dfd, pathname, mode),
979 TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, mode)),
980 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
981 TP_printk()
982)
983#endif
984#ifndef OVERRIDE_64_sys_futimesat
985SC_TRACE_EVENT(sys_futimesat,
986 TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
987 TP_ARGS(dfd, filename, utimes),
988 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
989 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
990 TP_printk()
991)
992#endif
993#ifndef OVERRIDE_64_sys_unlinkat
994SC_TRACE_EVENT(sys_unlinkat,
995 TP_PROTO(int dfd, const char * pathname, int flag),
996 TP_ARGS(dfd, pathname, flag),
997 TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
998 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
999 TP_printk()
1000)
1001#endif
1002#ifndef OVERRIDE_64_sys_symlinkat
1003SC_TRACE_EVENT(sys_symlinkat,
1004 TP_PROTO(const char * oldname, int newdfd, const char * newname),
1005 TP_ARGS(oldname, newdfd, newname),
1006 TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
1007 TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
1008 TP_printk()
1009)
1010#endif
1011#ifndef OVERRIDE_64_sys_fchmodat
1012SC_TRACE_EVENT(sys_fchmodat,
1013 TP_PROTO(int dfd, const char * filename, mode_t mode),
1014 TP_ARGS(dfd, filename, mode),
1015 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(mode_t, mode)),
1016 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
1017 TP_printk()
1018)
1019#endif
1020#ifndef OVERRIDE_64_sys_faccessat
1021SC_TRACE_EVENT(sys_faccessat,
1022 TP_PROTO(int dfd, const char * filename, int mode),
1023 TP_ARGS(dfd, filename, mode),
1024 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
1025 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
1026 TP_printk()
1027)
1028#endif
1029#ifndef OVERRIDE_64_sys_get_robust_list
1030SC_TRACE_EVENT(sys_get_robust_list,
1031 TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
1032 TP_ARGS(pid, head_ptr, len_ptr),
1033 TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
1034 TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
1035 TP_printk()
1036)
1037#endif
1038#ifndef OVERRIDE_64_sys_signalfd
1039SC_TRACE_EVENT(sys_signalfd,
1040 TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
1041 TP_ARGS(ufd, user_mask, sizemask),
1042 TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
1043 TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
1044 TP_printk()
1045)
1046#endif
1047#ifndef OVERRIDE_64_sys_rt_sigaction
1048SC_TRACE_EVENT(sys_rt_sigaction,
1049 TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
1050 TP_ARGS(sig, act, oact, sigsetsize),
1051 TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
1052 TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
1053 TP_printk()
1054)
1055#endif
1056#ifndef OVERRIDE_64_sys_rt_sigprocmask
1057SC_TRACE_EVENT(sys_rt_sigprocmask,
1058 TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
1059 TP_ARGS(how, nset, oset, sigsetsize),
1060 TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
1061 TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
1062 TP_printk()
1063)
1064#endif
1065#ifndef OVERRIDE_64_sys_sendfile64
1066SC_TRACE_EVENT(sys_sendfile64,
1067 TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
1068 TP_ARGS(out_fd, in_fd, offset, count),
1069 TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
1070 TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
1071 TP_printk()
1072)
1073#endif
1074#ifndef OVERRIDE_64_sys_socketpair
1075SC_TRACE_EVENT(sys_socketpair,
1076 TP_PROTO(int family, int type, int protocol, int * usockvec),
1077 TP_ARGS(family, type, protocol, usockvec),
1078 TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol) __field_hex(int *, usockvec)),
1079 TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol) tp_assign(usockvec, usockvec)),
1080 TP_printk()
1081)
1082#endif
1083#ifndef OVERRIDE_64_sys_wait4
1084SC_TRACE_EVENT(sys_wait4,
1085 TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
1086 TP_ARGS(upid, stat_addr, options, ru),
1087 TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
1088 TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
1089 TP_printk()
1090)
1091#endif
1092#ifndef OVERRIDE_64_sys_msgsnd
1093SC_TRACE_EVENT(sys_msgsnd,
1094 TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, int msgflg),
1095 TP_ARGS(msqid, msgp, msgsz, msgflg),
1096 TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(int, msgflg)),
1097 TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgflg, msgflg)),
1098 TP_printk()
1099)
1100#endif
1101#ifndef OVERRIDE_64_sys_rt_sigtimedwait
1102SC_TRACE_EVENT(sys_rt_sigtimedwait,
1103 TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
1104 TP_ARGS(uthese, uinfo, uts, sigsetsize),
1105 TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
1106 TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
1107 TP_printk()
1108)
1109#endif
1110#ifndef OVERRIDE_64_sys_reboot
1111SC_TRACE_EVENT(sys_reboot,
1112 TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
1113 TP_ARGS(magic1, magic2, cmd, arg),
1114 TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
1115 TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
1116 TP_printk()
1117)
1118#endif
1119#ifndef OVERRIDE_64_sys_getxattr
1120SC_TRACE_EVENT(sys_getxattr,
1121 TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
1122 TP_ARGS(pathname, name, value, size),
1123 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
1124 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
1125 TP_printk()
1126)
1127#endif
1128#ifndef OVERRIDE_64_sys_lgetxattr
1129SC_TRACE_EVENT(sys_lgetxattr,
1130 TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
1131 TP_ARGS(pathname, name, value, size),
1132 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
1133 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
1134 TP_printk()
1135)
1136#endif
1137#ifndef OVERRIDE_64_sys_fgetxattr
1138SC_TRACE_EVENT(sys_fgetxattr,
1139 TP_PROTO(int fd, const char * name, void * value, size_t size),
1140 TP_ARGS(fd, name, value, size),
1141 TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
1142 TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
1143 TP_printk()
1144)
1145#endif
1146#ifndef OVERRIDE_64_sys_semtimedop
1147SC_TRACE_EVENT(sys_semtimedop,
1148 TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops, const struct timespec * timeout),
1149 TP_ARGS(semid, tsops, nsops, timeout),
1150 TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops) __field_hex(const struct timespec *, timeout)),
1151 TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops) tp_assign(timeout, timeout)),
1152 TP_printk()
1153)
1154#endif
1155#ifndef OVERRIDE_64_sys_timer_settime
1156SC_TRACE_EVENT(sys_timer_settime,
1157 TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
1158 TP_ARGS(timer_id, flags, new_setting, old_setting),
1159 TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
1160 TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
1161 TP_printk()
1162)
1163#endif
1164#ifndef OVERRIDE_64_sys_clock_nanosleep
1165SC_TRACE_EVENT(sys_clock_nanosleep,
1166 TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
1167 TP_ARGS(which_clock, flags, rqtp, rmtp),
1168 TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
1169 TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
1170 TP_printk()
1171)
1172#endif
1173#ifndef OVERRIDE_64_sys_epoll_wait
1174SC_TRACE_EVENT(sys_epoll_wait,
1175 TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
1176 TP_ARGS(epfd, events, maxevents, timeout),
1177 TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
1178 TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
1179 TP_printk()
1180)
1181#endif
1182#ifndef OVERRIDE_64_sys_epoll_ctl
1183SC_TRACE_EVENT(sys_epoll_ctl,
1184 TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
1185 TP_ARGS(epfd, op, fd, event),
1186 TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
1187 TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
1188 TP_printk()
1189)
1190#endif
1191#ifndef OVERRIDE_64_sys_mq_open
1192SC_TRACE_EVENT(sys_mq_open,
1193 TP_PROTO(const char * u_name, int oflag, mode_t mode, struct mq_attr * u_attr),
1194 TP_ARGS(u_name, oflag, mode, u_attr),
1195 TP_STRUCT__entry(__string_from_user(u_name, u_name) __field(int, oflag) __field(mode_t, mode) __field_hex(struct mq_attr *, u_attr)),
1196 TP_fast_assign(tp_copy_string_from_user(u_name, u_name) tp_assign(oflag, oflag) tp_assign(mode, mode) tp_assign(u_attr, u_attr)),
1197 TP_printk()
1198)
1199#endif
1200#ifndef OVERRIDE_64_sys_kexec_load
1201SC_TRACE_EVENT(sys_kexec_load,
1202 TP_PROTO(unsigned long entry, unsigned long nr_segments, struct kexec_segment * segments, unsigned long flags),
1203 TP_ARGS(entry, nr_segments, segments, flags),
1204 TP_STRUCT__entry(__field(unsigned long, entry) __field(unsigned long, nr_segments) __field_hex(struct kexec_segment *, segments) __field(unsigned long, flags)),
1205 TP_fast_assign(tp_assign(entry, entry) tp_assign(nr_segments, nr_segments) tp_assign(segments, segments) tp_assign(flags, flags)),
1206 TP_printk()
1207)
1208#endif
1209#ifndef OVERRIDE_64_sys_openat
1210SC_TRACE_EVENT(sys_openat,
1211 TP_PROTO(int dfd, const char * filename, int flags, int mode),
1212 TP_ARGS(dfd, filename, flags, mode),
1213 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
1214 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
1215 TP_printk()
1216)
1217#endif
1218#ifndef OVERRIDE_64_sys_mknodat
1219SC_TRACE_EVENT(sys_mknodat,
1220 TP_PROTO(int dfd, const char * filename, int mode, unsigned dev),
1221 TP_ARGS(dfd, filename, mode, dev),
1222 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
1223 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
1224 TP_printk()
1225)
1226#endif
1227#ifndef OVERRIDE_64_sys_newfstatat
1228SC_TRACE_EVENT(sys_newfstatat,
1229 TP_PROTO(int dfd, const char * filename, struct stat * statbuf, int flag),
1230 TP_ARGS(dfd, filename, statbuf, flag),
1231 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat *, statbuf) __field(int, flag)),
1232 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
1233 TP_printk()
1234)
1235#endif
1236#ifndef OVERRIDE_64_sys_renameat
1237SC_TRACE_EVENT(sys_renameat,
1238 TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
1239 TP_ARGS(olddfd, oldname, newdfd, newname),
1240 TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
1241 TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
1242 TP_printk()
1243)
1244#endif
1245#ifndef OVERRIDE_64_sys_readlinkat
1246SC_TRACE_EVENT(sys_readlinkat,
1247 TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
1248 TP_ARGS(dfd, pathname, buf, bufsiz),
1249 TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
1250 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
1251 TP_printk()
1252)
1253#endif
1254#ifndef OVERRIDE_64_sys_vmsplice
1255SC_TRACE_EVENT(sys_vmsplice,
1256 TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
1257 TP_ARGS(fd, iov, nr_segs, flags),
1258 TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
1259 TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
1260 TP_printk()
1261)
1262#endif
1263#ifndef OVERRIDE_64_sys_utimensat
1264SC_TRACE_EVENT(sys_utimensat,
1265 TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
1266 TP_ARGS(dfd, filename, utimes, flags),
1267 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
1268 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
1269 TP_printk()
1270)
1271#endif
1272#ifndef OVERRIDE_64_sys_timerfd_settime
1273SC_TRACE_EVENT(sys_timerfd_settime,
1274 TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
1275 TP_ARGS(ufd, flags, utmr, otmr),
1276 TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
1277 TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
1278 TP_printk()
1279)
1280#endif
1281#ifndef OVERRIDE_64_sys_accept4
1282SC_TRACE_EVENT(sys_accept4,
1283 TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen, int flags),
1284 TP_ARGS(fd, upeer_sockaddr, upeer_addrlen, flags),
1285 TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen) __field(int, flags)),
1286 TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen) tp_assign(flags, flags)),
1287 TP_printk()
1288)
1289#endif
1290#ifndef OVERRIDE_64_sys_signalfd4
1291SC_TRACE_EVENT(sys_signalfd4,
1292 TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
1293 TP_ARGS(ufd, user_mask, sizemask, flags),
1294 TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
1295 TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
1296 TP_printk()
1297)
1298#endif
1299#ifndef OVERRIDE_64_sys_rt_tgsigqueueinfo
1300SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
1301 TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
1302 TP_ARGS(tgid, pid, sig, uinfo),
1303 TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
1304 TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
1305 TP_printk()
1306)
1307#endif
1308#ifndef OVERRIDE_64_sys_prlimit64
1309SC_TRACE_EVENT(sys_prlimit64,
1310 TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
1311 TP_ARGS(pid, resource, new_rlim, old_rlim),
1312 TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
1313 TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
1314 TP_printk()
1315)
1316#endif
1317#ifndef OVERRIDE_64_sys_sendmmsg
1318SC_TRACE_EVENT(sys_sendmmsg,
1319 TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
1320 TP_ARGS(fd, mmsg, vlen, flags),
1321 TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
1322 TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
1323 TP_printk()
1324)
1325#endif
1326#ifndef OVERRIDE_64_sys_select
1327SC_TRACE_EVENT(sys_select,
1328 TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
1329 TP_ARGS(n, inp, outp, exp, tvp),
1330 TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
1331 TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
1332 TP_printk()
1333)
1334#endif
1335#ifndef OVERRIDE_64_sys_setsockopt
1336SC_TRACE_EVENT(sys_setsockopt,
1337 TP_PROTO(int fd, int level, int optname, char * optval, int optlen),
1338 TP_ARGS(fd, level, optname, optval, optlen),
1339 TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field(int, optlen)),
1340 TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
1341 TP_printk()
1342)
1343#endif
1344#ifndef OVERRIDE_64_sys_getsockopt
1345SC_TRACE_EVENT(sys_getsockopt,
1346 TP_PROTO(int fd, int level, int optname, char * optval, int * optlen),
1347 TP_ARGS(fd, level, optname, optval, optlen),
1348 TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field_hex(int *, optlen)),
1349 TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
1350 TP_printk()
1351)
1352#endif
1353#ifndef OVERRIDE_64_sys_msgrcv
1354SC_TRACE_EVENT(sys_msgrcv,
1355 TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, long msgtyp, int msgflg),
1356 TP_ARGS(msqid, msgp, msgsz, msgtyp, msgflg),
1357 TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(long, msgtyp) __field(int, msgflg)),
1358 TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgtyp, msgtyp) tp_assign(msgflg, msgflg)),
1359 TP_printk()
1360)
1361#endif
1362#ifndef OVERRIDE_64_sys_mount
1363SC_TRACE_EVENT(sys_mount,
1364 TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
1365 TP_ARGS(dev_name, dir_name, type, flags, data),
1366 TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
1367 TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
1368 TP_printk()
1369)
1370#endif
1371#ifndef OVERRIDE_64_sys_setxattr
1372SC_TRACE_EVENT(sys_setxattr,
1373 TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
1374 TP_ARGS(pathname, name, value, size, flags),
1375 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
1376 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
1377 TP_printk()
1378)
1379#endif
1380#ifndef OVERRIDE_64_sys_lsetxattr
1381SC_TRACE_EVENT(sys_lsetxattr,
1382 TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
1383 TP_ARGS(pathname, name, value, size, flags),
1384 TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
1385 TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
1386 TP_printk()
1387)
1388#endif
1389#ifndef OVERRIDE_64_sys_fsetxattr
1390SC_TRACE_EVENT(sys_fsetxattr,
1391 TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
1392 TP_ARGS(fd, name, value, size, flags),
1393 TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
1394 TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
1395 TP_printk()
1396)
1397#endif
1398#ifndef OVERRIDE_64_sys_io_getevents
1399SC_TRACE_EVENT(sys_io_getevents,
1400 TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
1401 TP_ARGS(ctx_id, min_nr, nr, events, timeout),
1402 TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
1403 TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
1404 TP_printk()
1405)
1406#endif
1407#ifndef OVERRIDE_64_sys_mq_timedsend
1408SC_TRACE_EVENT(sys_mq_timedsend,
1409 TP_PROTO(mqd_t mqdes, const char * u_msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec * u_abs_timeout),
1410 TP_ARGS(mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout),
1411 TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const char *, u_msg_ptr) __field(size_t, msg_len) __field(unsigned int, msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
1412 TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(msg_prio, msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
1413 TP_printk()
1414)
1415#endif
1416#ifndef OVERRIDE_64_sys_mq_timedreceive
1417SC_TRACE_EVENT(sys_mq_timedreceive,
1418 TP_PROTO(mqd_t mqdes, char * u_msg_ptr, size_t msg_len, unsigned int * u_msg_prio, const struct timespec * u_abs_timeout),
1419 TP_ARGS(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout),
1420 TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(char *, u_msg_ptr) __field(size_t, msg_len) __field_hex(unsigned int *, u_msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
1421 TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(u_msg_prio, u_msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
1422 TP_printk()
1423)
1424#endif
1425#ifndef OVERRIDE_64_sys_waitid
1426SC_TRACE_EVENT(sys_waitid,
1427 TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
1428 TP_ARGS(which, upid, infop, options, ru),
1429 TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
1430 TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
1431 TP_printk()
1432)
1433#endif
1434#ifndef OVERRIDE_64_sys_fchownat
1435SC_TRACE_EVENT(sys_fchownat,
1436 TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
1437 TP_ARGS(dfd, filename, user, group, flag),
1438 TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
1439 TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
1440 TP_printk()
1441)
1442#endif
1443#ifndef OVERRIDE_64_sys_linkat
1444SC_TRACE_EVENT(sys_linkat,
1445 TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
1446 TP_ARGS(olddfd, oldname, newdfd, newname, flags),
1447 TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
1448 TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
1449 TP_printk()
1450)
1451#endif
1452#ifndef OVERRIDE_64_sys_ppoll
1453SC_TRACE_EVENT(sys_ppoll,
1454 TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
1455 TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
1456 TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
1457 TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
1458 TP_printk()
1459)
1460#endif
1461#ifndef OVERRIDE_64_sys_preadv
1462SC_TRACE_EVENT(sys_preadv,
1463 TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
1464 TP_ARGS(fd, vec, vlen, pos_l, pos_h),
1465 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
1466 TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
1467 TP_printk()
1468)
1469#endif
1470#ifndef OVERRIDE_64_sys_pwritev
1471SC_TRACE_EVENT(sys_pwritev,
1472 TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
1473 TP_ARGS(fd, vec, vlen, pos_l, pos_h),
1474 TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
1475 TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
1476 TP_printk()
1477)
1478#endif
1479#ifndef OVERRIDE_64_sys_perf_event_open
1480SC_TRACE_EVENT(sys_perf_event_open,
1481 TP_PROTO(struct perf_event_attr * attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags),
1482 TP_ARGS(attr_uptr, pid, cpu, group_fd, flags),
1483 TP_STRUCT__entry(__field_hex(struct perf_event_attr *, attr_uptr) __field(pid_t, pid) __field(int, cpu) __field(int, group_fd) __field(unsigned long, flags)),
1484 TP_fast_assign(tp_assign(attr_uptr, attr_uptr) tp_assign(pid, pid) tp_assign(cpu, cpu) tp_assign(group_fd, group_fd) tp_assign(flags, flags)),
1485 TP_printk()
1486)
1487#endif
1488#ifndef OVERRIDE_64_sys_recvmmsg
1489SC_TRACE_EVENT(sys_recvmmsg,
1490 TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
1491 TP_ARGS(fd, mmsg, vlen, flags, timeout),
1492 TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
1493 TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
1494 TP_printk()
1495)
1496#endif
1497#ifndef OVERRIDE_64_sys_sendto
1498SC_TRACE_EVENT(sys_sendto,
1499 TP_PROTO(int fd, void * buff, size_t len, unsigned flags, struct sockaddr * addr, int addr_len),
1500 TP_ARGS(fd, buff, len, flags, addr, addr_len),
1501 TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned, flags) __field_hex(struct sockaddr *, addr) __field_hex(int, addr_len)),
1502 TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
1503 TP_printk()
1504)
1505#endif
1506#ifndef OVERRIDE_64_sys_recvfrom
1507SC_TRACE_EVENT(sys_recvfrom,
1508 TP_PROTO(int fd, void * ubuf, size_t size, unsigned flags, struct sockaddr * addr, int * addr_len),
1509 TP_ARGS(fd, ubuf, size, flags, addr, addr_len),
1510 TP_STRUCT__entry(__field(int, fd) __field_hex(void *, ubuf) __field(size_t, size) __field(unsigned, flags) __field_hex(struct sockaddr *, addr) __field_hex(int *, addr_len)),
1511 TP_fast_assign(tp_assign(fd, fd) tp_assign(ubuf, ubuf) tp_assign(size, size) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
1512 TP_printk()
1513)
1514#endif
1515#ifndef OVERRIDE_64_sys_futex
1516SC_TRACE_EVENT(sys_futex,
1517 TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
1518 TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
1519 TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
1520 TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
1521 TP_printk()
1522)
1523#endif
1524#ifndef OVERRIDE_64_sys_pselect6
1525SC_TRACE_EVENT(sys_pselect6,
1526 TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
1527 TP_ARGS(n, inp, outp, exp, tsp, sig),
1528 TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
1529 TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
1530 TP_printk()
1531)
1532#endif
1533#ifndef OVERRIDE_64_sys_splice
1534SC_TRACE_EVENT(sys_splice,
1535 TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
1536 TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
1537 TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
1538 TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
1539 TP_printk()
1540)
1541#endif
1542#ifndef OVERRIDE_64_sys_epoll_pwait
1543SC_TRACE_EVENT(sys_epoll_pwait,
1544 TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
1545 TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
1546 TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
1547 TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
1548 TP_printk()
1549)
1550#endif
1551
1552#endif /* _TRACE_SYSCALLS_POINTERS_H */
1553
1554/* This part must be outside protection */
1555#include "../../../probes/define_trace.h"
1556
1557#else /* CREATE_SYSCALL_TABLE */
1558
1559#include "x86-64-syscalls-3.0.4_pointers_override.h"
1560#include "syscalls_pointers_override.h"
1561
1562#ifndef OVERRIDE_TABLE_64_sys_read
1563TRACE_SYSCALL_TABLE(sys_read, sys_read, 0, 3)
1564#endif
1565#ifndef OVERRIDE_TABLE_64_sys_write
1566TRACE_SYSCALL_TABLE(sys_write, sys_write, 1, 3)
1567#endif
1568#ifndef OVERRIDE_TABLE_64_sys_open
1569TRACE_SYSCALL_TABLE(sys_open, sys_open, 2, 3)
1570#endif
1571#ifndef OVERRIDE_TABLE_64_sys_newstat
1572TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 4, 2)
1573#endif
1574#ifndef OVERRIDE_TABLE_64_sys_newfstat
1575TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 5, 2)
1576#endif
1577#ifndef OVERRIDE_TABLE_64_sys_newlstat
1578TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 6, 2)
1579#endif
1580#ifndef OVERRIDE_TABLE_64_sys_poll
1581TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 7, 3)
1582#endif
1583#ifndef OVERRIDE_TABLE_64_sys_rt_sigaction
1584TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 13, 4)
1585#endif
1586#ifndef OVERRIDE_TABLE_64_sys_rt_sigprocmask
1587TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 14, 4)
1588#endif
1589#ifndef OVERRIDE_TABLE_64_sys_readv
1590TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 19, 3)
1591#endif
1592#ifndef OVERRIDE_TABLE_64_sys_writev
1593TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 20, 3)
1594#endif
1595#ifndef OVERRIDE_TABLE_64_sys_access
1596TRACE_SYSCALL_TABLE(sys_access, sys_access, 21, 2)
1597#endif
1598#ifndef OVERRIDE_TABLE_64_sys_pipe
1599TRACE_SYSCALL_TABLE(sys_pipe, sys_pipe, 22, 1)
1600#endif
1601#ifndef OVERRIDE_TABLE_64_sys_select
1602TRACE_SYSCALL_TABLE(sys_select, sys_select, 23, 5)
1603#endif
1604#ifndef OVERRIDE_TABLE_64_sys_mincore
1605TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 27, 3)
1606#endif
1607#ifndef OVERRIDE_TABLE_64_sys_shmat
1608TRACE_SYSCALL_TABLE(sys_shmat, sys_shmat, 30, 3)
1609#endif
1610#ifndef OVERRIDE_TABLE_64_sys_shmctl
1611TRACE_SYSCALL_TABLE(sys_shmctl, sys_shmctl, 31, 3)
1612#endif
1613#ifndef OVERRIDE_TABLE_64_sys_nanosleep
1614TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 35, 2)
1615#endif
1616#ifndef OVERRIDE_TABLE_64_sys_getitimer
1617TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 36, 2)
1618#endif
1619#ifndef OVERRIDE_TABLE_64_sys_setitimer
1620TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 38, 3)
1621#endif
1622#ifndef OVERRIDE_TABLE_64_sys_sendfile64
1623TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 40, 4)
1624#endif
1625#ifndef OVERRIDE_TABLE_64_sys_connect
1626TRACE_SYSCALL_TABLE(sys_connect, sys_connect, 42, 3)
1627#endif
1628#ifndef OVERRIDE_TABLE_64_sys_accept
1629TRACE_SYSCALL_TABLE(sys_accept, sys_accept, 43, 3)
1630#endif
1631#ifndef OVERRIDE_TABLE_64_sys_sendto
1632TRACE_SYSCALL_TABLE(sys_sendto, sys_sendto, 44, 6)
1633#endif
1634#ifndef OVERRIDE_TABLE_64_sys_recvfrom
1635TRACE_SYSCALL_TABLE(sys_recvfrom, sys_recvfrom, 45, 6)
1636#endif
1637#ifndef OVERRIDE_TABLE_64_sys_sendmsg
1638TRACE_SYSCALL_TABLE(sys_sendmsg, sys_sendmsg, 46, 3)
1639#endif
1640#ifndef OVERRIDE_TABLE_64_sys_recvmsg
1641TRACE_SYSCALL_TABLE(sys_recvmsg, sys_recvmsg, 47, 3)
1642#endif
1643#ifndef OVERRIDE_TABLE_64_sys_bind
1644TRACE_SYSCALL_TABLE(sys_bind, sys_bind, 49, 3)
1645#endif
1646#ifndef OVERRIDE_TABLE_64_sys_getsockname
1647TRACE_SYSCALL_TABLE(sys_getsockname, sys_getsockname, 51, 3)
1648#endif
1649#ifndef OVERRIDE_TABLE_64_sys_getpeername
1650TRACE_SYSCALL_TABLE(sys_getpeername, sys_getpeername, 52, 3)
1651#endif
1652#ifndef OVERRIDE_TABLE_64_sys_socketpair
1653TRACE_SYSCALL_TABLE(sys_socketpair, sys_socketpair, 53, 4)
1654#endif
1655#ifndef OVERRIDE_TABLE_64_sys_setsockopt
1656TRACE_SYSCALL_TABLE(sys_setsockopt, sys_setsockopt, 54, 5)
1657#endif
1658#ifndef OVERRIDE_TABLE_64_sys_getsockopt
1659TRACE_SYSCALL_TABLE(sys_getsockopt, sys_getsockopt, 55, 5)
1660#endif
1661#ifndef OVERRIDE_TABLE_64_sys_wait4
1662TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 61, 4)
1663#endif
1664#ifndef OVERRIDE_TABLE_64_sys_newuname
1665TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 63, 1)
1666#endif
1667#ifndef OVERRIDE_TABLE_64_sys_semop
1668TRACE_SYSCALL_TABLE(sys_semop, sys_semop, 65, 3)
1669#endif
1670#ifndef OVERRIDE_TABLE_64_sys_shmdt
1671TRACE_SYSCALL_TABLE(sys_shmdt, sys_shmdt, 67, 1)
1672#endif
1673#ifndef OVERRIDE_TABLE_64_sys_msgsnd
1674TRACE_SYSCALL_TABLE(sys_msgsnd, sys_msgsnd, 69, 4)
1675#endif
1676#ifndef OVERRIDE_TABLE_64_sys_msgrcv
1677TRACE_SYSCALL_TABLE(sys_msgrcv, sys_msgrcv, 70, 5)
1678#endif
1679#ifndef OVERRIDE_TABLE_64_sys_msgctl
1680TRACE_SYSCALL_TABLE(sys_msgctl, sys_msgctl, 71, 3)
1681#endif
1682#ifndef OVERRIDE_TABLE_64_sys_truncate
1683TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 76, 2)
1684#endif
1685#ifndef OVERRIDE_TABLE_64_sys_getdents
1686TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 78, 3)
1687#endif
1688#ifndef OVERRIDE_TABLE_64_sys_getcwd
1689TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 79, 2)
1690#endif
1691#ifndef OVERRIDE_TABLE_64_sys_chdir
1692TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 80, 1)
1693#endif
1694#ifndef OVERRIDE_TABLE_64_sys_rename
1695TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 82, 2)
1696#endif
1697#ifndef OVERRIDE_TABLE_64_sys_mkdir
1698TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 83, 2)
1699#endif
1700#ifndef OVERRIDE_TABLE_64_sys_rmdir
1701TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 84, 1)
1702#endif
1703#ifndef OVERRIDE_TABLE_64_sys_creat
1704TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 85, 2)
1705#endif
1706#ifndef OVERRIDE_TABLE_64_sys_link
1707TRACE_SYSCALL_TABLE(sys_link, sys_link, 86, 2)
1708#endif
1709#ifndef OVERRIDE_TABLE_64_sys_unlink
1710TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 87, 1)
1711#endif
1712#ifndef OVERRIDE_TABLE_64_sys_symlink
1713TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 88, 2)
1714#endif
1715#ifndef OVERRIDE_TABLE_64_sys_readlink
1716TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 89, 3)
1717#endif
1718#ifndef OVERRIDE_TABLE_64_sys_chmod
1719TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 90, 2)
1720#endif
1721#ifndef OVERRIDE_TABLE_64_sys_chown
1722TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 92, 3)
1723#endif
1724#ifndef OVERRIDE_TABLE_64_sys_lchown
1725TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 94, 3)
1726#endif
1727#ifndef OVERRIDE_TABLE_64_sys_gettimeofday
1728TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 96, 2)
1729#endif
1730#ifndef OVERRIDE_TABLE_64_sys_getrlimit
1731TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 97, 2)
1732#endif
1733#ifndef OVERRIDE_TABLE_64_sys_getrusage
1734TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 98, 2)
1735#endif
1736#ifndef OVERRIDE_TABLE_64_sys_sysinfo
1737TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 99, 1)
1738#endif
1739#ifndef OVERRIDE_TABLE_64_sys_times
1740TRACE_SYSCALL_TABLE(sys_times, sys_times, 100, 1)
1741#endif
1742#ifndef OVERRIDE_TABLE_64_sys_syslog
1743TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 103, 3)
1744#endif
1745#ifndef OVERRIDE_TABLE_64_sys_getgroups
1746TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 115, 2)
1747#endif
1748#ifndef OVERRIDE_TABLE_64_sys_setgroups
1749TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 116, 2)
1750#endif
1751#ifndef OVERRIDE_TABLE_64_sys_getresuid
1752TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 118, 3)
1753#endif
1754#ifndef OVERRIDE_TABLE_64_sys_getresgid
1755TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 120, 3)
1756#endif
1757#ifndef OVERRIDE_TABLE_64_sys_rt_sigpending
1758TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 127, 2)
1759#endif
1760#ifndef OVERRIDE_TABLE_64_sys_rt_sigtimedwait
1761TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 128, 4)
1762#endif
1763#ifndef OVERRIDE_TABLE_64_sys_rt_sigqueueinfo
1764TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 129, 3)
1765#endif
1766#ifndef OVERRIDE_TABLE_64_sys_rt_sigsuspend
1767TRACE_SYSCALL_TABLE(sys_rt_sigsuspend, sys_rt_sigsuspend, 130, 2)
1768#endif
1769#ifndef OVERRIDE_TABLE_64_sys_utime
1770TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 132, 2)
1771#endif
1772#ifndef OVERRIDE_TABLE_64_sys_mknod
1773TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 133, 3)
1774#endif
1775#ifndef OVERRIDE_TABLE_64_sys_ustat
1776TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 136, 2)
1777#endif
1778#ifndef OVERRIDE_TABLE_64_sys_statfs
1779TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 137, 2)
1780#endif
1781#ifndef OVERRIDE_TABLE_64_sys_fstatfs
1782TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 138, 2)
1783#endif
1784#ifndef OVERRIDE_TABLE_64_sys_sched_setparam
1785TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 142, 2)
1786#endif
1787#ifndef OVERRIDE_TABLE_64_sys_sched_getparam
1788TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 143, 2)
1789#endif
1790#ifndef OVERRIDE_TABLE_64_sys_sched_setscheduler
1791TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 144, 3)
1792#endif
1793#ifndef OVERRIDE_TABLE_64_sys_sched_rr_get_interval
1794TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 148, 2)
1795#endif
1796#ifndef OVERRIDE_TABLE_64_sys_pivot_root
1797TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 155, 2)
1798#endif
1799#ifndef OVERRIDE_TABLE_64_sys_sysctl
1800TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 156, 1)
1801#endif
1802#ifndef OVERRIDE_TABLE_64_sys_adjtimex
1803TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 159, 1)
1804#endif
1805#ifndef OVERRIDE_TABLE_64_sys_setrlimit
1806TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 160, 2)
1807#endif
1808#ifndef OVERRIDE_TABLE_64_sys_chroot
1809TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 161, 1)
1810#endif
1811#ifndef OVERRIDE_TABLE_64_sys_settimeofday
1812TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 164, 2)
1813#endif
1814#ifndef OVERRIDE_TABLE_64_sys_mount
1815TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 165, 5)
1816#endif
1817#ifndef OVERRIDE_TABLE_64_sys_umount
1818TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 166, 2)
1819#endif
1820#ifndef OVERRIDE_TABLE_64_sys_swapon
1821TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 167, 2)
1822#endif
1823#ifndef OVERRIDE_TABLE_64_sys_swapoff
1824TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 168, 1)
1825#endif
1826#ifndef OVERRIDE_TABLE_64_sys_reboot
1827TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 169, 4)
1828#endif
1829#ifndef OVERRIDE_TABLE_64_sys_sethostname
1830TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 170, 2)
1831#endif
1832#ifndef OVERRIDE_TABLE_64_sys_setdomainname
1833TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 171, 2)
1834#endif
1835#ifndef OVERRIDE_TABLE_64_sys_init_module
1836TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 175, 3)
1837#endif
1838#ifndef OVERRIDE_TABLE_64_sys_delete_module
1839TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 176, 2)
1840#endif
1841#ifndef OVERRIDE_TABLE_64_sys_nfsservctl
1842TRACE_SYSCALL_TABLE(sys_nfsservctl, sys_nfsservctl, 180, 3)
1843#endif
1844#ifndef OVERRIDE_TABLE_64_sys_setxattr
1845TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 188, 5)
1846#endif
1847#ifndef OVERRIDE_TABLE_64_sys_lsetxattr
1848TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 189, 5)
1849#endif
1850#ifndef OVERRIDE_TABLE_64_sys_fsetxattr
1851TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 190, 5)
1852#endif
1853#ifndef OVERRIDE_TABLE_64_sys_getxattr
1854TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 191, 4)
1855#endif
1856#ifndef OVERRIDE_TABLE_64_sys_lgetxattr
1857TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 192, 4)
1858#endif
1859#ifndef OVERRIDE_TABLE_64_sys_fgetxattr
1860TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 193, 4)
1861#endif
1862#ifndef OVERRIDE_TABLE_64_sys_listxattr
1863TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 194, 3)
1864#endif
1865#ifndef OVERRIDE_TABLE_64_sys_llistxattr
1866TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 195, 3)
1867#endif
1868#ifndef OVERRIDE_TABLE_64_sys_flistxattr
1869TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 196, 3)
1870#endif
1871#ifndef OVERRIDE_TABLE_64_sys_removexattr
1872TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 197, 2)
1873#endif
1874#ifndef OVERRIDE_TABLE_64_sys_lremovexattr
1875TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 198, 2)
1876#endif
1877#ifndef OVERRIDE_TABLE_64_sys_fremovexattr
1878TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 199, 2)
1879#endif
1880#ifndef OVERRIDE_TABLE_64_sys_time
1881TRACE_SYSCALL_TABLE(sys_time, sys_time, 201, 1)
1882#endif
1883#ifndef OVERRIDE_TABLE_64_sys_futex
1884TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 202, 6)
1885#endif
1886#ifndef OVERRIDE_TABLE_64_sys_sched_setaffinity
1887TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 203, 3)
1888#endif
1889#ifndef OVERRIDE_TABLE_64_sys_sched_getaffinity
1890TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 204, 3)
1891#endif
1892#ifndef OVERRIDE_TABLE_64_sys_io_setup
1893TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 206, 2)
1894#endif
1895#ifndef OVERRIDE_TABLE_64_sys_io_getevents
1896TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 208, 5)
1897#endif
1898#ifndef OVERRIDE_TABLE_64_sys_io_submit
1899TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 209, 3)
1900#endif
1901#ifndef OVERRIDE_TABLE_64_sys_io_cancel
1902TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 210, 3)
1903#endif
1904#ifndef OVERRIDE_TABLE_64_sys_getdents64
1905TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 217, 3)
1906#endif
1907#ifndef OVERRIDE_TABLE_64_sys_set_tid_address
1908TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 218, 1)
1909#endif
1910#ifndef OVERRIDE_TABLE_64_sys_semtimedop
1911TRACE_SYSCALL_TABLE(sys_semtimedop, sys_semtimedop, 220, 4)
1912#endif
1913#ifndef OVERRIDE_TABLE_64_sys_timer_create
1914TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 222, 3)
1915#endif
1916#ifndef OVERRIDE_TABLE_64_sys_timer_settime
1917TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 223, 4)
1918#endif
1919#ifndef OVERRIDE_TABLE_64_sys_timer_gettime
1920TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 224, 2)
1921#endif
1922#ifndef OVERRIDE_TABLE_64_sys_clock_settime
1923TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 227, 2)
1924#endif
1925#ifndef OVERRIDE_TABLE_64_sys_clock_gettime
1926TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 228, 2)
1927#endif
1928#ifndef OVERRIDE_TABLE_64_sys_clock_getres
1929TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 229, 2)
1930#endif
1931#ifndef OVERRIDE_TABLE_64_sys_clock_nanosleep
1932TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 230, 4)
1933#endif
1934#ifndef OVERRIDE_TABLE_64_sys_epoll_wait
1935TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 232, 4)
1936#endif
1937#ifndef OVERRIDE_TABLE_64_sys_epoll_ctl
1938TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 233, 4)
1939#endif
1940#ifndef OVERRIDE_TABLE_64_sys_utimes
1941TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 235, 2)
1942#endif
1943#ifndef OVERRIDE_TABLE_64_sys_mq_open
1944TRACE_SYSCALL_TABLE(sys_mq_open, sys_mq_open, 240, 4)
1945#endif
1946#ifndef OVERRIDE_TABLE_64_sys_mq_unlink
1947TRACE_SYSCALL_TABLE(sys_mq_unlink, sys_mq_unlink, 241, 1)
1948#endif
1949#ifndef OVERRIDE_TABLE_64_sys_mq_timedsend
1950TRACE_SYSCALL_TABLE(sys_mq_timedsend, sys_mq_timedsend, 242, 5)
1951#endif
1952#ifndef OVERRIDE_TABLE_64_sys_mq_timedreceive
1953TRACE_SYSCALL_TABLE(sys_mq_timedreceive, sys_mq_timedreceive, 243, 5)
1954#endif
1955#ifndef OVERRIDE_TABLE_64_sys_mq_notify
1956TRACE_SYSCALL_TABLE(sys_mq_notify, sys_mq_notify, 244, 2)
1957#endif
1958#ifndef OVERRIDE_TABLE_64_sys_mq_getsetattr
1959TRACE_SYSCALL_TABLE(sys_mq_getsetattr, sys_mq_getsetattr, 245, 3)
1960#endif
1961#ifndef OVERRIDE_TABLE_64_sys_kexec_load
1962TRACE_SYSCALL_TABLE(sys_kexec_load, sys_kexec_load, 246, 4)
1963#endif
1964#ifndef OVERRIDE_TABLE_64_sys_waitid
1965TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 247, 5)
1966#endif
1967#ifndef OVERRIDE_TABLE_64_sys_inotify_add_watch
1968TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 254, 3)
1969#endif
1970#ifndef OVERRIDE_TABLE_64_sys_openat
1971TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 257, 4)
1972#endif
1973#ifndef OVERRIDE_TABLE_64_sys_mkdirat
1974TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 258, 3)
1975#endif
1976#ifndef OVERRIDE_TABLE_64_sys_mknodat
1977TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 259, 4)
1978#endif
1979#ifndef OVERRIDE_TABLE_64_sys_fchownat
1980TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 260, 5)
1981#endif
1982#ifndef OVERRIDE_TABLE_64_sys_futimesat
1983TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 261, 3)
1984#endif
1985#ifndef OVERRIDE_TABLE_64_sys_newfstatat
1986TRACE_SYSCALL_TABLE(sys_newfstatat, sys_newfstatat, 262, 4)
1987#endif
1988#ifndef OVERRIDE_TABLE_64_sys_unlinkat
1989TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 263, 3)
1990#endif
1991#ifndef OVERRIDE_TABLE_64_sys_renameat
1992TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 264, 4)
1993#endif
1994#ifndef OVERRIDE_TABLE_64_sys_linkat
1995TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 265, 5)
1996#endif
1997#ifndef OVERRIDE_TABLE_64_sys_symlinkat
1998TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 266, 3)
1999#endif
2000#ifndef OVERRIDE_TABLE_64_sys_readlinkat
2001TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 267, 4)
2002#endif
2003#ifndef OVERRIDE_TABLE_64_sys_fchmodat
2004TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 268, 3)
2005#endif
2006#ifndef OVERRIDE_TABLE_64_sys_faccessat
2007TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 269, 3)
2008#endif
2009#ifndef OVERRIDE_TABLE_64_sys_pselect6
2010TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 270, 6)
2011#endif
2012#ifndef OVERRIDE_TABLE_64_sys_ppoll
2013TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 271, 5)
2014#endif
2015#ifndef OVERRIDE_TABLE_64_sys_set_robust_list
2016TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 273, 2)
2017#endif
2018#ifndef OVERRIDE_TABLE_64_sys_get_robust_list
2019TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 274, 3)
2020#endif
2021#ifndef OVERRIDE_TABLE_64_sys_splice
2022TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 275, 6)
2023#endif
2024#ifndef OVERRIDE_TABLE_64_sys_vmsplice
2025TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 278, 4)
2026#endif
2027#ifndef OVERRIDE_TABLE_64_sys_utimensat
2028TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 280, 4)
2029#endif
2030#ifndef OVERRIDE_TABLE_64_sys_epoll_pwait
2031TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 281, 6)
2032#endif
2033#ifndef OVERRIDE_TABLE_64_sys_signalfd
2034TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 282, 3)
2035#endif
2036#ifndef OVERRIDE_TABLE_64_sys_timerfd_settime
2037TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 286, 4)
2038#endif
2039#ifndef OVERRIDE_TABLE_64_sys_timerfd_gettime
2040TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 287, 2)
2041#endif
2042#ifndef OVERRIDE_TABLE_64_sys_accept4
2043TRACE_SYSCALL_TABLE(sys_accept4, sys_accept4, 288, 4)
2044#endif
2045#ifndef OVERRIDE_TABLE_64_sys_signalfd4
2046TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 289, 4)
2047#endif
2048#ifndef OVERRIDE_TABLE_64_sys_pipe2
2049TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 293, 2)
2050#endif
2051#ifndef OVERRIDE_TABLE_64_sys_preadv
2052TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 295, 5)
2053#endif
2054#ifndef OVERRIDE_TABLE_64_sys_pwritev
2055TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 296, 5)
2056#endif
2057#ifndef OVERRIDE_TABLE_64_sys_rt_tgsigqueueinfo
2058TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 297, 4)
2059#endif
2060#ifndef OVERRIDE_TABLE_64_sys_perf_event_open
2061TRACE_SYSCALL_TABLE(sys_perf_event_open, sys_perf_event_open, 298, 5)
2062#endif
2063#ifndef OVERRIDE_TABLE_64_sys_recvmmsg
2064TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 299, 5)
2065#endif
2066#ifndef OVERRIDE_TABLE_64_sys_prlimit64
2067TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 302, 4)
2068#endif
2069#ifndef OVERRIDE_TABLE_64_sys_clock_adjtime
2070TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 305, 2)
2071#endif
2072#ifndef OVERRIDE_TABLE_64_sys_sendmmsg
2073TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 307, 4)
2074#endif
2075
2076#endif /* CREATE_SYSCALL_TABLE */
diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h
deleted file mode 100644
index 0cdb32a1d41e..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifndef CREATE_SYSCALL_TABLE
2
3#else /* CREATE_SYSCALL_TABLE */
4
5#endif /* CREATE_SYSCALL_TABLE */
diff --git a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile
deleted file mode 100644
index 4beb88c05c3a..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-m += lttng-syscalls-extractor.o
diff --git a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c
deleted file mode 100644
index 06c0da13cad2..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
3 * Copyright 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
4 *
5 * Dump syscall metadata to console.
6 *
7 * GPLv2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/list.h>
14#include <linux/err.h>
15#include <linux/slab.h>
16#include <linux/kallsyms.h>
17#include <linux/dcache.h>
18#include <linux/ftrace_event.h>
19#include <trace/syscall.h>
20
21#ifndef CONFIG_FTRACE_SYSCALLS
22#error "You need to set CONFIG_FTRACE_SYSCALLS=y"
23#endif
24
25#ifndef CONFIG_KALLSYMS_ALL
26#error "You need to set CONFIG_KALLSYMS_ALL=y"
27#endif
28
29static struct syscall_metadata **__start_syscalls_metadata;
30static struct syscall_metadata **__stop_syscalls_metadata;
31
32static __init
33struct syscall_metadata *find_syscall_meta(unsigned long syscall)
34{
35 struct syscall_metadata **iter;
36
37 for (iter = __start_syscalls_metadata;
38 iter < __stop_syscalls_metadata; iter++) {
39 if ((*iter)->syscall_nr == syscall)
40 return (*iter);
41 }
42 return NULL;
43}
44
45int init_module(void)
46{
47 struct syscall_metadata *meta;
48 int i;
49
50 __start_syscalls_metadata = (void *) kallsyms_lookup_name("__start_syscalls_metadata");
51 __stop_syscalls_metadata = (void *) kallsyms_lookup_name("__stop_syscalls_metadata");
52
53 for (i = 0; i < NR_syscalls; i++) {
54 int j;
55
56 meta = find_syscall_meta(i);
57 if (!meta)
58 continue;
59 printk("syscall %s nr %d nbargs %d ",
60 meta->name, meta->syscall_nr, meta->nb_args);
61 printk("types: (");
62 for (j = 0; j < meta->nb_args; j++) {
63 if (j > 0)
64 printk(", ");
65 printk("%s", meta->types[j]);
66 }
67 printk(") ");
68 printk("args: (");
69 for (j = 0; j < meta->nb_args; j++) {
70 if (j > 0)
71 printk(", ");
72 printk("%s", meta->args[j]);
73 }
74 printk(")\n");
75 }
76 printk("SUCCESS\n");
77
78 return -1;
79}
80
81void cleanup_module(void)
82{
83}
84
85MODULE_LICENSE("GPL");
diff --git a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh
deleted file mode 100644
index 5eddb2746801..000000000000
--- a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh
+++ /dev/null
@@ -1,275 +0,0 @@
1#!/bin/sh
2
3# Generate system call probe description macros from syscall metadata dump file.
4# example usage:
5#
6# lttng-syscalls-generate-headers.sh integers 3.0.4 x86-64-syscalls-3.0.4 64
7# lttng-syscalls-generate-headers.sh pointers 3.0.4 x86-64-syscalls-3.0.4 64
8
9CLASS=$1
10INPUTDIR=$2
11INPUTFILE=$3
12BITNESS=$4
13INPUT=${INPUTDIR}/${INPUTFILE}
14SRCFILE=gen.tmp.0
15TMPFILE=gen.tmp.1
16HEADER=headers/${INPUTFILE}_${CLASS}.h
17
18cp ${INPUT} ${SRCFILE}
19
20#Cleanup
21perl -p -e 's/^\[.*\] //g' ${SRCFILE} > ${TMPFILE}
22mv ${TMPFILE} ${SRCFILE}
23
24perl -p -e 's/^syscall sys_([^ ]*)/syscall $1/g' ${SRCFILE} > ${TMPFILE}
25mv ${TMPFILE} ${SRCFILE}
26
27#Filter
28
29if [ "$CLASS" = integers ]; then
30 #select integers and no-args.
31 CLASSCAP=INTEGERS
32 grep -v "\\*\|cap_user_header_t" ${SRCFILE} > ${TMPFILE}
33 mv ${TMPFILE} ${SRCFILE}
34fi
35
36
37if [ "$CLASS" = pointers ]; then
38 #select system calls using pointers.
39 CLASSCAP=POINTERS
40 grep "\\*\|cap_#user_header_t" ${SRCFILE} > ${TMPFILE}
41 mv ${TMPFILE} ${SRCFILE}
42fi
43
44echo "/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */" > ${HEADER}
45
46echo \
47"#ifndef CREATE_SYSCALL_TABLE
48
49#if !defined(_TRACE_SYSCALLS_${CLASSCAP}_H) || defined(TRACE_HEADER_MULTI_READ)
50#define _TRACE_SYSCALLS_${CLASSCAP}_H
51
52#include <linux/tracepoint.h>
53#include <linux/syscalls.h>
54#include \"${INPUTFILE}_${CLASS}_override.h\"
55#include \"syscalls_${CLASS}_override.h\"
56" >> ${HEADER}
57
58if [ "$CLASS" = integers ]; then
59
60NRARGS=0
61
62echo \
63'SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,\n'\
64' TP_STRUCT__entry(),\n'\
65' TP_fast_assign(),\n'\
66' TP_printk()\n'\
67')'\
68 >> ${HEADER}
69
70grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
71perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
72'types: \(([^)]*)\) '\
73'args: \(([^)]*)\)/'\
74'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
75'SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_$1)\n'\
76'#endif/g'\
77 ${TMPFILE} >> ${HEADER}
78
79fi
80
81
82# types: 4
83# args 5
84
85NRARGS=1
86grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
87perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
88'types: \(([^)]*)\) '\
89'args: \(([^)]*)\)/'\
90'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
91'SC_TRACE_EVENT(sys_$1,\n'\
92' TP_PROTO($4 $5),\n'\
93' TP_ARGS($5),\n'\
94' TP_STRUCT__entry(__field($4, $5)),\n'\
95' TP_fast_assign(tp_assign($4, $5, $5)),\n'\
96' TP_printk()\n'\
97')\n'\
98'#endif/g'\
99 ${TMPFILE} >> ${HEADER}
100
101# types: 4 5
102# args 6 7
103
104NRARGS=2
105grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
106perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
107'types: \(([^,]*), ([^)]*)\) '\
108'args: \(([^,]*), ([^)]*)\)/'\
109'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
110'SC_TRACE_EVENT(sys_$1,\n'\
111' TP_PROTO($4 $6, $5 $7),\n'\
112' TP_ARGS($6, $7),\n'\
113' TP_STRUCT__entry(__field($4, $6) __field($5, $7)),\n'\
114' TP_fast_assign(tp_assign($4, $6, $6) tp_assign($5, $7, $7)),\n'\
115' TP_printk()\n'\
116')\n'\
117'#endif/g'\
118 ${TMPFILE} >> ${HEADER}
119
120# types: 4 5 6
121# args 7 8 9
122
123NRARGS=3
124grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
125perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
126'types: \(([^,]*), ([^,]*), ([^)]*)\) '\
127'args: \(([^,]*), ([^,]*), ([^)]*)\)/'\
128'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
129'SC_TRACE_EVENT(sys_$1,\n'\
130' TP_PROTO($4 $7, $5 $8, $6 $9),\n'\
131' TP_ARGS($7, $8, $9),\n'\
132' TP_STRUCT__entry(__field($4, $7) __field($5, $8) __field($6, $9)),\n'\
133' TP_fast_assign(tp_assign($4, $7, $7) tp_assign($5, $8, $8) tp_assign($6, $9, $9)),\n'\
134' TP_printk()\n'\
135')\n'\
136'#endif/g'\
137 ${TMPFILE} >> ${HEADER}
138
139
140# types: 4 5 6 7
141# args 8 9 10 11
142
143NRARGS=4
144grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
145perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
146'types: \(([^,]*), ([^,]*), ([^,]*), ([^)]*)\) '\
147'args: \(([^,]*), ([^,]*), ([^,]*), ([^)]*)\)/'\
148'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
149'SC_TRACE_EVENT(sys_$1,\n'\
150' TP_PROTO($4 $8, $5 $9, $6 $10, $7 $11),\n'\
151' TP_ARGS($8, $9, $10, $11),\n'\
152' TP_STRUCT__entry(__field($4, $8) __field($5, $9) __field($6, $10) __field($7, $11)),\n'\
153' TP_fast_assign(tp_assign($4, $8, $8) tp_assign($5, $9, $9) tp_assign($6, $10, $10) tp_assign($7, $11, $11)),\n'\
154' TP_printk()\n'\
155')\n'\
156'#endif/g'\
157 ${TMPFILE} >> ${HEADER}
158
159# types: 4 5 6 7 8
160# args 9 10 11 12 13
161
162NRARGS=5
163grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
164perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
165'types: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^)]*)\) '\
166'args: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^)]*)\)/'\
167'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
168'SC_TRACE_EVENT(sys_$1,\n'\
169' TP_PROTO($4 $9, $5 $10, $6 $11, $7 $12, $8 $13),\n'\
170' TP_ARGS($9, $10, $11, $12, $13),\n'\
171' TP_STRUCT__entry(__field($4, $9) __field($5, $10) __field($6, $11) __field($7, $12) __field($8, $13)),\n'\
172' TP_fast_assign(tp_assign($4, $9, $9) tp_assign($5, $10, $10) tp_assign($6, $11, $11) tp_assign($7, $12, $12) tp_assign($8, $13, $13)),\n'\
173' TP_printk()\n'\
174')\n'\
175'#endif/g'\
176 ${TMPFILE} >> ${HEADER}
177
178
179# types: 4 5 6 7 8 9
180# args 10 11 12 13 14 15
181
182NRARGS=6
183grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
184perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
185'types: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^\)]*)\) '\
186'args: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^\)]*)\)/'\
187'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
188'SC_TRACE_EVENT(sys_$1,\n'\
189' TP_PROTO($4 $10, $5 $11, $6 $12, $7 $13, $8 $14, $9 $15),\n'\
190' TP_ARGS($10, $11, $12, $13, $14, $15),\n'\
191' TP_STRUCT__entry(__field($4, $10) __field($5, $11) __field($6, $12) __field($7, $13) __field($8, $14) __field($9, $15)),\n'\
192' TP_fast_assign(tp_assign($4, $10, $10) tp_assign($5, $11, $11) tp_assign($6, $12, $12) tp_assign($7, $13, $13) tp_assign($8, $14, $14) tp_assign($9, $15, $15)),\n'\
193' TP_printk()\n'\
194')\n'\
195'#endif/g'\
196 ${TMPFILE} >> ${HEADER}
197
198# Macro for tracing syscall table
199
200rm -f ${TMPFILE}
201for NRARGS in $(seq 0 6); do
202 grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} >> ${TMPFILE}
203done
204
205echo \
206"
207#endif /* _TRACE_SYSCALLS_${CLASSCAP}_H */
208
209/* This part must be outside protection */
210#include \"../../../probes/define_trace.h\"
211
212#else /* CREATE_SYSCALL_TABLE */
213
214#include \"${INPUTFILE}_${CLASS}_override.h\"
215#include \"syscalls_${CLASS}_override.h\"
216" >> ${HEADER}
217
218NRARGS=0
219
220if [ "$CLASS" = integers ]; then
221#noargs
222grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
223perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) .*$/'\
224'#ifndef OVERRIDE_TABLE_'"${BITNESS}"'_sys_$1\n'\
225'TRACE_SYSCALL_TABLE\(syscalls_noargs, sys_$1, $2, $3\)\n'\
226'#endif/g'\
227 ${TMPFILE} >> ${HEADER}
228fi
229
230#others.
231grep -v "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
232perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) .*$/'\
233'#ifndef OVERRIDE_TABLE_'"${BITNESS}"'_sys_$1\n'\
234'TRACE_SYSCALL_TABLE(sys_$1, sys_$1, $2, $3)\n'\
235'#endif/g'\
236 ${TMPFILE} >> ${HEADER}
237
238echo -n \
239"
240#endif /* CREATE_SYSCALL_TABLE */
241" >> ${HEADER}
242
243#fields names: ...char * type with *name* or *file* or *path* or *root*
244# or *put_old* or *type*
245cp -f ${HEADER} ${TMPFILE}
246rm -f ${HEADER}
247perl -p -e 's/__field\(([^,)]*char \*), ([^\)]*)(name|file|path|root|put_old|type)([^\)]*)\)/__string_from_user($2$3$4, $2$3$4)/g'\
248 ${TMPFILE} >> ${HEADER}
249cp -f ${HEADER} ${TMPFILE}
250rm -f ${HEADER}
251perl -p -e 's/tp_assign\(([^,)]*char \*), ([^,]*)(name|file|path|root|put_old|type)([^,]*), ([^\)]*)\)/tp_copy_string_from_user($2$3$4, $5)/g'\
252 ${TMPFILE} >> ${HEADER}
253
254#prettify addresses heuristics.
255#field names with addr or ptr
256cp -f ${HEADER} ${TMPFILE}
257rm -f ${HEADER}
258perl -p -e 's/__field\(([^,)]*), ([^,)]*addr|[^,)]*ptr)([^),]*)\)/__field_hex($1, $2$3)/g'\
259 ${TMPFILE} >> ${HEADER}
260
261#field types ending with '*'
262cp -f ${HEADER} ${TMPFILE}
263rm -f ${HEADER}
264perl -p -e 's/__field\(([^,)]*\*), ([^),]*)\)/__field_hex($1, $2)/g'\
265 ${TMPFILE} >> ${HEADER}
266
267#strip the extra type information from tp_assign.
268cp -f ${HEADER} ${TMPFILE}
269rm -f ${HEADER}
270perl -p -e 's/tp_assign\(([^,)]*), ([^,]*), ([^\)]*)\)/tp_assign($2, $3)/g'\
271 ${TMPFILE} >> ${HEADER}
272
273rm -f ${INPUTFILE}.tmp
274rm -f ${TMPFILE}
275rm -f ${SRCFILE}
diff --git a/drivers/staging/lttng/lib/Makefile b/drivers/staging/lttng/lib/Makefile
deleted file mode 100644
index e5735ecaa6fc..000000000000
--- a/drivers/staging/lttng/lib/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1obj-m += lib-ring-buffer.o
2
3lib-ring-buffer-objs := \
4 ringbuffer/ring_buffer_backend.o \
5 ringbuffer/ring_buffer_frontend.o \
6 ringbuffer/ring_buffer_iterator.o \
7 ringbuffer/ring_buffer_vfs.o \
8 ringbuffer/ring_buffer_splice.o \
9 ringbuffer/ring_buffer_mmap.o \
10 prio_heap/lttng_prio_heap.o \
11 ../wrapper/splice.o
diff --git a/drivers/staging/lttng/lib/align.h b/drivers/staging/lttng/lib/align.h
deleted file mode 100644
index 0b861000300c..000000000000
--- a/drivers/staging/lttng/lib/align.h
+++ /dev/null
@@ -1,61 +0,0 @@
1#ifndef _LTTNG_ALIGN_H
2#define _LTTNG_ALIGN_H
3
4/*
5 * lib/align.h
6 *
7 * (C) Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Dual LGPL v2.1/GPL v2 license.
10 */
11
12#ifdef __KERNEL__
13
14#include <linux/types.h>
15#include "bug.h"
16
17#define ALIGN_FLOOR(x, a) __ALIGN_FLOOR_MASK(x, (typeof(x)) (a) - 1)
18#define __ALIGN_FLOOR_MASK(x, mask) ((x) & ~(mask))
19#define PTR_ALIGN_FLOOR(p, a) \
20 ((typeof(p)) ALIGN_FLOOR((unsigned long) (p), a))
21
22/*
23 * Align pointer on natural object alignment.
24 */
25#define object_align(obj) PTR_ALIGN(obj, __alignof__(*(obj)))
26#define object_align_floor(obj) PTR_ALIGN_FLOOR(obj, __alignof__(*(obj)))
27
28/**
29 * offset_align - Calculate the offset needed to align an object on its natural
30 * alignment towards higher addresses.
31 * @align_drift: object offset from an "alignment"-aligned address.
32 * @alignment: natural object alignment. Must be non-zero, power of 2.
33 *
34 * Returns the offset that must be added to align towards higher
35 * addresses.
36 */
37#define offset_align(align_drift, alignment) \
38 ({ \
39 BUILD_RUNTIME_BUG_ON((alignment) == 0 \
40 || ((alignment) & ((alignment) - 1))); \
41 (((alignment) - (align_drift)) & ((alignment) - 1)); \
42 })
43
44/**
45 * offset_align_floor - Calculate the offset needed to align an object
46 * on its natural alignment towards lower addresses.
47 * @align_drift: object offset from an "alignment"-aligned address.
48 * @alignment: natural object alignment. Must be non-zero, power of 2.
49 *
50 * Returns the offset that must be substracted to align towards lower addresses.
51 */
52#define offset_align_floor(align_drift, alignment) \
53 ({ \
54 BUILD_RUNTIME_BUG_ON((alignment) == 0 \
55 || ((alignment) & ((alignment) - 1))); \
56 (((align_drift) - (alignment)) & ((alignment) - 1); \
57 })
58
59#endif /* __KERNEL__ */
60
61#endif
diff --git a/drivers/staging/lttng/lib/bitfield.h b/drivers/staging/lttng/lib/bitfield.h
deleted file mode 100644
index 861e6dcd8107..000000000000
--- a/drivers/staging/lttng/lib/bitfield.h
+++ /dev/null
@@ -1,400 +0,0 @@
1#ifndef _BABELTRACE_BITFIELD_H
2#define _BABELTRACE_BITFIELD_H
3
4/*
5 * BabelTrace
6 *
7 * Bitfields read/write functions.
8 *
9 * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this software and associated documentation files (the "Software"), to deal
13 * in the Software without restriction, including without limitation the rights
14 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the Software is
16 * furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 */
21
22#include "../ltt-endian.h"
23
24#ifndef CHAR_BIT
25#define CHAR_BIT 8
26#endif
27
28/* We can't shift a int from 32 bit, >> 32 and << 32 on int is undefined */
29#define _bt_piecewise_rshift(_v, _shift) \
30({ \
31 typeof(_v) ___v = (_v); \
32 typeof(_shift) ___shift = (_shift); \
33 unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
34 unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
35 \
36 for (; sb; sb--) \
37 ___v >>= sizeof(___v) * CHAR_BIT - 1; \
38 ___v >>= final; \
39})
40
41#define _bt_piecewise_lshift(_v, _shift) \
42({ \
43 typeof(_v) ___v = (_v); \
44 typeof(_shift) ___shift = (_shift); \
45 unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
46 unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
47 \
48 for (; sb; sb--) \
49 ___v <<= sizeof(___v) * CHAR_BIT - 1; \
50 ___v <<= final; \
51})
52
53#define _bt_is_signed_type(type) (((type)(-1)) < 0)
54
55#define _bt_unsigned_cast(type, v) \
56({ \
57 (sizeof(v) < sizeof(type)) ? \
58 ((type) (v)) & (~(~(type) 0 << (sizeof(v) * CHAR_BIT))) : \
59 (type) (v); \
60})
61
62/*
63 * bt_bitfield_write - write integer to a bitfield in native endianness
64 *
65 * Save integer to the bitfield, which starts at the "start" bit, has "len"
66 * bits.
67 * The inside of a bitfield is from high bits to low bits.
68 * Uses native endianness.
69 * For unsigned "v", pad MSB with 0 if bitfield is larger than v.
70 * For signed "v", sign-extend v if bitfield is larger than v.
71 *
72 * On little endian, bytes are placed from the less significant to the most
73 * significant. Also, consecutive bitfields are placed from lower bits to higher
74 * bits.
75 *
76 * On big endian, bytes are places from most significant to less significant.
77 * Also, consecutive bitfields are placed from higher to lower bits.
78 */
79
80#define _bt_bitfield_write_le(_ptr, type, _start, _length, _v) \
81do { \
82 typeof(_v) __v = (_v); \
83 type *__ptr = (void *) (_ptr); \
84 unsigned long __start = (_start), __length = (_length); \
85 type mask, cmask; \
86 unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
87 unsigned long start_unit, end_unit, this_unit; \
88 unsigned long end, cshift; /* cshift is "complement shift" */ \
89 \
90 if (!__length) \
91 break; \
92 \
93 end = __start + __length; \
94 start_unit = __start / ts; \
95 end_unit = (end + (ts - 1)) / ts; \
96 \
97 /* Trim v high bits */ \
98 if (__length < sizeof(__v) * CHAR_BIT) \
99 __v &= ~((~(typeof(__v)) 0) << __length); \
100 \
101 /* We can now append v with a simple "or", shift it piece-wise */ \
102 this_unit = start_unit; \
103 if (start_unit == end_unit - 1) { \
104 mask = ~((~(type) 0) << (__start % ts)); \
105 if (end % ts) \
106 mask |= (~(type) 0) << (end % ts); \
107 cmask = (type) __v << (__start % ts); \
108 cmask &= ~mask; \
109 __ptr[this_unit] &= mask; \
110 __ptr[this_unit] |= cmask; \
111 break; \
112 } \
113 if (__start % ts) { \
114 cshift = __start % ts; \
115 mask = ~((~(type) 0) << cshift); \
116 cmask = (type) __v << cshift; \
117 cmask &= ~mask; \
118 __ptr[this_unit] &= mask; \
119 __ptr[this_unit] |= cmask; \
120 __v = _bt_piecewise_rshift(__v, ts - cshift); \
121 __start += ts - cshift; \
122 this_unit++; \
123 } \
124 for (; this_unit < end_unit - 1; this_unit++) { \
125 __ptr[this_unit] = (type) __v; \
126 __v = _bt_piecewise_rshift(__v, ts); \
127 __start += ts; \
128 } \
129 if (end % ts) { \
130 mask = (~(type) 0) << (end % ts); \
131 cmask = (type) __v; \
132 cmask &= ~mask; \
133 __ptr[this_unit] &= mask; \
134 __ptr[this_unit] |= cmask; \
135 } else \
136 __ptr[this_unit] = (type) __v; \
137} while (0)
138
139#define _bt_bitfield_write_be(_ptr, type, _start, _length, _v) \
140do { \
141 typeof(_v) __v = (_v); \
142 type *__ptr = (void *) (_ptr); \
143 unsigned long __start = (_start), __length = (_length); \
144 type mask, cmask; \
145 unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
146 unsigned long start_unit, end_unit, this_unit; \
147 unsigned long end, cshift; /* cshift is "complement shift" */ \
148 \
149 if (!__length) \
150 break; \
151 \
152 end = __start + __length; \
153 start_unit = __start / ts; \
154 end_unit = (end + (ts - 1)) / ts; \
155 \
156 /* Trim v high bits */ \
157 if (__length < sizeof(__v) * CHAR_BIT) \
158 __v &= ~((~(typeof(__v)) 0) << __length); \
159 \
160 /* We can now append v with a simple "or", shift it piece-wise */ \
161 this_unit = end_unit - 1; \
162 if (start_unit == end_unit - 1) { \
163 mask = ~((~(type) 0) << ((ts - (end % ts)) % ts)); \
164 if (__start % ts) \
165 mask |= (~((type) 0)) << (ts - (__start % ts)); \
166 cmask = (type) __v << ((ts - (end % ts)) % ts); \
167 cmask &= ~mask; \
168 __ptr[this_unit] &= mask; \
169 __ptr[this_unit] |= cmask; \
170 break; \
171 } \
172 if (end % ts) { \
173 cshift = end % ts; \
174 mask = ~((~(type) 0) << (ts - cshift)); \
175 cmask = (type) __v << (ts - cshift); \
176 cmask &= ~mask; \
177 __ptr[this_unit] &= mask; \
178 __ptr[this_unit] |= cmask; \
179 __v = _bt_piecewise_rshift(__v, cshift); \
180 end -= cshift; \
181 this_unit--; \
182 } \
183 for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
184 __ptr[this_unit] = (type) __v; \
185 __v = _bt_piecewise_rshift(__v, ts); \
186 end -= ts; \
187 } \
188 if (__start % ts) { \
189 mask = (~(type) 0) << (ts - (__start % ts)); \
190 cmask = (type) __v; \
191 cmask &= ~mask; \
192 __ptr[this_unit] &= mask; \
193 __ptr[this_unit] |= cmask; \
194 } else \
195 __ptr[this_unit] = (type) __v; \
196} while (0)
197
198/*
199 * bt_bitfield_write - write integer to a bitfield in native endianness
200 * bt_bitfield_write_le - write integer to a bitfield in little endian
201 * bt_bitfield_write_be - write integer to a bitfield in big endian
202 */
203
204#if (__BYTE_ORDER == __LITTLE_ENDIAN)
205
206#define bt_bitfield_write(ptr, type, _start, _length, _v) \
207 _bt_bitfield_write_le(ptr, type, _start, _length, _v)
208
209#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
210 _bt_bitfield_write_le(ptr, type, _start, _length, _v)
211
212#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
213 _bt_bitfield_write_be(ptr, unsigned char, _start, _length, _v)
214
215#elif (__BYTE_ORDER == __BIG_ENDIAN)
216
217#define bt_bitfield_write(ptr, type, _start, _length, _v) \
218 _bt_bitfield_write_be(ptr, type, _start, _length, _v)
219
220#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
221 _bt_bitfield_write_le(ptr, unsigned char, _start, _length, _v)
222
223#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
224 _bt_bitfield_write_be(ptr, type, _start, _length, _v)
225
226#else /* (BYTE_ORDER == PDP_ENDIAN) */
227
228#error "Byte order not supported"
229
230#endif
231
232#define _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
233do { \
234 typeof(*(_vptr)) *__vptr = (_vptr); \
235 typeof(*__vptr) __v; \
236 type *__ptr = (void *) (_ptr); \
237 unsigned long __start = (_start), __length = (_length); \
238 type mask, cmask; \
239 unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
240 unsigned long start_unit, end_unit, this_unit; \
241 unsigned long end, cshift; /* cshift is "complement shift" */ \
242 \
243 if (!__length) { \
244 *__vptr = 0; \
245 break; \
246 } \
247 \
248 end = __start + __length; \
249 start_unit = __start / ts; \
250 end_unit = (end + (ts - 1)) / ts; \
251 \
252 this_unit = end_unit - 1; \
253 if (_bt_is_signed_type(typeof(__v)) \
254 && (__ptr[this_unit] & ((type) 1 << ((end % ts ? : ts) - 1)))) \
255 __v = ~(typeof(__v)) 0; \
256 else \
257 __v = 0; \
258 if (start_unit == end_unit - 1) { \
259 cmask = __ptr[this_unit]; \
260 cmask >>= (__start % ts); \
261 if ((end - __start) % ts) { \
262 mask = ~((~(type) 0) << (end - __start)); \
263 cmask &= mask; \
264 } \
265 __v = _bt_piecewise_lshift(__v, end - __start); \
266 __v |= _bt_unsigned_cast(typeof(__v), cmask); \
267 *__vptr = __v; \
268 break; \
269 } \
270 if (end % ts) { \
271 cshift = end % ts; \
272 mask = ~((~(type) 0) << cshift); \
273 cmask = __ptr[this_unit]; \
274 cmask &= mask; \
275 __v = _bt_piecewise_lshift(__v, cshift); \
276 __v |= _bt_unsigned_cast(typeof(__v), cmask); \
277 end -= cshift; \
278 this_unit--; \
279 } \
280 for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
281 __v = _bt_piecewise_lshift(__v, ts); \
282 __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
283 end -= ts; \
284 } \
285 if (__start % ts) { \
286 mask = ~((~(type) 0) << (ts - (__start % ts))); \
287 cmask = __ptr[this_unit]; \
288 cmask >>= (__start % ts); \
289 cmask &= mask; \
290 __v = _bt_piecewise_lshift(__v, ts - (__start % ts)); \
291 __v |= _bt_unsigned_cast(typeof(__v), cmask); \
292 } else { \
293 __v = _bt_piecewise_lshift(__v, ts); \
294 __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
295 } \
296 *__vptr = __v; \
297} while (0)
298
299#define _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
300do { \
301 typeof(*(_vptr)) *__vptr = (_vptr); \
302 typeof(*__vptr) __v; \
303 type *__ptr = (void *) (_ptr); \
304 unsigned long __start = (_start), __length = (_length); \
305 type mask, cmask; \
306 unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
307 unsigned long start_unit, end_unit, this_unit; \
308 unsigned long end, cshift; /* cshift is "complement shift" */ \
309 \
310 if (!__length) { \
311 *__vptr = 0; \
312 break; \
313 } \
314 \
315 end = __start + __length; \
316 start_unit = __start / ts; \
317 end_unit = (end + (ts - 1)) / ts; \
318 \
319 this_unit = start_unit; \
320 if (_bt_is_signed_type(typeof(__v)) \
321 && (__ptr[this_unit] & ((type) 1 << (ts - (__start % ts) - 1)))) \
322 __v = ~(typeof(__v)) 0; \
323 else \
324 __v = 0; \
325 if (start_unit == end_unit - 1) { \
326 cmask = __ptr[this_unit]; \
327 cmask >>= (ts - (end % ts)) % ts; \
328 if ((end - __start) % ts) { \
329 mask = ~((~(type) 0) << (end - __start)); \
330 cmask &= mask; \
331 } \
332 __v = _bt_piecewise_lshift(__v, end - __start); \
333 __v |= _bt_unsigned_cast(typeof(__v), cmask); \
334 *__vptr = __v; \
335 break; \
336 } \
337 if (__start % ts) { \
338 cshift = __start % ts; \
339 mask = ~((~(type) 0) << (ts - cshift)); \
340 cmask = __ptr[this_unit]; \
341 cmask &= mask; \
342 __v = _bt_piecewise_lshift(__v, ts - cshift); \
343 __v |= _bt_unsigned_cast(typeof(__v), cmask); \
344 __start += ts - cshift; \
345 this_unit++; \
346 } \
347 for (; this_unit < end_unit - 1; this_unit++) { \
348 __v = _bt_piecewise_lshift(__v, ts); \
349 __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
350 __start += ts; \
351 } \
352 if (end % ts) { \
353 mask = ~((~(type) 0) << (end % ts)); \
354 cmask = __ptr[this_unit]; \
355 cmask >>= ts - (end % ts); \
356 cmask &= mask; \
357 __v = _bt_piecewise_lshift(__v, end % ts); \
358 __v |= _bt_unsigned_cast(typeof(__v), cmask); \
359 } else { \
360 __v = _bt_piecewise_lshift(__v, ts); \
361 __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
362 } \
363 *__vptr = __v; \
364} while (0)
365
366/*
367 * bt_bitfield_read - read integer from a bitfield in native endianness
368 * bt_bitfield_read_le - read integer from a bitfield in little endian
369 * bt_bitfield_read_be - read integer from a bitfield in big endian
370 */
371
372#if (__BYTE_ORDER == __LITTLE_ENDIAN)
373
374#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
375 _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)
376
377#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
378 _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)
379
380#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
381 _bt_bitfield_read_be(_ptr, unsigned char, _start, _length, _vptr)
382
383#elif (__BYTE_ORDER == __BIG_ENDIAN)
384
385#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
386 _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)
387
388#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
389 _bt_bitfield_read_le(_ptr, unsigned char, _start, _length, _vptr)
390
391#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
392 _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)
393
394#else /* (__BYTE_ORDER == __PDP_ENDIAN) */
395
396#error "Byte order not supported"
397
398#endif
399
400#endif /* _BABELTRACE_BITFIELD_H */
diff --git a/drivers/staging/lttng/lib/bug.h b/drivers/staging/lttng/lib/bug.h
deleted file mode 100644
index 8243cc94a7b7..000000000000
--- a/drivers/staging/lttng/lib/bug.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef _LTTNG_BUG_H
2#define _LTTNG_BUG_H
3
4/*
5 * lib/bug.h
6 *
7 * (C) Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Dual LGPL v2.1/GPL v2 license.
10 */
11
12/**
13 * BUILD_RUNTIME_BUG_ON - check condition at build (if constant) or runtime
14 * @condition: the condition which should be false.
15 *
16 * If the condition is a constant and true, the compiler will generate a build
17 * error. If the condition is not constant, a BUG will be triggered at runtime
18 * if the condition is ever true. If the condition is constant and false, no
19 * code is emitted.
20 */
21#define BUILD_RUNTIME_BUG_ON(condition) \
22 do { \
23 if (__builtin_constant_p(condition)) \
24 BUILD_BUG_ON(condition); \
25 else \
26 BUG_ON(condition); \
27 } while (0)
28
29#endif
diff --git a/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c b/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c
deleted file mode 100644
index 2fce143c1d3e..000000000000
--- a/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * lttng_prio_heap.c
3 *
4 * Priority heap containing pointers. Based on CLRS, chapter 6.
5 *
6 * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 */
18
19#include <linux/slab.h>
20#include "lttng_prio_heap.h"
21
22#ifdef DEBUG_HEAP
23void lttng_check_heap(const struct lttng_ptr_heap *heap)
24{
25 size_t i;
26
27 if (!heap->len)
28 return;
29
30 for (i = 1; i < heap->len; i++)
31 WARN_ON_ONCE(!heap->gt(heap->ptrs[i], heap->ptrs[0]));
32}
33#endif
34
35static
36size_t parent(size_t i)
37{
38 return (i -1) >> 1;
39}
40
41static
42size_t left(size_t i)
43{
44 return (i << 1) + 1;
45}
46
47static
48size_t right(size_t i)
49{
50 return (i << 1) + 2;
51}
52
53/*
54 * Copy of heap->ptrs pointer is invalid after heap_grow.
55 */
56static
57int heap_grow(struct lttng_ptr_heap *heap, size_t new_len)
58{
59 void **new_ptrs;
60
61 if (heap->alloc_len >= new_len)
62 return 0;
63
64 heap->alloc_len = max_t(size_t, new_len, heap->alloc_len << 1);
65 new_ptrs = kmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
66 if (!new_ptrs)
67 return -ENOMEM;
68 if (heap->ptrs)
69 memcpy(new_ptrs, heap->ptrs, heap->len * sizeof(void *));
70 kfree(heap->ptrs);
71 heap->ptrs = new_ptrs;
72 return 0;
73}
74
75static
76int heap_set_len(struct lttng_ptr_heap *heap, size_t new_len)
77{
78 int ret;
79
80 ret = heap_grow(heap, new_len);
81 if (ret)
82 return ret;
83 heap->len = new_len;
84 return 0;
85}
86
87int lttng_heap_init(struct lttng_ptr_heap *heap, size_t alloc_len,
88 gfp_t gfpmask, int gt(void *a, void *b))
89{
90 heap->ptrs = NULL;
91 heap->len = 0;
92 heap->alloc_len = 0;
93 heap->gt = gt;
94 heap->gfpmask = gfpmask;
95 /*
96 * Minimum size allocated is 1 entry to ensure memory allocation
97 * never fails within heap_replace_max.
98 */
99 return heap_grow(heap, max_t(size_t, 1, alloc_len));
100}
101
102void lttng_heap_free(struct lttng_ptr_heap *heap)
103{
104 kfree(heap->ptrs);
105}
106
107static void heapify(struct lttng_ptr_heap *heap, size_t i)
108{
109 void **ptrs = heap->ptrs;
110 size_t l, r, largest;
111
112 for (;;) {
113 void *tmp;
114
115 l = left(i);
116 r = right(i);
117 if (l < heap->len && heap->gt(ptrs[l], ptrs[i]))
118 largest = l;
119 else
120 largest = i;
121 if (r < heap->len && heap->gt(ptrs[r], ptrs[largest]))
122 largest = r;
123 if (largest == i)
124 break;
125 tmp = ptrs[i];
126 ptrs[i] = ptrs[largest];
127 ptrs[largest] = tmp;
128 i = largest;
129 }
130 lttng_check_heap(heap);
131}
132
133void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p)
134{
135 void *res;
136
137 if (!heap->len) {
138 (void) heap_set_len(heap, 1);
139 heap->ptrs[0] = p;
140 lttng_check_heap(heap);
141 return NULL;
142 }
143
144 /* Replace the current max and heapify */
145 res = heap->ptrs[0];
146 heap->ptrs[0] = p;
147 heapify(heap, 0);
148 return res;
149}
150
151int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p)
152{
153 void **ptrs;
154 size_t pos;
155 int ret;
156
157 ret = heap_set_len(heap, heap->len + 1);
158 if (ret)
159 return ret;
160 ptrs = heap->ptrs;
161 pos = heap->len - 1;
162 while (pos > 0 && heap->gt(p, ptrs[parent(pos)])) {
163 /* Move parent down until we find the right spot */
164 ptrs[pos] = ptrs[parent(pos)];
165 pos = parent(pos);
166 }
167 ptrs[pos] = p;
168 lttng_check_heap(heap);
169 return 0;
170}
171
172void *lttng_heap_remove(struct lttng_ptr_heap *heap)
173{
174 switch (heap->len) {
175 case 0:
176 return NULL;
177 case 1:
178 (void) heap_set_len(heap, 0);
179 return heap->ptrs[0];
180 }
181 /* Shrink, replace the current max by previous last entry and heapify */
182 heap_set_len(heap, heap->len - 1);
183 /* len changed. previous last entry is at heap->len */
184 return lttng_heap_replace_max(heap, heap->ptrs[heap->len]);
185}
186
187void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p)
188{
189 size_t pos, len = heap->len;
190
191 for (pos = 0; pos < len; pos++)
192 if (heap->ptrs[pos] == p)
193 goto found;
194 return NULL;
195found:
196 if (heap->len == 1) {
197 (void) heap_set_len(heap, 0);
198 lttng_check_heap(heap);
199 return heap->ptrs[0];
200 }
201 /* Replace p with previous last entry and heapify. */
202 heap_set_len(heap, heap->len - 1);
203 /* len changed. previous last entry is at heap->len */
204 heap->ptrs[pos] = heap->ptrs[heap->len];
205 heapify(heap, pos);
206 return p;
207}
diff --git a/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h b/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h
deleted file mode 100644
index ea8dbb8f7e0e..000000000000
--- a/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h
+++ /dev/null
@@ -1,117 +0,0 @@
1#ifndef _LTTNG_PRIO_HEAP_H
2#define _LTTNG_PRIO_HEAP_H
3
4/*
5 * lttng_prio_heap.h
6 *
7 * Priority heap containing pointers. Based on CLRS, chapter 6.
8 *
9 * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this software and associated documentation files (the "Software"), to deal
13 * in the Software without restriction, including without limitation the rights
14 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the Software is
16 * furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 */
21
22#include <linux/gfp.h>
23
/* Max-heap of opaque pointers (largest element at ptrs[0]). */
struct lttng_ptr_heap {
	size_t len, alloc_len;		/* used slots / allocated slots of ptrs */
	void **ptrs;			/* backing array of element pointers */
	int (*gt)(void *a, void *b);	/* comparator: nonzero when a > b */
	gfp_t gfpmask;			/* allocation flags used when growing */
};
30
31#ifdef DEBUG_HEAP
32void lttng_check_heap(const struct lttng_ptr_heap *heap);
33#else
34static inline
35void lttng_check_heap(const struct lttng_ptr_heap *heap)
36{
37}
38#endif
39
40/**
41 * lttng_heap_maximum - return the largest element in the heap
42 * @heap: the heap to be operated on
43 *
44 * Returns the largest element in the heap, without performing any modification
45 * to the heap structure. Returns NULL if the heap is empty.
46 */
47static inline void *lttng_heap_maximum(const struct lttng_ptr_heap *heap)
48{
49 lttng_check_heap(heap);
50 return heap->len ? heap->ptrs[0] : NULL;
51}
52
53/**
54 * lttng_heap_init - initialize the heap
55 * @heap: the heap to initialize
56 * @alloc_len: number of elements initially allocated
57 * @gfp: allocation flags
58 * @gt: function to compare the elements
59 *
60 * Returns -ENOMEM if out of memory.
61 */
62extern int lttng_heap_init(struct lttng_ptr_heap *heap,
63 size_t alloc_len, gfp_t gfpmask,
64 int gt(void *a, void *b));
65
66/**
67 * lttng_heap_free - free the heap
68 * @heap: the heap to free
69 */
70extern void lttng_heap_free(struct lttng_ptr_heap *heap);
71
72/**
73 * lttng_heap_insert - insert an element into the heap
74 * @heap: the heap to be operated on
75 * @p: the element to add
76 *
77 * Insert an element into the heap.
78 *
79 * Returns -ENOMEM if out of memory.
80 */
81extern int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p);
82
83/**
84 * lttng_heap_remove - remove the largest element from the heap
85 * @heap: the heap to be operated on
86 *
87 * Returns the largest element in the heap. It removes this element from the
88 * heap. Returns NULL if the heap is empty.
89 */
90extern void *lttng_heap_remove(struct lttng_ptr_heap *heap);
91
92/**
93 * lttng_heap_cherrypick - remove a given element from the heap
94 * @heap: the heap to be operated on
95 * @p: the element
96 *
97 * Remove the given element from the heap. Return the element if present, else
98 * return NULL. This algorithm has a complexity of O(n), which is higher than
99 * O(log(n)) provided by the rest of this API.
100 */
101extern void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p);
102
103/**
104 * lttng_heap_replace_max - replace the largest element from the heap
105 * @heap: the heap to be operated on
106 * @p: the pointer to be inserted as topmost element replacement
107 *
108 * Returns the largest element in the heap. It removes this element from the
109 * heap. The heap is rebalanced only once after the insertion. Returns NULL if
110 * the heap is empty.
111 *
112 * This is the equivalent of calling heap_remove() and then heap_insert(), but
113 * it only rebalances the heap once. It never allocates memory.
114 */
115extern void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p);
116
117#endif /* _LTTNG_PRIO_HEAP_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/api.h b/drivers/staging/lttng/lib/ringbuffer/api.h
deleted file mode 100644
index f8a1145b10fe..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/api.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_API_H
2#define _LINUX_RING_BUFFER_API_H
3
4/*
5 * linux/ringbuffer/api.h
6 *
7 * Copyright (C) 2010 - Mathieu Desnoyers "mathieu.desnoyers@efficios.com"
8 *
9 * Ring Buffer API.
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include "../../wrapper/ringbuffer/backend.h"
15#include "../../wrapper/ringbuffer/frontend.h"
16#include "../../wrapper/ringbuffer/vfs.h"
17
18/*
19 * ring_buffer_frontend_api.h contains static inline functions that depend on
20 * client static inlines. Hence the inclusion of this "api" header only
21 * within the client.
22 */
23#include "../../wrapper/ringbuffer/frontend_api.h"
24
25#endif /* _LINUX_RING_BUFFER_API_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/backend.h b/drivers/staging/lttng/lib/ringbuffer/backend.h
deleted file mode 100644
index 541dc531ca76..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/backend.h
+++ /dev/null
@@ -1,250 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_BACKEND_H
2#define _LINUX_RING_BUFFER_BACKEND_H
3
4/*
5 * linux/ringbuffer/backend.h
6 *
7 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer backend (API).
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 *
13 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
14 * the reader in flight recorder mode.
15 */
16
17#include <linux/types.h>
18#include <linux/sched.h>
19#include <linux/timer.h>
20#include <linux/wait.h>
21#include <linux/poll.h>
22#include <linux/list.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25
26/* Internal helpers */
27#include "../../wrapper/ringbuffer/backend_internal.h"
28#include "../../wrapper/ringbuffer/frontend_internal.h"
29
30/* Ring buffer backend API */
31
32/* Ring buffer backend access (read/write) */
33
34extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
35 size_t offset, void *dest, size_t len);
36
37extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
38 size_t offset, void __user *dest,
39 size_t len);
40
41extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
42 size_t offset, void *dest, size_t len);
43
44extern struct page **
45lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb, size_t offset,
46 void ***virt);
47
48/*
49 * Return the address where a given offset is located.
50 * Should be used to get the current subbuffer header pointer. Given we know
51 * it's never on a page boundary, it's safe to write directly to this address,
52 * as long as the write is never bigger than a page size.
53 */
54extern void *
55lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
56 size_t offset);
57extern void *
58lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
59 size_t offset);
60
61/**
62 * lib_ring_buffer_write - write data to a buffer backend
63 * @config : ring buffer instance configuration
64 * @ctx: ring buffer context. (input arguments only)
65 * @src : source pointer to copy from
66 * @len : length of data to copy
67 *
68 * This function copies "len" bytes of data from a source pointer to a buffer
69 * backend, at the current context offset. This is more or less a buffer
70 * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
71 * if copy is crossing a page boundary.
72 */
static inline
void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
			   struct lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index;
	size_t offset = ctx->buf_offset;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	/* Wrap the offset within the buffer (assumes buf_size is a power of 2). */
	offset &= chanb->buf_size - 1;
	/* Sub-buffer index, then page index within that sub-buffer. */
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	/* Bytes that fit before the end of the current page. */
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	/* Resolve the backend pages of the target writer sub-buffer. */
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	/* A writer must never target a sub-buffer flagged noref in overwrite mode. */
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	if (likely(pagecpy == len))
		/* Fast path: copy fits entirely within one page. */
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, len);
	else
		/* Slow path: copy crosses a page boundary. */
		_lib_ring_buffer_write(bufb, offset, src, len, 0);
	ctx->buf_offset += len;
}
105
106/**
107 * lib_ring_buffer_memset - write len bytes of c to a buffer backend
108 * @config : ring buffer instance configuration
109 * @bufb : ring buffer backend
110 * @offset : offset within the buffer
111 * @c : the byte to copy
112 * @len : number of bytes to copy
113 *
114 * This function writes "len" bytes of "c" to a buffer backend, at a specific
115 * offset. This is more or less a buffer backend-specific memset() operation.
116 * Calls the slow path (_ring_buffer_memset) if write is crossing a page
117 * boundary.
118 */
static inline
void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx, int c, size_t len)
{

	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index;
	size_t offset = ctx->buf_offset;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	/* Wrap the offset within the buffer (assumes buf_size is a power of 2). */
	offset &= chanb->buf_size - 1;
	/* Sub-buffer index, then page index within that sub-buffer. */
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	/* Bytes that fit before the end of the current page. */
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	/* A writer must never target a sub-buffer flagged noref in overwrite mode. */
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	if (likely(pagecpy == len))
		/* Fast path: the fill stays within a single page. */
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, len);
	else
		/* Slow path: the fill crosses a page boundary. */
		_lib_ring_buffer_memset(bufb, offset, c, len, 0);
	ctx->buf_offset += len;
}
150
151/**
152 * lib_ring_buffer_copy_from_user - write userspace data to a buffer backend
153 * @config : ring buffer instance configuration
154 * @ctx: ring buffer context. (input arguments only)
155 * @src : userspace source pointer to copy from
156 * @len : length of data to copy
157 *
158 * This function copies "len" bytes of data from a userspace pointer to a
159 * buffer backend, at the current context offset. This is more or less a buffer
160 * backend-specific memcpy() operation. Calls the slow path
161 * (_ring_buffer_write_from_user) if copy is crossing a page boundary.
162 */
static inline
void lib_ring_buffer_copy_from_user(const struct lib_ring_buffer_config *config,
				    struct lib_ring_buffer_ctx *ctx,
				    const void __user *src, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index;
	size_t offset = ctx->buf_offset;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	unsigned long ret;

	/* Wrap the offset within the buffer (assumes buf_size is a power of 2). */
	offset &= chanb->buf_size - 1;
	/* Sub-buffer index, then page index within that sub-buffer. */
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	/* Bytes that fit before the end of the current page. */
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));

	/* Validate the whole user range once up front. */
	if (unlikely(!access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;

	if (likely(pagecpy == len)) {
		/* Fast path: the copy does not cross a page boundary. */
		ret = lib_ring_buffer_do_copy_from_user(
			rpages->p[index].virt + (offset & ~PAGE_MASK),
			src, len);
		if (unlikely(ret > 0)) {
			/* Partial copy: ret bytes failed; zero-fill the rest. */
			len -= (pagecpy - ret);
			offset += (pagecpy - ret);
			goto fill_buffer;
		}
	} else {
		_lib_ring_buffer_copy_from_user(bufb, offset, src, len, 0);
	}
	ctx->buf_offset += len;

	return;

fill_buffer:
	/*
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 *
	 * NOTE(review): ctx->buf_offset is not advanced on this path,
	 * unlike the success path above — confirm callers account for
	 * the space already reserved for this record.
	 */
	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
}
214
215/*
216 * This accessor counts the number of unread records in a buffer.
217 * It only provides a consistent value if no reads nor writes are performed
218 * concurrently.
219 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	struct lib_ring_buffer_backend *bufb = &buf->backend;
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long records_unread = 0, sb_bindex, id;
	unsigned int i;

	/* Sum the unread record counts of every writer sub-buffer. */
	for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
		id = bufb->buf_wsb[i].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		pages = bufb->array[sb_bindex];
		records_unread += v_read(config, &pages->records_unread);
	}
	/* Overwrite mode: the reader holds one extra sub-buffer; count it too. */
	if (config->mode == RING_BUFFER_OVERWRITE) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		pages = bufb->array[sb_bindex];
		records_unread += v_read(config, &pages->records_unread);
	}
	return records_unread;
}
244
245ssize_t lib_ring_buffer_file_splice_read(struct file *in, loff_t *ppos,
246 struct pipe_inode_info *pipe,
247 size_t len, unsigned int flags);
248loff_t lib_ring_buffer_no_llseek(struct file *file, loff_t offset, int origin);
249
250#endif /* _LINUX_RING_BUFFER_BACKEND_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/backend_internal.h b/drivers/staging/lttng/lib/ringbuffer/backend_internal.h
deleted file mode 100644
index 442f357a4c40..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/backend_internal.h
+++ /dev/null
@@ -1,449 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
2#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
3
4/*
5 * linux/ringbuffer/backend_internal.h
6 *
7 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer backend (internal helpers).
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include "../../wrapper/ringbuffer/config.h"
15#include "../../wrapper/ringbuffer/backend_types.h"
16#include "../../wrapper/ringbuffer/frontend_types.h"
17#include <linux/string.h>
18#include <linux/uaccess.h>
19
20/* Ring buffer backend API presented to the frontend */
21
22/* Ring buffer and channel backend create/free */
23
24int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
25 struct channel_backend *chan, int cpu);
26void channel_backend_unregister_notifiers(struct channel_backend *chanb);
27void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
28int channel_backend_init(struct channel_backend *chanb,
29 const char *name,
30 const struct lib_ring_buffer_config *config,
31 void *priv, size_t subbuf_size,
32 size_t num_subbuf);
33void channel_backend_free(struct channel_backend *chanb);
34
35void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
36void channel_backend_reset(struct channel_backend *chanb);
37
38int lib_ring_buffer_backend_init(void);
39void lib_ring_buffer_backend_exit(void);
40
41extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
42 size_t offset, const void *src, size_t len,
43 ssize_t pagecpy);
44extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
45 size_t offset, int c, size_t len,
46 ssize_t pagecpy);
47extern void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
48 size_t offset, const void *src,
49 size_t len, ssize_t pagecpy);
50
51/*
52 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
53 * exchanged atomically.
54 *
55 * Top half word, except lowest bit, belongs to "offset", which is used to keep
56 * to count the produced buffers. For overwrite mode, this provides the
57 * consumer with the capacity to read subbuffers in order, handling the
58 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
59 * systems) concurrently with a single execution of get_subbuf (between offset
60 * sampling and subbuffer ID exchange).
61 */
62
63#define HALF_ULONG_BITS (BITS_PER_LONG >> 1)
64
65#define SB_ID_OFFSET_SHIFT (HALF_ULONG_BITS + 1)
66#define SB_ID_OFFSET_COUNT (1UL << SB_ID_OFFSET_SHIFT)
67#define SB_ID_OFFSET_MASK (~(SB_ID_OFFSET_COUNT - 1))
68/*
69 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
70 */
71#define SB_ID_NOREF_SHIFT (SB_ID_OFFSET_SHIFT - 1)
72#define SB_ID_NOREF_COUNT (1UL << SB_ID_NOREF_SHIFT)
73#define SB_ID_NOREF_MASK SB_ID_NOREF_COUNT
74/*
75 * In overwrite mode: lowest half of word is used for index.
76 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
77 * In producer-consumer mode: whole word used for index.
78 */
79#define SB_ID_INDEX_SHIFT 0
80#define SB_ID_INDEX_COUNT (1UL << SB_ID_INDEX_SHIFT)
81#define SB_ID_INDEX_MASK (SB_ID_NOREF_COUNT - 1)
82
83/*
84 * Construct the subbuffer id from offset, index and noref. Use only the index
85 * for producer-consumer mode (offset and noref are only used in overwrite
86 * mode).
87 */
88static inline
89unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
90 unsigned long offset, unsigned long noref,
91 unsigned long index)
92{
93 if (config->mode == RING_BUFFER_OVERWRITE)
94 return (offset << SB_ID_OFFSET_SHIFT)
95 | (noref << SB_ID_NOREF_SHIFT)
96 | index;
97 else
98 return index;
99}
100
101/*
102 * Compare offset with the offset contained within id. Return 1 if the offset
103 * bits are identical, else 0.
104 */
105static inline
106int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
107 unsigned long id, unsigned long offset)
108{
109 return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
110}
111
112static inline
113unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
114 unsigned long id)
115{
116 if (config->mode == RING_BUFFER_OVERWRITE)
117 return id & SB_ID_INDEX_MASK;
118 else
119 return id;
120}
121
122static inline
123unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
124 unsigned long id)
125{
126 if (config->mode == RING_BUFFER_OVERWRITE)
127 return !!(id & SB_ID_NOREF_MASK);
128 else
129 return 1;
130}
131
132/*
133 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
134 * needed.
135 */
136static inline
137void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
138 unsigned long *id)
139{
140 if (config->mode == RING_BUFFER_OVERWRITE)
141 *id |= SB_ID_NOREF_MASK;
142}
143
/*
 * Atomically-visible update of a sub-buffer id: install a new offset count
 * and set the noref flag in a single word-sized store.
 */
static inline
void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/* Build the new id in a local, then publish it in one store. */
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		ACCESS_ONCE(*id) = tmp;
	}
}
159
160/* No volatile access, since already used locally */
161static inline
162void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
163 unsigned long *id)
164{
165 if (config->mode == RING_BUFFER_OVERWRITE)
166 *id &= ~SB_ID_NOREF_MASK;
167}
168
169/*
170 * For overwrite mode, cap the number of subbuffers per buffer to:
171 * 2^16 on 32-bit architectures
172 * 2^32 on 64-bit architectures
173 * This is required to fit in the index part of the ID. Return 0 on success,
174 * -EPERM on failure.
175 */
176static inline
177int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
178 unsigned long num_subbuf)
179{
180 if (config->mode == RING_BUFFER_OVERWRITE)
181 return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
182 else
183 return 0;
184}
185
186static inline
187void subbuffer_count_record(const struct lib_ring_buffer_config *config,
188 struct lib_ring_buffer_backend *bufb,
189 unsigned long idx)
190{
191 unsigned long sb_bindex;
192
193 sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
194 v_inc(config, &bufb->array[sb_bindex]->records_commit);
195}
196
197/*
198 * Reader has exclusive subbuffer access for record consumption. No need to
199 * perform the decrement atomically.
200 */
201static inline
202void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
203 struct lib_ring_buffer_backend *bufb)
204{
205 unsigned long sb_bindex;
206
207 sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
208 CHAN_WARN_ON(bufb->chan,
209 !v_read(config, &bufb->array[sb_bindex]->records_unread));
210 /* Non-atomic decrement protected by exclusive subbuffer access */
211 _v_dec(config, &bufb->array[sb_bindex]->records_unread);
212 v_inc(config, &bufb->records_read);
213}
214
215static inline
216unsigned long subbuffer_get_records_count(
217 const struct lib_ring_buffer_config *config,
218 struct lib_ring_buffer_backend *bufb,
219 unsigned long idx)
220{
221 unsigned long sb_bindex;
222
223 sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
224 return v_read(config, &bufb->array[sb_bindex]->records_commit);
225}
226
227/*
228 * Must be executed at subbuffer delivery when the writer has _exclusive_
229 * subbuffer access. See ring_buffer_check_deliver() for details.
230 * ring_buffer_get_records_count() must be called to get the records count
231 * before this function, because it resets the records_commit count.
232 */
233static inline
234unsigned long subbuffer_count_records_overrun(
235 const struct lib_ring_buffer_config *config,
236 struct lib_ring_buffer_backend *bufb,
237 unsigned long idx)
238{
239 struct lib_ring_buffer_backend_pages *pages;
240 unsigned long overruns, sb_bindex;
241
242 sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
243 pages = bufb->array[sb_bindex];
244 overruns = v_read(config, &pages->records_unread);
245 v_set(config, &pages->records_unread,
246 v_read(config, &pages->records_commit));
247 v_set(config, &pages->records_commit, 0);
248
249 return overruns;
250}
251
252static inline
253void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
254 struct lib_ring_buffer_backend *bufb,
255 unsigned long idx,
256 unsigned long data_size)
257{
258 struct lib_ring_buffer_backend_pages *pages;
259 unsigned long sb_bindex;
260
261 sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
262 pages = bufb->array[sb_bindex];
263 pages->data_size = data_size;
264}
265
266static inline
267unsigned long subbuffer_get_read_data_size(
268 const struct lib_ring_buffer_config *config,
269 struct lib_ring_buffer_backend *bufb)
270{
271 struct lib_ring_buffer_backend_pages *pages;
272 unsigned long sb_bindex;
273
274 sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
275 pages = bufb->array[sb_bindex];
276 return pages->data_size;
277}
278
279static inline
280unsigned long subbuffer_get_data_size(
281 const struct lib_ring_buffer_config *config,
282 struct lib_ring_buffer_backend *bufb,
283 unsigned long idx)
284{
285 struct lib_ring_buffer_backend_pages *pages;
286 unsigned long sb_bindex;
287
288 sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
289 pages = bufb->array[sb_bindex];
290 return pages->data_size;
291}
292
293/**
294 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
295 * writer.
296 */
static inline
void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_backend *bufb,
				 unsigned long idx)
{
	unsigned long id, new_id;

	/* The noref flag only exists in overwrite mode. */
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the sb_pages, because we want to
	 * read a coherent version of the pointer and the associated noref flag.
	 */
	id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store after load dependency ordering the writes to
			 * the subbuffer after load and test of the noref flag
			 * matches the memory barrier implied by the cmpxchg()
			 * in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		/* cmpxchg retry loop: clear noref, racing with the reader. */
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
		if (likely(new_id == id))
			break;
		/* Lost the race: retry with the freshly observed id. */
		id = new_id;
	}
}
331
332/**
333 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
334 * called by writer.
335 */
336static inline
337void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
338 struct lib_ring_buffer_backend *bufb,
339 unsigned long idx, unsigned long offset)
340{
341 if (config->mode != RING_BUFFER_OVERWRITE)
342 return;
343
344 /*
345 * Because ring_buffer_set_noref() is only called by a single thread
346 * (the one which updated the cc_sb value), there are no concurrent
347 * updates to take care of: other writers have not updated cc_sb, so
348 * they cannot set the noref flag, and concurrent readers cannot modify
349 * the pointer because the noref flag is not set yet.
350 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
351 * to the subbuffer before this set noref operation.
352 * subbuffer_set_noref() uses a volatile store to deal with concurrent
353 * readers of the noref flag.
354 */
355 CHAN_WARN_ON(bufb->chan,
356 subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
357 /*
358 * Memory barrier that ensures counter stores are ordered before set
359 * noref and offset.
360 */
361 smp_mb();
362 subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
363}
364
365/**
366 * update_read_sb_index - Read-side subbuffer index update.
367 */
/*
 * Exchange the reader's spare sub-buffer with the consumed writer
 * sub-buffer (overwrite mode), or simply point the reader at the writer
 * sub-buffer (producer-consumer mode). Returns 0 on success, -EAGAIN if
 * the writer moved on concurrently and the exchange must be retried.
 */
static inline
int update_read_sb_index(const struct lib_ring_buffer_config *config,
			 struct lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use ACCESS_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = bufb->buf_wsb[consumed_idx].id;
		/* Writer still owns this sub-buffer: cannot take it. */
		if (unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (unlikely(!subbuffer_id_compare_offset(config, old_id,
							  consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(bufb->chan,
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		/* Stamp our spare sub-buffer with the consumed offset count. */
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		/* Atomically hand the spare to the writer slot. */
		new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
				 bufb->buf_rsb.id);
		if (unlikely(old_id != new_id))
			return -EAGAIN;
		/* The reader now owns the previously-written sub-buffer. */
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
	}
	return 0;
}
409
410/*
411 * Use the architecture-specific memcpy implementation for constant-sized
412 * inputs, but rely on an inline memcpy for length statically unknown.
413 * The function call to memcpy is just way too expensive for a fast path.
414 */
415#define lib_ring_buffer_do_copy(config, dest, src, len) \
416do { \
417 size_t __len = (len); \
418 if (__builtin_constant_p(len)) \
419 memcpy(dest, src, __len); \
420 else \
421 inline_memcpy(dest, src, __len); \
422} while (0)
423
424/*
425 * We use __copy_from_user to copy userspace data since we already
426 * did the access_ok for the whole range.
427 */
static inline
unsigned long lib_ring_buffer_do_copy_from_user(void *dest,
						const void __user *src,
						unsigned long len)
{
	/* Returns the number of bytes that could NOT be copied (0 on success). */
	return __copy_from_user(dest, src, len);
}
435
436/*
437 * write len bytes to dest with c
438 */
/*
 * Fill len bytes of dest with the byte value c.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c,
			       unsigned long len)
{
	unsigned long idx;

	for (idx = 0; idx < len; idx++)
		dest[idx] = (char) c;
}
448
449#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/backend_types.h b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
deleted file mode 100644
index 25c41bc96433..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/backend_types.h
+++ /dev/null
@@ -1,80 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_BACKEND_TYPES_H
2#define _LINUX_RING_BUFFER_BACKEND_TYPES_H
3
4/*
5 * linux/ringbuffer/backend_types.h
6 *
7 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer backend (types).
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include <linux/cpumask.h>
15#include <linux/types.h>
16
17struct lib_ring_buffer_backend_page {
18 void *virt; /* page virtual address (cached) */
19 struct page *page; /* pointer to page structure */
20};
21
22struct lib_ring_buffer_backend_pages {
23 unsigned long mmap_offset; /* offset of the subbuffer in mmap */
24 union v_atomic records_commit; /* current records committed count */
25 union v_atomic records_unread; /* records to read */
26 unsigned long data_size; /* Amount of data to read from subbuf */
27 struct lib_ring_buffer_backend_page p[];
28};
29
30struct lib_ring_buffer_backend_subbuffer {
31 /* Identifier for subbuf backend pages. Exchanged atomically. */
32 unsigned long id; /* backend subbuffer identifier */
33};
34
35/*
36 * Forward declaration of frontend-specific channel and ring_buffer.
37 */
38struct channel;
39struct lib_ring_buffer;
40
41struct lib_ring_buffer_backend {
42 /* Array of ring_buffer_backend_subbuffer for writer */
43 struct lib_ring_buffer_backend_subbuffer *buf_wsb;
44 /* ring_buffer_backend_subbuffer for reader */
45 struct lib_ring_buffer_backend_subbuffer buf_rsb;
46 /*
47 * Pointer array of backend pages, for whole buffer.
48 * Indexed by ring_buffer_backend_subbuffer identifier (id) index.
49 */
50 struct lib_ring_buffer_backend_pages **array;
51 unsigned int num_pages_per_subbuf;
52
53 struct channel *chan; /* Associated channel */
54 int cpu; /* This buffer's cpu. -1 if global. */
55 union v_atomic records_read; /* Number of records read */
56 uint allocated:1; /* is buffer allocated ? */
57};
58
59struct channel_backend {
60 unsigned long buf_size; /* Size of the buffer */
61 unsigned long subbuf_size; /* Sub-buffer size */
62 unsigned int subbuf_size_order; /* Order of sub-buffer size */
63 unsigned int num_subbuf_order; /*
64 * Order of number of sub-buffers/buffer
65 * for writer.
66 */
67 unsigned int buf_size_order; /* Order of buffer size */
68 uint extra_reader_sb:1; /* has extra reader subbuffer ? */
69 struct lib_ring_buffer *buf; /* Channel per-cpu buffers */
70
71 unsigned long num_subbuf; /* Number of sub-buffers for writer */
72 u64 start_tsc; /* Channel creation TSC value */
73 void *priv; /* Client-specific information */
74 struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
75 const struct lib_ring_buffer_config *config; /* Ring buffer configuration */
76 cpumask_var_t cpumask; /* Allocated per-cpu buffers cpumask */
77 char name[NAME_MAX]; /* Channel name */
78};
79
80#endif /* _LINUX_RING_BUFFER_BACKEND_TYPES_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/config.h b/drivers/staging/lttng/lib/ringbuffer/config.h
deleted file mode 100644
index fd73d5519c92..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/config.h
+++ /dev/null
@@ -1,298 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_CONFIG_H
2#define _LINUX_RING_BUFFER_CONFIG_H
3
4/*
5 * linux/ringbuffer/config.h
6 *
7 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer configuration header. Note: after declaring the standard inline
10 * functions, clients should also include linux/ringbuffer/api.h.
11 *
12 * Dual LGPL v2.1/GPL v2 license.
13 */
14
15#include <linux/types.h>
16#include <linux/percpu.h>
17#include "../align.h"
18
19struct lib_ring_buffer;
20struct channel;
21struct lib_ring_buffer_config;
22struct lib_ring_buffer_ctx;
23
24/*
25 * Ring buffer client callbacks. Only used by slow path, never on fast path.
26 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
27 * provided as inline functions too. These may simply return 0 if not used by
28 * the client.
29 */
/*
 * Client callback table; embedded by value in struct lib_ring_buffer_config
 * ("cb" member).
 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	u64 (*ring_buffer_clock_read) (struct channel *chan);
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lib_ring_buffer_ctx *ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};
68
69/*
70 * Ring buffer instance configuration.
71 *
72 * Declare as "static const" within the client object to ensure the inline fast
73 * paths can be optimized.
74 *
75 * alloc/sync pairs:
76 *
77 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
78 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
79 * with preemption disabled (lib_ring_buffer_get_cpu() and
80 * lib_ring_buffer_put_cpu()).
81 *
82 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
83 * Per-cpu buffer with global synchronization. Tracing can be performed with
84 * preemption enabled, statistically stays on the local buffers.
85 *
86 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
87 * Should only be used for buffers belonging to a single thread or protected
88 * by mutual exclusion by the client. Note that periodical sub-buffer switch
89 * should be disabled in this kind of configuration.
90 *
91 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
92 * Global shared buffer with global synchronization.
93 *
94 * wakeup:
95 *
96 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
97 * buffers and wake up readers if data is ready. Mainly useful for tracers which
98 * don't want to call into the wakeup code on the tracing path. Use in
99 * combination with "read_timer_interval" channel_create() argument.
100 *
101 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
102 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
103 * for drivers.
104 *
105 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
106 * has the responsibility to perform wakeups.
107 */
/*
 * Static per-client configuration; see the alloc/sync/wakeup discussion in
 * the comment above. Declared "static const" by clients so the fast-path
 * inlines can constant-fold on it.
 */
struct lib_ring_buffer_config {
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lib_ring_buffer_client_cb cb;	/* client callbacks */
};
156
157/*
158 * ring buffer context
159 *
160 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
161 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
162 * lib_ring_buffer_write().
163 */
/*
 * Reserve/commit context. Input fields are filled by
 * lib_ring_buffer_ctx_init(); output fields by lib_ring_buffer_reserve().
 */
struct lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
};
191
192/**
193 * lib_ring_buffer_ctx_init - initialize ring buffer context
194 * @ctx: ring buffer context to initialize
195 * @chan: channel
196 * @priv: client private data
197 * @data_size: size of record data payload
198 * @largest_align: largest alignment within data payload types
199 * @cpu: processor id
200 */
201static inline
202void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
203 struct channel *chan, void *priv,
204 size_t data_size, int largest_align,
205 int cpu)
206{
207 ctx->chan = chan;
208 ctx->priv = priv;
209 ctx->data_size = data_size;
210 ctx->largest_align = largest_align;
211 ctx->cpu = cpu;
212 ctx->rflags = 0;
213}
214
215/*
216 * Reservation flags.
217 *
218 * RING_BUFFER_RFLAG_FULL_TSC
219 *
220 * This flag is passed to record_header_size() and to the primitive used to
221 * write the record header. It indicates that the full 64-bit time value is
222 * needed in the record header. If this flag is not set, the record header needs
223 * only to contain "tsc_bits" bits of time value.
224 *
225 * Reservation flags can be added by the client, starting from
226 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
227 * record_header_size() to lib_ring_buffer_write_record_header().
228 */
229#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
230#define RING_BUFFER_RFLAG_END (1U << 1)
231
232/*
233 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
234 * compile-time. We have to duplicate the "config->align" information and the
235 * definition here because config->align is used both in the slow and fast
236 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
237 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}

#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 * Packed event layout: fields are not aligned, so the required padding is
 * always 0 (arguments intentionally unused).
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
267
268/**
269 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
270 * @ctx: ring buffer context.
271 */
272static inline
273void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
274 size_t alignment)
275{
276 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
277 alignment);
278}
279
280/*
281 * lib_ring_buffer_check_config() returns 0 on success.
282 * Used internally to check for valid configurations at channel creation.
283 */
284static inline
285int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
286 unsigned int switch_timer_interval,
287 unsigned int read_timer_interval)
288{
289 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
290 && config->sync == RING_BUFFER_SYNC_PER_CPU
291 && switch_timer_interval)
292 return -EINVAL;
293 return 0;
294}
295
296#include "../../wrapper/ringbuffer/vatomic.h"
297
298#endif /* _LINUX_RING_BUFFER_CONFIG_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend.h b/drivers/staging/lttng/lib/ringbuffer/frontend.h
deleted file mode 100644
index 01af77a281b0..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/frontend.h
+++ /dev/null
@@ -1,228 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_FRONTEND_H
2#define _LINUX_RING_BUFFER_FRONTEND_H
3
4/*
5 * linux/ringbuffer/frontend.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (API).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19#include <linux/pipe_fs_i.h>
20#include <linux/rcupdate.h>
21#include <linux/cpumask.h>
22#include <linux/module.h>
23#include <linux/bitops.h>
24#include <linux/splice.h>
25#include <linux/string.h>
26#include <linux/timer.h>
27#include <linux/sched.h>
28#include <linux/cache.h>
29#include <linux/time.h>
30#include <linux/slab.h>
31#include <linux/init.h>
32#include <linux/stat.h>
33#include <linux/cpu.h>
34#include <linux/fs.h>
35
36#include <asm/atomic.h>
37#include <asm/local.h>
38
39/* Internal helpers */
40#include "../../wrapper/ringbuffer/frontend_internal.h"
41
42/* Buffer creation/removal and setup operations */
43
44/*
45 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
46 * padding to let readers get those sub-buffers. Used for live streaming.
47 *
48 * read_timer_interval is the time interval (in us) to wake up pending readers.
49 *
50 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
51 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
52 * be set to NULL for other backends.
53 */
54
55extern
56struct channel *channel_create(const struct lib_ring_buffer_config *config,
57 const char *name, void *priv,
58 void *buf_addr,
59 size_t subbuf_size, size_t num_subbuf,
60 unsigned int switch_timer_interval,
61 unsigned int read_timer_interval);
62
63/*
64 * channel_destroy returns the private data pointer. It finalizes all channel's
65 * buffers, waits for readers to release all references, and destroys the
66 * channel.
67 */
68extern
69void *channel_destroy(struct channel *chan);
70
71
72/* Buffer read operations */
73
74/*
75 * Iteration on channel cpumask needs to issue a read barrier to match the write
76 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
77 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
78 * only performed at channel destruction.
79 */
/* Iterate (cpu) over the channel's allocated per-cpu buffers cpumask. */
#define for_each_channel_cpu(cpu, chan)				\
	for ((cpu) = -1;					\
		({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask);	\
		   smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
84
85extern struct lib_ring_buffer *channel_get_ring_buffer(
86 const struct lib_ring_buffer_config *config,
87 struct channel *chan, int cpu);
88extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
89extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
90
91/*
92 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
93 */
94extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
95 unsigned long *consumed,
96 unsigned long *produced);
97extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
98 unsigned long consumed_new);
99
100extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
101 unsigned long consumed);
102extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
103
104/*
105 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
106 * to read sub-buffers sequentially.
107 */
108static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
109{
110 int ret;
111
112 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
113 &buf->prod_snapshot);
114 if (ret)
115 return ret;
116 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
117 return ret;
118}
119
120static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
121{
122 lib_ring_buffer_put_subbuf(buf);
123 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
124 buf->backend.chan));
125}
126
127extern void channel_reset(struct channel *chan);
128extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
129
130static inline
131unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
132 struct lib_ring_buffer *buf)
133{
134 return v_read(config, &buf->offset);
135}
136
137static inline
138unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
139 struct lib_ring_buffer *buf)
140{
141 return atomic_long_read(&buf->consumed);
142}
143
/*
 * Must call lib_ring_buffer_is_finalized before reading counters (memory
 * ordering enforced with respect to trace teardown).
 *
 * Returns the buffer's finalized flag; the smp_rmb() orders this read
 * before any subsequent counter reads by the caller.
 */
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf)
{
	int finalized = ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	smp_rmb();
	return finalized;
}
159
160static inline
161int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
162{
163 return chan->finalized;
164}
165
166static inline
167int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
168{
169 return atomic_read(&chan->record_disabled);
170}
171
172static inline
173unsigned long lib_ring_buffer_get_read_data_size(
174 const struct lib_ring_buffer_config *config,
175 struct lib_ring_buffer *buf)
176{
177 return subbuffer_get_read_data_size(config, &buf->backend);
178}
179
180static inline
181unsigned long lib_ring_buffer_get_records_count(
182 const struct lib_ring_buffer_config *config,
183 struct lib_ring_buffer *buf)
184{
185 return v_read(config, &buf->records_count);
186}
187
188static inline
189unsigned long lib_ring_buffer_get_records_overrun(
190 const struct lib_ring_buffer_config *config,
191 struct lib_ring_buffer *buf)
192{
193 return v_read(config, &buf->records_overrun);
194}
195
196static inline
197unsigned long lib_ring_buffer_get_records_lost_full(
198 const struct lib_ring_buffer_config *config,
199 struct lib_ring_buffer *buf)
200{
201 return v_read(config, &buf->records_lost_full);
202}
203
204static inline
205unsigned long lib_ring_buffer_get_records_lost_wrap(
206 const struct lib_ring_buffer_config *config,
207 struct lib_ring_buffer *buf)
208{
209 return v_read(config, &buf->records_lost_wrap);
210}
211
212static inline
213unsigned long lib_ring_buffer_get_records_lost_big(
214 const struct lib_ring_buffer_config *config,
215 struct lib_ring_buffer *buf)
216{
217 return v_read(config, &buf->records_lost_big);
218}
219
220static inline
221unsigned long lib_ring_buffer_get_records_read(
222 const struct lib_ring_buffer_config *config,
223 struct lib_ring_buffer *buf)
224{
225 return v_read(config, &buf->backend.records_read);
226}
227
228#endif /* _LINUX_RING_BUFFER_FRONTEND_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend_api.h b/drivers/staging/lttng/lib/ringbuffer/frontend_api.h
deleted file mode 100644
index 391e59377905..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/frontend_api.h
+++ /dev/null
@@ -1,358 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
2#define _LINUX_RING_BUFFER_FRONTEND_API_H
3
4/*
5 * linux/ringbuffer/frontend_api.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (buffer write API).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
16 *
17 * Dual LGPL v2.1/GPL v2 license.
18 */
19
20#include "../../wrapper/ringbuffer/frontend.h"
21#include <linux/errno.h>
22
/**
 * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
 *
 * Disables preemption (acts as a RCU read-side critical section) and keeps a
 * ring buffer nesting count as supplementary safety net to ensure tracer client
 * code will never trigger an endless recursion. Returns the processor ID on
 * success, -EPERM on failure (nesting count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count. This is required to ensure
 * that probe side-effects which can cause recursion (e.g. unforeseen traps,
 * divisions by 0, ...) are triggered within the incremented nesting count
 * section.
 */
static inline
int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
{
	int cpu, nesting;

	/* Disable preemption; cpu is stable until the matching put_cpu(). */
	rcu_read_lock_sched_notrace();
	cpu = smp_processor_id();
	nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
	/* Keep subsequent probe code inside the nesting-count window. */
	barrier();

	/* More than 4 nested tracing levels: refuse and undo the increment. */
	if (unlikely(nesting > 4)) {
		WARN_ON_ONCE(1);
		per_cpu(lib_ring_buffer_nesting, cpu)--;
		rcu_read_unlock_sched_notrace();
		return -EPERM;
	} else
		return cpu;
}
55
/**
 * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
 *
 * Decrements the per-cpu nesting count and re-enables preemption; must pair
 * with a successful lib_ring_buffer_get_cpu().
 */
static inline
void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
{
	/* Keep all tracing side-effects before the nesting-count decrement. */
	barrier();
	__get_cpu_var(lib_ring_buffer_nesting)--;
	rcu_read_unlock_sched_notrace();
}
66
/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * returns 0 if reserve ok, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_ctx *ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf = ctx->buf;
	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx->tsc = lib_ring_buffer_clock_read(chan);
	/* Clock read error: let the slow path deal with it. */
	if ((int64_t) ctx->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and commit seq value to compare it to
	 * the commit counter.
	 */
	prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	/* At the exact start of a sub-buffer: slow path handles the switch. */
	if (unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->slot_size = record_header_size(config, chan, *o_begin,
					    before_hdr_pad, ctx);
	ctx->slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->slot_size,
				      ctx->largest_align) + ctx->data_size;
	/* Record would cross the end of the current sub-buffer: slow path. */
	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
		     > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->slot_size;

	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}
125
/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must be already initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
 *
 * Return :
 * 0 on success.
 * -EAGAIN if channel is disabled.
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */

static inline
int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (atomic_read(&chan->record_disabled))
		return -EAGAIN;

	/* Select the cpu-local buffer, or the single global one. */
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
	else
		buf = chan->backend.buf;
	if (atomic_read(&buf->record_disabled))
		return -EAGAIN;
	ctx->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
						 &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	/* Publish the new offset; a concurrent reserve forces the slow path. */
	if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
		     != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, ctx->buf, ctx->tsc);

	/*
	 * Push the reader if necessary
	 */
	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
				    subbuf_index(o_end - 1, chan));

	ctx->pre_offset = o_begin;
	ctx->buf_offset = o_begin + before_hdr_pad;
	return 0;
slow_path:
	return lib_ring_buffer_reserve_slow(ctx);
}
198
/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant : can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * requires to be executed locally for per-CPU buffers, this function must be
 * called from the CPU which owns the buffer for a ACTIVE flush, with preemption
 * disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer *buf, enum switch_mode mode)
{
	/* Thin wrapper: delegates entirely to the out-of-line slow path. */
	lib_ring_buffer_switch_slow(buf, mode);
}
219
220/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
221
/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
			    const struct lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf = ctx->buf;
	unsigned long offset_end = ctx->buf_offset;
	/* Index of the sub-buffer containing the record's last byte. */
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, &buf->backend, endidx);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
		/*
		 * Must write slot data before incrementing commit count. This
		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
		 * by get_subbuf().
		 */
		barrier();
	} else
		smp_wmb();

	v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);

	/*
	 * commit count read can race with concurrent OOO commit count updates.
	 * This is only needed for lib_ring_buffer_check_deliver (for
	 * non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause :
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count reaches back the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &buf->commit_hot[endidx].cc);

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
					     ctx->buf_offset, commit_count,
					     ctx->slot_size);
}
291
/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
					const struct lib_ring_buffer_ctx *ctx)
{
	struct lib_ring_buffer *buf = ctx->buf;
	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	/*
	 * Roll the offset back to pre_offset; succeeds only if nothing was
	 * reserved after this record.
	 */
	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
		   != end_offset))
		return -EPERM;
	else
		return 0;
}
329
static inline
void channel_record_disable(const struct lib_ring_buffer_config *config,
			    struct channel *chan)
{
	/*
	 * Counting disable: tracing into this channel is refused while
	 * record_disabled is non-zero (checked in lib_ring_buffer_reserve()).
	 */
	atomic_inc(&chan->record_disabled);
}
336
static inline
void channel_record_enable(const struct lib_ring_buffer_config *config,
			    struct channel *chan)
{
	/* Pairs with channel_record_disable(). */
	atomic_dec(&chan->record_disabled);
}
343
static inline
void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
				    struct lib_ring_buffer *buf)
{
	/*
	 * Per-buffer counting disable; checked in lib_ring_buffer_reserve()
	 * in addition to the channel-wide flag.
	 */
	atomic_inc(&buf->record_disabled);
}
350
static inline
void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf)
{
	/* Pairs with lib_ring_buffer_record_disable(). */
	atomic_dec(&buf->record_disabled);
}
357
358#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h b/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h
deleted file mode 100644
index 3bd5721eb261..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h
+++ /dev/null
@@ -1,424 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
2#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
3
4/*
5 * linux/ringbuffer/frontend_internal.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (internal helpers).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19#include "../../wrapper/ringbuffer/config.h"
20#include "../../wrapper/ringbuffer/backend_types.h"
21#include "../../wrapper/ringbuffer/frontend_types.h"
22#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
23
24/* Buffer offset macros */
25
26/* buf_trunc mask selects only the buffer number. */
27static inline
28unsigned long buf_trunc(unsigned long offset, struct channel *chan)
29{
30 return offset & ~(chan->backend.buf_size - 1);
31
32}
33
34/* Select the buffer number value (counter). */
35static inline
36unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
37{
38 return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
39}
40
41/* buf_offset mask selects only the offset within the current buffer. */
42static inline
43unsigned long buf_offset(unsigned long offset, struct channel *chan)
44{
45 return offset & (chan->backend.buf_size - 1);
46}
47
48/* subbuf_offset mask selects the offset within the current subbuffer. */
49static inline
50unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
51{
52 return offset & (chan->backend.subbuf_size - 1);
53}
54
55/* subbuf_trunc mask selects the subbuffer number. */
56static inline
57unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
58{
59 return offset & ~(chan->backend.subbuf_size - 1);
60}
61
62/* subbuf_align aligns the offset to the next subbuffer. */
63static inline
64unsigned long subbuf_align(unsigned long offset, struct channel *chan)
65{
66 return (offset + chan->backend.subbuf_size)
67 & ~(chan->backend.subbuf_size - 1);
68}
69
70/* subbuf_index returns the index of the current subbuffer within the buffer. */
71static inline
72unsigned long subbuf_index(unsigned long offset, struct channel *chan)
73{
74 return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
75}
76
77/*
78 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
79 * bits from the last TSC read. When overflows are detected, the full 64-bit
80 * timestamp counter should be written in the record header. Reads and writes
81 * last_tsc atomically.
82 */
83
84#if (BITS_PER_LONG == 32)
85static inline
86void save_last_tsc(const struct lib_ring_buffer_config *config,
87 struct lib_ring_buffer *buf, u64 tsc)
88{
89 if (config->tsc_bits == 0 || config->tsc_bits == 64)
90 return;
91
92 /*
93 * Ensure the compiler performs this update in a single instruction.
94 */
95 v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
96}
97
98static inline
99int last_tsc_overflow(const struct lib_ring_buffer_config *config,
100 struct lib_ring_buffer *buf, u64 tsc)
101{
102 unsigned long tsc_shifted;
103
104 if (config->tsc_bits == 0 || config->tsc_bits == 64)
105 return 0;
106
107 tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
108 if (unlikely(tsc_shifted
109 - (unsigned long)v_read(config, &buf->last_tsc)))
110 return 1;
111 else
112 return 0;
113}
114#else
115static inline
116void save_last_tsc(const struct lib_ring_buffer_config *config,
117 struct lib_ring_buffer *buf, u64 tsc)
118{
119 if (config->tsc_bits == 0 || config->tsc_bits == 64)
120 return;
121
122 v_set(config, &buf->last_tsc, (unsigned long)tsc);
123}
124
125static inline
126int last_tsc_overflow(const struct lib_ring_buffer_config *config,
127 struct lib_ring_buffer *buf, u64 tsc)
128{
129 if (config->tsc_bits == 0 || config->tsc_bits == 64)
130 return 0;
131
132 if (unlikely((tsc - v_read(config, &buf->last_tsc))
133 >> config->tsc_bits))
134 return 1;
135 else
136 return 0;
137}
138#endif
139
140extern
141int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
142
143extern
144void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
145 enum switch_mode mode);
146
147/* Buffer write helpers */
148
/*
 * lib_ring_buffer_reserve_push_reader - push the consumed count past a
 * sub-buffer the writer is about to overwrite.
 *
 * Lock-free cmpxchg loop: reads the current consumed count and, when the
 * writer position (@offset) has wrapped far enough that it reaches the
 * reader, advances the consumed count by one sub-buffer.  Retries only if
 * a concurrent updater changed buf->consumed in between.
 */
static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 */
		if (unlikely(subbuf_trunc(offset, chan)
			     - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
176
177static inline
178void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
179 struct lib_ring_buffer *buf,
180 unsigned long commit_count,
181 unsigned long idx)
182{
183 if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
184 v_set(config, &buf->commit_hot[idx].seq, commit_count);
185}
186
/*
 * Returns 1 when a fully committed sub-buffer is available for the reader,
 * 0 otherwise.  Deliberately barrier-free (see comment below); used e.g. by
 * the RING_BUFFER_WAKEUP_BY_WRITER path in lib_ring_buffer_check_deliver().
 */
static inline
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	unsigned long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed_old, chan);
	commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
	/*
	 * No memory barrier here, since we are only interested
	 * in a statistically correct polling result. The next poll will
	 * get the data if we are racing. The mb() that ensures correct
	 * memory order is in get_subbuf.
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */

	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed_old, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0)
		return 0;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
	    == 0)
		return 0;

	return 1;

}
228
229static inline
230int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
231 struct lib_ring_buffer *buf,
232 struct channel *chan)
233{
234 return !!subbuf_offset(v_read(config, &buf->offset), chan);
235}
236
237static inline
238unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
239 struct lib_ring_buffer *buf,
240 unsigned long idx)
241{
242 return subbuffer_get_data_size(config, &buf->backend, idx);
243}
244
/*
 * Check if all space reservation in a buffer have been committed. This helps
 * knowing if an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer *buf,
				      struct channel *chan)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &buf->commit_hot[idx].cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}
277
/*
 * lib_ring_buffer_check_deliver - deliver a sub-buffer if it is completely
 * committed.
 *
 * Called on the commit path.  If @commit_count shows the sub-buffer @idx is
 * full, takes exclusive ownership of the sub-buffer via a cmpxchg on
 * cc_sb, accounts the records, ends the sub-buffer, and releases it for
 * readers (or for overwrite by other writers).
 */
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;
	u64 tsc;

	/* Check if all commits have been done */
	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0)) {
		/*
		 * If we succeeded at updating cc_sb below, we are the subbuffer
		 * writer delivering the subbuffer. Deals with concurrent
		 * updates of the "cc" value without adding a add_return atomic
		 * operation to the fast path.
		 *
		 * We are doing the delivery in two steps:
		 * - First, we cmpxchg() cc_sb to the new value
		 *   old_commit_count + 1. This ensures that we are the only
		 *   subbuffer user successfully filling the subbuffer, but we
		 *   do _not_ set the cc_sb value to "commit_count" yet.
		 *   Therefore, other writers that would wrap around the ring
		 *   buffer and try to start writing to our subbuffer would
		 *   have to drop records, because it would appear as
		 *   non-filled.
		 *   We therefore have exclusive access to the subbuffer control
		 *   structures. This mutual exclusion with other writers is
		 *   crucially important to perform record overruns count in
		 *   flight recorder mode locklessly.
		 * - When we are ready to release the subbuffer (either for
		 *   reading or for overrun by other writers), we simply set the
		 *   cc_sb value to "commit_count" and perform delivery.
		 *
		 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
		 * This guarantees that old_commit_count + 1 != commit_count.
		 */
		if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
				     old_commit_count, old_commit_count + 1)
			   == old_commit_count)) {
			/*
			 * Start of exclusive subbuffer access. We are
			 * guaranteed to be the last writer in this subbuffer
			 * and any other writer trying to access this subbuffer
			 * in this state is required to drop records.
			 */
			tsc = config->cb.ring_buffer_clock_read(chan);
			v_add(config,
			      subbuffer_get_records_count(config,
							  &buf->backend, idx),
			      &buf->records_count);
			v_add(config,
			      subbuffer_count_records_overrun(config,
							      &buf->backend,
							      idx),
			      &buf->records_overrun);
			config->cb.buffer_end(buf, tsc, idx,
					      lib_ring_buffer_get_data_size(config,
									buf,
									idx));

			/*
			 * Set noref flag and offset for this subbuffer id.
			 * Contains a memory barrier that ensures counter stores
			 * are ordered before set noref and offset.
			 */
			lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
							 buf_trunc_val(offset, chan));

			/*
			 * Order set_noref and record counter updates before the
			 * end of subbuffer exclusive access. Orders with
			 * respect to writers coming into the subbuffer after
			 * wrap around, and also order wrt concurrent readers.
			 */
			smp_mb();
			/* End of exclusive subbuffer access */
			v_set(config, &buf->commit_cold[idx].cc_sb,
			      commit_count);
			lib_ring_buffer_vmcore_check_deliver(config, buf,
							 commit_count, idx);

			/*
			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
			 */
			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
			    && atomic_long_read(&buf->active_readers)
			    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
				wake_up_interruptible(&buf->read_wait);
				wake_up_interruptible(&chan->read_wait);
			}

		}
	}
}
377
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long idx,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  size_t slot_size)
{
	unsigned long offset, commit_seq_old;

	/* commit_seq is only maintained for OOPS-consistency clients. */
	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	offset = buf_offset + slot_size;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (unlikely(subbuf_offset(offset - commit_count, chan)))
		return;

	/*
	 * Monotonically push commit_seq forward to commit_count; the
	 * cmpxchg loop retries only while our view of seq is behind, so
	 * concurrent updaters cannot move it backwards.
	 */
	commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
	while ((long) (commit_seq_old - commit_count) < 0)
		commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
					   commit_seq_old, commit_count);
}
416
417extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
418 struct channel_backend *chanb, int cpu);
419extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
420
421/* Keep track of trap nesting inside ring buffer code */
422DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
423
424#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend_types.h b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
deleted file mode 100644
index eced7be95b64..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
+++ /dev/null
@@ -1,176 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
2#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
3
4/*
5 * linux/ringbuffer/frontend_types.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (types).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19#include <linux/kref.h>
20#include "../../wrapper/ringbuffer/config.h"
21#include "../../wrapper/ringbuffer/backend_types.h"
22#include "../../wrapper/spinlock.h"
23#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
24
/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).  Consumed by
 * lib_ring_buffer_switch_slow() (declared in frontend_internal.h).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
30
/*
 * channel-level read-side iterator: merges per-CPU buffers into a single
 * timestamp-ordered stream via a priority heap.
 */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct lttng_ptr_heap heap;	/* Heap of struct lib_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading ? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state.
	 */
	unsigned long len_left;
};
45
/*
 * channel: collection of per-cpu ring buffers.
 * Lifetime is managed through the embedded kref ("ref" below).
 */
struct channel {
	atomic_t record_disabled;	/* Writes disabled while > 0 */
	unsigned long commit_count_mask;	/*
					 * Commit count mask, removing
					 * the MSBs corresponding to
					 * bits used to represent the
					 * subbuffer index.
					 */

	struct channel_backend backend;	/* Associated backend */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	struct notifier_block cpu_hp_notifier;	/* CPU hotplug notifier */
	struct notifier_block tick_nohz_notifier;	/* CPU nohz notifier */
	struct notifier_block hp_iter_notifier;	/* hotplug iterator notifier */
	uint cpu_hp_enable:1;			/* Enable CPU hotplug notif. */
	uint hp_iter_enable:1;			/* Enable hp iter notif. */
	wait_queue_head_t read_wait;		/* reader wait queue */
	wait_queue_head_t hp_wait;		/* CPU hotplug wait queue */
	int finalized;				/* Has channel been finalized */
	struct channel_iter iter;		/* Channel read-side iterator */
	struct kref ref;			/* Reference count */
};
71
/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits (OOPS consistency) */
};
77
/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
};
82
/*
 * Per-buffer read iterator.  "state" tracks where the iterator is in the
 * get_subbuf / read records / put_subbuf cycle.
 */
struct lib_ring_buffer_iter {
	u64 timestamp;			/* Current record timestamp */
	size_t header_len;		/* Current record header length */
	size_t payload_len;		/* Current record payload length */

	struct list_head empty_node;	/* Linked list of empty buffers */
	unsigned long consumed, read_offset, data_size;
	enum {
		ITER_GET_SUBBUF = 0,
		ITER_TEST_RECORD,
		ITER_NEXT_RECORD,
		ITER_PUT_SUBBUF,
	} state;
	uint allocated:1;		/* Iterator resources allocated ? */
	uint read_open:1;		/* Opened for reading ? */
};
100
/*
 * ring buffer state.  Field order is deliberate: the write-hot fields are
 * packed into the first 32 bytes (see cacheline comments below).
 */
struct lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	struct commit_counters_hot *commit_hot;
					/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	atomic_t record_disabled;	/* Writes disabled while > 0 */
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lib_ring_buffer_backend backend;	/* Associated backend */

	struct commit_counters_cold *commit_cold;
					/* Commit count per sub-buffer */
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	wait_queue_head_t write_wait;	/* writer buffer-level wait queue (for metadata only) */
	int finalized;			/* buffer has been finalized */
	struct timer_list switch_timer;	/* timer for periodical switch */
	struct timer_list read_timer;	/* timer for read poll */
	raw_spinlock_t raw_tick_nohz_spinlock;	/* nohz entry lock/trylock */
	struct lib_ring_buffer_iter iter;	/* read-side iterator */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	uint get_subbuf:1;		/* Sub-buffer being held by reader */
	uint switch_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
	uint read_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
};
145
146static inline
147void *channel_get_private(struct channel *chan)
148{
149 return chan->backend.priv;
150}
151
/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
 * parameters.
 *
 * Evaluates "cond" exactly once and yields its truth value, so the macro
 * can be used directly inside an if () condition.  On error, the owning
 * channel is found via __same_type()/container_of() from either pointer
 * type, its record_disabled counter is incremented, and WARN_ON(1) fires.
 */
#define CHAN_WARN_ON(c, cond)					\
	({							\
		struct channel *__chan;				\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = container_of((void *) (c),	\
						      struct channel,	\
						      backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			atomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
175
176#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/iterator.h b/drivers/staging/lttng/lib/ringbuffer/iterator.h
deleted file mode 100644
index f2bd50dd5927..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/iterator.h
+++ /dev/null
@@ -1,70 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_ITERATOR_H
2#define _LINUX_RING_BUFFER_ITERATOR_H
3
4/*
5 * linux/ringbuffer/iterator.h
6 *
7 * (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer and channel iterators.
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * Dual LGPL v2.1/GPL v2 license.
15 */
16
17#include "../../wrapper/ringbuffer/backend.h"
18#include "../../wrapper/ringbuffer/frontend.h"
19
20/*
21 * lib_ring_buffer_get_next_record advances the buffer read position to the next
22 * record. It returns either the size of the next record, -EAGAIN if there is
23 * currently no data available, or -ENODATA if no data is available and buffer
24 * is finalized.
25 */
26extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
27 struct lib_ring_buffer *buf);
28
29/*
30 * channel_get_next_record advances the buffer read position to the next record.
31 * It returns either the size of the next record, -EAGAIN if there is currently
32 * no data available, or -ENODATA if no data is available and buffer is
33 * finalized.
34 * Returns the current buffer in ret_buf.
35 */
36extern ssize_t channel_get_next_record(struct channel *chan,
37 struct lib_ring_buffer **ret_buf);
38
39/**
40 * read_current_record - copy the buffer current record into dest.
41 * @buf: ring buffer
42 * @dest: destination where the record should be copied
43 *
44 * dest should be large enough to contain the record. Returns the number of
45 * bytes copied.
46 */
47static inline size_t read_current_record(struct lib_ring_buffer *buf, void *dest)
48{
49 return lib_ring_buffer_read(&buf->backend, buf->iter.read_offset,
50 dest, buf->iter.payload_len);
51}
52
53extern int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf);
54extern void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf);
55extern int channel_iterator_open(struct channel *chan);
56extern void channel_iterator_release(struct channel *chan);
57
58extern const struct file_operations channel_payload_file_operations;
59extern const struct file_operations lib_ring_buffer_payload_file_operations;
60
61/*
62 * Used internally.
63 */
64int channel_iterator_init(struct channel *chan);
65void channel_iterator_unregister_notifiers(struct channel *chan);
66void channel_iterator_free(struct channel *chan);
67void channel_iterator_reset(struct channel *chan);
68void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
69
70#endif /* _LINUX_RING_BUFFER_ITERATOR_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/nohz.h b/drivers/staging/lttng/lib/ringbuffer/nohz.h
deleted file mode 100644
index 3c3107256902..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/nohz.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_NOHZ_H
2#define _LINUX_RING_BUFFER_NOHZ_H
3
4/*
5 * ringbuffer/nohz.h
6 *
7 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Dual LGPL v2.1/GPL v2 license.
10 */
11
/*
 * Tick-nohz integration hooks.  Real implementations are provided when the
 * ring buffer library is built in; otherwise they compile away to no-ops.
 */
#ifdef CONFIG_LIB_RING_BUFFER
void lib_ring_buffer_tick_nohz_flush(void);
void lib_ring_buffer_tick_nohz_stop(void);
void lib_ring_buffer_tick_nohz_restart(void);
#else
/* CONFIG_LIB_RING_BUFFER disabled: empty inline stubs. */
static inline void lib_ring_buffer_tick_nohz_flush(void)
{
}

static inline void lib_ring_buffer_tick_nohz_stop(void)
{
}

static inline void lib_ring_buffer_tick_nohz_restart(void)
{
}
#endif
29
30#endif /* _LINUX_RING_BUFFER_NOHZ_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c
deleted file mode 100644
index d1b5b8cde549..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c
+++ /dev/null
@@ -1,854 +0,0 @@
1/*
2 * ring_buffer_backend.c
3 *
4 * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Dual LGPL v2.1/GPL v2 license.
7 */
8
9#include <linux/stddef.h>
10#include <linux/module.h>
11#include <linux/string.h>
12#include <linux/bitops.h>
13#include <linux/delay.h>
14#include <linux/errno.h>
15#include <linux/slab.h>
16#include <linux/cpu.h>
17#include <linux/mm.h>
18
19#include "../../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
20#include "../../wrapper/ringbuffer/config.h"
21#include "../../wrapper/ringbuffer/backend.h"
22#include "../../wrapper/ringbuffer/frontend.h"
23
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend to populate
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 *
 * Returns 0 on success, -ENOMEM on allocation failure (all partial
 * allocations are released via the goto cleanup chain before returning).
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	void **virt;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	/*
	 * NOTE(review): max(bufb->cpu, 0) suggests bufb->cpu can be negative
	 * (global, non per-cpu buffer); confirm against the callers of
	 * lib_ring_buffer_backend_create().
	 */
	pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!virt))
		goto virt_error;

	bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
		virt[i] = page_address(pages[i]);
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = kzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = virt[page_idx];
			bufb->array[i]->p[j].page = pages[page_idx];
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	/* pages/virt were only staging arrays; ownership moved to bufb->array. */
	kfree(virt);
	kfree(pages);
	return 0;

free_array:
	/* bufb->array entries are zero until allocated; stop at first NULL. */
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kfree(bufb->array);
array_error:
	kfree(virt);
virt_error:
	kfree(pages);
pages_error:
	return -ENOMEM;
}
154
155int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
156 struct channel_backend *chanb, int cpu)
157{
158 const struct lib_ring_buffer_config *config = chanb->config;
159
160 bufb->chan = container_of(chanb, struct channel, backend);
161 bufb->cpu = cpu;
162
163 return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
164 chanb->num_subbuf,
165 chanb->extra_reader_sb);
166}
167
168void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
169{
170 struct channel_backend *chanb = &bufb->chan->backend;
171 unsigned long i, j, num_subbuf_alloc;
172
173 num_subbuf_alloc = chanb->num_subbuf;
174 if (chanb->extra_reader_sb)
175 num_subbuf_alloc++;
176
177 kfree(bufb->buf_wsb);
178 for (i = 0; i < num_subbuf_alloc; i++) {
179 for (j = 0; j < bufb->num_pages_per_subbuf; j++)
180 __free_page(bufb->array[i]->p[j].page);
181 kfree(bufb->array[i]);
182 }
183 kfree(bufb->array);
184 bufb->allocated = 0;
185}
186
/*
 * Reset a buffer backend to its just-allocated state: reinstall identity
 * subbuffer ids on the write/read-side tables and zero the per-subbuffer
 * and per-buffer record counters.  Page contents and mappings are kept.
 */
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
216
217/*
218 * The frontend is responsible for also calling ring_buffer_backend_reset for
219 * each buffer when calling channel_backend_reset.
220 */
221void channel_backend_reset(struct channel_backend *chanb)
222{
223 struct channel *chan = container_of(chanb, struct channel, backend);
224 const struct lib_ring_buffer_config *config = chanb->config;
225
226 /*
227 * Don't reset buf_size, subbuf_size, subbuf_size_order,
228 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
229 * priv, notifiers, config, cpumask and name.
230 */
231 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
232}
233
#ifdef CONFIG_HOTPLUG_CPU
/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Creates the per-cpu buffer when a CPU comes up; CPU death needs no
 * buffer switch here (see comment below).  Only valid for per-cpu
 * allocation (asserted via CHAN_WARN_ON).
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
					      unsigned long action,
					      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "ring_buffer_cpu_hp_callback: cpu %d "
			  "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer
		 * CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}
#endif
279
280/**
281 * channel_backend_init - initialize a channel backend
282 * @chanb: channel backend
283 * @name: channel name
284 * @config: client ring buffer configuration
285 * @priv: client private data
286 * @parent: dentry of parent directory, %NULL for root directory
287 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
288 * @num_subbuf: number of sub-buffers (power of 2)
289 *
290 * Returns channel pointer if successful, %NULL otherwise.
291 *
292 * Creates per-cpu channel buffers using the sizes and attributes
293 * specified. The created channel buffer files will be named
294 * name_0...name_N-1. File permissions will be %S_IRUSR.
295 *
296 * Called with CPU hotplug disabled.
297 */
298int channel_backend_init(struct channel_backend *chanb,
299 const char *name,
300 const struct lib_ring_buffer_config *config,
301 void *priv, size_t subbuf_size, size_t num_subbuf)
302{
303 struct channel *chan = container_of(chanb, struct channel, backend);
304 unsigned int i;
305 int ret;
306
307 if (!name)
308 return -EPERM;
309
310 if (!(subbuf_size && num_subbuf))
311 return -EPERM;
312
313 /* Check that the subbuffer size is larger than a page. */
314 if (subbuf_size < PAGE_SIZE)
315 return -EINVAL;
316
317 /*
318 * Make sure the number of subbuffers and subbuffer size are power of 2.
319 */
320 CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
321 CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);
322
323 ret = subbuffer_id_check_index(config, num_subbuf);
324 if (ret)
325 return ret;
326
327 chanb->priv = priv;
328 chanb->buf_size = num_subbuf * subbuf_size;
329 chanb->subbuf_size = subbuf_size;
330 chanb->buf_size_order = get_count_order(chanb->buf_size);
331 chanb->subbuf_size_order = get_count_order(subbuf_size);
332 chanb->num_subbuf_order = get_count_order(num_subbuf);
333 chanb->extra_reader_sb =
334 (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
335 chanb->num_subbuf = num_subbuf;
336 strlcpy(chanb->name, name, NAME_MAX);
337 chanb->config = config;
338
339 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
340 if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
341 return -ENOMEM;
342 }
343
344 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
345 /* Allocating the buffer per-cpu structures */
346 chanb->buf = alloc_percpu(struct lib_ring_buffer);
347 if (!chanb->buf)
348 goto free_cpumask;
349
350 /*
351 * In case of non-hotplug cpu, if the ring-buffer is allocated
352 * in early initcall, it will not be notified of secondary cpus.
353 * In that off case, we need to allocate for all possible cpus.
354 */
355#ifdef CONFIG_HOTPLUG_CPU
356 /*
357 * buf->backend.allocated test takes care of concurrent CPU
358 * hotplug.
359 * Priority higher than frontend, so we create the ring buffer
360 * before we start the timer.
361 */
362 chanb->cpu_hp_notifier.notifier_call =
363 lib_ring_buffer_cpu_hp_callback;
364 chanb->cpu_hp_notifier.priority = 5;
365 register_hotcpu_notifier(&chanb->cpu_hp_notifier);
366
367 get_online_cpus();
368 for_each_online_cpu(i) {
369 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
370 chanb, i);
371 if (ret)
372 goto free_bufs; /* cpu hotplug locked */
373 }
374 put_online_cpus();
375#else
376 for_each_possible_cpu(i) {
377 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
378 chanb, i);
379 if (ret)
380 goto free_bufs; /* cpu hotplug locked */
381 }
382#endif
383 } else {
384 chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
385 if (!chanb->buf)
386 goto free_cpumask;
387 ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
388 if (ret)
389 goto free_bufs;
390 }
391 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
392
393 return 0;
394
395free_bufs:
396 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
397 for_each_possible_cpu(i) {
398 struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
399
400 if (!buf->backend.allocated)
401 continue;
402 lib_ring_buffer_free(buf);
403 }
404#ifdef CONFIG_HOTPLUG_CPU
405 put_online_cpus();
406#endif
407 free_percpu(chanb->buf);
408 } else
409 kfree(chanb->buf);
410free_cpumask:
411 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
412 free_cpumask_var(chanb->cpumask);
413 return -ENOMEM;
414}
415
416/**
417 * channel_backend_unregister_notifiers - unregister notifiers
418 * @chan: the channel
419 *
420 * Holds CPU hotplug.
421 */
422void channel_backend_unregister_notifiers(struct channel_backend *chanb)
423{
424 const struct lib_ring_buffer_config *config = chanb->config;
425
426 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
427 unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
428}
429
/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroy all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Skip CPUs whose buffer was never created (hotplug). */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		/* The single global buffer must have been created. */
		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}
459
/**
 * _lib_ring_buffer_write - slow path: write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied by the caller's fast path
 *
 * Continues a write that crosses page boundaries: each iteration advances
 * past the bytes copied so far, then copies up to the end of the current
 * page, until the remainder fits within one page.
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, ssize_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		/* Skip what has already been copied. */
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		/* Copy at most up to the end of the current page. */
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
					    + (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
503
504
/**
 * _lib_ring_buffer_memset - slow path: write len bytes of c to a ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : number of bytes already written by the caller's fast path
 *
 * Same page-by-page structure as _lib_ring_buffer_write, filling with a
 * constant byte instead of copying from a source buffer.
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, ssize_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		/* Skip what has already been written. */
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		/* Fill at most up to the end of the current page. */
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
547
548
549/**
550 * lib_ring_buffer_copy_from_user - write user data to a ring_buffer buffer.
551 * @bufb : buffer backend
552 * @offset : offset within the buffer
553 * @src : source address
554 * @len : length to write
555 * @pagecpy : page size copied so far
556 *
557 * This function deals with userspace pointers, it should never be called
558 * directly without having the src pointer checked with access_ok()
559 * previously.
560 */
561void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
562 size_t offset,
563 const void __user *src, size_t len,
564 ssize_t pagecpy)
565{
566 struct channel_backend *chanb = &bufb->chan->backend;
567 const struct lib_ring_buffer_config *config = chanb->config;
568 size_t sbidx, index;
569 struct lib_ring_buffer_backend_pages *rpages;
570 unsigned long sb_bindex, id;
571 int ret;
572
573 do {
574 len -= pagecpy;
575 src += pagecpy;
576 offset += pagecpy;
577 sbidx = offset >> chanb->subbuf_size_order;
578 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
579
580 /*
581 * Underlying layer should never ask for writes across
582 * subbuffers.
583 */
584 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
585
586 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
587 id = bufb->buf_wsb[sbidx].id;
588 sb_bindex = subbuffer_id_get_index(config, id);
589 rpages = bufb->array[sb_bindex];
590 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
591 && subbuffer_id_is_noref(config, id));
592 ret = lib_ring_buffer_do_copy_from_user(rpages->p[index].virt
593 + (offset & ~PAGE_MASK),
594 src, pagecpy) != 0;
595 if (ret > 0) {
596 offset += (pagecpy - ret);
597 len -= (pagecpy - ret);
598 _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
599 break; /* stop copy */
600 }
601 } while (unlikely(len != pagecpy));
602}
603EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user);
604
/**
 * lib_ring_buffer_read - read data from ring_buffer_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t index;
	ssize_t pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	/* Wrap the offset into the circular buffer. */
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		/* Copy page by page out of the reader-owned sub-buffer. */
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
654
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to call this
 * function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	/* Wrap the offset into the circular buffer. */
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		/* Copy at most one page per iteration. */
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
			       rpages->p[index].virt + (offset & ~PAGE_MASK),
			       pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
707
/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address (may be NULL to only measure the string)
 * @len : destination's length
 *
 * return string's length in the buffer (not capped by @len; the copy into
 * @dest is).
 * Should be protected by get_subbuf/put_subbuf.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		/* How far the string can extend within the current page. */
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/* Terminator found within this page: string is complete. */
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	/* NUL-terminate the (possibly truncated) destination string. */
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
764
765/**
766 * lib_ring_buffer_read_get_page - Get a whole page to read from
767 * @bufb : buffer backend
768 * @offset : offset within the buffer
769 * @virt : pointer to page address (output)
770 *
771 * Should be protected by get_subbuf/put_subbuf.
772 * Returns the pointer to the page struct pointer.
773 */
774struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
775 size_t offset, void ***virt)
776{
777 size_t index;
778 struct lib_ring_buffer_backend_pages *rpages;
779 struct channel_backend *chanb = &bufb->chan->backend;
780 const struct lib_ring_buffer_config *config = chanb->config;
781 unsigned long sb_bindex, id;
782
783 offset &= chanb->buf_size - 1;
784 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
785 id = bufb->buf_rsb.id;
786 sb_bindex = subbuffer_id_get_index(config, id);
787 rpages = bufb->array[sb_bindex];
788 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
789 && subbuffer_id_is_noref(config, id));
790 *virt = &rpages->p[index].virt;
791 return &rpages->p[index].page;
792}
793EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
794
795/**
796 * lib_ring_buffer_read_offset_address - get address of a buffer location
797 * @bufb : buffer backend
798 * @offset : offset within the buffer.
799 *
800 * Return the address where a given offset is located (for read).
801 * Should be used to get the current subbuffer header pointer. Given we know
802 * it's never on a page boundary, it's safe to write directly to this address,
803 * as long as the write is never bigger than a page size.
804 */
805void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
806 size_t offset)
807{
808 size_t index;
809 struct lib_ring_buffer_backend_pages *rpages;
810 struct channel_backend *chanb = &bufb->chan->backend;
811 const struct lib_ring_buffer_config *config = chanb->config;
812 unsigned long sb_bindex, id;
813
814 offset &= chanb->buf_size - 1;
815 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
816 id = bufb->buf_rsb.id;
817 sb_bindex = subbuffer_id_get_index(config, id);
818 rpages = bufb->array[sb_bindex];
819 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
820 && subbuffer_id_is_noref(config, id));
821 return rpages->p[index].virt + (offset & ~PAGE_MASK);
822}
823EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
824
825/**
826 * lib_ring_buffer_offset_address - get address of a location within the buffer
827 * @bufb : buffer backend
828 * @offset : offset within the buffer.
829 *
830 * Return the address where a given offset is located.
831 * Should be used to get the current subbuffer header pointer. Given we know
832 * it's always at the beginning of a page, it's safe to write directly to this
833 * address, as long as the write is never bigger than a page size.
834 */
835void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
836 size_t offset)
837{
838 size_t sbidx, index;
839 struct lib_ring_buffer_backend_pages *rpages;
840 struct channel_backend *chanb = &bufb->chan->backend;
841 const struct lib_ring_buffer_config *config = chanb->config;
842 unsigned long sb_bindex, id;
843
844 offset &= chanb->buf_size - 1;
845 sbidx = offset >> chanb->subbuf_size_order;
846 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
847 id = bufb->buf_wsb[sbidx].id;
848 sb_bindex = subbuffer_id_get_index(config, id);
849 rpages = bufb->array[sb_bindex];
850 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
851 && subbuffer_id_is_noref(config, id));
852 return rpages->p[index].virt + (offset & ~PAGE_MASK);
853}
854EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
deleted file mode 100644
index 348c05e7b141..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
+++ /dev/null
@@ -1,1715 +0,0 @@
1/*
2 * ring_buffer_frontend.c
3 *
4 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
7 * recorder (overwrite) modes. See thesis:
8 *
9 * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
10 * dissertation, Ecole Polytechnique de Montreal.
11 * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
12 *
13 * - Algorithm presentation in Chapter 5:
14 * "Lockless Multi-Core High-Throughput Buffering".
15 * - Algorithm formal verification in Section 8.6:
16 * "Formal verification of LTTng"
17 *
18 * Author:
19 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
20 *
21 * Inspired from LTT and RelayFS:
22 * Karim Yaghmour <karim@opersys.com>
23 * Tom Zanussi <zanussi@us.ibm.com>
24 * Bob Wisniewski <bob@watson.ibm.com>
25 * And from K42 :
26 * Bob Wisniewski <bob@watson.ibm.com>
27 *
28 * Buffer reader semantic :
29 *
30 * - get_subbuf_size
31 * while buffer is not finalized and empty
32 * - get_subbuf
33 * - if return value != 0, continue
34 * - splice one subbuffer worth of data to a pipe
35 * - splice the data from pipe to disk/network
36 * - put_subbuf
37 *
38 * Dual LGPL v2.1/GPL v2 license.
39 */
40
41#include <linux/delay.h>
42#include <linux/module.h>
43#include <linux/percpu.h>
44
45#include "../../wrapper/ringbuffer/config.h"
46#include "../../wrapper/ringbuffer/backend.h"
47#include "../../wrapper/ringbuffer/frontend.h"
48#include "../../wrapper/ringbuffer/iterator.h"
49#include "../../wrapper/ringbuffer/nohz.h"
50
/*
 * Internal structure representing offsets to use at a sub-buffer switch.
 */
struct switch_offsets {
	unsigned long begin, end, old;
	size_t pre_header_padding, size;
	/* Flags selecting which sub-buffer begin/end records to write. */
	uint switch_new_start:1, switch_new_end:1, switch_old_start:1,
	     switch_old_end:1;
};

#ifdef CONFIG_NO_HZ
/* nohz notification phases handled by the ring buffer. */
enum tick_nohz_val {
	TICK_NOHZ_STOP,
	TICK_NOHZ_FLUSH,
	TICK_NOHZ_RESTART,
};

static ATOMIC_NOTIFIER_HEAD(tick_nohz_notifier);
#endif /* CONFIG_NO_HZ */

/* NOTE(review): presumably serializes nohz transitions per cpu — confirm. */
static DEFINE_PER_CPU(spinlock_t, ring_buffer_nohz_lock);

/* Per-cpu nesting counter, exported for client fast paths. */
DEFINE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);

/* Forward declaration: definition below. */
static
void lib_ring_buffer_print_errors(struct channel *chan,
				  struct lib_ring_buffer *buf, int cpu);
79
/*
 * Must be called under cpu hotplug protection.
 *
 * Reports accumulated error counters, then releases the commit counter
 * arrays and the backend pages of this buffer.
 */
void lib_ring_buffer_free(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;

	lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
	kfree(buf->commit_hot);
	kfree(buf->commit_cold);

	lib_ring_buffer_backend_free(&buf->backend);
}
93
/**
 * lib_ring_buffer_reset - Reset ring buffer to initial values.
 * @buf: Ring buffer.
 *
 * Effectively empty the ring buffer. Should be called when the buffer is not
 * used for writing. The ring buffer can be opened for reading, but the reader
 * should not be using the iterator concurrently with reset. The previous
 * current iterator record is reset.
 */
void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;
	unsigned int i;

	/*
	 * Reset iterator first. It will put the subbuffer if it currently holds
	 * it.
	 */
	lib_ring_buffer_iterator_reset(buf);
	v_set(config, &buf->offset, 0);
	/* Clear per-subbuffer commit counters. */
	for (i = 0; i < chan->backend.num_subbuf; i++) {
		v_set(config, &buf->commit_hot[i].cc, 0);
		v_set(config, &buf->commit_hot[i].seq, 0);
		v_set(config, &buf->commit_cold[i].cc_sb, 0);
	}
	atomic_long_set(&buf->consumed, 0);
	atomic_set(&buf->record_disabled, 0);
	v_set(config, &buf->last_tsc, 0);
	lib_ring_buffer_backend_reset(&buf->backend);
	/* Don't reset number of active readers */
	/* Clear record-loss and throughput statistics. */
	v_set(config, &buf->records_lost_full, 0);
	v_set(config, &buf->records_lost_wrap, 0);
	v_set(config, &buf->records_lost_big, 0);
	v_set(config, &buf->records_count, 0);
	v_set(config, &buf->records_overrun, 0);
	buf->finalized = 0;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_reset);
133
/**
 * channel_reset - Reset channel to initial values.
 * @chan: Channel.
 *
 * Effectively empty the channel. Should be called when the channel is not used
 * for writing. The channel can be opened for reading, but the reader should not
 * be using the iterator concurrently with reset. The previous current iterator
 * record is reset.
 *
 * Per-buffer state is reset separately (lib_ring_buffer_reset).
 */
void channel_reset(struct channel *chan)
{
	/*
	 * Reset iterators first. Will put the subbuffer if held for reading.
	 */
	channel_iterator_reset(chan);
	atomic_set(&chan->record_disabled, 0);
	/* Don't reset commit_count_mask, still valid */
	channel_backend_reset(&chan->backend);
	/* Don't reset switch/read timer interval */
	/* Don't reset notifiers and notifier enable bits */
	/* Don't reset reader reference count */
}
EXPORT_SYMBOL_GPL(channel_reset);
157
/*
 * lib_ring_buffer_create - allocate and initialize one ring buffer.
 *
 * Must be called under cpu hotplug protection. Idempotent with respect to
 * CPU hotplug: returns 0 immediately if the buffer is already allocated.
 */
int lib_ring_buffer_create(struct lib_ring_buffer *buf,
			   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = chanb->config;
	struct channel *chan = container_of(chanb, struct channel, backend);
	void *priv = chanb->priv;
	size_t subbuf_header_size;
	u64 tsc;
	int ret;

	/* Test for cpu hotplug */
	if (buf->backend.allocated)
		return 0;

	/*
	 * Paranoia: per cpu dynamic allocation is not officially documented as
	 * zeroing the memory, so let's do it here too, just in case.
	 */
	memset(buf, 0, sizeof(*buf));

	ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, cpu);
	if (ret)
		return ret;

	/*
	 * Cache-line align the commit counter arrays, allocated on the
	 * buffer's NUMA node (cpu == -1 for global buffers maps to node 0).
	 */
	buf->commit_hot =
		kzalloc_node(ALIGN(sizeof(*buf->commit_hot)
				   * chan->backend.num_subbuf,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(cpu, 0)));
	if (!buf->commit_hot) {
		ret = -ENOMEM;
		goto free_chanbuf;
	}

	buf->commit_cold =
		kzalloc_node(ALIGN(sizeof(*buf->commit_cold)
				   * chan->backend.num_subbuf,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL, cpu_to_node(max(cpu, 0)));
	if (!buf->commit_cold) {
		ret = -ENOMEM;
		goto free_commit;
	}

	init_waitqueue_head(&buf->read_wait);
	init_waitqueue_head(&buf->write_wait);
	raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);

	/*
	 * Write the subbuffer header for first subbuffer so we know the total
	 * duration of data gathering.
	 */
	subbuf_header_size = config->cb.subbuffer_header_size();
	v_set(config, &buf->offset, subbuf_header_size);
	subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
	tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
	config->cb.buffer_begin(buf, tsc, 0);
	v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);

	/* Optional client hook (e.g. to create reader interfaces). */
	if (config->cb.buffer_create) {
		ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
		if (ret)
			goto free_init;
	}

	/*
	 * Ensure the buffer is ready before setting it to allocated and setting
	 * the cpumask.
	 * Used for cpu hotplug vs cpumask iteration.
	 */
	smp_wmb();
	buf->backend.allocated = 1;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		CHAN_WARN_ON(chan, cpumask_test_cpu(cpu,
			     chan->backend.cpumask));
		cpumask_set_cpu(cpu, chan->backend.cpumask);
	}

	return 0;

	/* Error handling */
free_init:
	kfree(buf->commit_cold);
free_commit:
	kfree(buf->commit_hot);
free_chanbuf:
	lib_ring_buffer_backend_free(&buf->backend);
	return ret;
}
251
/*
 * switch_buffer_timer - periodic deferred flush of the current sub-buffer.
 * @data: the lib_ring_buffer, cast to unsigned long (timer callback ABI).
 */
static void switch_buffer_timer(unsigned long data)
{
	struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	/*
	 * Only flush buffers periodically if readers are active.
	 */
	if (atomic_long_read(&buf->active_readers))
		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);

	/* Re-arm; keep the timer pinned to this CPU for per-cpu buffers. */
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		mod_timer_pinned(&buf->switch_timer,
				 jiffies + chan->switch_timer_interval);
	else
		mod_timer(&buf->switch_timer,
			  jiffies + chan->switch_timer_interval);
}
271
/*
 * Called with ring_buffer_nohz_lock held for per-cpu buffers.
 *
 * Arms the periodic sub-buffer switch timer; no-op when the channel has no
 * switch interval configured or the timer is already running.
 */
static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	if (!chan->switch_timer_interval || buf->switch_timer_enabled)
		return;
	init_timer(&buf->switch_timer);
	buf->switch_timer.function = switch_buffer_timer;
	buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
	buf->switch_timer.data = (unsigned long)buf;
	/* Per-cpu buffers get their timer bound to their own CPU. */
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		add_timer_on(&buf->switch_timer, buf->backend.cpu);
	else
		add_timer(&buf->switch_timer);
	buf->switch_timer_enabled = 1;
}
292
293/*
294 * Called with ring_buffer_nohz_lock held for per-cpu buffers.
295 */
296static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
297{
298 struct channel *chan = buf->backend.chan;
299
300 if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
301 return;
302
303 del_timer_sync(&buf->switch_timer);
304 buf->switch_timer_enabled = 0;
305}
306
/*
 * Polling timer to check the channels for data.
 */
static void read_buffer_timer(unsigned long data)
{
	struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	CHAN_WARN_ON(chan, !buf->backend.allocated);

	/* Wake readers only if there is deliverable data. */
	if (atomic_long_read(&buf->active_readers)
	    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
		wake_up_interruptible(&buf->read_wait);
		wake_up_interruptible(&chan->read_wait);
	}

	/* Re-arm; keep the timer pinned to this CPU for per-cpu buffers. */
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		mod_timer_pinned(&buf->read_timer,
				 jiffies + chan->read_timer_interval);
	else
		mod_timer(&buf->read_timer,
			  jiffies + chan->read_timer_interval);
}
331
/*
 * Called with ring_buffer_nohz_lock held for per-cpu buffers.
 *
 * Arms the polling read timer. Only relevant when the client requested
 * timer-based reader wakeup (RING_BUFFER_WAKEUP_BY_TIMER).
 */
static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
	    || !chan->read_timer_interval
	    || buf->read_timer_enabled)
		return;

	init_timer(&buf->read_timer);
	buf->read_timer.function = read_buffer_timer;
	buf->read_timer.expires = jiffies + chan->read_timer_interval;
	buf->read_timer.data = (unsigned long)buf;

	/* Per-cpu buffers get their timer bound to their own CPU. */
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		add_timer_on(&buf->read_timer, buf->backend.cpu);
	else
		add_timer(&buf->read_timer);
	buf->read_timer_enabled = 1;
}
356
/*
 * Called with ring_buffer_nohz_lock held for per-cpu buffers.
 *
 * Disarms the polling read timer, then performs one final deliver check so
 * readers are not left sleeping on data written during the last period.
 */
static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
	    || !chan->read_timer_interval
	    || !buf->read_timer_enabled)
		return;

	del_timer_sync(&buf->read_timer);
	/*
	 * do one more check to catch data that has been written in the last
	 * timer period.
	 */
	if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
		wake_up_interruptible(&buf->read_wait);
		wake_up_interruptible(&chan->read_wait);
	}
	buf->read_timer_enabled = 0;
}
381
382#ifdef CONFIG_HOTPLUG_CPU
383/**
384 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
385 * @nb: notifier block
386 * @action: hotplug action to take
387 * @hcpu: CPU number
388 *
389 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
390 *
 * Starts/stops the per-cpu switch and read timers as CPUs come and go,
 * and flushes the buffer of a CPU that has gone fully offline.
391 */
392static
393int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
394					      unsigned long action,
395					      void *hcpu)
396{
397	unsigned int cpu = (unsigned long)hcpu;
398	struct channel *chan = container_of(nb, struct channel,
399					    cpu_hp_notifier);
400	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
401	const struct lib_ring_buffer_config *config = chan->backend.config;
402
	/* Channel not yet fully initialized (or being torn down). */
403	if (!chan->cpu_hp_enable)
404		return NOTIFY_DONE;
405
	/* Hotplug notifiers are only registered for per-cpu channels. */
406	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
407
408	switch (action) {
409	case CPU_DOWN_FAILED:
410	case CPU_DOWN_FAILED_FROZEN:
411	case CPU_ONLINE:
412	case CPU_ONLINE_FROZEN:
413		wake_up_interruptible(&chan->hp_wait);
414		lib_ring_buffer_start_switch_timer(buf);
415		lib_ring_buffer_start_read_timer(buf);
416		return NOTIFY_OK;
417
418	case CPU_DOWN_PREPARE:
419	case CPU_DOWN_PREPARE_FROZEN:
420		lib_ring_buffer_stop_switch_timer(buf);
421		lib_ring_buffer_stop_read_timer(buf);
422		return NOTIFY_OK;
423
424	case CPU_DEAD:
425	case CPU_DEAD_FROZEN:
426		/*
427		 * Performing a buffer switch on a remote CPU. Performed by
428		 * the CPU responsible for doing the hotunplug after the target
429		 * CPU stopped running completely. Ensures that all data
430		 * from that remote CPU is flushed.
431		 */
432		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
433		return NOTIFY_OK;
434
435	default:
436		return NOTIFY_DONE;
437	}
438}
439#endif
439
440#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
441/*
442 * For per-cpu buffers, call the reader wakeups before switching the buffer, so
443 * that wake-up-tracing generated events are flushed before going idle (in
444 * tick_nohz). We test if the spinlock is locked to deal with the race where
445 * readers try to sample the ring buffer before we perform the switch. We let
446 * the readers retry in that case. If there is data in the buffer, the wake up
447 * is going to forbid the CPU running the reader thread from going idle.
448 */
449static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
450						  unsigned long val,
451						  void *data)
452{
453	struct channel *chan = container_of(nb, struct channel,
454					    tick_nohz_notifier);
455	const struct lib_ring_buffer_config *config = chan->backend.config;
456	struct lib_ring_buffer *buf;
457	int cpu = smp_processor_id();
458
459	if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
460		/*
461		 * We don't support keeping the system idle with global buffers
462		 * and streaming active. In order to do so, we would need to
463		 * sample a non-nohz-cpumask racelessly with the nohz updates
464		 * without adding synchronization overhead to nohz. Leave this
465		 * use-case out for now.
466		 */
467		return 0;
468	}
469
470	buf = channel_get_ring_buffer(config, chan, cpu);
471	switch (val) {
472	case TICK_NOHZ_FLUSH:
		/* Wake readers and flush pending data before the CPU idles. */
473		raw_spin_lock(&buf->raw_tick_nohz_spinlock);
474		if (config->wakeup == RING_BUFFER_WAKEUP_BY_TIMER
475		    && chan->read_timer_interval
476		    && atomic_long_read(&buf->active_readers)
477		    && (lib_ring_buffer_poll_deliver(config, buf, chan)
478			|| lib_ring_buffer_pending_data(config, buf, chan))) {
479			wake_up_interruptible(&buf->read_wait);
480			wake_up_interruptible(&chan->read_wait);
481		}
482		if (chan->switch_timer_interval)
483			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
484		raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
485		break;
486	case TICK_NOHZ_STOP:
		/* Quiesce the deferrable timers while the tick is stopped. */
487		spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
488		lib_ring_buffer_stop_switch_timer(buf);
489		lib_ring_buffer_stop_read_timer(buf);
490		spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
491		break;
492	case TICK_NOHZ_RESTART:
		/* Tick resumed: re-arm both timers. */
493		spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
494		lib_ring_buffer_start_read_timer(buf);
495		lib_ring_buffer_start_switch_timer(buf);
496		spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
497		break;
498	}
499
500	return 0;
501}
502
/* Notify all per-cpu channels to flush before this CPU goes idle. */
503void notrace lib_ring_buffer_tick_nohz_flush(void)
504{
505	atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_FLUSH,
506				   NULL);
507}
508
/* Notify all per-cpu channels that the periodic tick is stopping. */
509void notrace lib_ring_buffer_tick_nohz_stop(void)
510{
511	atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_STOP,
512				   NULL);
513}
514
/* Notify all per-cpu channels that the periodic tick has restarted. */
515void notrace lib_ring_buffer_tick_nohz_restart(void)
516{
517	atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_RESTART,
518				   NULL);
519}
520#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
521
522/*
 * channel_unregister_notifiers - detach all notifiers and stop all timers.
 *
523 * Holds CPU hotplug.
 *
 * Teardown order matters: the nohz notifier must go first so nothing can
 * re-arm timers concurrently, then timers are stopped under hotplug
 * protection, then the hotplug notifier itself is removed.
524 */
525static void channel_unregister_notifiers(struct channel *chan)
526{
527	const struct lib_ring_buffer_config *config = chan->backend.config;
528	int cpu;
529
530	channel_iterator_unregister_notifiers(chan);
531	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
532#ifdef CONFIG_NO_HZ
533		/*
534		 * Remove the nohz notifier first, so we are certain we stop
535		 * the timers.
536		 */
537		atomic_notifier_chain_unregister(&tick_nohz_notifier,
538						 &chan->tick_nohz_notifier);
539		/*
540		 * ring_buffer_nohz_lock will not be needed below, because
541		 * we just removed the notifiers, which were the only source of
542		 * concurrency.
543		 */
544#endif /* CONFIG_NO_HZ */
545#ifdef CONFIG_HOTPLUG_CPU
546		get_online_cpus();
		/* Disable the hp callback before stopping the timers. */
547		chan->cpu_hp_enable = 0;
548		for_each_online_cpu(cpu) {
549			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
550							      cpu);
551			lib_ring_buffer_stop_switch_timer(buf);
552			lib_ring_buffer_stop_read_timer(buf);
553		}
554		put_online_cpus();
555		unregister_cpu_notifier(&chan->cpu_hp_notifier);
556#else
		/* No hotplug: timers were started on all possible cpus. */
557		for_each_possible_cpu(cpu) {
558			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
559							      cpu);
560			lib_ring_buffer_stop_switch_timer(buf);
561			lib_ring_buffer_stop_read_timer(buf);
562		}
563#endif
564	} else {
565		struct lib_ring_buffer *buf = chan->backend.buf;
566
567		lib_ring_buffer_stop_switch_timer(buf);
568		lib_ring_buffer_stop_read_timer(buf);
569	}
570	channel_backend_unregister_notifiers(&chan->backend);
571}
572
/*
 * channel_free - release all channel resources.
 *
 * Order matters: iterator state first, then the backend (buffers),
 * then the channel structure itself.
 */
573static void channel_free(struct channel *chan)
574{
575	channel_iterator_free(chan);
576	channel_backend_free(&chan->backend);
577	kfree(chan);
578}
579
580/**
581 * channel_create - Create channel.
582 * @config: ring buffer instance configuration
583 * @name: name of the channel
584 * @priv: ring buffer client private data
585 * @buf_addr: pointer the the beginning of the preallocated buffer contiguous
586 *            address mapping. It is used only by RING_BUFFER_STATIC
587 *            configuration. It can be set to NULL for other backends.
588 * @subbuf_size: subbuffer size
589 * @num_subbuf: number of subbuffers
590 * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
591 *                         padding to let readers get those sub-buffers.
592 *                         Used for live streaming.
593 * @read_timer_interval: Time interval (in us) to wake up pending readers.
594 *
595 * Holds cpu hotplug.
596 * Returns NULL on failure.
597 */
598struct channel *channel_create(const struct lib_ring_buffer_config *config,
599		   const char *name, void *priv, void *buf_addr,
600		   size_t subbuf_size,
601		   size_t num_subbuf, unsigned int switch_timer_interval,
602		   unsigned int read_timer_interval)
603{
604	int ret, cpu;
605	struct channel *chan;
606
	/* Validate client configuration before allocating anything. */
607	if (lib_ring_buffer_check_config(config, switch_timer_interval,
608					 read_timer_interval))
609		return NULL;
610
611	chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
612	if (!chan)
613		return NULL;
614
615	ret = channel_backend_init(&chan->backend, name, config, priv,
616				   subbuf_size, num_subbuf);
617	if (ret)
618		goto error;
619
620	ret = channel_iterator_init(chan);
621	if (ret)
622		goto error_free_backend;
623
624	chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
	/* Timer intervals are supplied in us, stored in jiffies. */
625	chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
626	chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
627	kref_init(&chan->ref);
628	init_waitqueue_head(&chan->read_wait);
629	init_waitqueue_head(&chan->hp_wait);
630
631	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
632#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
633		/* Only benefit from NO_HZ idle with per-cpu buffers for now. */
634		chan->tick_nohz_notifier.notifier_call =
635			ring_buffer_tick_nohz_callback;
636		chan->tick_nohz_notifier.priority = ~0U;
637		atomic_notifier_chain_register(&tick_nohz_notifier,
638				       &chan->tick_nohz_notifier);
639#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
640
641		/*
642		 * In case of non-hotplug cpu, if the ring-buffer is allocated
643		 * in early initcall, it will not be notified of secondary cpus.
644		 * In that off case, we need to allocate for all possible cpus.
645		 */
646#ifdef CONFIG_HOTPLUG_CPU
		/* Register the notifier before starting timers, then enable it. */
647		chan->cpu_hp_notifier.notifier_call =
648				lib_ring_buffer_cpu_hp_callback;
649		chan->cpu_hp_notifier.priority = 6;
650		register_cpu_notifier(&chan->cpu_hp_notifier);
651
652		get_online_cpus();
653		for_each_online_cpu(cpu) {
654			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
655							       cpu);
656			spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
657			lib_ring_buffer_start_switch_timer(buf);
658			lib_ring_buffer_start_read_timer(buf);
659			spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
660		}
661		chan->cpu_hp_enable = 1;
662		put_online_cpus();
663#else
664		for_each_possible_cpu(cpu) {
665			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
666							       cpu);
667			spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
668			lib_ring_buffer_start_switch_timer(buf);
669			lib_ring_buffer_start_read_timer(buf);
670			spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
671		}
672#endif
673	} else {
674		struct lib_ring_buffer *buf = chan->backend.buf;
675
676		lib_ring_buffer_start_switch_timer(buf);
677		lib_ring_buffer_start_read_timer(buf);
678	}
679
680	return chan;
681
682error_free_backend:
683	channel_backend_free(&chan->backend);
684error:
685	kfree(chan);
686	return NULL;
687}
688EXPORT_SYMBOL_GPL(channel_create);
689
/* kref release callback: frees the channel once the last ref is dropped. */
690static
691void channel_release(struct kref *kref)
692{
693	struct channel *chan = container_of(kref, struct channel, ref);
694	channel_free(chan);
695}
696
697/**
698 * channel_destroy - Finalize, wait for q.s. and destroy channel.
699 * @chan: channel to destroy
700 *
701 * Holds cpu hotplug.
702 * Call "destroy" callback, finalize channels, and then decrement the
703 * channel reference count. Note that when readers have completed data
704 * consumption of finalized channels, get_subbuf() will return -ENODATA.
705 * They should release their handle at that point. Returns the private
706 * data pointer.
707 */
708void *channel_destroy(struct channel *chan)
709{
710	int cpu;
711	const struct lib_ring_buffer_config *config = chan->backend.config;
712	void *priv;
713
714	channel_unregister_notifiers(chan);
715
716	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
717		/*
718		 * No need to hold cpu hotplug, because all notifiers have been
719		 * unregistered.
720		 */
721		for_each_channel_cpu(cpu, chan) {
722			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
723							      cpu);
724
725			if (config->cb.buffer_finalize)
726				config->cb.buffer_finalize(buf,
727							   chan->backend.priv,
728							   cpu);
			/* Flush the remaining data out of the buffer. */
729			if (buf->backend.allocated)
730				lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
731			/*
732			 * Perform flush before writing to finalized.
733			 */
734			smp_wmb();
735			ACCESS_ONCE(buf->finalized) = 1;
736			wake_up_interruptible(&buf->read_wait);
737		}
738	} else {
739		struct lib_ring_buffer *buf = chan->backend.buf;
740
741		if (config->cb.buffer_finalize)
742			config->cb.buffer_finalize(buf, chan->backend.priv, -1);
743		if (buf->backend.allocated)
744			lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
745		/*
746		 * Perform flush before writing to finalized.
747		 */
748		smp_wmb();
749		ACCESS_ONCE(buf->finalized) = 1;
750		wake_up_interruptible(&buf->read_wait);
751	}
752	ACCESS_ONCE(chan->finalized) = 1;
753	wake_up_interruptible(&chan->hp_wait);
754	wake_up_interruptible(&chan->read_wait);
	/* Grab priv before dropping our ref: the kref_put may free chan. */
755	priv = chan->backend.priv;
756	kref_put(&chan->ref, channel_release);
757	return priv;
758}
759EXPORT_SYMBOL_GPL(channel_destroy);
760
761struct lib_ring_buffer *channel_get_ring_buffer(
762 const struct lib_ring_buffer_config *config,
763 struct channel *chan, int cpu)
764{
765 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
766 return chan->backend.buf;
767 else
768 return per_cpu_ptr(chan->backend.buf, cpu);
769}
770EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
771
/*
 * lib_ring_buffer_open_read - acquire exclusive reader access to a buffer.
 *
 * Returns -EBUSY if another reader is already active; takes a channel
 * reference on success so the channel outlives the reader.
 */
772int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
773{
774	struct channel *chan = buf->backend.chan;
775
	/* Only one concurrent reader allowed: 0 -> 1 transition or fail. */
776	if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
777		return -EBUSY;
778	kref_get(&chan->ref);
	/* Order the reader-count increment before subsequent reads. */
779	smp_mb__after_atomic_inc();
780	return 0;
781}
782EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
783
/*
 * lib_ring_buffer_release_read - drop exclusive reader access.
 *
 * Counterpart of lib_ring_buffer_open_read(); releases the channel ref.
 */
784void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
785{
786	struct channel *chan = buf->backend.chan;
787
788	CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
	/* Order prior reads before publishing the reader-count decrement. */
789	smp_mb__before_atomic_dec();
790	atomic_long_dec(&buf->active_readers);
791	kref_put(&chan->ref, channel_release);
792}
793EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
794
795/*
796 * Promote compiler barrier to a smp_mb().
797 * For the specific ring buffer case, this IPI call should be removed if the
798 * architecture does not reorder writes. This should eventually be provided by
799 * a separate architecture-specific infrastructure.
 *
 * Executed on a remote CPU via smp_call_function*(); @info is unused.
800 */
801static void remote_mb(void *info)
802{
803	smp_mb();
804}
805
806/**
807 * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
808 * @buf: ring buffer
809 * @consumed: consumed count indicating the position where to read
810 * @produced: produced count, indicates position when to stop reading
811 *
812 * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
813 * data to read at consumed position, or 0 if the get operation succeeds.
814 * Busy-loop trying to get data if the tick_nohz sequence lock is held.
815 */
816
817int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
818			     unsigned long *consumed, unsigned long *produced)
819{
820	struct channel *chan = buf->backend.chan;
821	const struct lib_ring_buffer_config *config = chan->backend.config;
822	unsigned long consumed_cur, write_offset;
823	int finalized;
824
825retry:
826	finalized = ACCESS_ONCE(buf->finalized);
827	/*
828	 * Read finalized before counters.
829	 */
830	smp_rmb();
831	consumed_cur = atomic_long_read(&buf->consumed);
832	/*
833	 * No need to issue a memory barrier between consumed count read and
834	 * write offset read, because consumed count can only change
835	 * concurrently in overwrite mode, and we keep a sequence counter
836	 * identifier derived from the write offset to check we are getting
837	 * the same sub-buffer we are expecting (the sub-buffers are atomically
838	 * "tagged" upon writes, tags are checked upon read).
839	 */
840	write_offset = v_read(config, &buf->offset);
841
842	/*
843	 * Check that we are not about to read the same subbuffer in
844	 * which the writer head is.
845	 */
846	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
847	    == 0)
848		goto nodata;
849
	/* Report the snapshot window [consumed, produced) to the caller. */
850	*consumed = consumed_cur;
851	*produced = subbuf_trunc(write_offset, chan);
852
853	return 0;
854
855nodata:
856	/*
857	 * The memory barriers __wait_event()/wake_up_interruptible() take care
858	 * of "raw_spin_is_locked" memory ordering.
859	 */
860	if (finalized)
861		return -ENODATA;
862	else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
863		goto retry;
864	else
865		return -EAGAIN;
866}
867EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
868
869/**
870 * lib_ring_buffer_move_consumer - move consumed counter forward
871 *
872 * Should only be called from consumer context.
873 * @buf: ring buffer
874 * @consumed_new: new consumed count value
875 */
876void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
877				   unsigned long consumed_new)
878{
879	struct lib_ring_buffer_backend *bufb = &buf->backend;
880	struct channel *chan = bufb->chan;
881	unsigned long consumed;
882
883	CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
884
885	/*
886	 * Only push the consumed value forward.
887	 * If the consumed cmpxchg fails, this is because we have been pushed by
888	 * the writer in flight recorder mode.
889	 */
890	consumed = atomic_long_read(&buf->consumed);
	/* Signed comparison handles counter wrap-around correctly. */
891	while ((long) consumed - (long) consumed_new < 0)
892		consumed = atomic_long_cmpxchg(&buf->consumed, consumed,
893					       consumed_new);
894	/* Wake-up the metadata producer */
895	wake_up_interruptible(&buf->write_wait);
896}
897EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
898
899/**
900 * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
901 * @buf: ring buffer
902 * @consumed: consumed count indicating the position where to read
903 *
904 * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
905 * data to read at consumed position, or 0 if the get operation succeeds.
906 * Busy-loop trying to get data if the tick_nohz sequence lock is held.
907 */
908int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
909			       unsigned long consumed)
910{
911	struct channel *chan = buf->backend.chan;
912	const struct lib_ring_buffer_config *config = chan->backend.config;
913	unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
914	int ret;
915	int finalized;
916
917retry:
918	finalized = ACCESS_ONCE(buf->finalized);
919	/*
920	 * Read finalized before counters.
921	 */
922	smp_rmb();
923	consumed_cur = atomic_long_read(&buf->consumed);
924	consumed_idx = subbuf_index(consumed, chan);
925	commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
926	/*
927	 * Make sure we read the commit count before reading the buffer
928	 * data and the write offset. Correct consumed offset ordering
929	 * wrt commit count is insured by the use of cmpxchg to update
930	 * the consumed offset.
931	 * smp_call_function_single can fail if the remote CPU is offline,
932	 * this is OK because then there is no wmb to execute there.
933	 * If our thread is executing on the same CPU as the on the buffers
934	 * belongs to, we don't have to synchronize it at all. If we are
935	 * migrated, the scheduler will take care of the memory barriers.
936	 * Normally, smp_call_function_single() should ensure program order when
937	 * executing the remote function, which implies that it surrounds the
938	 * function execution with :
939	 * smp_mb()
940	 * send IPI
941	 * csd_lock_wait
942	 *                recv IPI
943	 *                smp_mb()
944	 *                exec. function
945	 *                smp_mb()
946	 *                csd unlock
947	 * smp_mb()
948	 *
949	 * However, smp_call_function_single() does not seem to clearly execute
950	 * such barriers. It depends on spinlock semantic to provide the barrier
951	 * before executing the IPI and, when busy-looping, csd_lock_wait only
952	 * executes smp_mb() when it has to wait for the other CPU.
953	 *
954	 * I don't trust this code. Therefore, let's add the smp_mb() sequence
955	 * required ourself, even if duplicated. It has no performance impact
956	 * anyway.
957	 *
958	 * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
959	 * read and write vs write. They do not ensure core synchronization. We
960	 * really have to ensure total order between the 3 barriers running on
961	 * the 2 CPUs.
962	 */
963	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
964		if (config->sync == RING_BUFFER_SYNC_PER_CPU
965		    && config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
966			if (raw_smp_processor_id() != buf->backend.cpu) {
967				/* Total order with IPI handler smp_mb() */
968				smp_mb();
969				smp_call_function_single(buf->backend.cpu,
970						 remote_mb, NULL, 1);
971				/* Total order with IPI handler smp_mb() */
972				smp_mb();
973			}
974		} else {
975			/* Total order with IPI handler smp_mb() */
976			smp_mb();
977			smp_call_function(remote_mb, NULL, 1);
978			/* Total order with IPI handler smp_mb() */
979			smp_mb();
980		}
981	} else {
982		/*
983		 * Local rmb to match the remote wmb to read the commit count
984		 * before the buffer data and the write offset.
985		 */
986		smp_rmb();
987	}
988
989	write_offset = v_read(config, &buf->offset);
990
991	/*
992	 * Check that the buffer we are getting is after or at consumed_cur
993	 * position.
994	 */
995	if ((long) subbuf_trunc(consumed, chan)
996	    - (long) subbuf_trunc(consumed_cur, chan) < 0)
997		goto nodata;
998
999	/*
1000	 * Check that the subbuffer we are trying to consume has been
1001	 * already fully committed.
1002	 */
1003	if (((commit_count - chan->backend.subbuf_size)
1004	     & chan->commit_count_mask)
1005	    - (buf_trunc(consumed_cur, chan)
1006	       >> chan->backend.num_subbuf_order)
1007	    != 0)
1008		goto nodata;
1009
1010	/*
1011	 * Check that we are not about to read the same subbuffer in
1012	 * which the writer head is.
1013	 */
1014	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
1015	    == 0)
1016		goto nodata;
1017
1018	/*
1019	 * Failure to get the subbuffer causes a busy-loop retry without going
1020	 * to a wait queue. These are caused by short-lived race windows where
1021	 * the writer is getting access to a subbuffer we were trying to get
1022	 * access to. Also checks that the "consumed" buffer count we are
1023	 * looking for matches the one contained in the subbuffer id.
1024	 */
1025	ret = update_read_sb_index(config, &buf->backend, &chan->backend,
1026				   consumed_idx, buf_trunc_val(consumed, chan));
1027	if (ret)
1028		goto retry;
1029	subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
1030
	/* Remember what we handed out; checked again by put_subbuf(). */
1031	buf->get_subbuf_consumed = consumed;
1032	buf->get_subbuf = 1;
1033
1034	return 0;
1035
1036nodata:
1037	/*
1038	 * The memory barriers __wait_event()/wake_up_interruptible() take care
1039	 * of "raw_spin_is_locked" memory ordering.
1040	 */
1041	if (finalized)
1042		return -ENODATA;
1043	else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
1044		goto retry;
1045	else
1046		return -EAGAIN;
1047}
1048EXPORT_SYMBOL_GPL(lib_ring_buffer_get_subbuf);
1049
1050/**
1051 * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
1052 * @buf: ring buffer
 *
 * Counterpart of lib_ring_buffer_get_subbuf(); must be called from the
 * same (single) reader context.
1053 */
1054void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
1055{
1056	struct lib_ring_buffer_backend *bufb = &buf->backend;
1057	struct channel *chan = bufb->chan;
1058	const struct lib_ring_buffer_config *config = chan->backend.config;
1059	unsigned long read_sb_bindex, consumed_idx, consumed;
1060
1061	CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
1062
1063	if (!buf->get_subbuf) {
1064		/*
1065		 * Reader puts a subbuffer it did not get.
1066		 */
1067		CHAN_WARN_ON(chan, 1);
1068		return;
1069	}
1070	consumed = buf->get_subbuf_consumed;
1071	buf->get_subbuf = 0;
1072
1073	/*
1074	 * Clear the records_unread counter. (overruns counter)
1075	 * Can still be non-zero if a file reader simply grabbed the data
1076	 * without using iterators.
1077	 * Can be below zero if an iterator is used on a snapshot more than
1078	 * once.
1079	 */
1080	read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
1081	v_add(config, v_read(config,
1082			     &bufb->array[read_sb_bindex]->records_unread),
1083	      &bufb->records_read);
1084	v_set(config, &bufb->array[read_sb_bindex]->records_unread, 0);
1085	CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
1086		     && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
1087	subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
1088
1089	/*
1090	 * Exchange the reader subbuffer with the one we put in its place in the
1091	 * writer subbuffer table. Expect the original consumed count. If
1092	 * update_read_sb_index fails, this is because the writer updated the
1093	 * subbuffer concurrently. We should therefore keep the subbuffer we
1094	 * currently have: it has become invalid to try reading this sub-buffer
1095	 * consumed count value anyway.
1096	 */
1097	consumed_idx = subbuf_index(consumed, chan);
1098	update_read_sb_index(config, &buf->backend, &chan->backend,
1099			     consumed_idx, buf_trunc_val(consumed, chan));
1100	/*
1101	 * update_read_sb_index return value ignored. Don't exchange sub-buffer
1102	 * if the writer concurrently updated it.
1103	 */
1104}
1105EXPORT_SYMBOL_GPL(lib_ring_buffer_put_subbuf);
1106
1107/*
1108 * cons_offset is an iterator on all subbuffer offsets between the reader
1109 * position and the writer position. (inclusive)
 *
 * Reports subbuffers whose commit count is not a multiple of the
 * subbuffer size (lost/partial commits) at teardown time.
1110 */
1111static
1112void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
1113					    struct channel *chan,
1114					    unsigned long cons_offset,
1115					    int cpu)
1116{
1117	const struct lib_ring_buffer_config *config = chan->backend.config;
1118	unsigned long cons_idx, commit_count, commit_count_sb;
1119
1120	cons_idx = subbuf_index(cons_offset, chan);
1121	commit_count = v_read(config, &buf->commit_hot[cons_idx].cc);
1122	commit_count_sb = v_read(config, &buf->commit_cold[cons_idx].cc_sb);
1123
	/* A non-zero remainder means a commit did not complete. */
1124	if (subbuf_offset(commit_count, chan) != 0)
1125		printk(KERN_WARNING
1126		       "ring buffer %s, cpu %d: "
1127		       "commit count in subbuffer %lu,\n"
1128		       "expecting multiples of %lu bytes\n"
1129		       "  [ %lu bytes committed, %lu bytes reader-visible ]\n",
1130		       chan->backend.name, cpu, cons_idx,
1131		       chan->backend.subbuf_size,
1132		       commit_count, commit_count_sb);
1133
1134	printk(KERN_DEBUG "ring buffer: %s, cpu %d: %lu bytes committed\n",
1135	       chan->backend.name, cpu, commit_count);
1136}
1137
/*
 * Report non-consumed data at teardown and walk every remaining
 * subbuffer between the consumed and write positions for commit errors.
 * NOTE(review): @priv is currently unused here — kept for interface
 * symmetry with the other print helpers.
 */
1138static
1139void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
1140					 struct channel *chan,
1141					 void *priv, int cpu)
1142{
1143	const struct lib_ring_buffer_config *config = chan->backend.config;
1144	unsigned long write_offset, cons_offset;
1145
1146	/*
1147	 * No need to order commit_count, write_offset and cons_offset reads
1148	 * because we execute at teardown when no more writer nor reader
1149	 * references are left.
1150	 */
1151	write_offset = v_read(config, &buf->offset);
1152	cons_offset = atomic_long_read(&buf->consumed);
1153	if (write_offset != cons_offset)
1154		printk(KERN_DEBUG
1155		       "ring buffer %s, cpu %d: "
1156		       "non-consumed data\n"
1157		       "  [ %lu bytes written, %lu bytes read ]\n",
1158		       chan->backend.name, cpu, write_offset, cons_offset);
1159
	/* Iterate subbuffer-by-subbuffer up to (and including) the writer. */
1160	for (cons_offset = atomic_long_read(&buf->consumed);
1161	     (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
1162				  chan)
1163		     - cons_offset) > 0;
1164	     cons_offset = subbuf_align(cons_offset, chan))
1165		lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
1166						       cpu);
1167}
1168
/*
 * Print per-buffer statistics and lost-record diagnostics at teardown,
 * then delegate to lib_ring_buffer_print_buffer_errors() for the
 * subbuffer-level consistency checks.
 */
1169static
1170void lib_ring_buffer_print_errors(struct channel *chan,
1171				  struct lib_ring_buffer *buf, int cpu)
1172{
1173	const struct lib_ring_buffer_config *config = chan->backend.config;
1174	void *priv = chan->backend.priv;
1175
1176	printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
1177			  "%lu records overrun\n",
1178			  chan->backend.name, cpu,
1179			  v_read(config, &buf->records_count),
1180			  v_read(config, &buf->records_overrun));
1181
	/* Only warn when at least one category of record loss occurred. */
1182	if (v_read(config, &buf->records_lost_full)
1183	    || v_read(config, &buf->records_lost_wrap)
1184	    || v_read(config, &buf->records_lost_big))
1185		printk(KERN_WARNING
1186		       "ring buffer %s, cpu %d: records were lost. Caused by:\n"
1187		       "  [ %lu buffer full, %lu nest buffer wrap-around, "
1188		       "%lu event too big ]\n",
1189		       chan->backend.name, cpu,
1190		       v_read(config, &buf->records_lost_full),
1191		       v_read(config, &buf->records_lost_wrap),
1192		       v_read(config, &buf->records_lost_big));
1193
1194	lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
1195}
1196
1197/*
1198 * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
1199 *
1200 * Only executed when the buffer is finalized, in SWITCH_FLUSH.
1201 */
1202static
1203void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
1204				      struct channel *chan,
1205				      struct switch_offsets *offsets,
1206				      u64 tsc)
1207{
1208	const struct lib_ring_buffer_config *config = chan->backend.config;
1209	unsigned long oldidx = subbuf_index(offsets->old, chan);
1210	unsigned long commit_count;
1211
	/* Write the client's subbuffer header at the old position. */
1212	config->cb.buffer_begin(buf, tsc, oldidx);
1213
1214	/*
1215	 * Order all writes to buffer before the commit count update that will
1216	 * determine that the subbuffer is full.
1217	 */
1218	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
1219		/*
1220		 * Must write slot data before incrementing commit count. This
1221		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
1222		 * by get_subbuf().
1223		 */
1224		barrier();
1225	} else
1226		smp_wmb();
1227	v_add(config, config->cb.subbuffer_header_size(),
1228	      &buf->commit_hot[oldidx].cc);
1229	commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
1230	/* Check if the written buffer has to be delivered */
1231	lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
1232				      commit_count, oldidx);
1233	lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
1234					     offsets->old, commit_count,
1235					     config->cb.subbuffer_header_size());
1236}
1237
1238/*
1239 * lib_ring_buffer_switch_old_end: switch old subbuffer
1240 *
1241 * Note : offset_old should never be 0 here. It is ok, because we never perform
1242 * buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The caller
1243 * increments the offset_old value when doing a SWITCH_FLUSH on an empty
1244 * subbuffer.
1245 */
1246static
1247void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
1248				    struct channel *chan,
1249				    struct switch_offsets *offsets,
1250				    u64 tsc)
1251{
1252	const struct lib_ring_buffer_config *config = chan->backend.config;
1253	unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
1254	unsigned long commit_count, padding_size, data_size;
1255
	/* The remainder of the old subbuffer is accounted as padding. */
1256	data_size = subbuf_offset(offsets->old - 1, chan) + 1;
1257	padding_size = chan->backend.subbuf_size - data_size;
1258	subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
1259
1260	/*
1261	 * Order all writes to buffer before the commit count update that will
1262	 * determine that the subbuffer is full.
1263	 */
1264	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
1265		/*
1266		 * Must write slot data before incrementing commit count. This
1267		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
1268		 * by get_subbuf().
1269		 */
1270		barrier();
1271	} else
1272		smp_wmb();
1273	v_add(config, padding_size, &buf->commit_hot[oldidx].cc);
1274	commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
1275	lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
1276				      commit_count, oldidx);
1277	lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
1278					     offsets->old, commit_count,
1279					     padding_size);
1280}
1281
1282/*
1283 * lib_ring_buffer_switch_new_start: Populate new subbuffer.
1284 *
1285 * This code can be executed unordered : writers may already have written to the
1286 * sub-buffer before this code gets executed, caution. The commit makes sure
1287 * that this code is executed before the deliver of this sub-buffer.
1288 */
1289static
1290void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
1291				      struct channel *chan,
1292				      struct switch_offsets *offsets,
1293				      u64 tsc)
1294{
1295	const struct lib_ring_buffer_config *config = chan->backend.config;
1296	unsigned long beginidx = subbuf_index(offsets->begin, chan);
1297	unsigned long commit_count;
1298
	/* Write the client's subbuffer header at the new position. */
1299	config->cb.buffer_begin(buf, tsc, beginidx);
1300
1301	/*
1302	 * Order all writes to buffer before the commit count update that will
1303	 * determine that the subbuffer is full.
1304	 */
1305	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
1306		/*
1307		 * Must write slot data before incrementing commit count. This
1308		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
1309		 * by get_subbuf().
1310		 */
1311		barrier();
1312	} else
1313		smp_wmb();
1314	v_add(config, config->cb.subbuffer_header_size(),
1315	      &buf->commit_hot[beginidx].cc);
1316	commit_count = v_read(config, &buf->commit_hot[beginidx].cc);
1317	/* Check if the written buffer has to be delivered */
1318	lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
1319				      commit_count, beginidx);
1320	lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
1321					     offsets->begin, commit_count,
1322					     config->cb.subbuffer_header_size());
1323}
1324
1325/*
1326 * lib_ring_buffer_switch_new_end: finish switching current subbuffer
1327 *
1328 * The only remaining threads could be the ones with pending commits. They will
1329 * have to do the deliver themselves.
1330 */
1331static
1332void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
1333 struct channel *chan,
1334 struct switch_offsets *offsets,
1335 u64 tsc)
1336{
1337 const struct lib_ring_buffer_config *config = chan->backend.config;
1338 unsigned long endidx = subbuf_index(offsets->end - 1, chan);
1339 unsigned long commit_count, padding_size, data_size;
1340
1341 data_size = subbuf_offset(offsets->end - 1, chan) + 1;
1342 padding_size = chan->backend.subbuf_size - data_size;
1343 subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
1344
1345 /*
1346 * Order all writes to buffer before the commit count update that will
1347 * determine that the subbuffer is full.
1348 */
1349 if (config->ipi == RING_BUFFER_IPI_BARRIER) {
1350 /*
1351 * Must write slot data before incrementing commit count. This
1352 * compiler barrier is upgraded into a smp_mb() by the IPI sent
1353 * by get_subbuf().
1354 */
1355 barrier();
1356 } else
1357 smp_wmb();
1358 v_add(config, padding_size, &buf->commit_hot[endidx].cc);
1359 commit_count = v_read(config, &buf->commit_hot[endidx].cc);
1360 lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
1361 commit_count, endidx);
1362 lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
1363 offsets->end, commit_count,
1364 padding_size);
1365}
1366
1367/*
1368 * Returns :
1369 * 0 if ok
1370 * !0 if execution must be aborted.
1371 */
1372static
1373int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
1374 struct lib_ring_buffer *buf,
1375 struct channel *chan,
1376 struct switch_offsets *offsets,
1377 u64 *tsc)
1378{
1379 const struct lib_ring_buffer_config *config = chan->backend.config;
1380 unsigned long off;
1381
1382 offsets->begin = v_read(config, &buf->offset);
1383 offsets->old = offsets->begin;
1384 offsets->switch_old_start = 0;
1385 off = subbuf_offset(offsets->begin, chan);
1386
1387 *tsc = config->cb.ring_buffer_clock_read(chan);
1388
1389 /*
1390 * Ensure we flush the header of an empty subbuffer when doing the
1391 * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
1392 * total data gathering duration even if there were no records saved
1393 * after the last buffer switch.
1394 * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
1395 * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
1396 * subbuffer header as appropriate.
1397 * The next record that reserves space will be responsible for
1398 * populating the following subbuffer header. We choose not to populate
1399 * the next subbuffer header here because we want to be able to use
1400 * SWITCH_ACTIVE for periodical buffer flush and CPU tick_nohz stop
1401 * buffer flush, which must guarantee that all the buffer content
1402 * (records and header timestamps) are visible to the reader. This is
1403 * required for quiescence guarantees for the fusion merge.
1404 */
1405 if (mode == SWITCH_FLUSH || off > 0) {
1406 if (unlikely(off == 0)) {
1407 /*
1408 * The client does not save any header information.
1409 * Don't switch empty subbuffer on finalize, because it
1410 * is invalid to deliver a completely empty subbuffer.
1411 */
1412 if (!config->cb.subbuffer_header_size())
1413 return -1;
1414 /*
1415 * Need to write the subbuffer start header on finalize.
1416 */
1417 offsets->switch_old_start = 1;
1418 }
1419 offsets->begin = subbuf_align(offsets->begin, chan);
1420 } else
1421 return -1; /* we do not have to switch : buffer is empty */
1422 /* Note: old points to the next subbuf at offset 0 */
1423 offsets->end = offsets->begin;
1424 return 0;
1425}
1426
1427/*
1428 * Force a sub-buffer switch. This operation is completely reentrant : can be
1429 * called while tracing is active with absolutely no lock held.
1430 *
1431 * Note, however, that as a v_cmpxchg is used for some atomic
1432 * operations, this function must be called from the CPU which owns the buffer
1433 * for a ACTIVE flush.
1434 */
1435void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
1436{
1437 struct channel *chan = buf->backend.chan;
1438 const struct lib_ring_buffer_config *config = chan->backend.config;
1439 struct switch_offsets offsets;
1440 unsigned long oldidx;
1441 u64 tsc;
1442
1443 offsets.size = 0;
1444
1445 /*
1446 * Perform retryable operations.
1447 */
1448 do {
1449 if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
1450 &tsc))
1451 return; /* Switch not needed */
1452 } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
1453 != offsets.old);
1454
1455 /*
1456 * Atomically update last_tsc. This update races against concurrent
1457 * atomic updates, but the race will always cause supplementary full TSC
1458 * records, never the opposite (missing a full TSC record when it would
1459 * be needed).
1460 */
1461 save_last_tsc(config, buf, tsc);
1462
1463 /*
1464 * Push the reader if necessary
1465 */
1466 lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
1467
1468 oldidx = subbuf_index(offsets.old, chan);
1469 lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
1470
1471 /*
1472 * May need to populate header start on SWITCH_FLUSH.
1473 */
1474 if (offsets.switch_old_start) {
1475 lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
1476 offsets.old += config->cb.subbuffer_header_size();
1477 }
1478
1479 /*
1480 * Switch old subbuffer.
1481 */
1482 lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
1483}
1484EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
1485
1486/*
1487 * Returns :
1488 * 0 if ok
1489 * -ENOSPC if event size is too large for packet.
1490 * -ENOBUFS if there is currently not enough space in buffer for the event.
1491 * -EIO if data cannot be written into the buffer for any other reason.
1492 */
1493static
1494int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
1495 struct channel *chan,
1496 struct switch_offsets *offsets,
1497 struct lib_ring_buffer_ctx *ctx)
1498{
1499 const struct lib_ring_buffer_config *config = chan->backend.config;
1500 unsigned long reserve_commit_diff;
1501
1502 offsets->begin = v_read(config, &buf->offset);
1503 offsets->old = offsets->begin;
1504 offsets->switch_new_start = 0;
1505 offsets->switch_new_end = 0;
1506 offsets->switch_old_end = 0;
1507 offsets->pre_header_padding = 0;
1508
1509 ctx->tsc = config->cb.ring_buffer_clock_read(chan);
1510 if ((int64_t) ctx->tsc == -EIO)
1511 return -EIO;
1512
1513 if (last_tsc_overflow(config, buf, ctx->tsc))
1514 ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
1515
1516 if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
1517 offsets->switch_new_start = 1; /* For offsets->begin */
1518 } else {
1519 offsets->size = config->cb.record_header_size(config, chan,
1520 offsets->begin,
1521 &offsets->pre_header_padding,
1522 ctx);
1523 offsets->size +=
1524 lib_ring_buffer_align(offsets->begin + offsets->size,
1525 ctx->largest_align)
1526 + ctx->data_size;
1527 if (unlikely(subbuf_offset(offsets->begin, chan) +
1528 offsets->size > chan->backend.subbuf_size)) {
1529 offsets->switch_old_end = 1; /* For offsets->old */
1530 offsets->switch_new_start = 1; /* For offsets->begin */
1531 }
1532 }
1533 if (unlikely(offsets->switch_new_start)) {
1534 unsigned long sb_index;
1535
1536 /*
1537 * We are typically not filling the previous buffer completely.
1538 */
1539 if (likely(offsets->switch_old_end))
1540 offsets->begin = subbuf_align(offsets->begin, chan);
1541 offsets->begin = offsets->begin
1542 + config->cb.subbuffer_header_size();
1543 /* Test new buffer integrity */
1544 sb_index = subbuf_index(offsets->begin, chan);
1545 reserve_commit_diff =
1546 (buf_trunc(offsets->begin, chan)
1547 >> chan->backend.num_subbuf_order)
1548 - ((unsigned long) v_read(config,
1549 &buf->commit_cold[sb_index].cc_sb)
1550 & chan->commit_count_mask);
1551 if (likely(reserve_commit_diff == 0)) {
1552 /* Next subbuffer not being written to. */
1553 if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
1554 subbuf_trunc(offsets->begin, chan)
1555 - subbuf_trunc((unsigned long)
1556 atomic_long_read(&buf->consumed), chan)
1557 >= chan->backend.buf_size)) {
1558 /*
1559 * We do not overwrite non consumed buffers
1560 * and we are full : record is lost.
1561 */
1562 v_inc(config, &buf->records_lost_full);
1563 return -ENOBUFS;
1564 } else {
1565 /*
1566 * Next subbuffer not being written to, and we
1567 * are either in overwrite mode or the buffer is
1568 * not full. It's safe to write in this new
1569 * subbuffer.
1570 */
1571 }
1572 } else {
1573 /*
1574 * Next subbuffer reserve offset does not match the
1575 * commit offset. Drop record in producer-consumer and
1576 * overwrite mode. Caused by either a writer OOPS or too
1577 * many nested writes over a reserve/commit pair.
1578 */
1579 v_inc(config, &buf->records_lost_wrap);
1580 return -EIO;
1581 }
1582 offsets->size =
1583 config->cb.record_header_size(config, chan,
1584 offsets->begin,
1585 &offsets->pre_header_padding,
1586 ctx);
1587 offsets->size +=
1588 lib_ring_buffer_align(offsets->begin + offsets->size,
1589 ctx->largest_align)
1590 + ctx->data_size;
1591 if (unlikely(subbuf_offset(offsets->begin, chan)
1592 + offsets->size > chan->backend.subbuf_size)) {
1593 /*
1594 * Record too big for subbuffers, report error, don't
1595 * complete the sub-buffer switch.
1596 */
1597 v_inc(config, &buf->records_lost_big);
1598 return -ENOSPC;
1599 } else {
1600 /*
1601 * We just made a successful buffer switch and the
1602 * record fits in the new subbuffer. Let's write.
1603 */
1604 }
1605 } else {
1606 /*
1607 * Record fits in the current buffer and we are not on a switch
1608 * boundary. It's safe to write.
1609 */
1610 }
1611 offsets->end = offsets->begin + offsets->size;
1612
1613 if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
1614 /*
1615 * The offset_end will fall at the very beginning of the next
1616 * subbuffer.
1617 */
1618 offsets->switch_new_end = 1; /* For offsets->begin */
1619 }
1620 return 0;
1621}
1622
1623/**
1624 * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
1625 * @ctx: ring buffer context.
1626 *
1627 * Return : -NOBUFS if not enough space, -ENOSPC if event size too large,
1628 * -EIO for other errors, else returns 0.
1629 * It will take care of sub-buffer switching.
1630 */
1631int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
1632{
1633 struct channel *chan = ctx->chan;
1634 const struct lib_ring_buffer_config *config = chan->backend.config;
1635 struct lib_ring_buffer *buf;
1636 struct switch_offsets offsets;
1637 int ret;
1638
1639 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
1640 buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
1641 else
1642 buf = chan->backend.buf;
1643 ctx->buf = buf;
1644
1645 offsets.size = 0;
1646
1647 do {
1648 ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
1649 ctx);
1650 if (unlikely(ret))
1651 return ret;
1652 } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
1653 offsets.end)
1654 != offsets.old));
1655
1656 /*
1657 * Atomically update last_tsc. This update races against concurrent
1658 * atomic updates, but the race will always cause supplementary full TSC
1659 * records, never the opposite (missing a full TSC record when it would
1660 * be needed).
1661 */
1662 save_last_tsc(config, buf, ctx->tsc);
1663
1664 /*
1665 * Push the reader if necessary
1666 */
1667 lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);
1668
1669 /*
1670 * Clear noref flag for this subbuffer.
1671 */
1672 lib_ring_buffer_clear_noref(config, &buf->backend,
1673 subbuf_index(offsets.end - 1, chan));
1674
1675 /*
1676 * Switch old subbuffer if needed.
1677 */
1678 if (unlikely(offsets.switch_old_end)) {
1679 lib_ring_buffer_clear_noref(config, &buf->backend,
1680 subbuf_index(offsets.old - 1, chan));
1681 lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
1682 }
1683
1684 /*
1685 * Populate new subbuffer.
1686 */
1687 if (unlikely(offsets.switch_new_start))
1688 lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
1689
1690 if (unlikely(offsets.switch_new_end))
1691 lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
1692
1693 ctx->slot_size = offsets.size;
1694 ctx->pre_offset = offsets.begin;
1695 ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
1696 return 0;
1697}
1698EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
1699
1700int __init init_lib_ring_buffer_frontend(void)
1701{
1702 int cpu;
1703
1704 for_each_possible_cpu(cpu)
1705 spin_lock_init(&per_cpu(ring_buffer_nohz_lock, cpu));
1706 return 0;
1707}
1708
1709module_init(init_lib_ring_buffer_frontend);
1710
1711void __exit exit_lib_ring_buffer_frontend(void)
1712{
1713}
1714
1715module_exit(exit_lib_ring_buffer_frontend);
diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c
deleted file mode 100644
index 1321b5f965a9..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c
+++ /dev/null
@@ -1,798 +0,0 @@
1/*
2 * ring_buffer_iterator.c
3 *
4 * (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Ring buffer and channel iterators. Get each event of a channel in order. Uses
7 * a prio heap for per-cpu buffers, giving a O(log(NR_CPUS)) algorithmic
8 * complexity for the "get next event" operation.
9 *
10 * Author:
11 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
12 *
13 * Dual LGPL v2.1/GPL v2 license.
14 */
15
16#include "../../wrapper/ringbuffer/iterator.h"
17#include <linux/jiffies.h>
18#include <linux/delay.h>
19#include <linux/module.h>
20
21/*
22 * Safety factor taking into account internal kernel interrupt latency.
23 * Assuming 250ms worse-case latency.
24 */
25#define MAX_SYSTEM_LATENCY 250
26
27/*
28 * Maximum delta expected between trace clocks. At most 1 jiffy delta.
29 */
30#define MAX_CLOCK_DELTA (jiffies_to_usecs(1) * 1000)
31
32/**
33 * lib_ring_buffer_get_next_record - Get the next record in a buffer.
34 * @chan: channel
35 * @buf: buffer
36 *
37 * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
38 * buffer is empty and finalized. The buffer must already be opened for reading.
39 */
40ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
41 struct lib_ring_buffer *buf)
42{
43 const struct lib_ring_buffer_config *config = chan->backend.config;
44 struct lib_ring_buffer_iter *iter = &buf->iter;
45 int ret;
46
47restart:
48 switch (iter->state) {
49 case ITER_GET_SUBBUF:
50 ret = lib_ring_buffer_get_next_subbuf(buf);
51 if (ret && !ACCESS_ONCE(buf->finalized)
52 && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
53 /*
54 * Use "pull" scheme for global buffers. The reader
55 * itself flushes the buffer to "pull" data not visible
56 * to readers yet. Flush current subbuffer and re-try.
57 *
58 * Per-CPU buffers rather use a "push" scheme because
59 * the IPI needed to flush all CPU's buffers is too
60 * costly. In the "push" scheme, the reader waits for
61 * the writer periodic deferrable timer to flush the
62 * buffers (keeping track of a quiescent state
63 * timestamp). Therefore, the writer "pushes" data out
64 * of the buffers rather than letting the reader "pull"
65 * data from the buffer.
66 */
67 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
68 ret = lib_ring_buffer_get_next_subbuf(buf);
69 }
70 if (ret)
71 return ret;
72 iter->consumed = buf->cons_snapshot;
73 iter->data_size = lib_ring_buffer_get_read_data_size(config, buf);
74 iter->read_offset = iter->consumed;
75 /* skip header */
76 iter->read_offset += config->cb.subbuffer_header_size();
77 iter->state = ITER_TEST_RECORD;
78 goto restart;
79 case ITER_TEST_RECORD:
80 if (iter->read_offset - iter->consumed >= iter->data_size) {
81 iter->state = ITER_PUT_SUBBUF;
82 } else {
83 CHAN_WARN_ON(chan, !config->cb.record_get);
84 config->cb.record_get(config, chan, buf,
85 iter->read_offset,
86 &iter->header_len,
87 &iter->payload_len,
88 &iter->timestamp);
89 iter->read_offset += iter->header_len;
90 subbuffer_consume_record(config, &buf->backend);
91 iter->state = ITER_NEXT_RECORD;
92 return iter->payload_len;
93 }
94 goto restart;
95 case ITER_NEXT_RECORD:
96 iter->read_offset += iter->payload_len;
97 iter->state = ITER_TEST_RECORD;
98 goto restart;
99 case ITER_PUT_SUBBUF:
100 lib_ring_buffer_put_next_subbuf(buf);
101 iter->state = ITER_GET_SUBBUF;
102 goto restart;
103 default:
104 CHAN_WARN_ON(chan, 1); /* Should not happen */
105 return -EPERM;
106 }
107}
108EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
109
110static int buf_is_higher(void *a, void *b)
111{
112 struct lib_ring_buffer *bufa = a;
113 struct lib_ring_buffer *bufb = b;
114
115 /* Consider lowest timestamps to be at the top of the heap */
116 return (bufa->iter.timestamp < bufb->iter.timestamp);
117}
118
119static
120void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
121 struct channel *chan)
122{
123 struct lttng_ptr_heap *heap = &chan->iter.heap;
124 struct lib_ring_buffer *buf, *tmp;
125 ssize_t len;
126
127 list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
128 iter.empty_node) {
129 len = lib_ring_buffer_get_next_record(chan, buf);
130
131 /*
132 * Deal with -EAGAIN and -ENODATA.
133 * len >= 0 means record contains data.
134 * -EBUSY should never happen, because we support only one
135 * reader.
136 */
137 switch (len) {
138 case -EAGAIN:
139 /* Keep node in empty list */
140 break;
141 case -ENODATA:
142 /*
143 * Buffer is finalized. Don't add to list of empty
144 * buffer, because it has no more data to provide, ever.
145 */
146 list_del(&buf->iter.empty_node);
147 break;
148 case -EBUSY:
149 CHAN_WARN_ON(chan, 1);
150 break;
151 default:
152 /*
153 * Insert buffer into the heap, remove from empty buffer
154 * list.
155 */
156 CHAN_WARN_ON(chan, len < 0);
157 list_del(&buf->iter.empty_node);
158 CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
159 }
160 }
161}
162
163static
164void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
165 struct channel *chan)
166{
167 u64 timestamp_qs;
168 unsigned long wait_msecs;
169
170 /*
171 * No need to wait if no empty buffers are present.
172 */
173 if (list_empty(&chan->iter.empty_head))
174 return;
175
176 timestamp_qs = config->cb.ring_buffer_clock_read(chan);
177 /*
178 * We need to consider previously empty buffers.
179 * Do a get next buf record on each of them. Add them to
180 * the heap if they have data. If at least one of them
181 * don't have data, we need to wait for
182 * switch_timer_interval + MAX_SYSTEM_LATENCY (so we are sure the
183 * buffers have been switched either by the timer or idle entry) and
184 * check them again, adding them if they have data.
185 */
186 lib_ring_buffer_get_empty_buf_records(config, chan);
187
188 /*
189 * No need to wait if no empty buffers are present.
190 */
191 if (list_empty(&chan->iter.empty_head))
192 return;
193
194 /*
195 * We need to wait for the buffer switch timer to run. If the
196 * CPU is idle, idle entry performed the switch.
197 * TODO: we could optimize further by skipping the sleep if all
198 * empty buffers belong to idle or offline cpus.
199 */
200 wait_msecs = jiffies_to_msecs(chan->switch_timer_interval);
201 wait_msecs += MAX_SYSTEM_LATENCY;
202 msleep(wait_msecs);
203 lib_ring_buffer_get_empty_buf_records(config, chan);
204 /*
205 * Any buffer still in the empty list here cannot possibly
206 * contain an event with a timestamp prior to "timestamp_qs".
207 * The new quiescent state timestamp is the one we grabbed
208 * before waiting for buffer data. It is therefore safe to
209 * ignore empty buffers up to last_qs timestamp for fusion
210 * merge.
211 */
212 chan->iter.last_qs = timestamp_qs;
213}
214
215/**
216 * channel_get_next_record - Get the next record in a channel.
217 * @chan: channel
218 * @ret_buf: the buffer in which the event is located (output)
219 *
220 * Returns the size of new current event, -EAGAIN if all buffers are empty,
221 * -ENODATA if all buffers are empty and finalized. The channel must already be
222 * opened for reading.
223 */
224
225ssize_t channel_get_next_record(struct channel *chan,
226 struct lib_ring_buffer **ret_buf)
227{
228 const struct lib_ring_buffer_config *config = chan->backend.config;
229 struct lib_ring_buffer *buf;
230 struct lttng_ptr_heap *heap;
231 ssize_t len;
232
233 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
234 *ret_buf = channel_get_ring_buffer(config, chan, 0);
235 return lib_ring_buffer_get_next_record(chan, *ret_buf);
236 }
237
238 heap = &chan->iter.heap;
239
240 /*
241 * get next record for topmost buffer.
242 */
243 buf = lttng_heap_maximum(heap);
244 if (buf) {
245 len = lib_ring_buffer_get_next_record(chan, buf);
246 /*
247 * Deal with -EAGAIN and -ENODATA.
248 * len >= 0 means record contains data.
249 */
250 switch (len) {
251 case -EAGAIN:
252 buf->iter.timestamp = 0;
253 list_add(&buf->iter.empty_node, &chan->iter.empty_head);
254 /* Remove topmost buffer from the heap */
255 CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
256 break;
257 case -ENODATA:
258 /*
259 * Buffer is finalized. Remove buffer from heap and
260 * don't add to list of empty buffer, because it has no
261 * more data to provide, ever.
262 */
263 CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
264 break;
265 case -EBUSY:
266 CHAN_WARN_ON(chan, 1);
267 break;
268 default:
269 /*
270 * Reinsert buffer into the heap. Note that heap can be
271 * partially empty, so we need to use
272 * lttng_heap_replace_max().
273 */
274 CHAN_WARN_ON(chan, len < 0);
275 CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
276 break;
277 }
278 }
279
280 buf = lttng_heap_maximum(heap);
281 if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
282 /*
283 * Deal with buffers previously showing no data.
284 * Add buffers containing data to the heap, update
285 * last_qs.
286 */
287 lib_ring_buffer_wait_for_qs(config, chan);
288 }
289
290 *ret_buf = buf = lttng_heap_maximum(heap);
291 if (buf) {
292 /*
293 * If this warning triggers, you probably need to check your
294 * system interrupt latency. Typical causes: too many printk()
295 * output going to a serial console with interrupts off.
296 * Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
297 * Observed on SMP KVM setups with trace_clock().
298 */
299 if (chan->iter.last_timestamp
300 > (buf->iter.timestamp + MAX_CLOCK_DELTA)) {
301 printk(KERN_WARNING "ring_buffer: timestamps going "
302 "backward. Last time %llu ns, cpu %d, "
303 "current time %llu ns, cpu %d, "
304 "delta %llu ns.\n",
305 chan->iter.last_timestamp, chan->iter.last_cpu,
306 buf->iter.timestamp, buf->backend.cpu,
307 chan->iter.last_timestamp - buf->iter.timestamp);
308 CHAN_WARN_ON(chan, 1);
309 }
310 chan->iter.last_timestamp = buf->iter.timestamp;
311 chan->iter.last_cpu = buf->backend.cpu;
312 return buf->iter.payload_len;
313 } else {
314 /* Heap is empty */
315 if (list_empty(&chan->iter.empty_head))
316 return -ENODATA; /* All buffers finalized */
317 else
318 return -EAGAIN; /* Temporarily empty */
319 }
320}
321EXPORT_SYMBOL_GPL(channel_get_next_record);
322
323static
324void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
325{
326 if (buf->iter.allocated)
327 return;
328
329 buf->iter.allocated = 1;
330 if (chan->iter.read_open && !buf->iter.read_open) {
331 CHAN_WARN_ON(chan, lib_ring_buffer_open_read(buf) != 0);
332 buf->iter.read_open = 1;
333 }
334
335 /* Add to list of buffers without any current record */
336 if (chan->backend.config->alloc == RING_BUFFER_ALLOC_PER_CPU)
337 list_add(&buf->iter.empty_node, &chan->iter.empty_head);
338}
339
340#ifdef CONFIG_HOTPLUG_CPU
341static
342int __cpuinit channel_iterator_cpu_hotplug(struct notifier_block *nb,
343 unsigned long action,
344 void *hcpu)
345{
346 unsigned int cpu = (unsigned long)hcpu;
347 struct channel *chan = container_of(nb, struct channel,
348 hp_iter_notifier);
349 struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
350 const struct lib_ring_buffer_config *config = chan->backend.config;
351
352 if (!chan->hp_iter_enable)
353 return NOTIFY_DONE;
354
355 CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
356
357 switch (action) {
358 case CPU_DOWN_FAILED:
359 case CPU_DOWN_FAILED_FROZEN:
360 case CPU_ONLINE:
361 case CPU_ONLINE_FROZEN:
362 lib_ring_buffer_iterator_init(chan, buf);
363 return NOTIFY_OK;
364 default:
365 return NOTIFY_DONE;
366 }
367}
368#endif
369
370int channel_iterator_init(struct channel *chan)
371{
372 const struct lib_ring_buffer_config *config = chan->backend.config;
373 struct lib_ring_buffer *buf;
374
375 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
376 int cpu, ret;
377
378 INIT_LIST_HEAD(&chan->iter.empty_head);
379 ret = lttng_heap_init(&chan->iter.heap,
380 num_possible_cpus(),
381 GFP_KERNEL, buf_is_higher);
382 if (ret)
383 return ret;
384 /*
385 * In case of non-hotplug cpu, if the ring-buffer is allocated
386 * in early initcall, it will not be notified of secondary cpus.
387 * In that off case, we need to allocate for all possible cpus.
388 */
389#ifdef CONFIG_HOTPLUG_CPU
390 chan->hp_iter_notifier.notifier_call =
391 channel_iterator_cpu_hotplug;
392 chan->hp_iter_notifier.priority = 10;
393 register_cpu_notifier(&chan->hp_iter_notifier);
394 get_online_cpus();
395 for_each_online_cpu(cpu) {
396 buf = per_cpu_ptr(chan->backend.buf, cpu);
397 lib_ring_buffer_iterator_init(chan, buf);
398 }
399 chan->hp_iter_enable = 1;
400 put_online_cpus();
401#else
402 for_each_possible_cpu(cpu) {
403 buf = per_cpu_ptr(chan->backend.buf, cpu);
404 lib_ring_buffer_iterator_init(chan, buf);
405 }
406#endif
407 } else {
408 buf = channel_get_ring_buffer(config, chan, 0);
409 lib_ring_buffer_iterator_init(chan, buf);
410 }
411 return 0;
412}
413
414void channel_iterator_unregister_notifiers(struct channel *chan)
415{
416 const struct lib_ring_buffer_config *config = chan->backend.config;
417
418 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
419 chan->hp_iter_enable = 0;
420 unregister_cpu_notifier(&chan->hp_iter_notifier);
421 }
422}
423
424void channel_iterator_free(struct channel *chan)
425{
426 const struct lib_ring_buffer_config *config = chan->backend.config;
427
428 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
429 lttng_heap_free(&chan->iter.heap);
430}
431
432int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
433{
434 struct channel *chan = buf->backend.chan;
435 const struct lib_ring_buffer_config *config = chan->backend.config;
436 CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
437 return lib_ring_buffer_open_read(buf);
438}
439EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);
440
441/*
442 * Note: Iterators must not be mixed with other types of outputs, because an
443 * iterator can leave the buffer in "GET" state, which is not consistent with
444 * other types of output (mmap, splice, raw data read).
445 */
446void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
447{
448 lib_ring_buffer_release_read(buf);
449}
450EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
451
452int channel_iterator_open(struct channel *chan)
453{
454 const struct lib_ring_buffer_config *config = chan->backend.config;
455 struct lib_ring_buffer *buf;
456 int ret = 0, cpu;
457
458 CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
459
460 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
461 get_online_cpus();
462 /* Allow CPU hotplug to keep track of opened reader */
463 chan->iter.read_open = 1;
464 for_each_channel_cpu(cpu, chan) {
465 buf = channel_get_ring_buffer(config, chan, cpu);
466 ret = lib_ring_buffer_iterator_open(buf);
467 if (ret)
468 goto error;
469 buf->iter.read_open = 1;
470 }
471 put_online_cpus();
472 } else {
473 buf = channel_get_ring_buffer(config, chan, 0);
474 ret = lib_ring_buffer_iterator_open(buf);
475 }
476 return ret;
477error:
478 /* Error should always happen on CPU 0, hence no close is required. */
479 CHAN_WARN_ON(chan, cpu != 0);
480 put_online_cpus();
481 return ret;
482}
483EXPORT_SYMBOL_GPL(channel_iterator_open);
484
485void channel_iterator_release(struct channel *chan)
486{
487 const struct lib_ring_buffer_config *config = chan->backend.config;
488 struct lib_ring_buffer *buf;
489 int cpu;
490
491 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
492 get_online_cpus();
493 for_each_channel_cpu(cpu, chan) {
494 buf = channel_get_ring_buffer(config, chan, cpu);
495 if (buf->iter.read_open) {
496 lib_ring_buffer_iterator_release(buf);
497 buf->iter.read_open = 0;
498 }
499 }
500 chan->iter.read_open = 0;
501 put_online_cpus();
502 } else {
503 buf = channel_get_ring_buffer(config, chan, 0);
504 lib_ring_buffer_iterator_release(buf);
505 }
506}
507EXPORT_SYMBOL_GPL(channel_iterator_release);
508
509void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
510{
511 struct channel *chan = buf->backend.chan;
512
513 if (buf->iter.state != ITER_GET_SUBBUF)
514 lib_ring_buffer_put_next_subbuf(buf);
515 buf->iter.state = ITER_GET_SUBBUF;
516 /* Remove from heap (if present). */
517 if (lttng_heap_cherrypick(&chan->iter.heap, buf))
518 list_add(&buf->iter.empty_node, &chan->iter.empty_head);
519 buf->iter.timestamp = 0;
520 buf->iter.header_len = 0;
521 buf->iter.payload_len = 0;
522 buf->iter.consumed = 0;
523 buf->iter.read_offset = 0;
524 buf->iter.data_size = 0;
525 /* Don't reset allocated and read_open */
526}
527
528void channel_iterator_reset(struct channel *chan)
529{
530 const struct lib_ring_buffer_config *config = chan->backend.config;
531 struct lib_ring_buffer *buf;
532 int cpu;
533
534 /* Empty heap, put into empty_head */
535 while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
536 list_add(&buf->iter.empty_node, &chan->iter.empty_head);
537
538 for_each_channel_cpu(cpu, chan) {
539 buf = channel_get_ring_buffer(config, chan, cpu);
540 lib_ring_buffer_iterator_reset(buf);
541 }
542 /* Don't reset read_open */
543 chan->iter.last_qs = 0;
544 chan->iter.last_timestamp = 0;
545 chan->iter.last_cpu = 0;
546 chan->iter.len_left = 0;
547}
548
/*
 * Ring buffer payload extraction read() implementation.
 *
 * channel_ring_buffer_file_read - common read() for buffer/channel payload.
 * @filp: file structure pointer.
 * @user_buf: destination user buffer.
 * @count: maximum number of bytes to read.
 * @ppos: offset within the current record; 0 means "fetch a new record",
 *        non-zero means "resume the record partially copied last call".
 * @chan: channel being read.
 * @buf: ring buffer to read from (superseded by the heap maximum when
 *       @fusionmerge is set on a per-cpu channel).
 * @fusionmerge: non-zero to merge-sort records across all per-cpu buffers.
 *
 * Returns the number of bytes copied, 0 at end of file (finalized buffer
 * with no data left), or a negative error (-EFAULT, -EAGAIN on O_NONBLOCK,
 * or the wait_event_interruptible() error when interrupted by a signal).
 */
static
ssize_t channel_ring_buffer_file_read(struct file *filp,
				      char __user *user_buf,
				      size_t count,
				      loff_t *ppos,
				      struct channel *chan,
				      struct lib_ring_buffer *buf,
				      int fusionmerge)
{
	const struct lib_ring_buffer_config *config = chan->backend.config;
	size_t read_count = 0, read_offset;
	ssize_t len;

	might_sleep();
	if (!access_ok(VERIFY_WRITE, user_buf, count))
		return -EFAULT;

	/* Finish copy of previous record */
	if (*ppos != 0) {
		if (read_count < count) {
			/*
			 * len_left and *ppos were saved by the previous
			 * read() call when the record did not fit.
			 */
			len = chan->iter.len_left;
			read_offset = *ppos;
			if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
			    && fusionmerge)
				/*
				 * The in-progress record is still at the
				 * top of the fusion-merge heap.
				 */
				buf = lttng_heap_maximum(&chan->iter.heap);
			CHAN_WARN_ON(chan, !buf);
			goto skip_get_next;
		}
	}

	while (read_count < count) {
		size_t copy_len, space_left;

		if (fusionmerge)
			len = channel_get_next_record(chan, &buf);
		else
			len = lib_ring_buffer_get_next_record(chan, buf);
len_test:
		if (len < 0) {
			/*
			 * Check if buffer is finalized (end of file).
			 */
			if (len == -ENODATA) {
				/* A 0 read_count will tell about end of file */
				goto nodata;
			}
			if (filp->f_flags & O_NONBLOCK) {
				/* Non-blocking read: never sleep. */
				if (!read_count)
					read_count = -EAGAIN;
				goto nodata;
			} else {
				int error;

				/*
				 * No data available at the moment, return what
				 * we got.
				 */
				if (read_count)
					goto nodata;

				/*
				 * Wait for returned len to be >= 0 or -ENODATA.
				 */
				if (fusionmerge)
					error = wait_event_interruptible(
						  chan->read_wait,
						  ((len = channel_get_next_record(chan,
							&buf)), len != -EAGAIN));
				else
					error = wait_event_interruptible(
						  buf->read_wait,
						  ((len = lib_ring_buffer_get_next_record(
							  chan, buf)), len != -EAGAIN));
				CHAN_WARN_ON(chan, len == -EBUSY);
				if (error) {
					/* Interrupted by a signal. */
					read_count = error;
					goto nodata;
				}
				CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
				goto len_test;
			}
		}
		read_offset = buf->iter.read_offset;
skip_get_next:
		space_left = count - read_count;
		if (len <= space_left) {
			/* Whole record fits: clear the partial-record state. */
			copy_len = len;
			chan->iter.len_left = 0;
			*ppos = 0;
		} else {
			/* Partial copy: record where to resume next read(). */
			copy_len = space_left;
			chan->iter.len_left = len - copy_len;
			*ppos = read_offset + copy_len;
		}
		if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
						   &user_buf[read_count],
						   copy_len)) {
			/*
			 * Leave the len_left and ppos values at their current
			 * state, as we currently have a valid event to read.
			 */
			return -EFAULT;
		}
		read_count += copy_len;
	};
	return read_count;

nodata:
	*ppos = 0;
	chan->iter.len_left = 0;
	return read_count;
}
664
665/**
666 * lib_ring_buffer_file_read - Read buffer record payload.
667 * @filp: file structure pointer.
668 * @buffer: user buffer to read data into.
669 * @count: number of bytes to read.
670 * @ppos: file read position.
671 *
672 * Returns a negative value on error, or the number of bytes read on success.
673 * ppos is used to save the position _within the current record_ between calls
674 * to read().
675 */
676static
677ssize_t lib_ring_buffer_file_read(struct file *filp,
678 char __user *user_buf,
679 size_t count,
680 loff_t *ppos)
681{
682 struct inode *inode = filp->f_dentry->d_inode;
683 struct lib_ring_buffer *buf = inode->i_private;
684 struct channel *chan = buf->backend.chan;
685
686 return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
687 chan, buf, 0);
688}
689
690/**
691 * channel_file_read - Read channel record payload.
692 * @filp: file structure pointer.
693 * @buffer: user buffer to read data into.
694 * @count: number of bytes to read.
695 * @ppos: file read position.
696 *
697 * Returns a negative value on error, or the number of bytes read on success.
698 * ppos is used to save the position _within the current record_ between calls
699 * to read().
700 */
701static
702ssize_t channel_file_read(struct file *filp,
703 char __user *user_buf,
704 size_t count,
705 loff_t *ppos)
706{
707 struct inode *inode = filp->f_dentry->d_inode;
708 struct channel *chan = inode->i_private;
709 const struct lib_ring_buffer_config *config = chan->backend.config;
710
711 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
712 return channel_ring_buffer_file_read(filp, user_buf, count,
713 ppos, chan, NULL, 1);
714 else {
715 struct lib_ring_buffer *buf =
716 channel_get_ring_buffer(config, chan, 0);
717 return channel_ring_buffer_file_read(filp, user_buf, count,
718 ppos, chan, buf, 0);
719 }
720}
721
722static
723int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
724{
725 struct lib_ring_buffer *buf = inode->i_private;
726 int ret;
727
728 ret = lib_ring_buffer_iterator_open(buf);
729 if (ret)
730 return ret;
731
732 file->private_data = buf;
733 ret = nonseekable_open(inode, file);
734 if (ret)
735 goto release_iter;
736 return 0;
737
738release_iter:
739 lib_ring_buffer_iterator_release(buf);
740 return ret;
741}
742
/* Release the per-buffer iterator taken at open time. */
static
int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = inode->i_private;

	lib_ring_buffer_iterator_release(buf);
	return 0;
}
751
752static
753int channel_file_open(struct inode *inode, struct file *file)
754{
755 struct channel *chan = inode->i_private;
756 int ret;
757
758 ret = channel_iterator_open(chan);
759 if (ret)
760 return ret;
761
762 file->private_data = chan;
763 ret = nonseekable_open(inode, file);
764 if (ret)
765 goto release_iter;
766 return 0;
767
768release_iter:
769 channel_iterator_release(chan);
770 return ret;
771}
772
/* Release the channel iterator taken at open time. */
static
int channel_file_release(struct inode *inode, struct file *file)
{
	struct channel *chan = inode->i_private;

	channel_iterator_release(chan);
	return 0;
}
781
/* File operations for the channel-level (fusion-merged) payload reader. */
const struct file_operations channel_payload_file_operations = {
	.owner = THIS_MODULE,
	.open = channel_file_open,
	.release = channel_file_release,
	.read = channel_file_read,
	.llseek = lib_ring_buffer_no_llseek,
};
EXPORT_SYMBOL_GPL(channel_payload_file_operations);
790
/* File operations for the single-buffer payload reader. */
const struct file_operations lib_ring_buffer_payload_file_operations = {
	.owner = THIS_MODULE,
	.open = lib_ring_buffer_file_open,
	.release = lib_ring_buffer_file_release,
	.read = lib_ring_buffer_file_read,
	.llseek = lib_ring_buffer_no_llseek,
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
deleted file mode 100644
index c9d6e89a7695..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
+++ /dev/null
@@ -1,109 +0,0 @@
1/*
2 * ring_buffer_mmap.c
3 *
4 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
5 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
6 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Re-using content from kernel/relay.c.
9 *
10 * This file is released under the GPL v2.
11 */
12
13#include <linux/module.h>
14#include <linux/mm.h>
15
16#include "../../wrapper/ringbuffer/backend.h"
17#include "../../wrapper/ringbuffer/frontend.h"
18#include "../../wrapper/ringbuffer/vfs.h"
19
/*
 * fault() vm_op implementation for ring buffer file mapping.
 * Only faults within the reader-owned sub-buffer are serviced; anything
 * else gets SIGBUS.
 */
static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lib_ring_buffer *buf = vma->vm_private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;
	pgoff_t pgoff = vmf->pgoff;
	struct page **page;
	void **virt;
	unsigned long offset, sb_bindex;

	/*
	 * Verify that faults are only done on the range of pages owned by the
	 * reader.
	 */
	offset = pgoff << PAGE_SHIFT;
	sb_bindex = subbuffer_id_get_index(config, buf->backend.buf_rsb.id);
	if (!(offset >= buf->backend.array[sb_bindex]->mmap_offset
	      && offset < buf->backend.array[sb_bindex]->mmap_offset +
	      buf->backend.chan->backend.subbuf_size))
		return VM_FAULT_SIGBUS;
	/*
	 * ring_buffer_read_get_page() gets the page in the current reader's
	 * pages.
	 */
	page = lib_ring_buffer_read_get_page(&buf->backend, offset, &virt);
	if (!*page)
		return VM_FAULT_SIGBUS;
	/* Take a page reference; the core MM drops it on unmap. */
	get_page(*page);
	vmf->page = *page;

	return 0;
}
55
/*
 * vm_ops for ring buffer file mappings: pages are faulted in on demand
 * through lib_ring_buffer_fault().
 */
static const struct vm_operations_struct lib_ring_buffer_mmap_ops = {
	.fault = lib_ring_buffer_fault,
};
62
63/**
64 * lib_ring_buffer_mmap_buf: - mmap channel buffer to process address space
65 * @buf: ring buffer to map
66 * @vma: vm_area_struct describing memory to be mapped
67 *
68 * Returns 0 if ok, negative on error
69 *
70 * Caller should already have grabbed mmap_sem.
71 */
72static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
73 struct vm_area_struct *vma)
74{
75 unsigned long length = vma->vm_end - vma->vm_start;
76 struct channel *chan = buf->backend.chan;
77 const struct lib_ring_buffer_config *config = chan->backend.config;
78 unsigned long mmap_buf_len;
79
80 if (config->output != RING_BUFFER_MMAP)
81 return -EINVAL;
82
83 mmap_buf_len = chan->backend.buf_size;
84 if (chan->backend.extra_reader_sb)
85 mmap_buf_len += chan->backend.subbuf_size;
86
87 if (length != mmap_buf_len)
88 return -EINVAL;
89
90 vma->vm_ops = &lib_ring_buffer_mmap_ops;
91 vma->vm_flags |= VM_DONTEXPAND;
92 vma->vm_private_data = buf;
93
94 return 0;
95}
96
97/**
98 * lib_ring_buffer_mmap - mmap file op
99 * @filp: the file
100 * @vma: the vma describing what to map
101 *
102 * Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
103 */
104int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
105{
106 struct lib_ring_buffer *buf = filp->private_data;
107 return lib_ring_buffer_mmap_buf(buf, vma);
108}
109EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);
diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c
deleted file mode 100644
index ded18ba80fc1..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c
+++ /dev/null
@@ -1,202 +0,0 @@
1/*
2 * ring_buffer_splice.c
3 *
4 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
5 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
6 * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Re-using content from kernel/relay.c.
9 *
10 * This file is released under the GPL v2.
11 */
12
13#include <linux/module.h>
14#include <linux/fs.h>
15
16#include "../../wrapper/splice.h"
17#include "../../wrapper/ringbuffer/backend.h"
18#include "../../wrapper/ringbuffer/frontend.h"
19#include "../../wrapper/ringbuffer/vfs.h"
20
21#if 0
22#define printk_dbg(fmt, args...) printk(fmt, args)
23#else
24#define printk_dbg(fmt, args...)
25#endif
26
/* Ring buffer files are stream-like: seeking is never supported. */
loff_t lib_ring_buffer_no_llseek(struct file *file, loff_t offset, int origin)
{
	return -ESPIPE;
}
31
/*
 * Release pages from the buffer so splice pipe_to_file can move them.
 * Called after the pipe has been populated with buffer pages.
 */
static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
					     struct pipe_buffer *pbuf)
{
	/* The page was handed to the pipe; drop the buffer's reference. */
	__free_page(pbuf->page);
}
41
/* Pipe buffer operations: generic helpers plus our page release hook. */
static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = lib_ring_buffer_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
51
/*
 * Page release operation after splice pipe_to_file ends: free the i-th
 * page of the splice descriptor.
 */
static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
					 unsigned int i)
{
	__free_page(spd->pages[i]);
}
60
/*
 * subbuf_splice_actor - splice up to one subbuf's worth of data
 *
 * Moves the reader sub-buffer's pages into @pipe, replacing each moved
 * page with a freshly allocated zeroed page so the ring buffer keeps a
 * full set of pages. Returns the result of wrapper_splice_to_pipe(), or
 * 0 when no page could be staged.
 */
static int subbuf_splice_actor(struct file *in,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags)
{
	struct lib_ring_buffer *buf = in->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;
	unsigned int poff, subbuf_pages, nr_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.nr_pages = 0,
		.partial = partial,
		.flags = flags,
		.ops = &ring_buffer_pipe_buf_ops,
		.spd_release = lib_ring_buffer_page_release,
	};
	unsigned long consumed_old, roffset;
	unsigned long bytes_avail;

	/*
	 * Check that a GET_SUBBUF ioctl has been done before.
	 */
	WARN_ON(atomic_long_read(&buf->active_readers) != 1);
	consumed_old = lib_ring_buffer_get_consumed(config, buf);
	consumed_old += *ppos;

	/*
	 * Adjust read len, if longer than what is available.
	 * Max read size is 1 subbuffer due to get_subbuf/put_subbuf for
	 * protection.
	 */
	bytes_avail = chan->backend.subbuf_size;
	WARN_ON(bytes_avail > chan->backend.buf_size);
	len = min_t(size_t, len, bytes_avail);
	subbuf_pages = bytes_avail >> PAGE_SHIFT;
	nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
	roffset = consumed_old & PAGE_MASK;
	poff = consumed_old & ~PAGE_MASK;
	printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
		   len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));

	for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
		unsigned int this_len;
		struct page **page, *new_page;
		void **virt;

		if (!len)
			break;
		printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
			   len, roffset);

		/*
		 * We have to replace the page we are moving into the splice
		 * pipe.
		 */
		new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
							    0)),
					    GFP_KERNEL | __GFP_ZERO, 0);
		if (!new_page)
			break;

		/* First iteration may start mid-page (poff); rest are full. */
		this_len = PAGE_SIZE - poff;
		page = lib_ring_buffer_read_get_page(&buf->backend, roffset, &virt);
		spd.pages[spd.nr_pages] = *page;
		/* Swap the fresh page into the buffer's page slot. */
		*page = new_page;
		*virt = page_address(new_page);
		spd.partial[spd.nr_pages].offset = poff;
		spd.partial[spd.nr_pages].len = this_len;

		poff = 0;
		roffset += PAGE_SIZE;
		len -= this_len;
	}

	if (!spd.nr_pages)
		return 0;

	return wrapper_splice_to_pipe(pipe, &spd);
}
147
/*
 * splice_read file operation: repeatedly invoke subbuf_splice_actor()
 * until some data has been spliced or an error/EOF condition occurs.
 * Requires page-aligned *ppos and len (see comment below). Returns the
 * number of bytes spliced, or a negative error.
 */
ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe, size_t len,
				    unsigned int flags)
{
	struct lib_ring_buffer *buf = in->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;
	ssize_t spliced;
	int ret;

	if (config->output != RING_BUFFER_SPLICE)
		return -EINVAL;

	/*
	 * We require ppos and length to be page-aligned for performance reasons
	 * (no page copy). Size is known using the ioctl
	 * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
	 * We fail when the ppos or len passed is not page-sized, because splice
	 * is not allowed to copy more than the length passed as parameter (so
	 * the ABI does not let us silently copy more than requested to include
	 * padding).
	 */
	if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
		return -EINVAL;

	ret = 0;
	spliced = 0;

	printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
		   (ssize_t)*ppos);
	while (len && !spliced) {
		ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
		printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
		if (ret < 0)
			break;
		else if (!ret) {
			/* No page staged: report EAGAIN for non-blockers. */
			if (flags & SPLICE_F_NONBLOCK)
				ret = -EAGAIN;
			break;
		}

		*ppos += ret;
		if (ret > len)
			len = 0;
		else
			len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
deleted file mode 100644
index 8b783052a5f6..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
+++ /dev/null
@@ -1,390 +0,0 @@
1/*
2 * ring_buffer_vfs.c
3 *
4 * Copyright (C) 2009-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Ring Buffer VFS file operations.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/fs.h>
13#include <linux/compat.h>
14
15#include "../../wrapper/ringbuffer/backend.h"
16#include "../../wrapper/ringbuffer/frontend.h"
17#include "../../wrapper/ringbuffer/vfs.h"
18#include "../../wrapper/poll.h"
19
/* Write a native unsigned long to the userspace address in @arg. */
static int put_ulong(unsigned long val, unsigned long arg)
{
	return put_user(val, (unsigned long __user *)arg);
}
24
#ifdef CONFIG_COMPAT
/* Write a 32-bit compat_ulong_t through a compat (32-bit) user pointer. */
static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
{
	return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
}
#endif
31
32/**
33 * lib_ring_buffer_open - ring buffer open file operation
34 * @inode: opened inode
35 * @file: opened file
36 *
37 * Open implementation. Makes sure only one open instance of a buffer is
38 * done at a given moment.
39 */
40int lib_ring_buffer_open(struct inode *inode, struct file *file)
41{
42 struct lib_ring_buffer *buf = inode->i_private;
43 int ret;
44
45 if (!buf)
46 return -EINVAL;
47
48 ret = lib_ring_buffer_open_read(buf);
49 if (ret)
50 return ret;
51
52 file->private_data = buf;
53 ret = nonseekable_open(inode, file);
54 if (ret)
55 goto release_read;
56 return 0;
57
58release_read:
59 lib_ring_buffer_release_read(buf);
60 return ret;
61}
62
63/**
64 * lib_ring_buffer_release - ring buffer release file operation
65 * @inode: opened inode
66 * @file: opened file
67 *
68 * Release implementation.
69 */
int lib_ring_buffer_release(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = file->private_data;

	/* Drop the exclusive read access taken in lib_ring_buffer_open(). */
	lib_ring_buffer_release_read(buf);

	return 0;
}
78
79/**
80 * lib_ring_buffer_poll - ring buffer poll file operation
81 * @filp: the file
82 * @wait: poll table
83 *
84 * Poll implementation.
85 */
/*
 * poll() implementation: POLLERR when the channel is disabled, POLLHUP
 * when the finalized buffer is fully consumed, POLLPRI|POLLRDBAND when
 * the buffer is full, POLLIN|POLLRDNORM when data is readable.
 */
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask = 0;
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;
	int finalized, disabled;

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &buf->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
		 * finalized load before offsets loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
		if (disabled)
			return POLLERR;

		/* Empty when produced and consumed sub-buffer counts match. */
		if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
		  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
		  == 0) {
			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers
				 * __wait_event()/wake_up_interruptible() take
				 * care of "raw_spin_is_locked" memory ordering.
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			/* Distinguish "buffer full" from "data available". */
			if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
					 chan)
			  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
					 chan)
			  >= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}
	return mask;
}
139
140/**
141 * lib_ring_buffer_ioctl - control ring buffer reader synchronization
142 *
143 * @filp: the file
144 * @cmd: the command
145 * @arg: command arg
146 *
147 * This ioctl implements commands necessary for producer/consumer
148 * and flight recorder reader interaction :
149 * RING_BUFFER_GET_NEXT_SUBBUF
150 * Get the next sub-buffer that can be read. It never blocks.
151 * RING_BUFFER_PUT_NEXT_SUBBUF
152 * Release the currently read sub-buffer.
153 * RING_BUFFER_GET_SUBBUF_SIZE
154 * returns the size of the current sub-buffer.
155 * RING_BUFFER_GET_MAX_SUBBUF_SIZE
156 * returns the maximum size for sub-buffers.
157 * RING_BUFFER_GET_NUM_SUBBUF
158 * returns the number of reader-visible sub-buffers in the per cpu
159 * channel (for mmap).
160 * RING_BUFFER_GET_MMAP_READ_OFFSET
161 * returns the offset of the subbuffer belonging to the reader.
162 * Should only be used for mmap clients.
163 */
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		/* Capture current consumer/producer positions. */
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
					    &buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		unsigned long uconsume;
		long ret;

		ret = get_user(uconsume, (unsigned long __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		ret = lib_ring_buffer_get_subbuf(buf, uconsume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
		/* Unpadded data size of the current sub-buffer. */
		return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
				 arg);
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		/* Page-padded size, as required by the splice ABI. */
		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		return put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		return put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > INT_MAX)
			return -EFBIG;
		return put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
				 arg);
	}
	case RING_BUFFER_FLUSH:
		/* Force a sub-buffer switch so pending data becomes readable. */
		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
258
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl handler. Mirrors lib_ring_buffer_ioctl(), except
 * that every value returned through @arg is written as a 32-bit
 * compat_ulong_t via compat_put_ulong() (which goes through
 * compat_ptr()), after checking it fits in 32 bits.
 *
 * Fix: the GET_SUBBUF_SIZE, GET_PADDED_SUBBUF_SIZE, GET_MAX_SUBBUF_SIZE,
 * GET_MMAP_LEN and GET_MMAP_READ_OFFSET cases previously used the native
 * put_ulong(), writing a full unsigned long (8 bytes on 64-bit kernels)
 * through a raw 32-bit user pointer. They now use compat_put_ulong(),
 * consistent with the SNAPSHOT_GET_* cases.
 */
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
					    &buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return compat_put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return compat_put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		__u32 uconsume;
		unsigned long consume;
		long ret;

		ret = get_user(uconsume, (__u32 __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		/*
		 * Extend the 32-bit consumed position with the high bits
		 * of the last snapshot, so 64-bit positions keep working.
		 */
		consume = buf->cons_snapshot;
		consume &= ~0xFFFFFFFFL;
		consume |= uconsume;
		ret = lib_ring_buffer_get_subbuf(buf, consume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
	{
		unsigned long data_size;

		data_size = lib_ring_buffer_get_read_data_size(config, buf);
		if (data_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(data_size, arg);
	}
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		if (size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		if (chan->backend.subbuf_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex, read_offset;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		read_offset = buf->backend.array[sb_bindex]->mmap_offset;
		if (read_offset > UINT_MAX)
			return -EINVAL;
		return compat_put_ulong(read_offset, arg);
	}
	case RING_BUFFER_FLUSH:
		/* Force a sub-buffer switch so pending data becomes readable. */
		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
372
/* File operations for the raw ring buffer reader (ioctl/mmap/splice). */
const struct file_operations lib_ring_buffer_file_operations = {
	.owner = THIS_MODULE,
	.open = lib_ring_buffer_open,
	.release = lib_ring_buffer_release,
	.poll = lib_ring_buffer_poll,
	.splice_read = lib_ring_buffer_splice_read,
	.mmap = lib_ring_buffer_mmap,
	.unlocked_ioctl = lib_ring_buffer_ioctl,
	.llseek = lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lib_ring_buffer_compat_ioctl,
#endif
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);
387
388MODULE_LICENSE("GPL and additional rights");
389MODULE_AUTHOR("Mathieu Desnoyers");
390MODULE_DESCRIPTION("Ring Buffer Library VFS");
diff --git a/drivers/staging/lttng/lib/ringbuffer/vatomic.h b/drivers/staging/lttng/lib/ringbuffer/vatomic.h
deleted file mode 100644
index b944dd63472f..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/vatomic.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_VATOMIC_H
2#define _LINUX_RING_BUFFER_VATOMIC_H
3
4/*
5 * linux/ringbuffer/vatomic.h
6 *
7 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Dual LGPL v2.1/GPL v2 license.
10 */
11
12#include <asm/atomic.h>
13#include <asm/local.h>
14
15/*
16 * Same data type (long) accessed differently depending on configuration.
17 * v field is for non-atomic access (protected by mutual exclusion).
18 * In the fast-path, the ring_buffer_config structure is constant, so the
19 * compiler can statically select the appropriate branch.
20 * local_t is used for per-cpu and per-thread buffers.
21 * atomic_long_t is used for globally shared buffers.
22 */
union v_atomic {
	local_t l;		/* per-cpu/per-thread buffers (RING_BUFFER_SYNC_PER_CPU) */
	atomic_long_t a;	/* globally shared buffers */
	long v;			/* non-atomic access, protected by mutual exclusion */
};
28
29static inline
30long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
31{
32 if (config->sync == RING_BUFFER_SYNC_PER_CPU)
33 return local_read(&v_a->l);
34 else
35 return atomic_long_read(&v_a->a);
36}
37
38static inline
39void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
40 long v)
41{
42 if (config->sync == RING_BUFFER_SYNC_PER_CPU)
43 local_set(&v_a->l, v);
44 else
45 atomic_long_set(&v_a->a, v);
46}
47
48static inline
49void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
50{
51 if (config->sync == RING_BUFFER_SYNC_PER_CPU)
52 local_add(v, &v_a->l);
53 else
54 atomic_long_add(v, &v_a->a);
55}
56
57static inline
58void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
59{
60 if (config->sync == RING_BUFFER_SYNC_PER_CPU)
61 local_inc(&v_a->l);
62 else
63 atomic_long_inc(&v_a->a);
64}
65
/*
 * Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
 * The config parameter is kept for signature symmetry with the other ops.
 */
static inline
void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
{
	--v_a->v;
}
74
75static inline
76long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
77 long old, long _new)
78{
79 if (config->sync == RING_BUFFER_SYNC_PER_CPU)
80 return local_cmpxchg(&v_a->l, old, _new);
81 else
82 return atomic_long_cmpxchg(&v_a->a, old, _new);
83}
84
85#endif /* _LINUX_RING_BUFFER_VATOMIC_H */
diff --git a/drivers/staging/lttng/lib/ringbuffer/vfs.h b/drivers/staging/lttng/lib/ringbuffer/vfs.h
deleted file mode 100644
index d073e4c555fe..000000000000
--- a/drivers/staging/lttng/lib/ringbuffer/vfs.h
+++ /dev/null
@@ -1,89 +0,0 @@
1#ifndef _LINUX_RING_BUFFER_VFS_H
2#define _LINUX_RING_BUFFER_VFS_H
3
4/*
5 * linux/ringbuffer/vfs.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Wait-free ring buffer VFS file operations.
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * Dual LGPL v2.1/GPL v2 license.
15 */
16
17#include <linux/fs.h>
18#include <linux/poll.h>
19
20/* VFS API */
21
22extern const struct file_operations lib_ring_buffer_file_operations;
23
24/*
25 * Internal file operations.
26 */
27
28int lib_ring_buffer_open(struct inode *inode, struct file *file);
29int lib_ring_buffer_release(struct inode *inode, struct file *file);
30unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait);
31ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
32 struct pipe_inode_info *pipe, size_t len,
33 unsigned int flags);
34int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma);
35
36/* Ring Buffer ioctl() and ioctl numbers */
37long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
38#ifdef CONFIG_COMPAT
39long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
40 unsigned long arg);
41#endif
42
43/*
44 * Use RING_BUFFER_GET_NEXT_SUBBUF / RING_BUFFER_PUT_NEXT_SUBBUF to read and
45 * consume sub-buffers sequentially.
46 *
47 * Reading sub-buffers without consuming them can be performed with:
48 *
49 * RING_BUFFER_SNAPSHOT
50 * RING_BUFFER_SNAPSHOT_GET_CONSUMED
51 * RING_BUFFER_SNAPSHOT_GET_PRODUCED
52 *
53 * to get the offset range to consume, and then by passing each sub-buffer
54 * offset to RING_BUFFER_GET_SUBBUF, read the sub-buffer, and then release it
55 * with RING_BUFFER_PUT_SUBBUF.
56 *
57 * Note that the "snapshot" API can be used to read the sub-buffer in reverse
58 * order, which is useful for flight recorder snapshots.
59 */
60
61/* Get a snapshot of the current ring buffer producer and consumer positions */
62#define RING_BUFFER_SNAPSHOT _IO(0xF6, 0x00)
63/* Get the consumer position (iteration start) */
64#define RING_BUFFER_SNAPSHOT_GET_CONSUMED _IOR(0xF6, 0x01, unsigned long)
65/* Get the producer position (iteration end) */
66#define RING_BUFFER_SNAPSHOT_GET_PRODUCED _IOR(0xF6, 0x02, unsigned long)
67/* Get exclusive read access to the specified sub-buffer position */
68#define RING_BUFFER_GET_SUBBUF _IOW(0xF6, 0x03, unsigned long)
69/* Release exclusive sub-buffer access */
70#define RING_BUFFER_PUT_SUBBUF _IO(0xF6, 0x04)
71
72/* Get exclusive read access to the next sub-buffer that can be read. */
73#define RING_BUFFER_GET_NEXT_SUBBUF _IO(0xF6, 0x05)
74/* Release exclusive sub-buffer access, move consumer forward. */
75#define RING_BUFFER_PUT_NEXT_SUBBUF _IO(0xF6, 0x06)
76/* returns the size of the current sub-buffer, without padding (for mmap). */
77#define RING_BUFFER_GET_SUBBUF_SIZE _IOR(0xF6, 0x07, unsigned long)
78/* returns the size of the current sub-buffer, with padding (for splice). */
79#define RING_BUFFER_GET_PADDED_SUBBUF_SIZE _IOR(0xF6, 0x08, unsigned long)
80/* returns the maximum size for sub-buffers. */
81#define RING_BUFFER_GET_MAX_SUBBUF_SIZE _IOR(0xF6, 0x09, unsigned long)
82/* returns the length to mmap. */
83#define RING_BUFFER_GET_MMAP_LEN _IOR(0xF6, 0x0A, unsigned long)
84/* returns the offset of the subbuffer belonging to the mmap reader. */
85#define RING_BUFFER_GET_MMAP_READ_OFFSET _IOR(0xF6, 0x0B, unsigned long)
86/* flush the current sub-buffer */
87#define RING_BUFFER_FLUSH _IO(0xF6, 0x0C)
88
89#endif /* _LINUX_RING_BUFFER_VFS_H */
diff --git a/drivers/staging/lttng/ltt-context.c b/drivers/staging/lttng/ltt-context.c
deleted file mode 100644
index 60ea525bb3e6..000000000000
--- a/drivers/staging/lttng/ltt-context.c
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * ltt-context.c
3 *
4 * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng trace/channel/event context management.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/mutex.h>
14#include <linux/slab.h>
15#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
16#include "ltt-events.h"
17#include "ltt-tracer.h"
18
19int lttng_find_context(struct lttng_ctx *ctx, const char *name)
20{
21 unsigned int i;
22
23 for (i = 0; i < ctx->nr_fields; i++) {
24 /* Skip allocated (but non-initialized) contexts */
25 if (!ctx->fields[i].event_field.name)
26 continue;
27 if (!strcmp(ctx->fields[i].event_field.name, name))
28 return 1;
29 }
30 return 0;
31}
32EXPORT_SYMBOL_GPL(lttng_find_context);
33
34/*
35 * Note: as we append context information, the pointer location may change.
36 */
37struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
38{
39 struct lttng_ctx_field *field;
40 struct lttng_ctx *ctx;
41
42 if (!*ctx_p) {
43 *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
44 if (!*ctx_p)
45 return NULL;
46 }
47 ctx = *ctx_p;
48 if (ctx->nr_fields + 1 > ctx->allocated_fields) {
49 struct lttng_ctx_field *new_fields;
50
51 ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
52 new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
53 if (!new_fields)
54 return NULL;
55 if (ctx->fields)
56 memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
57 kfree(ctx->fields);
58 ctx->fields = new_fields;
59 }
60 field = &ctx->fields[ctx->nr_fields];
61 ctx->nr_fields++;
62 return field;
63}
64EXPORT_SYMBOL_GPL(lttng_append_context);
65
66/*
67 * Remove last context field.
68 */
69void lttng_remove_context_field(struct lttng_ctx **ctx_p,
70 struct lttng_ctx_field *field)
71{
72 struct lttng_ctx *ctx;
73
74 ctx = *ctx_p;
75 ctx->nr_fields--;
76 WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
77 memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
78}
79EXPORT_SYMBOL_GPL(lttng_remove_context_field);
80
81void lttng_destroy_context(struct lttng_ctx *ctx)
82{
83 int i;
84
85 if (!ctx)
86 return;
87 for (i = 0; i < ctx->nr_fields; i++) {
88 if (ctx->fields[i].destroy)
89 ctx->fields[i].destroy(&ctx->fields[i]);
90 }
91 kfree(ctx->fields);
92 kfree(ctx);
93}
diff --git a/drivers/staging/lttng/ltt-debugfs-abi.c b/drivers/staging/lttng/ltt-debugfs-abi.c
deleted file mode 100644
index 37cccfa35f47..000000000000
--- a/drivers/staging/lttng/ltt-debugfs-abi.c
+++ /dev/null
@@ -1,777 +0,0 @@
1/*
2 * ltt-debugfs-abi.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng debugfs ABI
7 *
8 * Mimic system calls for:
9 * - session creation, returns a file descriptor or failure.
10 * - channel creation, returns a file descriptor or failure.
11 * - Operates on a session file descriptor
12 * - Takes all channel options as parameters.
13 * - stream get, returns a file descriptor or failure.
14 * - Operates on a channel file descriptor.
15 * - stream notifier get, returns a file descriptor or failure.
16 * - Operates on a channel file descriptor.
17 * - event creation, returns a file descriptor or failure.
18 * - Operates on a channel file descriptor
19 * - Takes an event name as parameter
20 * - Takes an instrumentation source as parameter
21 * - e.g. tracepoints, dynamic_probes...
22 * - Takes instrumentation source specific arguments.
23 *
24 * Dual LGPL v2.1/GPL v2 license.
25 */
26
27#include <linux/module.h>
28#include <linux/debugfs.h>
29#include <linux/proc_fs.h>
30#include <linux/anon_inodes.h>
31#include <linux/file.h>
32#include <linux/uaccess.h>
33#include <linux/slab.h>
34#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
35#include "wrapper/ringbuffer/vfs.h"
36#include "wrapper/poll.h"
37#include "ltt-debugfs-abi.h"
38#include "ltt-events.h"
39#include "ltt-tracer.h"
40
41/*
42 * This is LTTng's own personal way to create a system call as an external
43 * module. We use ioctl() on /sys/kernel/debug/lttng.
44 */
45
46static struct dentry *lttng_dentry;
47static struct proc_dir_entry *lttng_proc_dentry;
48static const struct file_operations lttng_fops;
49static const struct file_operations lttng_session_fops;
50static const struct file_operations lttng_channel_fops;
51static const struct file_operations lttng_metadata_fops;
52static const struct file_operations lttng_event_fops;
53
54/*
55 * Teardown management: opened file descriptors keep a refcount on the module,
56 * so it can only exit when all file descriptors are closed.
57 */
58
59enum channel_type {
60 PER_CPU_CHANNEL,
61 METADATA_CHANNEL,
62};
63
64static
65int lttng_abi_create_session(void)
66{
67 struct ltt_session *session;
68 struct file *session_file;
69 int session_fd, ret;
70
71 session = ltt_session_create();
72 if (!session)
73 return -ENOMEM;
74 session_fd = get_unused_fd();
75 if (session_fd < 0) {
76 ret = session_fd;
77 goto fd_error;
78 }
79 session_file = anon_inode_getfile("[lttng_session]",
80 &lttng_session_fops,
81 session, O_RDWR);
82 if (IS_ERR(session_file)) {
83 ret = PTR_ERR(session_file);
84 goto file_error;
85 }
86 session->file = session_file;
87 fd_install(session_fd, session_file);
88 return session_fd;
89
90file_error:
91 put_unused_fd(session_fd);
92fd_error:
93 ltt_session_destroy(session);
94 return ret;
95}
96
97static
98int lttng_abi_tracepoint_list(void)
99{
100 struct file *tracepoint_list_file;
101 int file_fd, ret;
102
103 file_fd = get_unused_fd();
104 if (file_fd < 0) {
105 ret = file_fd;
106 goto fd_error;
107 }
108
109 tracepoint_list_file = anon_inode_getfile("[lttng_session]",
110 &lttng_tracepoint_list_fops,
111 NULL, O_RDWR);
112 if (IS_ERR(tracepoint_list_file)) {
113 ret = PTR_ERR(tracepoint_list_file);
114 goto file_error;
115 }
116 ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
117 if (ret < 0)
118 goto open_error;
119 fd_install(file_fd, tracepoint_list_file);
120 if (file_fd < 0) {
121 ret = file_fd;
122 goto fd_error;
123 }
124 return file_fd;
125
126open_error:
127 fput(tracepoint_list_file);
128file_error:
129 put_unused_fd(file_fd);
130fd_error:
131 return ret;
132}
133
134static
135long lttng_abi_tracer_version(struct file *file,
136 struct lttng_kernel_tracer_version __user *uversion_param)
137{
138 struct lttng_kernel_tracer_version v;
139
140 v.version = LTTNG_VERSION;
141 v.patchlevel = LTTNG_PATCHLEVEL;
142 v.sublevel = LTTNG_SUBLEVEL;
143
144 if (copy_to_user(uversion_param, &v, sizeof(v)))
145 return -EFAULT;
146 return 0;
147}
148
149static
150long lttng_abi_add_context(struct file *file,
151 struct lttng_kernel_context __user *ucontext_param,
152 struct lttng_ctx **ctx, struct ltt_session *session)
153{
154 struct lttng_kernel_context context_param;
155
156 if (session->been_active)
157 return -EPERM;
158
159 if (copy_from_user(&context_param, ucontext_param, sizeof(context_param)))
160 return -EFAULT;
161
162 switch (context_param.ctx) {
163 case LTTNG_KERNEL_CONTEXT_PID:
164 return lttng_add_pid_to_ctx(ctx);
165 case LTTNG_KERNEL_CONTEXT_PRIO:
166 return lttng_add_prio_to_ctx(ctx);
167 case LTTNG_KERNEL_CONTEXT_NICE:
168 return lttng_add_nice_to_ctx(ctx);
169 case LTTNG_KERNEL_CONTEXT_VPID:
170 return lttng_add_vpid_to_ctx(ctx);
171 case LTTNG_KERNEL_CONTEXT_TID:
172 return lttng_add_tid_to_ctx(ctx);
173 case LTTNG_KERNEL_CONTEXT_VTID:
174 return lttng_add_vtid_to_ctx(ctx);
175 case LTTNG_KERNEL_CONTEXT_PPID:
176 return lttng_add_ppid_to_ctx(ctx);
177 case LTTNG_KERNEL_CONTEXT_VPPID:
178 return lttng_add_vppid_to_ctx(ctx);
179 case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
180 context_param.u.perf_counter.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
181 return lttng_add_perf_counter_to_ctx(context_param.u.perf_counter.type,
182 context_param.u.perf_counter.config,
183 context_param.u.perf_counter.name,
184 ctx);
185 case LTTNG_KERNEL_CONTEXT_PROCNAME:
186 return lttng_add_procname_to_ctx(ctx);
187 default:
188 return -EINVAL;
189 }
190}
191
192/**
193 * lttng_ioctl - lttng syscall through ioctl
194 *
195 * @file: the file
196 * @cmd: the command
197 * @arg: command arg
198 *
199 * This ioctl implements lttng commands:
200 * LTTNG_KERNEL_SESSION
201 * Returns a LTTng trace session file descriptor
202 * LTTNG_KERNEL_TRACER_VERSION
203 * Returns the LTTng kernel tracer version
204 * LTTNG_KERNEL_TRACEPOINT_LIST
205 * Returns a file descriptor listing available tracepoints
206 * LTTNG_KERNEL_WAIT_QUIESCENT
207 * Returns after all previously running probes have completed
208 *
209 * The returned session will be deleted when its file descriptor is closed.
210 */
211static
212long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
213{
214 switch (cmd) {
215 case LTTNG_KERNEL_SESSION:
216 return lttng_abi_create_session();
217 case LTTNG_KERNEL_TRACER_VERSION:
218 return lttng_abi_tracer_version(file,
219 (struct lttng_kernel_tracer_version __user *) arg);
220 case LTTNG_KERNEL_TRACEPOINT_LIST:
221 return lttng_abi_tracepoint_list();
222 case LTTNG_KERNEL_WAIT_QUIESCENT:
223 synchronize_trace();
224 return 0;
225 case LTTNG_KERNEL_CALIBRATE:
226 {
227 struct lttng_kernel_calibrate __user *ucalibrate =
228 (struct lttng_kernel_calibrate __user *) arg;
229 struct lttng_kernel_calibrate calibrate;
230 int ret;
231
232 if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
233 return -EFAULT;
234 ret = lttng_calibrate(&calibrate);
235 if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
236 return -EFAULT;
237 return ret;
238 }
239 default:
240 return -ENOIOCTLCMD;
241 }
242}
243
244static const struct file_operations lttng_fops = {
245 .owner = THIS_MODULE,
246 .unlocked_ioctl = lttng_ioctl,
247#ifdef CONFIG_COMPAT
248 .compat_ioctl = lttng_ioctl,
249#endif
250};
251
252/*
253 * We tolerate no failure in this function (if one happens, we print a dmesg
254 * error, but cannot return any error, because the channel information is
255 * invariant.
256 */
257static
258void lttng_metadata_create_events(struct file *channel_file)
259{
260 struct ltt_channel *channel = channel_file->private_data;
261 static struct lttng_kernel_event metadata_params = {
262 .instrumentation = LTTNG_KERNEL_TRACEPOINT,
263 .name = "lttng_metadata",
264 };
265 struct ltt_event *event;
266
267 /*
268 * We tolerate no failure path after event creation. It will stay
269 * invariant for the rest of the session.
270 */
271 event = ltt_event_create(channel, &metadata_params, NULL, NULL);
272 if (!event) {
273 goto create_error;
274 }
275 return;
276
277create_error:
278 WARN_ON(1);
279 return; /* not allowed to return error */
280}
281
282static
283int lttng_abi_create_channel(struct file *session_file,
284 struct lttng_kernel_channel __user *uchan_param,
285 enum channel_type channel_type)
286{
287 struct ltt_session *session = session_file->private_data;
288 const struct file_operations *fops = NULL;
289 const char *transport_name;
290 struct ltt_channel *chan;
291 struct file *chan_file;
292 struct lttng_kernel_channel chan_param;
293 int chan_fd;
294 int ret = 0;
295
296 if (copy_from_user(&chan_param, uchan_param, sizeof(chan_param)))
297 return -EFAULT;
298 chan_fd = get_unused_fd();
299 if (chan_fd < 0) {
300 ret = chan_fd;
301 goto fd_error;
302 }
303 switch (channel_type) {
304 case PER_CPU_CHANNEL:
305 fops = &lttng_channel_fops;
306 break;
307 case METADATA_CHANNEL:
308 fops = &lttng_metadata_fops;
309 break;
310 }
311
312 chan_file = anon_inode_getfile("[lttng_channel]",
313 fops,
314 NULL, O_RDWR);
315 if (IS_ERR(chan_file)) {
316 ret = PTR_ERR(chan_file);
317 goto file_error;
318 }
319 switch (channel_type) {
320 case PER_CPU_CHANNEL:
321 if (chan_param.output == LTTNG_KERNEL_SPLICE) {
322 transport_name = chan_param.overwrite ?
323 "relay-overwrite" : "relay-discard";
324 } else if (chan_param.output == LTTNG_KERNEL_MMAP) {
325 transport_name = chan_param.overwrite ?
326 "relay-overwrite-mmap" : "relay-discard-mmap";
327 } else {
328 return -EINVAL;
329 }
330 break;
331 case METADATA_CHANNEL:
332 if (chan_param.output == LTTNG_KERNEL_SPLICE)
333 transport_name = "relay-metadata";
334 else if (chan_param.output == LTTNG_KERNEL_MMAP)
335 transport_name = "relay-metadata-mmap";
336 else
337 return -EINVAL;
338 break;
339 default:
340 transport_name = "<unknown>";
341 break;
342 }
343 /*
344 * We tolerate no failure path after channel creation. It will stay
345 * invariant for the rest of the session.
346 */
347 chan = ltt_channel_create(session, transport_name, NULL,
348 chan_param.subbuf_size,
349 chan_param.num_subbuf,
350 chan_param.switch_timer_interval,
351 chan_param.read_timer_interval);
352 if (!chan) {
353 ret = -EINVAL;
354 goto chan_error;
355 }
356 chan->file = chan_file;
357 chan_file->private_data = chan;
358 fd_install(chan_fd, chan_file);
359 if (channel_type == METADATA_CHANNEL) {
360 session->metadata = chan;
361 lttng_metadata_create_events(chan_file);
362 }
363
364 /* The channel created holds a reference on the session */
365 atomic_long_inc(&session_file->f_count);
366
367 return chan_fd;
368
369chan_error:
370 fput(chan_file);
371file_error:
372 put_unused_fd(chan_fd);
373fd_error:
374 return ret;
375}
376
377/**
378 * lttng_session_ioctl - lttng session fd ioctl
379 *
380 * @file: the file
381 * @cmd: the command
382 * @arg: command arg
383 *
384 * This ioctl implements lttng commands:
385 * LTTNG_KERNEL_CHANNEL
386 * Returns a LTTng channel file descriptor
387 * LTTNG_KERNEL_ENABLE
388 * Enables tracing for a session (weak enable)
389 * LTTNG_KERNEL_DISABLE
390 * Disables tracing for a session (strong disable)
391 * LTTNG_KERNEL_METADATA
392 * Returns a LTTng metadata file descriptor
393 *
394 * The returned channel will be deleted when its file descriptor is closed.
395 */
396static
397long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
398{
399 struct ltt_session *session = file->private_data;
400
401 switch (cmd) {
402 case LTTNG_KERNEL_CHANNEL:
403 return lttng_abi_create_channel(file,
404 (struct lttng_kernel_channel __user *) arg,
405 PER_CPU_CHANNEL);
406 case LTTNG_KERNEL_SESSION_START:
407 case LTTNG_KERNEL_ENABLE:
408 return ltt_session_enable(session);
409 case LTTNG_KERNEL_SESSION_STOP:
410 case LTTNG_KERNEL_DISABLE:
411 return ltt_session_disable(session);
412 case LTTNG_KERNEL_METADATA:
413 return lttng_abi_create_channel(file,
414 (struct lttng_kernel_channel __user *) arg,
415 METADATA_CHANNEL);
416 default:
417 return -ENOIOCTLCMD;
418 }
419}
420
421/*
422 * Called when the last file reference is dropped.
423 *
424 * Big fat note: channels and events are invariant for the whole session after
425 * their creation. So this session destruction also destroys all channel and
426 * event structures specific to this session (they are not destroyed when their
427 * individual file is released).
428 */
429static
430int lttng_session_release(struct inode *inode, struct file *file)
431{
432 struct ltt_session *session = file->private_data;
433
434 if (session)
435 ltt_session_destroy(session);
436 return 0;
437}
438
439static const struct file_operations lttng_session_fops = {
440 .owner = THIS_MODULE,
441 .release = lttng_session_release,
442 .unlocked_ioctl = lttng_session_ioctl,
443#ifdef CONFIG_COMPAT
444 .compat_ioctl = lttng_session_ioctl,
445#endif
446};
447
448static
449int lttng_abi_open_stream(struct file *channel_file)
450{
451 struct ltt_channel *channel = channel_file->private_data;
452 struct lib_ring_buffer *buf;
453 int stream_fd, ret;
454 struct file *stream_file;
455
456 buf = channel->ops->buffer_read_open(channel->chan);
457 if (!buf)
458 return -ENOENT;
459
460 stream_fd = get_unused_fd();
461 if (stream_fd < 0) {
462 ret = stream_fd;
463 goto fd_error;
464 }
465 stream_file = anon_inode_getfile("[lttng_stream]",
466 &lib_ring_buffer_file_operations,
467 buf, O_RDWR);
468 if (IS_ERR(stream_file)) {
469 ret = PTR_ERR(stream_file);
470 goto file_error;
471 }
472 /*
473 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, don't honor
474 * FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
475 * file descriptor, so we set FMODE_PREAD here.
476 */
477 stream_file->f_mode |= FMODE_PREAD;
478 fd_install(stream_fd, stream_file);
479 /*
480 * The stream holds a reference to the channel within the generic ring
481 * buffer library, so no need to hold a refcount on the channel and
482 * session files here.
483 */
484 return stream_fd;
485
486file_error:
487 put_unused_fd(stream_fd);
488fd_error:
489 channel->ops->buffer_read_close(buf);
490 return ret;
491}
492
493static
494int lttng_abi_create_event(struct file *channel_file,
495 struct lttng_kernel_event __user *uevent_param)
496{
497 struct ltt_channel *channel = channel_file->private_data;
498 struct ltt_event *event;
499 struct lttng_kernel_event event_param;
500 int event_fd, ret;
501 struct file *event_file;
502
503 if (copy_from_user(&event_param, uevent_param, sizeof(event_param)))
504 return -EFAULT;
505 event_param.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
506 switch (event_param.instrumentation) {
507 case LTTNG_KERNEL_KRETPROBE:
508 event_param.u.kretprobe.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
509 break;
510 case LTTNG_KERNEL_KPROBE:
511 event_param.u.kprobe.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
512 break;
513 case LTTNG_KERNEL_FUNCTION:
514 event_param.u.ftrace.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
515 break;
516 default:
517 break;
518 }
519 switch (event_param.instrumentation) {
520 default:
521 event_fd = get_unused_fd();
522 if (event_fd < 0) {
523 ret = event_fd;
524 goto fd_error;
525 }
526 event_file = anon_inode_getfile("[lttng_event]",
527 &lttng_event_fops,
528 NULL, O_RDWR);
529 if (IS_ERR(event_file)) {
530 ret = PTR_ERR(event_file);
531 goto file_error;
532 }
533 /*
534 * We tolerate no failure path after event creation. It
535 * will stay invariant for the rest of the session.
536 */
537 event = ltt_event_create(channel, &event_param, NULL, NULL);
538 if (!event) {
539 ret = -EINVAL;
540 goto event_error;
541 }
542 event_file->private_data = event;
543 fd_install(event_fd, event_file);
544 /* The event holds a reference on the channel */
545 atomic_long_inc(&channel_file->f_count);
546 break;
547 case LTTNG_KERNEL_SYSCALL:
548 /*
549 * Only all-syscall tracing supported for now.
550 */
551 if (event_param.name[0] != '\0')
552 return -EINVAL;
553 ret = lttng_syscalls_register(channel, NULL);
554 if (ret)
555 goto fd_error;
556 event_fd = 0;
557 break;
558 }
559 return event_fd;
560
561event_error:
562 fput(event_file);
563file_error:
564 put_unused_fd(event_fd);
565fd_error:
566 return ret;
567}
568
569/**
570 * lttng_channel_ioctl - lttng syscall through ioctl
571 *
572 * @file: the file
573 * @cmd: the command
574 * @arg: command arg
575 *
576 * This ioctl implements lttng commands:
577 * LTTNG_KERNEL_STREAM
578 * Returns an event stream file descriptor or failure.
579 * (typically, one event stream records events from one CPU)
580 * LTTNG_KERNEL_EVENT
581 * Returns an event file descriptor or failure.
582 * LTTNG_KERNEL_CONTEXT
583 * Prepend a context field to each event in the channel
584 * LTTNG_KERNEL_ENABLE
585 * Enable recording for events in this channel (weak enable)
586 * LTTNG_KERNEL_DISABLE
587 * Disable recording for events in this channel (strong disable)
588 *
589 * Channel and event file descriptors also hold a reference on the session.
590 */
591static
592long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
593{
594 struct ltt_channel *channel = file->private_data;
595
596 switch (cmd) {
597 case LTTNG_KERNEL_STREAM:
598 return lttng_abi_open_stream(file);
599 case LTTNG_KERNEL_EVENT:
600 return lttng_abi_create_event(file, (struct lttng_kernel_event __user *) arg);
601 case LTTNG_KERNEL_CONTEXT:
602 return lttng_abi_add_context(file,
603 (struct lttng_kernel_context __user *) arg,
604 &channel->ctx, channel->session);
605 case LTTNG_KERNEL_ENABLE:
606 return ltt_channel_enable(channel);
607 case LTTNG_KERNEL_DISABLE:
608 return ltt_channel_disable(channel);
609 default:
610 return -ENOIOCTLCMD;
611 }
612}
613
614/**
615 * lttng_metadata_ioctl - lttng syscall through ioctl
616 *
617 * @file: the file
618 * @cmd: the command
619 * @arg: command arg
620 *
621 * This ioctl implements lttng commands:
622 * LTTNG_KERNEL_STREAM
623 * Returns an event stream file descriptor or failure.
624 *
625 * Channel and event file descriptors also hold a reference on the session.
626 */
627static
628long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
629{
630 switch (cmd) {
631 case LTTNG_KERNEL_STREAM:
632 return lttng_abi_open_stream(file);
633 default:
634 return -ENOIOCTLCMD;
635 }
636}
637
638/**
639 * lttng_channel_poll - lttng stream addition/removal monitoring
640 *
641 * @file: the file
642 * @wait: poll table
643 */
644unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
645{
646 struct ltt_channel *channel = file->private_data;
647 unsigned int mask = 0;
648
649 if (file->f_mode & FMODE_READ) {
650 poll_wait_set_exclusive(wait);
651 poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
652 wait);
653
654 if (channel->ops->is_disabled(channel->chan))
655 return POLLERR;
656 if (channel->ops->is_finalized(channel->chan))
657 return POLLHUP;
658 if (channel->ops->buffer_has_read_closed_stream(channel->chan))
659 return POLLIN | POLLRDNORM;
660 return 0;
661 }
662 return mask;
663
664}
665
666static
667int lttng_channel_release(struct inode *inode, struct file *file)
668{
669 struct ltt_channel *channel = file->private_data;
670
671 if (channel)
672 fput(channel->session->file);
673 return 0;
674}
675
676static const struct file_operations lttng_channel_fops = {
677 .owner = THIS_MODULE,
678 .release = lttng_channel_release,
679 .poll = lttng_channel_poll,
680 .unlocked_ioctl = lttng_channel_ioctl,
681#ifdef CONFIG_COMPAT
682 .compat_ioctl = lttng_channel_ioctl,
683#endif
684};
685
686static const struct file_operations lttng_metadata_fops = {
687 .owner = THIS_MODULE,
688 .release = lttng_channel_release,
689 .unlocked_ioctl = lttng_metadata_ioctl,
690#ifdef CONFIG_COMPAT
691 .compat_ioctl = lttng_metadata_ioctl,
692#endif
693};
694
695/**
696 * lttng_event_ioctl - lttng syscall through ioctl
697 *
698 * @file: the file
699 * @cmd: the command
700 * @arg: command arg
701 *
702 * This ioctl implements lttng commands:
703 * LTTNG_KERNEL_CONTEXT
704 * Prepend a context field to each record of this event
705 * LTTNG_KERNEL_ENABLE
706 * Enable recording for this event (weak enable)
707 * LTTNG_KERNEL_DISABLE
708 * Disable recording for this event (strong disable)
709 */
710static
711long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
712{
713 struct ltt_event *event = file->private_data;
714
715 switch (cmd) {
716 case LTTNG_KERNEL_CONTEXT:
717 return lttng_abi_add_context(file,
718 (struct lttng_kernel_context __user *) arg,
719 &event->ctx, event->chan->session);
720 case LTTNG_KERNEL_ENABLE:
721 return ltt_event_enable(event);
722 case LTTNG_KERNEL_DISABLE:
723 return ltt_event_disable(event);
724 default:
725 return -ENOIOCTLCMD;
726 }
727}
728
729static
730int lttng_event_release(struct inode *inode, struct file *file)
731{
732 struct ltt_event *event = file->private_data;
733
734 if (event)
735 fput(event->chan->file);
736 return 0;
737}
738
739/* TODO: filter control ioctl */
740static const struct file_operations lttng_event_fops = {
741 .owner = THIS_MODULE,
742 .release = lttng_event_release,
743 .unlocked_ioctl = lttng_event_ioctl,
744#ifdef CONFIG_COMPAT
745 .compat_ioctl = lttng_event_ioctl,
746#endif
747};
748
749int __init ltt_debugfs_abi_init(void)
750{
751 int ret = 0;
752
753 wrapper_vmalloc_sync_all();
754 lttng_dentry = debugfs_create_file("lttng", S_IWUSR, NULL, NULL,
755 &lttng_fops);
756 if (IS_ERR(lttng_dentry))
757 lttng_dentry = NULL;
758
759 lttng_proc_dentry = proc_create_data("lttng", S_IWUSR, NULL,
760 &lttng_fops, NULL);
761
762 if (!lttng_dentry && !lttng_proc_dentry) {
763 printk(KERN_ERR "Error creating LTTng control file\n");
764 ret = -ENOMEM;
765 goto error;
766 }
767error:
768 return ret;
769}
770
771void __exit ltt_debugfs_abi_exit(void)
772{
773 if (lttng_dentry)
774 debugfs_remove(lttng_dentry);
775 if (lttng_proc_dentry)
776 remove_proc_entry("lttng", NULL);
777}
diff --git a/drivers/staging/lttng/ltt-debugfs-abi.h b/drivers/staging/lttng/ltt-debugfs-abi.h
deleted file mode 100644
index a018297f7fb5..000000000000
--- a/drivers/staging/lttng/ltt-debugfs-abi.h
+++ /dev/null
@@ -1,153 +0,0 @@
1#ifndef _LTT_DEBUGFS_ABI_H
2#define _LTT_DEBUGFS_ABI_H
3
4/*
5 * ltt-debugfs-abi.h
6 *
7 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * LTTng debugfs ABI header
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include <linux/fs.h>
15
16#define LTTNG_SYM_NAME_LEN 256
17
18enum lttng_kernel_instrumentation {
19 LTTNG_KERNEL_TRACEPOINT = 0,
20 LTTNG_KERNEL_KPROBE = 1,
21 LTTNG_KERNEL_FUNCTION = 2,
22 LTTNG_KERNEL_KRETPROBE = 3,
23 LTTNG_KERNEL_NOOP = 4, /* not hooked */
24 LTTNG_KERNEL_SYSCALL = 5,
25};
26
27/*
28 * LTTng consumer mode
29 */
30enum lttng_kernel_output {
31 LTTNG_KERNEL_SPLICE = 0,
32 LTTNG_KERNEL_MMAP = 1,
33};
34
35/*
36 * LTTng DebugFS ABI structures.
37 */
38
39struct lttng_kernel_channel {
40 int overwrite; /* 1: overwrite, 0: discard */
41 uint64_t subbuf_size; /* in bytes */
42 uint64_t num_subbuf;
43 unsigned int switch_timer_interval; /* usecs */
44 unsigned int read_timer_interval; /* usecs */
45 enum lttng_kernel_output output; /* splice, mmap */
46};
47
48struct lttng_kernel_kretprobe {
49 uint64_t addr;
50
51 uint64_t offset;
52 char symbol_name[LTTNG_SYM_NAME_LEN];
53};
54
55/*
56 * Either addr is used, or symbol_name and offset.
57 */
58struct lttng_kernel_kprobe {
59 uint64_t addr;
60
61 uint64_t offset;
62 char symbol_name[LTTNG_SYM_NAME_LEN];
63};
64
65struct lttng_kernel_function_tracer {
66 char symbol_name[LTTNG_SYM_NAME_LEN];
67};
68
69/*
70 * For syscall tracing, name = '\0' means "enable all".
71 */
72struct lttng_kernel_event {
73 char name[LTTNG_SYM_NAME_LEN]; /* event name */
74 enum lttng_kernel_instrumentation instrumentation;
75 /* Per instrumentation type configuration */
76 union {
77 struct lttng_kernel_kretprobe kretprobe;
78 struct lttng_kernel_kprobe kprobe;
79 struct lttng_kernel_function_tracer ftrace;
80 } u;
81};
82
83struct lttng_kernel_tracer_version {
84 uint32_t version;
85 uint32_t patchlevel;
86 uint32_t sublevel;
87};
88
89enum lttng_kernel_calibrate_type {
90 LTTNG_KERNEL_CALIBRATE_KRETPROBE,
91};
92
93struct lttng_kernel_calibrate {
94 enum lttng_kernel_calibrate_type type; /* type (input) */
95};
96
97enum lttng_kernel_context_type {
98 LTTNG_KERNEL_CONTEXT_PID = 0,
99 LTTNG_KERNEL_CONTEXT_PERF_COUNTER = 1,
100 LTTNG_KERNEL_CONTEXT_PROCNAME = 2,
101 LTTNG_KERNEL_CONTEXT_PRIO = 3,
102 LTTNG_KERNEL_CONTEXT_NICE = 4,
103 LTTNG_KERNEL_CONTEXT_VPID = 5,
104 LTTNG_KERNEL_CONTEXT_TID = 6,
105 LTTNG_KERNEL_CONTEXT_VTID = 7,
106 LTTNG_KERNEL_CONTEXT_PPID = 8,
107 LTTNG_KERNEL_CONTEXT_VPPID = 9,
108};
109
110struct lttng_kernel_perf_counter_ctx {
111 uint32_t type;
112 uint64_t config;
113 char name[LTTNG_SYM_NAME_LEN];
114};
115
116struct lttng_kernel_context {
117 enum lttng_kernel_context_type ctx;
118 union {
119 struct lttng_kernel_perf_counter_ctx perf_counter;
120 } u;
121};
122
123/* LTTng file descriptor ioctl */
124#define LTTNG_KERNEL_SESSION _IO(0xF6, 0x40)
125#define LTTNG_KERNEL_TRACER_VERSION \
126 _IOR(0xF6, 0x41, struct lttng_kernel_tracer_version)
127#define LTTNG_KERNEL_TRACEPOINT_LIST _IO(0xF6, 0x42)
128#define LTTNG_KERNEL_WAIT_QUIESCENT _IO(0xF6, 0x43)
129#define LTTNG_KERNEL_CALIBRATE \
130 _IOWR(0xF6, 0x44, struct lttng_kernel_calibrate)
131
132/* Session FD ioctl */
133#define LTTNG_KERNEL_METADATA \
134 _IOW(0xF6, 0x50, struct lttng_kernel_channel)
135#define LTTNG_KERNEL_CHANNEL \
136 _IOW(0xF6, 0x51, struct lttng_kernel_channel)
137#define LTTNG_KERNEL_SESSION_START _IO(0xF6, 0x52)
138#define LTTNG_KERNEL_SESSION_STOP _IO(0xF6, 0x53)
139
140/* Channel FD ioctl */
141#define LTTNG_KERNEL_STREAM _IO(0xF6, 0x60)
142#define LTTNG_KERNEL_EVENT \
143 _IOW(0xF6, 0x61, struct lttng_kernel_event)
144
145/* Event and Channel FD ioctl */
146#define LTTNG_KERNEL_CONTEXT \
147 _IOW(0xF6, 0x70, struct lttng_kernel_context)
148
149/* Event, Channel and Session ioctl */
150#define LTTNG_KERNEL_ENABLE _IO(0xF6, 0x80)
151#define LTTNG_KERNEL_DISABLE _IO(0xF6, 0x81)
152
153#endif /* _LTT_DEBUGFS_ABI_H */
diff --git a/drivers/staging/lttng/ltt-endian.h b/drivers/staging/lttng/ltt-endian.h
deleted file mode 100644
index 9a0512d2c202..000000000000
--- a/drivers/staging/lttng/ltt-endian.h
+++ /dev/null
@@ -1,31 +0,0 @@
#ifndef _LTT_ENDIAN_H
#define _LTT_ENDIAN_H

/*
 * ltt-endian.h
 *
 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Maps the userspace <endian.h> __BYTE_ORDER convention onto the kernel
 * byte-order macros, so shared code can test __BYTE_ORDER in both worlds.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#ifdef __KERNEL__
# include <asm/byteorder.h>
# ifdef __BIG_ENDIAN
#  define __BYTE_ORDER __BIG_ENDIAN
# elif defined(__LITTLE_ENDIAN)
#  define __BYTE_ORDER __LITTLE_ENDIAN
# else
#  error "unknown endianness"
# endif
/*
 * Provide the <endian.h>-style numeric values for whichever macro the
 * kernel did not define (expansion of __BYTE_ORDER is lazy, so defining
 * these afterwards is fine).
 */
# ifndef __BIG_ENDIAN
#  define __BIG_ENDIAN 4321
# endif
# ifndef __LITTLE_ENDIAN
#  define __LITTLE_ENDIAN 1234
# endif
#else
# include <endian.h>
#endif

#endif /* _LTT_ENDIAN_H */
diff --git a/drivers/staging/lttng/ltt-events.c b/drivers/staging/lttng/ltt-events.c
deleted file mode 100644
index 42299142ecf2..000000000000
--- a/drivers/staging/lttng/ltt-events.c
+++ /dev/null
@@ -1,1009 +0,0 @@
1/*
2 * ltt-events.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Holds LTTng per-session event registry.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/mutex.h>
14#include <linux/sched.h>
15#include <linux/slab.h>
16#include <linux/jiffies.h>
17#include "wrapper/uuid.h"
18#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
19#include "ltt-events.h"
20#include "ltt-tracer.h"
21
22static LIST_HEAD(sessions);
23static LIST_HEAD(ltt_transport_list);
24static DEFINE_MUTEX(sessions_mutex);
25static struct kmem_cache *event_cache;
26
27static void _ltt_event_destroy(struct ltt_event *event);
28static void _ltt_channel_destroy(struct ltt_channel *chan);
29static int _ltt_event_unregister(struct ltt_event *event);
30static
31int _ltt_event_metadata_statedump(struct ltt_session *session,
32 struct ltt_channel *chan,
33 struct ltt_event *event);
34static
35int _ltt_session_metadata_statedump(struct ltt_session *session);
36
/*
 * Wait for all in-flight tracepoint probe callbacks to finish.
 * Probes run under preempt-off (sched-RCU); on PREEMPT_RT they may
 * instead run under regular RCU, so wait for both grace periods there.
 */
void synchronize_trace(void)
{
	synchronize_sched();
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
}
44
45struct ltt_session *ltt_session_create(void)
46{
47 struct ltt_session *session;
48
49 mutex_lock(&sessions_mutex);
50 session = kzalloc(sizeof(struct ltt_session), GFP_KERNEL);
51 if (!session)
52 return NULL;
53 INIT_LIST_HEAD(&session->chan);
54 INIT_LIST_HEAD(&session->events);
55 uuid_le_gen(&session->uuid);
56 list_add(&session->list, &sessions);
57 mutex_unlock(&sessions_mutex);
58 return session;
59}
60
/*
 * Tear down a session: stop tracing, unregister every probe, wait for
 * in-flight probe callbacks to drain, then free events, channels and
 * the session structure itself.
 */
void ltt_session_destroy(struct ltt_session *session)
{
	struct ltt_channel *chan, *tmpchan;
	struct ltt_event *event, *tmpevent;
	int ret;

	mutex_lock(&sessions_mutex);
	/* Disable tracing before tearing instrumentation down. */
	ACCESS_ONCE(session->active) = 0;
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _ltt_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	/* _safe iteration: the destroy helpers unlink each node. */
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_ltt_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list)
		_ltt_channel_destroy(chan);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	kfree(session);
}
86
/*
 * Activate tracing on a session.
 *
 * Returns 0 on success, -EBUSY if already active, or the error from
 * the metadata statedump (in which case the session is deactivated
 * again).
 */
int ltt_session_enable(struct ltt_session *session)
{
	int ret = 0;
	struct ltt_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* 31 is reserved for the extended compact-header id. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Mark active first: the statedump refuses inactive sessions. */
	ACCESS_ONCE(session->active) = 1;
	ACCESS_ONCE(session->been_active) = 1;
	ret = _ltt_session_metadata_statedump(session);
	if (ret)
		ACCESS_ONCE(session->active) = 0;
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
120
121int ltt_session_disable(struct ltt_session *session)
122{
123 int ret = 0;
124
125 mutex_lock(&sessions_mutex);
126 if (!session->active) {
127 ret = -EBUSY;
128 goto end;
129 }
130 ACCESS_ONCE(session->active) = 0;
131end:
132 mutex_unlock(&sessions_mutex);
133 return ret;
134}
135
136int ltt_channel_enable(struct ltt_channel *channel)
137{
138 int old;
139
140 if (channel == channel->session->metadata)
141 return -EPERM;
142 old = xchg(&channel->enabled, 1);
143 if (old)
144 return -EEXIST;
145 return 0;
146}
147
148int ltt_channel_disable(struct ltt_channel *channel)
149{
150 int old;
151
152 if (channel == channel->session->metadata)
153 return -EPERM;
154 old = xchg(&channel->enabled, 0);
155 if (!old)
156 return -EEXIST;
157 return 0;
158}
159
160int ltt_event_enable(struct ltt_event *event)
161{
162 int old;
163
164 if (event->chan == event->chan->session->metadata)
165 return -EPERM;
166 old = xchg(&event->enabled, 1);
167 if (old)
168 return -EEXIST;
169 return 0;
170}
171
172int ltt_event_disable(struct ltt_event *event)
173{
174 int old;
175
176 if (event->chan == event->chan->session->metadata)
177 return -EPERM;
178 old = xchg(&event->enabled, 0);
179 if (!old)
180 return -EEXIST;
181 return 0;
182}
183
184static struct ltt_transport *ltt_transport_find(const char *name)
185{
186 struct ltt_transport *transport;
187
188 list_for_each_entry(transport, &ltt_transport_list, node) {
189 if (!strcmp(transport->name, name))
190 return transport;
191 }
192 return NULL;
193}
194
/*
 * Create a channel in a session, backed by the named transport.
 *
 * Refuses to add a channel to a session that has ever been active
 * (the per-channel header-type snapshot would be stale). Pins the
 * transport module for the lifetime of the channel.
 *
 * Returns the new channel, or NULL on any failure.
 */
struct ltt_channel *ltt_channel_create(struct ltt_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval)
{
	struct ltt_channel *chan;
	struct ltt_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active)
		goto active;	/* Refuse to add channel to active session */
	transport = ltt_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTT : Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct ltt_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create("[lttng]", chan, buf_addr,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->enabled = 1;
	chan->ops = &transport->ops;
	chan->transport = transport;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

	/* Error unwinding: each label releases what was acquired above it. */
create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
250
/*
 * Only used internally at session destruction.
 * Releases the ring buffer, drops the transport module reference,
 * unlinks the channel from the session list and frees it.
 */
static
void _ltt_channel_destroy(struct ltt_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
263
/*
 * Supports event creation while tracing session is active.
 *
 * Creates an event in @chan bound to the instrumentation described by
 * @event_param (tracepoint, kprobe, kretprobe, ftrace function probe,
 * or an internal "noop" event described by @internal_desc), registers
 * the probe, and dumps the event description to the metadata channel.
 *
 * Returns the new event, or NULL on failure (duplicate name, full
 * event-id space, allocation, registration or statedump error).
 */
struct ltt_event *ltt_event_create(struct ltt_channel *chan,
				   struct lttng_kernel_event *event_param,
				   void *filter,
				   const struct lttng_event_desc *internal_desc)
{
	struct ltt_event *event;
	int ret;

	mutex_lock(&sessions_mutex);
	/* free_event_id == -1UL means the id space is exhausted. */
	if (chan->free_event_id == -1UL)
		goto full;
	/*
	 * This is O(n^2) (for each event, the loop is called at event
	 * creation). Might require a hash if we have lots of events.
	 */
	list_for_each_entry(event, &chan->session->events, list)
		if (!strcmp(event->desc->name, event_param->name))
			goto exist;
	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event)
		goto cache_error;
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->enabled = 1;
	event->instrumentation = event_param->instrumentation;
	/* Populate ltt_event structure before tracepoint registration. */
	smp_wmb();
	switch (event_param->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Takes a reference on the matching event description. */
		event->desc = ltt_event_get(event_param->name);
		if (!event->desc)
			goto register_error;
		ret = tracepoint_probe_register(event_param->name,
				event->desc->probe_callback,
				event);
		if (ret)
			goto register_error;
		break;
	case LTTNG_KERNEL_KPROBE:
		/* Registration also fills in event->desc. */
		ret = lttng_kprobes_register(event_param->name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct ltt_event *event_return;

		/* kretprobe defines 2 events */
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return)
			goto register_error;
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 1;
		event_return->instrumentation = event_param->instrumentation;
		/*
		 * Populate ltt_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_param->name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		/* The return event is statedumped and listed here; the
		 * entry event follows the common path below. */
		ret = _ltt_event_metadata_statedump(chan->session, chan,
						    event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_FUNCTION:
		ret = lttng_ftrace_register(event_param->name,
				event_param->u.ftrace.symbol_name,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_NOOP:
		/* No probe to register; description supplied by caller. */
		event->desc = internal_desc;
		if (!event->desc)
			goto register_error;
		break;
	default:
		WARN_ON_ONCE(1);
	}
	ret = _ltt_event_metadata_statedump(chan->session, chan, event);
	if (ret)
		goto statedump_error;
	list_add(&event->list, &chan->session->events);
	mutex_unlock(&sessions_mutex);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
full:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
394
/*
 * Only used internally at session destruction.
 * Detaches the event's probe from its instrumentation point. Returns
 * 0 on success, the tracepoint-unregister error, or -EINVAL for an
 * unknown instrumentation type (after WARN).
 */
int _ltt_event_unregister(struct ltt_event *event)
{
	int ret = -EINVAL;

	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = tracepoint_probe_unregister(event->desc->name,
						  event->desc->probe_callback,
						  event);
		if (ret)
			return ret;
		break;
	case LTTNG_KERNEL_KPROBE:
		lttng_kprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		lttng_kretprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION:
		lttng_ftrace_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_NOOP:
		/* Nothing was registered for noop events. */
		ret = 0;
		break;
	default:
		WARN_ON_ONCE(1);
	}
	return ret;
}
430
/*
 * Only used internally at session destruction.
 * Drops the references taken at creation time (event description or
 * probe-module refcount), frees instrumentation-private data, unlinks
 * the event and returns it to the slab cache. The probe must already
 * have been unregistered (_ltt_event_unregister).
 */
static
void _ltt_event_destroy(struct ltt_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ltt_event_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(event->desc->owner);
		lttng_kprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION:
		module_put(event->desc->owner);
		lttng_ftrace_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
462
/*
 * We have exclusive access to our metadata buffer (protected by the
 * sessions_mutex), so we can do racy operations such as looking for
 * remaining space left in packet and write, since mutual exclusion
 * protects us from concurrent writes.
 *
 * Formats the arguments and writes the resulting string (without its
 * NUL terminator) to the session's metadata channel, splitting across
 * packets as space permits and sleeping (interruptibly, with timeout)
 * until buffer space is available.
 *
 * Returns 0 on success, -ENOMEM, -ERESTARTSYS if interrupted, or a
 * reservation error.
 */
int lttng_metadata_printf(struct ltt_session *session,
			  const char *fmt, ...)
{
	struct lib_ring_buffer_ctx ctx;
	struct ltt_channel *chan = session->metadata;
	char *str;
	int ret = 0, waitret;
	size_t len, reserve_len, pos;
	va_list ap;

	WARN_ON_ONCE(!ACCESS_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	pos = 0;

	for (pos = 0; pos < len; pos += reserve_len) {
		/* Write at most what fits in the current packet. */
		reserve_len = min_t(size_t,
				chan->ops->packet_avail_size(chan->chan),
				len - pos);
		lib_ring_buffer_ctx_init(&ctx, chan->chan, NULL, reserve_len,
					 sizeof(char), -1);
		/*
		 * We don't care about metadata buffer's records lost
		 * count, because we always retry here. Report error if
		 * we need to bail out after timeout or being
		 * interrupted.
		 */
		/*
		 * NOTE(review): the wake condition "ret != -ENOBUFS || !ret"
		 * reduces to "ret != -ENOBUFS" — i.e. keep sleeping only
		 * while the reserve fails for lack of buffer space.
		 */
		waitret = wait_event_interruptible_timeout(*chan->ops->get_writer_buf_wait_queue(chan->chan, -1),
			({
				ret = chan->ops->event_reserve(&ctx, 0);
				ret != -ENOBUFS || !ret;
			}),
			msecs_to_jiffies(LTTNG_METADATA_TIMEOUT_MSEC));
		if (!waitret || waitret == -ERESTARTSYS || ret) {
			printk(KERN_WARNING "LTTng: Failure to write metadata to buffers (%s)\n",
				waitret == -ERESTARTSYS ? "interrupted" :
					(ret == -ENOBUFS ? "timeout" : "I/O error"));
			if (waitret == -ERESTARTSYS)
				ret = waitret;
			goto end;
		}
		chan->ops->event_write(&ctx, &str[pos], reserve_len);
		chan->ops->event_commit(&ctx);
	}
end:
	kfree(str);
	return ret;
}
523
/*
 * Emit the CTF metadata declaration for one event field.
 * Handles integers, enums, fixed-size arrays, length-prefixed
 * sequences and strings. Returns 0 on success or the error from
 * lttng_metadata_printf; -EINVAL for an unknown abstract type.
 */
static
int _ltt_field_statedump(struct ltt_session *session,
			 const struct lttng_event_field *field)
{
	int ret = 0;

	switch (field->type.atype) {
	case atype_integer:
		/* byte_order is only emitted when it differs from native. */
		ret = lttng_metadata_printf(session,
			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
			field->type.u.basic.integer.size,
			field->type.u.basic.integer.alignment,
			field->type.u.basic.integer.signedness,
			(field->type.u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII",
			field->type.u.basic.integer.base,
#ifdef __BIG_ENDIAN
			field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name);
		break;
	case atype_enum:
		/* Enum type itself is declared elsewhere; reference by name. */
		ret = lttng_metadata_printf(session,
			"		%s _%s;\n",
			field->type.u.basic.enumeration.name,
			field->name);
		break;
	case atype_array:
	{
		const struct lttng_basic_type *elem_type;

		elem_type = &field->type.u.array.elem_type;
		ret = lttng_metadata_printf(session,
			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
			elem_type->u.basic.integer.size,
			elem_type->u.basic.integer.alignment,
			elem_type->u.basic.integer.signedness,
			(elem_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII",
			elem_type->u.basic.integer.base,
#ifdef __BIG_ENDIAN
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name, field->type.u.array.length);
		break;
	}
	case atype_sequence:
	{
		const struct lttng_basic_type *elem_type;
		const struct lttng_basic_type *length_type;

		elem_type = &field->type.u.sequence.elem_type;
		length_type = &field->type.u.sequence.length_type;
		/* A sequence is two members: a length field, then the data. */
		ret = lttng_metadata_printf(session,
			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
			length_type->u.basic.integer.size,
			(unsigned int) length_type->u.basic.integer.alignment,
			length_type->u.basic.integer.signedness,
			(length_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII"),
			length_type->u.basic.integer.base,
#ifdef __BIG_ENDIAN
			length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name);
		if (ret)
			return ret;

		ret = lttng_metadata_printf(session,
			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
			elem_type->u.basic.integer.size,
			(unsigned int) elem_type->u.basic.integer.alignment,
			elem_type->u.basic.integer.signedness,
			(elem_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII"),
			elem_type->u.basic.integer.base,
#ifdef __BIG_ENDIAN
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name,
			field->name);
		break;
	}

	case atype_string:
		/* Default encoding is UTF8 */
		ret = lttng_metadata_printf(session,
			"		string%s _%s;\n",
			field->type.u.basic.string.encoding == lttng_encode_ASCII ?
				" { encoding = ASCII; }" : "",
			field->name);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	return ret;
}
642
643static
644int _ltt_context_metadata_statedump(struct ltt_session *session,
645 struct lttng_ctx *ctx)
646{
647 int ret = 0;
648 int i;
649
650 if (!ctx)
651 return 0;
652 for (i = 0; i < ctx->nr_fields; i++) {
653 const struct lttng_ctx_field *field = &ctx->fields[i];
654
655 ret = _ltt_field_statedump(session, &field->event_field);
656 if (ret)
657 return ret;
658 }
659 return ret;
660}
661
662static
663int _ltt_fields_metadata_statedump(struct ltt_session *session,
664 struct ltt_event *event)
665{
666 const struct lttng_event_desc *desc = event->desc;
667 int ret = 0;
668 int i;
669
670 for (i = 0; i < desc->nr_fields; i++) {
671 const struct lttng_event_field *field = &desc->fields[i];
672
673 ret = _ltt_field_statedump(session, field);
674 if (ret)
675 return ret;
676 }
677 return ret;
678}
679
/*
 * Emit the CTF "event" metadata block for one event: name/id/stream,
 * optional context struct, then the payload fields struct.
 * Idempotent: skips events already dumped, inactive sessions, and the
 * metadata channel itself (which is never described).
 */
static
int _ltt_event_metadata_statedump(struct ltt_session *session,
				  struct ltt_channel *chan,
				  struct ltt_event *event)
{
	int ret = 0;

	if (event->metadata_dumped || !ACCESS_ONCE(session->active))
		return 0;
	if (chan == session->metadata)
		return 0;

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = %s;\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Context struct is only opened/closed when a context exists. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _ltt_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _ltt_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Mark dumped only after every write succeeded. */
	event->metadata_dumped = 1;
end:
	return ret;

}
744
745static
746int _ltt_channel_metadata_statedump(struct ltt_session *session,
747 struct ltt_channel *chan)
748{
749 int ret = 0;
750
751 if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
752 return 0;
753 if (chan == session->metadata)
754 return 0;
755
756 WARN_ON_ONCE(!chan->header_type);
757 ret = lttng_metadata_printf(session,
758 "stream {\n"
759 " id = %u;\n"
760 " event.header := %s;\n"
761 " packet.context := struct packet_context;\n",
762 chan->id,
763 chan->header_type == 1 ? "struct event_header_compact" :
764 "struct event_header_large");
765 if (ret)
766 goto end;
767
768 if (chan->ctx) {
769 ret = lttng_metadata_printf(session,
770 " event.context := struct {\n");
771 if (ret)
772 goto end;
773 }
774 ret = _ltt_context_metadata_statedump(session, chan->ctx);
775 if (ret)
776 goto end;
777 if (chan->ctx) {
778 ret = lttng_metadata_printf(session,
779 " };\n");
780 if (ret)
781 goto end;
782 }
783
784 ret = lttng_metadata_printf(session,
785 "};\n\n");
786
787 chan->metadata_dumped = 1;
788end:
789 return ret;
790}
791
792static
793int _ltt_stream_packet_context_declare(struct ltt_session *session)
794{
795 return lttng_metadata_printf(session,
796 "struct packet_context {\n"
797 " uint64_t timestamp_begin;\n"
798 " uint64_t timestamp_end;\n"
799 " uint32_t events_discarded;\n"
800 " uint32_t content_size;\n"
801 " uint32_t packet_size;\n"
802 " uint32_t cpu_id;\n"
803 "};\n\n"
804 );
805}
806
/*
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Declares both event header variants; which one a stream uses is
 * selected per channel at session enable (header_type).
 */
static
int _ltt_event_header_declare(struct ltt_session *session)
{
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	ltt_alignof(uint32_t) * CHAR_BIT,
	ltt_alignof(uint16_t) * CHAR_BIT
	);
}
849
/*
 * Output metadata into this session's metadata buffers.
 *
 * On first activation, emits the trace-wide preamble (type aliases,
 * trace block with UUID and byte order, packet context and event
 * header declarations); on every call, dumps any channels and events
 * not yet described. Caller holds sessions_mutex.
 */
static
int _ltt_session_metadata_statedump(struct ltt_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	unsigned char uuid_s[37];	/* 36-char UUID string + NUL */
	struct ltt_channel *chan;
	struct ltt_event *event;
	int ret = 0;

	if (!ACCESS_ONCE(session->active))
		return 0;
	if (session->metadata_dumped)
		goto skip_session;
	if (!session->metadata) {
		printk(KERN_WARNING "LTTng: attempt to start tracing, but metadata channel is not found. Operation abort.\n");
		return -EPERM;
	}

	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"	};\n"
		"};\n\n",
		ltt_alignof(uint8_t) * CHAR_BIT,
		ltt_alignof(uint16_t) * CHAR_BIT,
		ltt_alignof(uint32_t) * CHAR_BIT,
		ltt_alignof(uint64_t) * CHAR_BIT,
		CTF_VERSION_MAJOR,
		CTF_VERSION_MINOR,
		uuid_s,
#ifdef __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	ret = _ltt_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _ltt_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* These helpers skip anything already dumped, so re-runs are cheap. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _ltt_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _ltt_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	return ret;
}
937
/**
 * ltt_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The module calling this registration function must ensure that no
 * trap-inducing code will be executed by the transport functions. E.g.
 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
 * is made visible to the transport function. This registration acts as a
 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
 * after its registration must it synchronize the TLBs.
 */
void ltt_transport_register(struct ltt_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
	wrapper_vmalloc_sync_all();

	/* The transport list shares sessions_mutex for protection. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &ltt_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(ltt_transport_register);
964
965/**
966 * ltt_transport_unregister - LTT transport unregistration
967 * @transport: transport structure
968 */
969void ltt_transport_unregister(struct ltt_transport *transport)
970{
971 mutex_lock(&sessions_mutex);
972 list_del(&transport->node);
973 mutex_unlock(&sessions_mutex);
974}
975EXPORT_SYMBOL_GPL(ltt_transport_unregister);
976
/*
 * Module init: create the slab cache for ltt_event objects, then set
 * up the debugfs ABI. The cache is torn down again if ABI init fails.
 */
static int __init ltt_events_init(void)
{
	int ret;

	event_cache = KMEM_CACHE(ltt_event, 0);
	if (!event_cache)
		return -ENOMEM;
	ret = ltt_debugfs_abi_init();
	if (ret)
		goto error_abi;
	return 0;
error_abi:
	kmem_cache_destroy(event_cache);
	return ret;
}

module_init(ltt_events_init);
994
/*
 * Module exit: remove the debugfs ABI first so no new sessions can be
 * created, destroy any remaining sessions, then free the event cache.
 */
static void __exit ltt_events_exit(void)
{
	struct ltt_session *session, *tmpsession;

	ltt_debugfs_abi_exit();
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		ltt_session_destroy(session);
	kmem_cache_destroy(event_cache);
}

module_exit(ltt_events_exit);
1006
1007MODULE_LICENSE("GPL and additional rights");
1008MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
1009MODULE_DESCRIPTION("LTTng Events");
diff --git a/drivers/staging/lttng/ltt-events.h b/drivers/staging/lttng/ltt-events.h
deleted file mode 100644
index c370ca68c15c..000000000000
--- a/drivers/staging/lttng/ltt-events.h
+++ /dev/null
@@ -1,452 +0,0 @@
1#ifndef _LTT_EVENTS_H
2#define _LTT_EVENTS_H
3
4/*
5 * ltt-events.h
6 *
7 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Holds LTTng per-session event registry.
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include <linux/list.h>
15#include <linux/kprobes.h>
16#include "wrapper/uuid.h"
17#include "ltt-debugfs-abi.h"
18
19#undef is_signed_type
20#define is_signed_type(type) (((type)(-1)) < 0)
21
22struct ltt_channel;
23struct ltt_session;
24struct lib_ring_buffer_ctx;
25struct perf_event;
26struct perf_event_attr;
27
/* Type description */

/* Update the abstract_types name table in lttng-types.c along with this enum */
enum abstract_types {
	atype_integer,
	atype_enum,
	atype_array,
	atype_sequence,
	atype_string,
	NR_ABSTRACT_TYPES,	/* must stay last: counts the entries above */
};

/* Update the string_encodings name table in lttng-types.c along with this enum */
enum lttng_string_encodings {
	lttng_encode_none = 0,
	lttng_encode_UTF8 = 1,
	lttng_encode_ASCII = 2,
	NR_STRING_ENCODINGS,	/* must stay last: counts the entries above */
};
47
48struct lttng_enum_entry {
49 unsigned long long start, end; /* start and end are inclusive */
50 const char *string;
51};
52
53#define __type_integer(_type, _byte_order, _base, _encoding) \
54 { \
55 .atype = atype_integer, \
56 .u.basic.integer = \
57 { \
58 .size = sizeof(_type) * CHAR_BIT, \
59 .alignment = ltt_alignof(_type) * CHAR_BIT, \
60 .signedness = is_signed_type(_type), \
61 .reverse_byte_order = _byte_order != __BYTE_ORDER, \
62 .base = _base, \
63 .encoding = lttng_encode_##_encoding, \
64 }, \
65 } \
66
67struct lttng_integer_type {
68 unsigned int size; /* in bits */
69 unsigned short alignment; /* in bits */
70 uint signedness:1;
71 uint reverse_byte_order:1;
72 unsigned int base; /* 2, 8, 10, 16, for pretty print */
73 enum lttng_string_encodings encoding;
74};
75
76union _lttng_basic_type {
77 struct lttng_integer_type integer;
78 struct {
79 const char *name;
80 } enumeration;
81 struct {
82 enum lttng_string_encodings encoding;
83 } string;
84};
85
86struct lttng_basic_type {
87 enum abstract_types atype;
88 union {
89 union _lttng_basic_type basic;
90 } u;
91};
92
93struct lttng_type {
94 enum abstract_types atype;
95 union {
96 union _lttng_basic_type basic;
97 struct {
98 struct lttng_basic_type elem_type;
99 unsigned int length; /* num. elems. */
100 } array;
101 struct {
102 struct lttng_basic_type length_type;
103 struct lttng_basic_type elem_type;
104 } sequence;
105 } u;
106};
107
108struct lttng_enum {
109 const char *name;
110 struct lttng_type container_type;
111 const struct lttng_enum_entry *entries;
112 unsigned int len;
113};
114
115/* Event field description */
116
117struct lttng_event_field {
118 const char *name;
119 struct lttng_type type;
120};
121
122/*
123 * We need to keep this perf counter field separately from struct
124 * lttng_ctx_field because cpu hotplug needs fixed-location addresses.
125 */
126struct lttng_perf_counter_field {
127 struct notifier_block nb;
128 int hp_enable;
129 struct perf_event_attr *attr;
130 struct perf_event **e; /* per-cpu array */
131};
132
133struct lttng_ctx_field {
134 struct lttng_event_field event_field;
135 size_t (*get_size)(size_t offset);
136 void (*record)(struct lttng_ctx_field *field,
137 struct lib_ring_buffer_ctx *ctx,
138 struct ltt_channel *chan);
139 union {
140 struct lttng_perf_counter_field *perf_counter;
141 } u;
142 void (*destroy)(struct lttng_ctx_field *field);
143};
144
145struct lttng_ctx {
146 struct lttng_ctx_field *fields;
147 unsigned int nr_fields;
148 unsigned int allocated_fields;
149};
150
151struct lttng_event_desc {
152 const char *name;
153 void *probe_callback;
154 const struct lttng_event_ctx *ctx; /* context */
155 const struct lttng_event_field *fields; /* event payload */
156 unsigned int nr_fields;
157 struct module *owner;
158};
159
160struct lttng_probe_desc {
161 const struct lttng_event_desc **event_desc;
162 unsigned int nr_events;
163 struct list_head head; /* chain registered probes */
164};
165
166struct lttng_krp; /* Kretprobe handling */
167
168/*
169 * ltt_event structure is referred to by the tracing fast path. It must be
170 * kept small.
171 */
172struct ltt_event {
173 unsigned int id;
174 struct ltt_channel *chan;
175 int enabled;
176 const struct lttng_event_desc *desc;
177 void *filter;
178 struct lttng_ctx *ctx;
179 enum lttng_kernel_instrumentation instrumentation;
180 union {
181 struct {
182 struct kprobe kp;
183 char *symbol_name;
184 } kprobe;
185 struct {
186 struct lttng_krp *lttng_krp;
187 char *symbol_name;
188 } kretprobe;
189 struct {
190 char *symbol_name;
191 } ftrace;
192 } u;
193 struct list_head list; /* Event list */
194 uint metadata_dumped:1;
195};
196
197struct ltt_channel_ops {
198 struct channel *(*channel_create)(const char *name,
199 struct ltt_channel *ltt_chan,
200 void *buf_addr,
201 size_t subbuf_size, size_t num_subbuf,
202 unsigned int switch_timer_interval,
203 unsigned int read_timer_interval);
204 void (*channel_destroy)(struct channel *chan);
205 struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
206 int (*buffer_has_read_closed_stream)(struct channel *chan);
207 void (*buffer_read_close)(struct lib_ring_buffer *buf);
208 int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
209 uint32_t event_id);
210 void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
211 void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
212 size_t len);
213 void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
214 const void *src, size_t len);
215 void (*event_memset)(struct lib_ring_buffer_ctx *ctx,
216 int c, size_t len);
217 /*
218 * packet_avail_size returns the available size in the current
219 * packet. Note that the size returned is only a hint, since it
220 * may change due to concurrent writes.
221 */
222 size_t (*packet_avail_size)(struct channel *chan);
223 wait_queue_head_t *(*get_writer_buf_wait_queue)(struct channel *chan, int cpu);
224 wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
225 int (*is_finalized)(struct channel *chan);
226 int (*is_disabled)(struct channel *chan);
227};
228
229struct ltt_transport {
230 char *name;
231 struct module *owner;
232 struct list_head node;
233 struct ltt_channel_ops ops;
234};
235
236struct ltt_channel {
237 unsigned int id;
238 struct channel *chan; /* Channel buffers */
239 int enabled;
240 struct lttng_ctx *ctx;
241 /* Event ID management */
242 struct ltt_session *session;
243 struct file *file; /* File associated to channel */
244 unsigned int free_event_id; /* Next event ID to allocate */
245 struct list_head list; /* Channel list */
246 struct ltt_channel_ops *ops;
247 struct ltt_transport *transport;
248 struct ltt_event **sc_table; /* for syscall tracing */
249 struct ltt_event **compat_sc_table;
250 struct ltt_event *sc_unknown; /* for unknown syscalls */
251 struct ltt_event *sc_compat_unknown;
252 struct ltt_event *sc_exit; /* for syscall exit */
253 int header_type; /* 0: unset, 1: compact, 2: large */
254 uint metadata_dumped:1;
255};
256
257struct ltt_session {
258 int active; /* Is trace session active ? */
259 int been_active; /* Has trace session been active ? */
260 struct file *file; /* File associated to session */
261 struct ltt_channel *metadata; /* Metadata channel */
262 struct list_head chan; /* Channel list head */
263 struct list_head events; /* Event list head */
264 struct list_head list; /* Session list */
265 unsigned int free_chan_id; /* Next chan ID to allocate */
266 uuid_le uuid; /* Trace session unique ID */
267 uint metadata_dumped:1;
268};
269
270struct ltt_session *ltt_session_create(void);
271int ltt_session_enable(struct ltt_session *session);
272int ltt_session_disable(struct ltt_session *session);
273void ltt_session_destroy(struct ltt_session *session);
274
275struct ltt_channel *ltt_channel_create(struct ltt_session *session,
276 const char *transport_name,
277 void *buf_addr,
278 size_t subbuf_size, size_t num_subbuf,
279 unsigned int switch_timer_interval,
280 unsigned int read_timer_interval);
281struct ltt_channel *ltt_global_channel_create(struct ltt_session *session,
282 int overwrite, void *buf_addr,
283 size_t subbuf_size, size_t num_subbuf,
284 unsigned int switch_timer_interval,
285 unsigned int read_timer_interval);
286
287struct ltt_event *ltt_event_create(struct ltt_channel *chan,
288 struct lttng_kernel_event *event_param,
289 void *filter,
290 const struct lttng_event_desc *internal_desc);
291
292int ltt_channel_enable(struct ltt_channel *channel);
293int ltt_channel_disable(struct ltt_channel *channel);
294int ltt_event_enable(struct ltt_event *event);
295int ltt_event_disable(struct ltt_event *event);
296
297void ltt_transport_register(struct ltt_transport *transport);
298void ltt_transport_unregister(struct ltt_transport *transport);
299
300void synchronize_trace(void);
301int ltt_debugfs_abi_init(void);
302void ltt_debugfs_abi_exit(void);
303
304int ltt_probe_register(struct lttng_probe_desc *desc);
305void ltt_probe_unregister(struct lttng_probe_desc *desc);
306const struct lttng_event_desc *ltt_event_get(const char *name);
307void ltt_event_put(const struct lttng_event_desc *desc);
308int ltt_probes_init(void);
309void ltt_probes_exit(void);
310
311#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
312int lttng_syscalls_register(struct ltt_channel *chan, void *filter);
313int lttng_syscalls_unregister(struct ltt_channel *chan);
314#else
315static inline int lttng_syscalls_register(struct ltt_channel *chan, void *filter)
316{
317 return -ENOSYS;
318}
319
320static inline int lttng_syscalls_unregister(struct ltt_channel *chan)
321{
322 return 0;
323}
324#endif
325
326struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
327int lttng_find_context(struct lttng_ctx *ctx, const char *name);
328void lttng_remove_context_field(struct lttng_ctx **ctx,
329 struct lttng_ctx_field *field);
330void lttng_destroy_context(struct lttng_ctx *ctx);
331int lttng_add_pid_to_ctx(struct lttng_ctx **ctx);
332int lttng_add_procname_to_ctx(struct lttng_ctx **ctx);
333int lttng_add_prio_to_ctx(struct lttng_ctx **ctx);
334int lttng_add_nice_to_ctx(struct lttng_ctx **ctx);
335int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx);
336int lttng_add_tid_to_ctx(struct lttng_ctx **ctx);
337int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx);
338int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx);
339int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx);
340#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
341int lttng_add_perf_counter_to_ctx(uint32_t type,
342 uint64_t config,
343 const char *name,
344 struct lttng_ctx **ctx);
345#else
346static inline
347int lttng_add_perf_counter_to_ctx(uint32_t type,
348 uint64_t config,
349 const char *name,
350 struct lttng_ctx **ctx)
351{
352 return -ENOSYS;
353}
354#endif
355
356#ifdef CONFIG_KPROBES
357int lttng_kprobes_register(const char *name,
358 const char *symbol_name,
359 uint64_t offset,
360 uint64_t addr,
361 struct ltt_event *event);
362void lttng_kprobes_unregister(struct ltt_event *event);
363void lttng_kprobes_destroy_private(struct ltt_event *event);
364#else
365static inline
366int lttng_kprobes_register(const char *name,
367 const char *symbol_name,
368 uint64_t offset,
369 uint64_t addr,
370 struct ltt_event *event)
371{
372 return -ENOSYS;
373}
374
375static inline
376void lttng_kprobes_unregister(struct ltt_event *event)
377{
378}
379
380static inline
381void lttng_kprobes_destroy_private(struct ltt_event *event)
382{
383}
384#endif
385
386#ifdef CONFIG_KRETPROBES
387int lttng_kretprobes_register(const char *name,
388 const char *symbol_name,
389 uint64_t offset,
390 uint64_t addr,
391 struct ltt_event *event_entry,
392 struct ltt_event *event_exit);
393void lttng_kretprobes_unregister(struct ltt_event *event);
394void lttng_kretprobes_destroy_private(struct ltt_event *event);
395#else
396static inline
397int lttng_kretprobes_register(const char *name,
398 const char *symbol_name,
399 uint64_t offset,
400 uint64_t addr,
401 struct ltt_event *event_entry,
402 struct ltt_event *event_exit)
403{
404 return -ENOSYS;
405}
406
407static inline
408void lttng_kretprobes_unregister(struct ltt_event *event)
409{
410}
411
412static inline
413void lttng_kretprobes_destroy_private(struct ltt_event *event)
414{
415}
416#endif
417
418#ifdef CONFIG_DYNAMIC_FTRACE
419int lttng_ftrace_register(const char *name,
420 const char *symbol_name,
421 struct ltt_event *event);
422void lttng_ftrace_unregister(struct ltt_event *event);
423void lttng_ftrace_destroy_private(struct ltt_event *event);
424#else
425static inline
426int lttng_ftrace_register(const char *name,
427 const char *symbol_name,
428 struct ltt_event *event)
429{
430 return -ENOSYS;
431}
432
433static inline
434void lttng_ftrace_unregister(struct ltt_event *event)
435{
436}
437
438static inline
439void lttng_ftrace_destroy_private(struct ltt_event *event)
440{
441}
442#endif
443
444int lttng_calibrate(struct lttng_kernel_calibrate *calibrate);
445
446extern const struct file_operations lttng_tracepoint_list_fops;
447
448#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
449#define TRACEPOINT_HAS_DATA_ARG
450#endif
451
452#endif /* _LTT_EVENTS_H */
diff --git a/drivers/staging/lttng/ltt-probes.c b/drivers/staging/lttng/ltt-probes.c
deleted file mode 100644
index 81dcbd715df0..000000000000
--- a/drivers/staging/lttng/ltt-probes.c
+++ /dev/null
@@ -1,164 +0,0 @@
1/*
2 * ltt-probes.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Holds LTTng probes registry.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/mutex.h>
14#include <linux/seq_file.h>
15
16#include "ltt-events.h"
17
18static LIST_HEAD(probe_list);
19static DEFINE_MUTEX(probe_mutex);
20
21static
22const struct lttng_event_desc *find_event(const char *name)
23{
24 struct lttng_probe_desc *probe_desc;
25 int i;
26
27 list_for_each_entry(probe_desc, &probe_list, head) {
28 for (i = 0; i < probe_desc->nr_events; i++) {
29 if (!strcmp(probe_desc->event_desc[i]->name, name))
30 return probe_desc->event_desc[i];
31 }
32 }
33 return NULL;
34}
35
36int ltt_probe_register(struct lttng_probe_desc *desc)
37{
38 int ret = 0;
39 int i;
40
41 mutex_lock(&probe_mutex);
42 /*
43 * TODO: This is O(N^2). Turn into a hash table when probe registration
44 * overhead becomes an issue.
45 */
46 for (i = 0; i < desc->nr_events; i++) {
47 if (find_event(desc->event_desc[i]->name)) {
48 ret = -EEXIST;
49 goto end;
50 }
51 }
52 list_add(&desc->head, &probe_list);
53end:
54 mutex_unlock(&probe_mutex);
55 return ret;
56}
57EXPORT_SYMBOL_GPL(ltt_probe_register);
58
/* Remove a probe descriptor from the registry, serialized by probe_mutex. */
void ltt_probe_unregister(struct lttng_probe_desc *desc)
{
	mutex_lock(&probe_mutex);
	list_del(&desc->head);
	mutex_unlock(&probe_mutex);
}
EXPORT_SYMBOL_GPL(ltt_probe_unregister);
66
67const struct lttng_event_desc *ltt_event_get(const char *name)
68{
69 const struct lttng_event_desc *event;
70 int ret;
71
72 mutex_lock(&probe_mutex);
73 event = find_event(name);
74 mutex_unlock(&probe_mutex);
75 if (!event)
76 return NULL;
77 ret = try_module_get(event->owner);
78 WARN_ON_ONCE(!ret);
79 return event;
80}
81EXPORT_SYMBOL_GPL(ltt_event_get);
82
/* Drop the module reference taken by ltt_event_get(). */
void ltt_event_put(const struct lttng_event_desc *event)
{
	module_put(event->owner);
}
EXPORT_SYMBOL_GPL(ltt_event_put);
88
89static
90void *tp_list_start(struct seq_file *m, loff_t *pos)
91{
92 struct lttng_probe_desc *probe_desc;
93 int iter = 0, i;
94
95 mutex_lock(&probe_mutex);
96 list_for_each_entry(probe_desc, &probe_list, head) {
97 for (i = 0; i < probe_desc->nr_events; i++) {
98 if (iter++ >= *pos)
99 return (void *) probe_desc->event_desc[i];
100 }
101 }
102 /* End of list */
103 return NULL;
104}
105
106static
107void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
108{
109 struct lttng_probe_desc *probe_desc;
110 int iter = 0, i;
111
112 (*ppos)++;
113 list_for_each_entry(probe_desc, &probe_list, head) {
114 for (i = 0; i < probe_desc->nr_events; i++) {
115 if (iter++ >= *ppos)
116 return (void *) probe_desc->event_desc[i];
117 }
118 }
119 /* End of list */
120 return NULL;
121}
122
/* seq_file stop: release the mutex taken in tp_list_start(). */
static
void tp_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&probe_mutex);
}
128
129static
130int tp_list_show(struct seq_file *m, void *p)
131{
132 const struct lttng_event_desc *probe_desc = p;
133
134 /*
135 * Don't export lttng internal events (metadata).
136 */
137 if (!strncmp(probe_desc->name, "lttng_", sizeof("lttng_") - 1))
138 return 0;
139 seq_printf(m, "event { name = %s; };\n",
140 probe_desc->name);
141 return 0;
142}
143
/* seq_file iterator callbacks; start/stop bracket the walk with probe_mutex. */
static
const struct seq_operations lttng_tracepoint_list_seq_ops = {
	.start = tp_list_start,
	.next = tp_list_next,
	.stop = tp_list_stop,
	.show = tp_list_show,
};

/* open() handler: bind the seq_file iterator to this file. */
static
int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracepoint_list_seq_ops);
}

/* Read-only file exposing the names of all registered tracepoint events. */
const struct file_operations lttng_tracepoint_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracepoint_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
diff --git a/drivers/staging/lttng/ltt-ring-buffer-client-discard.c b/drivers/staging/lttng/ltt-ring-buffer-client-discard.c
deleted file mode 100644
index eafcf45ad0f6..000000000000
--- a/drivers/staging/lttng/ltt-ring-buffer-client-discard.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * ltt-ring-buffer-client-discard.c
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer client (discard mode).
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include "ltt-tracer.h"
13
14#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
15#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
16#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
17#include "ltt-ring-buffer-client.h"
18
19MODULE_LICENSE("GPL and additional rights");
20MODULE_AUTHOR("Mathieu Desnoyers");
21MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
diff --git a/drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c b/drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c
deleted file mode 100644
index 29819a7352d1..000000000000
--- a/drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c
+++ /dev/null
@@ -1,21 +0,0 @@
/*
 * ltt-ring-buffer-client-mmap-discard.c
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client (discard mode, mmap output).
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>
#include "ltt-tracer.h"

/* Instantiate the ring buffer client template: discard mode, mmap output. */
#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-mmap"
#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
#include "ltt-ring-buffer-client.h"

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
diff --git a/drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c b/drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c
deleted file mode 100644
index 741aa7b463ef..000000000000
--- a/drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c
+++ /dev/null
@@ -1,21 +0,0 @@
/*
 * ltt-ring-buffer-client-mmap-overwrite.c
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client (overwrite mode, mmap output).
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>
#include "ltt-tracer.h"

/* Instantiate the ring buffer client template: overwrite mode, mmap output. */
#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-mmap"
#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
#include "ltt-ring-buffer-client.h"

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
diff --git a/drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c b/drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c
deleted file mode 100644
index 9811941aa369..000000000000
--- a/drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * ltt-ring-buffer-client-overwrite.c
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer client (overwrite mode).
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include "ltt-tracer.h"
13
14#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
15#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
16#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
17#include "ltt-ring-buffer-client.h"
18
19MODULE_LICENSE("GPL and additional rights");
20MODULE_AUTHOR("Mathieu Desnoyers");
21MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
diff --git a/drivers/staging/lttng/ltt-ring-buffer-client.h b/drivers/staging/lttng/ltt-ring-buffer-client.h
deleted file mode 100644
index 8df37901095a..000000000000
--- a/drivers/staging/lttng/ltt-ring-buffer-client.h
+++ /dev/null
@@ -1,569 +0,0 @@
1/*
2 * ltt-ring-buffer-client.h
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer client template.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include "lib/bitfield.h"
14#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
15#include "wrapper/trace-clock.h"
16#include "ltt-events.h"
17#include "ltt-tracer.h"
18#include "wrapper/ringbuffer/frontend_types.h"
19
20/*
21 * Keep the natural field alignment for _each field_ within this structure if
22 * you ever add/remove a field from this header. Packed attribute is not used
23 * because gcc generates poor code on at least powerpc and mips. Don't ever
24 * let gcc add padding between the structure elements.
25 *
26 * The guarantee we have with timestamps is that all the events in a
27 * packet are included (inclusive) within the begin/end timestamps of
28 * the packet. Another guarantee we have is that the "timestamp begin",
29 * as well as the event timestamps, are monotonically increasing (never
30 * decrease) when moving forward in a stream (physically). But this
31 * guarantee does not apply to "timestamp end", because it is sampled at
32 * commit time, which is not ordered with respect to space reservation.
33 */
34
/*
 * Packet header written at the start of each sub-buffer; layout follows the
 * natural-alignment rule documented above (no packed attribute).
 */
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[16];		/* trace session unique ID */
	uint32_t stream_id;		/* stream (channel) ID */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;	/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
					 * Events lost in this subbuffer since
					 * the beginning of the trace.
					 * (may overflow)
					 */
		uint32_t content_size;	/* Size of data in subbuffer */
		uint32_t packet_size;	/* Subbuffer size (include padding) */
		uint32_t cpu_id;	/* CPU id associated with stream */
		uint8_t header_end;	/* End of header */
	} ctx;
};
59
60
/* Timestamp source for event headers and packet boundaries. */
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}
65
66static inline
67size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
68{
69 int i;
70 size_t orig_offset = offset;
71
72 if (likely(!ctx))
73 return 0;
74 for (i = 0; i < ctx->nr_fields; i++)
75 offset += ctx->fields[i].get_size(offset);
76 return offset - orig_offset;
77}
78
79static inline
80void ctx_record(struct lib_ring_buffer_ctx *bufctx,
81 struct ltt_channel *chan,
82 struct lttng_ctx *ctx)
83{
84 int i;
85
86 if (likely(!ctx))
87 return;
88 for (i = 0; i < ctx->nr_fields; i++)
89 ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
90}
91
/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 *
 * The offset walk below must mirror exactly what ltt_write_event_header()
 * and ltt_write_event_header_slow() emit.
 */
static __inline__
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_event *event = ctx->priv;
	size_t orig_offset = offset;
	size_t padding;

	switch (ltt_chan->header_type) {
	case 1: /* compact */
		padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t); /* id and timestamp */
		} else {
			/* Minimum space taken by 5-bit id */
			offset += sizeof(uint8_t);
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
			offset += sizeof(uint32_t); /* id */
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
			offset += sizeof(uint64_t); /* timestamp */
		}
		break;
	case 2: /* large */
		padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t); /* 16-bit event id */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
			offset += sizeof(uint32_t); /* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
			offset += sizeof(uint32_t); /* id */
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
			offset += sizeof(uint64_t); /* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);	/* 0 means the header type was never set */
	}
	/* Event-level then channel-level context fields follow the header. */
	offset += ctx_get_size(offset, event->ctx);
	offset += ctx_get_size(offset, ltt_chan->ctx);

	*pre_header_padding = padding;
	return offset - orig_offset;
}
157
158#include "wrapper/ringbuffer/api.h"
159
160static
161void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
162 struct lib_ring_buffer_ctx *ctx,
163 uint32_t event_id);
164
/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 * Fast path for the common case (no rflags set); any rflag defers to
 * ltt_write_event_header_slow().
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	if (unlikely(ctx->rflags))
		goto slow_path;

	switch (ltt_chan->header_type) {
	case 1: /* compact */
	{
		uint32_t id_time = 0;

		/* Pack 5-bit event id + 27-bit timestamp into one 32-bit word. */
		bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
		bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2: /* large */
	{
		/* 16-bit id, then 32-bit aligned truncated timestamp. */
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	/* Channel-level context fields first, then event-level ones. */
	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	ltt_write_event_header_slow(config, ctx, event_id);
}
218
/*
 * ltt_write_event_header_slow - slow-path event header write.
 *
 * Handles headers with rflags set (full TSC and/or extended header). The
 * extended form stores an escape value in the compact id field (31, the
 * all-ones 5-bit value, in compact mode; 65535 in large mode) followed by
 * the full 32-bit event id and 64-bit timestamp.
 */
static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	switch (ltt_chan->header_type) {
	case 1: /* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
			bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			/* 31 in the 5-bit id field flags the extended header. */
			bt_bitfield_write(&id, uint8_t, 0, 5, 31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2: /* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			/* 65535 in the 16-bit id field flags the extended header. */
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	/* Same context/alignment epilogue as the fast path. */
	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
277
278static const struct lib_ring_buffer_config client_config;
279
/* Template callback: clock source for this client. */
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}
284
/* Template callback: forwards to the inline record_header_size() above. */
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset,
				  pre_header_padding, ctx);
}
294
/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
	/* header_end marks the first byte past the meaningful header fields. */
	return offsetof(struct packet_header, ctx.header_end);
}
306
/*
 * client_buffer_begin - initialize the packet header of a fresh sub-buffer.
 * content_size/packet_size are set to a 0xFFFFFFFF sentinel here and get
 * their real values in client_buffer_end().
 */
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_session *session = ltt_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = ltt_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.events_discarded = 0;
	header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}
328
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 *
 * Finalize the CTF packet header of a delivered sub-buffer: end
 * timestamp, content/packet sizes (both in bits, packet size padded to
 * a whole number of pages), and the sum of the three record-loss
 * counters (buffer full, nested wrap-around, oversized record).
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
	header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
351
/* Per-buffer constructor callback: nothing to set up for this client. */
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name)
{
	return 0;
}

/* Per-buffer destructor callback: nothing to tear down for this client. */
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
361
/*
 * Ring buffer configuration for this client: per-CPU buffers with
 * per-CPU synchronization, 32-bit timestamp compression, page-backed,
 * oops-consistent, IPI barrier on buffer switch, timer-driven reader
 * wakeup.  Mode and output type are supplied by the including .c file
 * through the RING_BUFFER_*_TEMPLATE macros.
 */
static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = 32,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_OUTPUT_TEMPLATE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
381
/*
 * Create a channel using this client's ring buffer configuration.
 * @ltt_chan is stored as the channel's private data (retrieved later
 * with channel_get_private()).
 */
static
struct channel *_channel_create(const char *name,
				struct ltt_channel *ltt_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	return channel_create(&client_config, name, ltt_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
}
393
/* Destroy a channel created by _channel_create(). */
static
void ltt_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}
399
400static
401struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
402{
403 struct lib_ring_buffer *buf;
404 int cpu;
405
406 for_each_channel_cpu(cpu, chan) {
407 buf = channel_get_ring_buffer(&client_config, chan, cpu);
408 if (!lib_ring_buffer_open_read(buf))
409 return buf;
410 }
411 return NULL;
412}
413
414static
415int ltt_buffer_has_read_closed_stream(struct channel *chan)
416{
417 struct lib_ring_buffer *buf;
418 int cpu;
419
420 for_each_channel_cpu(cpu, chan) {
421 buf = channel_get_ring_buffer(&client_config, chan, cpu);
422 if (!atomic_long_read(&buf->active_readers))
423 return 1;
424 }
425 return 0;
426}
427
/* Release the read side acquired by ltt_buffer_read_open(). */
static
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
433
/*
 * Reserve space for one event and write its header.
 *
 * Pins the current CPU (released either on the failure path below or in
 * ltt_event_commit()) and decides whether the extended event header is
 * needed: with the "compact" header type (1) event IDs above 30 overflow
 * the compact ID field, with the "large" type (2) IDs above 65534 do.
 *
 * Returns 0 on success, -EPERM when the CPU cannot be pinned, or the
 * lib_ring_buffer_reserve() error code.
 */
static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
		      uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);	/* unknown header type: corrupted channel state */
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (ret)
		goto put;
	ltt_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
468
/*
 * Commit the reserved slot and release the CPU pinned by
 * ltt_event_reserve().  Must follow a successful reserve.
 */
static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
475
/* Copy @len bytes of kernel memory into the reserved event slot. */
static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

/* Copy @len bytes from user space into the reserved event slot. */
static
void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
			       const void __user *src, size_t len)
{
	lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
}

/* Fill @len bytes of the reserved event slot with byte value @c. */
static
void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
		      int c, size_t len)
{
	lib_ring_buffer_memset(&client_config, ctx, c, len);
}
496
/* Wait queue woken when @cpu's buffer of @chan has data to read. */
static
wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
	struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
					chan, cpu);
	return &buf->write_wait;
}

/* Wait queue woken on CPU hotplug events affecting this channel. */
static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
510
/* Non-zero once the channel has been finalized (no more writers). */
static
int ltt_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

/* Non-zero when the channel has been disabled. */
static
int ltt_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}
522
/*
 * Transport operations registered with the LTTng core under the name
 * "relay-<mode>".  packet_avail_size is intentionally NULL: with
 * per-CPU buffers any reported value would be stale (racy) by the time
 * the caller could act on it.
 */
static struct ltt_transport ltt_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = ltt_channel_destroy,
		.buffer_read_open = ltt_buffer_read_open,
		.buffer_has_read_closed_stream =
			ltt_buffer_has_read_closed_stream,
		.buffer_read_close = ltt_buffer_read_close,
		.event_reserve = ltt_event_reserve,
		.event_commit = ltt_event_commit,
		.event_write = ltt_event_write,
		.event_write_from_user = ltt_event_write_from_user,
		.event_memset = ltt_event_memset,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
		.get_hp_wait_queue = ltt_get_hp_wait_queue,
		.is_finalized = ltt_is_finalized,
		.is_disabled = ltt_is_disabled,
	},
};
545
/* Module init: register this client's transport with the LTTng core. */
static int __init ltt_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when it is built as a module into LTTng.
	 */
	wrapper_vmalloc_sync_all();
	ltt_transport_register(&ltt_relay_transport);
	return 0;
}

module_init(ltt_ring_buffer_client_init);
558
/* Module exit: unregister the transport registered at init time. */
static void __exit ltt_ring_buffer_client_exit(void)
{
	ltt_transport_unregister(&ltt_relay_transport);
}

module_exit(ltt_ring_buffer_client_exit);
565
566MODULE_LICENSE("GPL and additional rights");
567MODULE_AUTHOR("Mathieu Desnoyers");
568MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
569 " client");
diff --git a/drivers/staging/lttng/ltt-ring-buffer-metadata-client.c b/drivers/staging/lttng/ltt-ring-buffer-metadata-client.c
deleted file mode 100644
index ac6fe78c45e5..000000000000
--- a/drivers/staging/lttng/ltt-ring-buffer-metadata-client.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * ltt-ring-buffer-metadata-client.c
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer metadta client.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include "ltt-tracer.h"
13
14#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
15#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
16#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
17#include "ltt-ring-buffer-metadata-client.h"
18
19MODULE_LICENSE("GPL and additional rights");
20MODULE_AUTHOR("Mathieu Desnoyers");
21MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
diff --git a/drivers/staging/lttng/ltt-ring-buffer-metadata-client.h b/drivers/staging/lttng/ltt-ring-buffer-metadata-client.h
deleted file mode 100644
index 529bbb19ffe7..000000000000
--- a/drivers/staging/lttng/ltt-ring-buffer-metadata-client.h
+++ /dev/null
@@ -1,330 +0,0 @@
1/*
2 * ltt-ring-buffer-client.h
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer client template.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
14#include "ltt-events.h"
15#include "ltt-tracer.h"
16
/*
 * On-the-wire CTF metadata packet header.  Field layout must match the
 * CTF specification version advertised in major/minor.
 */
struct metadata_packet_header {
	uint32_t magic;			/* 0x75D11D57 */
	uint8_t  uuid[16];		/* Unique Universal Identifier */
	uint32_t checksum;		/* 0 if unused */
	uint32_t content_size;		/* in bits */
	uint32_t packet_size;		/* in bits */
	uint8_t  compression_scheme;	/* 0 if unused */
	uint8_t  encryption_scheme;	/* 0 if unused */
	uint8_t  checksum_scheme;	/* 0 if unused */
	uint8_t  major;			/* CTF spec major version number */
	uint8_t  minor;			/* CTF spec minor version number */
	uint8_t  header_end[0];		/* offsetof() marker: end of header */
};

/* Metadata records carry no per-record header at all. */
struct metadata_record_header {
	uint8_t header_end[0];		/* End of header */
};
34
35static const struct lib_ring_buffer_config client_config;
36
/* The metadata stream is not timestamped: always report a clock of 0. */
static inline
u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return 0;
}

/*
 * Metadata records have no per-record header (see struct
 * metadata_record_header), so no space is ever reserved for one.
 */
static inline
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	return 0;
}
51
52#include "wrapper/ringbuffer/api.h"
53
54static u64 client_ring_buffer_clock_read(struct channel *chan)
55{
56 return 0;
57}
58
59static
60size_t client_record_header_size(const struct lib_ring_buffer_config *config,
61 struct channel *chan, size_t offset,
62 size_t *pre_header_padding,
63 struct lib_ring_buffer_ctx *ctx)
64{
65 return 0;
66}
67
68/**
69 * client_packet_header_size - called on buffer-switch to a new sub-buffer
70 *
71 * Return header size without padding after the structure. Don't use packed
72 * structure because gcc generates inefficient code on some architectures
73 * (powerpc, mips..)
74 */
75static size_t client_packet_header_size(void)
76{
77 return offsetof(struct metadata_packet_header, header_end);
78}
79
/*
 * Initialize the metadata packet header at the start of sub-buffer
 * @subbuf_idx.  content_size/packet_size are poisoned with 0xFFFFFFFF
 * until client_buffer_end() fills in the real values at delivery.
 */
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct metadata_packet_header *header =
		(struct metadata_packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_session *session = ltt_chan->session;

	header->magic = TSDL_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->checksum = 0;		/* 0 if unused */
	header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
	header->packet_size = 0xFFFFFFFF;  /* in bits, for debugging */
	header->compression_scheme = 0;	/* 0 if unused */
	header->encryption_scheme = 0;	/* 0 if unused */
	header->checksum_scheme = 0;	/* 0 if unused */
	header->major = CTF_SPEC_MAJOR;
	header->minor = CTF_SPEC_MINOR;
}
102
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 *
 * Finalize the metadata packet header: content/packet sizes in bits,
 * packet size padded to whole pages.  Buffer-full losses are expected
 * (and discarded) because the metadata writer waits and retries; wrap
 * or oversized-record losses would indicate a bug, hence the warning.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size)
{
	struct channel *chan = buf->backend.chan;
	struct metadata_packet_header *header =
		(struct metadata_packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->content_size = data_size * CHAR_BIT; /* in bits */
	header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
	/*
	 * We do not care about the records lost count, because the metadata
	 * channel waits and retry.
	 */
	(void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	WARN_ON_ONCE(records_lost != 0);
}
128
129static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
130 int cpu, const char *name)
131{
132 return 0;
133}
134
135static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
136{
137}
138
139static const struct lib_ring_buffer_config client_config = {
140 .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
141 .cb.record_header_size = client_record_header_size,
142 .cb.subbuffer_header_size = client_packet_header_size,
143 .cb.buffer_begin = client_buffer_begin,
144 .cb.buffer_end = client_buffer_end,
145 .cb.buffer_create = client_buffer_create,
146 .cb.buffer_finalize = client_buffer_finalize,
147
148 .tsc_bits = 0,
149 .alloc = RING_BUFFER_ALLOC_GLOBAL,
150 .sync = RING_BUFFER_SYNC_GLOBAL,
151 .mode = RING_BUFFER_MODE_TEMPLATE,
152 .backend = RING_BUFFER_PAGE,
153 .output = RING_BUFFER_OUTPUT_TEMPLATE,
154 .oops = RING_BUFFER_OOPS_CONSISTENCY,
155 .ipi = RING_BUFFER_IPI_BARRIER,
156 .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
157};
158
159static
160struct channel *_channel_create(const char *name,
161 struct ltt_channel *ltt_chan, void *buf_addr,
162 size_t subbuf_size, size_t num_subbuf,
163 unsigned int switch_timer_interval,
164 unsigned int read_timer_interval)
165{
166 return channel_create(&client_config, name, ltt_chan, buf_addr,
167 subbuf_size, num_subbuf, switch_timer_interval,
168 read_timer_interval);
169}
170
171static
172void ltt_channel_destroy(struct channel *chan)
173{
174 channel_destroy(chan);
175}
176
177static
178struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
179{
180 struct lib_ring_buffer *buf;
181
182 buf = channel_get_ring_buffer(&client_config, chan, 0);
183 if (!lib_ring_buffer_open_read(buf))
184 return buf;
185 return NULL;
186}
187
188static
189int ltt_buffer_has_read_closed_stream(struct channel *chan)
190{
191 struct lib_ring_buffer *buf;
192 int cpu;
193
194 for_each_channel_cpu(cpu, chan) {
195 buf = channel_get_ring_buffer(&client_config, chan, cpu);
196 if (!atomic_long_read(&buf->active_readers))
197 return 1;
198 }
199 return 0;
200}
201
202static
203void ltt_buffer_read_close(struct lib_ring_buffer *buf)
204{
205 lib_ring_buffer_release_read(buf);
206}
207
/*
 * Reserve space for a metadata record.  The metadata channel uses a
 * single global buffer and records have no header, so this is a plain
 * pass-through to the ring buffer reserve; @event_id is unused.
 */
static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
{
	return lib_ring_buffer_reserve(&client_config, ctx);
}
213
214static
215void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
216{
217 lib_ring_buffer_commit(&client_config, ctx);
218}
219
220static
221void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
222 size_t len)
223{
224 lib_ring_buffer_write(&client_config, ctx, src, len);
225}
226
227static
228void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
229 const void __user *src, size_t len)
230{
231 lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
232}
233
234static
235void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
236 int c, size_t len)
237{
238 lib_ring_buffer_memset(&client_config, ctx, c, len);
239}
240
/*
 * Snapshot of the space still available in the current metadata
 * sub-buffer.  Only valid for the metadata channel, which uses a single
 * global buffer.  When the write offset sits at the start of a
 * sub-buffer, the packet header that will occupy its head must be
 * deducted from the available payload space.
 */
static
size_t ltt_packet_avail_size(struct channel *chan)

{
	unsigned long o_begin;
	struct lib_ring_buffer *buf;

	buf = chan->backend.buf;	/* Only for global buffer ! */
	o_begin = v_read(&client_config, &buf->offset);
	if (subbuf_offset(o_begin, chan) != 0) {
		return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
	} else {
		return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
			- sizeof(struct metadata_packet_header);
	}
}
257
258static
259wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
260{
261 struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
262 chan, cpu);
263 return &buf->write_wait;
264}
265
266static
267wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
268{
269 return &chan->hp_wait;
270}
271
272static
273int ltt_is_finalized(struct channel *chan)
274{
275 return lib_ring_buffer_channel_is_finalized(chan);
276}
277
278static
279int ltt_is_disabled(struct channel *chan)
280{
281 return lib_ring_buffer_channel_is_disabled(chan);
282}
283
284static struct ltt_transport ltt_relay_transport = {
285 .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
286 .owner = THIS_MODULE,
287 .ops = {
288 .channel_create = _channel_create,
289 .channel_destroy = ltt_channel_destroy,
290 .buffer_read_open = ltt_buffer_read_open,
291 .buffer_has_read_closed_stream =
292 ltt_buffer_has_read_closed_stream,
293 .buffer_read_close = ltt_buffer_read_close,
294 .event_reserve = ltt_event_reserve,
295 .event_commit = ltt_event_commit,
296 .event_write_from_user = ltt_event_write_from_user,
297 .event_memset = ltt_event_memset,
298 .event_write = ltt_event_write,
299 .packet_avail_size = ltt_packet_avail_size,
300 .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
301 .get_hp_wait_queue = ltt_get_hp_wait_queue,
302 .is_finalized = ltt_is_finalized,
303 .is_disabled = ltt_is_disabled,
304 },
305};
306
307static int __init ltt_ring_buffer_client_init(void)
308{
309 /*
310 * This vmalloc sync all also takes care of the lib ring buffer
311 * vmalloc'd module pages when it is built as a module into LTTng.
312 */
313 wrapper_vmalloc_sync_all();
314 ltt_transport_register(&ltt_relay_transport);
315 return 0;
316}
317
318module_init(ltt_ring_buffer_client_init);
319
320static void __exit ltt_ring_buffer_client_exit(void)
321{
322 ltt_transport_unregister(&ltt_relay_transport);
323}
324
325module_exit(ltt_ring_buffer_client_exit);
326
327MODULE_LICENSE("GPL and additional rights");
328MODULE_AUTHOR("Mathieu Desnoyers");
329MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
330 " client");
diff --git a/drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c b/drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c
deleted file mode 100644
index 5cad3f9201ad..000000000000
--- a/drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * ltt-ring-buffer-metadata-client.c
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer metadta client.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include "ltt-tracer.h"
13
14#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
15#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata-mmap"
16#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
17#include "ltt-ring-buffer-metadata-client.h"
18
19MODULE_LICENSE("GPL and additional rights");
20MODULE_AUTHOR("Mathieu Desnoyers");
21MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
diff --git a/drivers/staging/lttng/ltt-tracer-core.h b/drivers/staging/lttng/ltt-tracer-core.h
deleted file mode 100644
index 5abc432d24a0..000000000000
--- a/drivers/staging/lttng/ltt-tracer-core.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef LTT_TRACER_CORE_H
2#define LTT_TRACER_CORE_H
3
4/*
5 * ltt-tracer-core.h
6 *
7 * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * This contains the core definitions for the Linux Trace Toolkit.
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include <linux/list.h>
15#include <linux/percpu.h>
16
17#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
18/* Align data on its natural alignment */
19#define RING_BUFFER_ALIGN
20#endif
21
22#include "wrapper/ringbuffer/config.h"
23
24struct ltt_session;
25struct ltt_channel;
26struct ltt_event;
27
28#endif /* LTT_TRACER_CORE_H */
diff --git a/drivers/staging/lttng/ltt-tracer.h b/drivers/staging/lttng/ltt-tracer.h
deleted file mode 100644
index a21c38ca3f33..000000000000
--- a/drivers/staging/lttng/ltt-tracer.h
+++ /dev/null
@@ -1,67 +0,0 @@
1#ifndef _LTT_TRACER_H
2#define _LTT_TRACER_H
3
4/*
5 * ltt-tracer.h
6 *
7 * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * This contains the definitions for the Linux Trace Toolkit tracer.
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include <stdarg.h>
15#include <linux/types.h>
16#include <linux/limits.h>
17#include <linux/list.h>
18#include <linux/cache.h>
19#include <linux/timex.h>
20#include <linux/wait.h>
21#include <asm/atomic.h>
22#include <asm/local.h>
23
24#include "wrapper/trace-clock.h"
25#include "ltt-tracer-core.h"
26#include "ltt-events.h"
27
28#define LTTNG_VERSION 0
29#define LTTNG_PATCHLEVEL 9
30#define LTTNG_SUBLEVEL 1
31
32#ifndef CHAR_BIT
33#define CHAR_BIT 8
34#endif
35
36/* Number of bytes to log with a read/write event */
37#define LTT_LOG_RW_SIZE 32L
38#define LTT_MAX_SMALL_SIZE 0xFFFFU
39
40#ifdef RING_BUFFER_ALIGN
41#define ltt_alignof(type) __alignof__(type)
42#else
43#define ltt_alignof(type) 1
44#endif
45
46/* Tracer properties */
47#define CTF_MAGIC_NUMBER 0xC1FC1FC1
48#define TSDL_MAGIC_NUMBER 0x75D11D57
49
50/* CTF specification version followed */
51#define CTF_SPEC_MAJOR 1
52#define CTF_SPEC_MINOR 8
53
54/* Tracer major/minor versions */
55#define CTF_VERSION_MAJOR 0
56#define CTF_VERSION_MINOR 1
57
58/*
59 * Number of milliseconds to retry before failing metadata writes on buffer full
60 * condition. (10 seconds)
61 */
62#define LTTNG_METADATA_TIMEOUT_MSEC 10000
63
64#define LTT_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
65#define LTT_RFLAG_END (LTT_RFLAG_EXTENDED << 1)
66
67#endif /* _LTT_TRACER_H */
diff --git a/drivers/staging/lttng/lttng-calibrate.c b/drivers/staging/lttng/lttng-calibrate.c
deleted file mode 100644
index 07e3c5b34153..000000000000
--- a/drivers/staging/lttng/lttng-calibrate.c
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * lttng-calibrate.c
3 *
4 * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng probe calibration.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include "ltt-debugfs-abi.h"
12#include "ltt-events.h"
13
/*
 * Empty function used as a stable attach point for measuring kretprobe
 * overhead.  noinline plus the empty asm barrier keep the compiler from
 * inlining or eliminating it.
 */
noinline
void lttng_calibrate_kretprobe(void)
{
	asm volatile ("");
}

/*
 * Run the calibration requested by user space through the debugfs ABI.
 * Returns 0 on success, -EINVAL for an unknown calibration type.
 */
int lttng_calibrate(struct lttng_kernel_calibrate *calibrate)
{
	switch (calibrate->type) {
	case LTTNG_KERNEL_CALIBRATE_KRETPROBE:
		lttng_calibrate_kretprobe();
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
diff --git a/drivers/staging/lttng/lttng-context-nice.c b/drivers/staging/lttng/lttng-context-nice.c
deleted file mode 100644
index 9b99b5492465..000000000000
--- a/drivers/staging/lttng/lttng-context-nice.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng nice context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include "ltt-events.h"
14#include "wrapper/ringbuffer/frontend_types.h"
15#include "wrapper/vmalloc.h"
16#include "ltt-tracer.h"
17
/*
 * Size of the serialized "nice" context field at @offset: alignment
 * padding plus one int.
 */
static
size_t nice_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, ltt_alignof(int));
	size += sizeof(int);
	return size;
}

/* Record the current task's nice value into the event slot. */
static
void nice_record(struct lttng_ctx_field *field,
		 struct lib_ring_buffer_ctx *ctx,
		 struct ltt_channel *chan)
{
	int nice;

	nice = task_nice(current);
	lib_ring_buffer_align_ctx(ctx, ltt_alignof(nice));
	chan->ops->event_write(ctx, &nice, sizeof(nice));
}
39
/*
 * Append a "nice" context field to @ctx, describing it as a signed
 * base-10 integer for the CTF metadata.
 *
 * Returns 0 on success, -ENOMEM if the field cannot be allocated,
 * -EEXIST if a "nice" field is already present.
 */
int lttng_add_nice_to_ctx(struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (lttng_find_context(*ctx, "nice")) {
		lttng_remove_context_field(ctx, field);
		return -EEXIST;
	}
	field->event_field.name = "nice";
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = nice_get_size;
	field->record = nice_record;
	/* Sync vmalloc'd module mappings before tracing touches them. */
	wrapper_vmalloc_sync_all();
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
64EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
65
66MODULE_LICENSE("GPL and additional rights");
67MODULE_AUTHOR("Mathieu Desnoyers");
68MODULE_DESCRIPTION("Linux Trace Toolkit Nice Context");
diff --git a/drivers/staging/lttng/lttng-context-perf-counters.c b/drivers/staging/lttng/lttng-context-perf-counters.c
deleted file mode 100644
index 3ae2266f948b..000000000000
--- a/drivers/staging/lttng/lttng-context-perf-counters.c
+++ /dev/null
@@ -1,271 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng performance monitoring counters (perf-counters) integration module.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/perf_event.h>
13#include <linux/list.h>
14#include <linux/string.h>
15#include "ltt-events.h"
16#include "wrapper/ringbuffer/frontend_types.h"
17#include "wrapper/vmalloc.h"
18#include "wrapper/perf.h"
19#include "ltt-tracer.h"
20
/*
 * Size of the serialized perf counter context field at @offset:
 * alignment padding plus one uint64_t.
 */
static
size_t perf_counter_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

/*
 * Read this CPU's perf counter and record its value into the event
 * slot.  Writes 0 when the counter is in error state or not (yet)
 * created for this CPU.
 */
static
void perf_counter_record(struct lttng_ctx_field *field,
			 struct lib_ring_buffer_ctx *ctx,
			 struct ltt_channel *chan)
{
	struct perf_event *event;
	uint64_t value;

	event = field->u.perf_counter->e[ctx->cpu];
	if (likely(event)) {
		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
			value = 0;
		} else {
			event->pmu->read(event);
			value = local64_read(&event->count);
		}
	} else {
		/*
		 * Perf chooses not to be clever and not to support enabling a
		 * perf counter before the cpu is brought up. Therefore, we need
		 * to support having events coming (e.g. scheduler events)
		 * before the counter is setup. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
60
/*
 * No-op overflow handler: the counters are only sampled synchronously
 * by perf_counter_record().  The callback signature lost its "nmi"
 * argument in kernel 3.1, hence the version split.
 */
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
#endif
76
/*
 * Tear down a perf counter context field: release every per-CPU counter
 * (hotplug held off while iterating), then free all associated memory.
 *
 * NOTE(review): counters are released before the hotplug notifier is
 * unregistered and hp_enable is still set; a CPU coming online in the
 * window between put_online_cpus() and unregister_cpu_notifier() would
 * re-create a counter that is never released — confirm against the
 * hotplug locking guarantees of the target kernel.
 */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		perf_event_release_kernel(events[cpu]);
	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	kfree(field->event_field.name);
	kfree(field->u.perf_counter->attr);
	kfree(events);
	kfree(field->u.perf_counter);
}
95
96#ifdef CONFIG_HOTPLUG_CPU
97
/**
 * lttng_perf_counter_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	/* Ignore notifications until setup completes (hp_enable set last). */
	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}
150
151#endif
152
/*
 * Append a perf counter context field named @name (counter selected by
 * perf @type/@config) to @ctx.  Allocates one counter per possible CPU,
 * registers a hotplug notifier so counters follow CPUs on/offline, and
 * describes the field as an unsigned 64-bit base-10 integer.
 *
 * Returns 0 on success; -ENOMEM on allocation failure, -EEXIST if a
 * field with this name exists, -EINVAL/-EBUSY if a counter cannot be
 * created.  Uses goto-chain cleanup: each label undoes exactly the
 * acquisitions made before its matching failure point.
 */
int lttng_add_perf_counter_to_ctx(uint32_t type,
				  uint64_t config,
				  const char *name,
				  struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	int cpu;
	char *name_alloc;

	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;	/* always scheduled on the PMU */
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

#ifdef CONFIG_HOTPLUG_CPU
	perf_field->nb.notifier_call =
		lttng_perf_counter_cpu_hp_callback;
	perf_field->nb.priority = 0;
	register_cpu_notifier(&perf_field->nb);
#endif

	get_online_cpus();
	for_each_online_cpu(cpu) {
		events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback);
		if (!events[cpu] || IS_ERR(events[cpu])) {
			ret = -EINVAL;
			goto counter_error;
		}
		if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
			ret = -EBUSY;
			goto counter_busy;
		}
	}
	put_online_cpus();

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = ltt_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	/* Enable hotplug handling only once setup is fully consistent. */
	perf_field->hp_enable = 1;

	wrapper_vmalloc_sync_all();
	return 0;

counter_busy:
counter_error:
	/* Release only the counters actually created (skip ERR_PTR/NULL). */
	for_each_online_cpu(cpu) {
		if (events[cpu] && !IS_ERR(events[cpu]))
			perf_event_release_kernel(events[cpu]);
	}
	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&perf_field->nb);
#endif
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	kfree(events);
	return ret;
}
268
269MODULE_LICENSE("GPL and additional rights");
270MODULE_AUTHOR("Mathieu Desnoyers");
271MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
diff --git a/drivers/staging/lttng/lttng-context-pid.c b/drivers/staging/lttng/lttng-context-pid.c
deleted file mode 100644
index 698b242245a2..000000000000
--- a/drivers/staging/lttng/lttng-context-pid.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng PID context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include "ltt-events.h"
14#include "wrapper/ringbuffer/frontend_types.h"
15#include "wrapper/vmalloc.h"
16#include "ltt-tracer.h"
17
18static
19size_t pid_get_size(size_t offset)
20{
21 size_t size = 0;
22
23 size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
24 size += sizeof(pid_t);
25 return size;
26}
27
28static
29void pid_record(struct lttng_ctx_field *field,
30 struct lib_ring_buffer_ctx *ctx,
31 struct ltt_channel *chan)
32{
33 pid_t pid;
34
35 pid = task_tgid_nr(current);
36 lib_ring_buffer_align_ctx(ctx, ltt_alignof(pid));
37 chan->ops->event_write(ctx, &pid, sizeof(pid));
38}
39
40int lttng_add_pid_to_ctx(struct lttng_ctx **ctx)
41{
42 struct lttng_ctx_field *field;
43
44 field = lttng_append_context(ctx);
45 if (!field)
46 return -ENOMEM;
47 if (lttng_find_context(*ctx, "pid")) {
48 lttng_remove_context_field(ctx, field);
49 return -EEXIST;
50 }
51 field->event_field.name = "pid";
52 field->event_field.type.atype = atype_integer;
53 field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
54 field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
55 field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
56 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
57 field->event_field.type.u.basic.integer.base = 10;
58 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
59 field->get_size = pid_get_size;
60 field->record = pid_record;
61 wrapper_vmalloc_sync_all();
62 return 0;
63}
64EXPORT_SYMBOL_GPL(lttng_add_pid_to_ctx);
65
66MODULE_LICENSE("GPL and additional rights");
67MODULE_AUTHOR("Mathieu Desnoyers");
68MODULE_DESCRIPTION("Linux Trace Toolkit PID Context");
diff --git a/drivers/staging/lttng/lttng-context-ppid.c b/drivers/staging/lttng/lttng-context-ppid.c
deleted file mode 100644
index 738f7e6882d5..000000000000
--- a/drivers/staging/lttng/lttng-context-ppid.c
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng PPID context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include <linux/syscalls.h>
14#include "ltt-events.h"
15#include "wrapper/ringbuffer/frontend_types.h"
16#include "wrapper/vmalloc.h"
17#include "ltt-tracer.h"
18
19static
20size_t ppid_get_size(size_t offset)
21{
22 size_t size = 0;
23
24 size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
25 size += sizeof(pid_t);
26 return size;
27}
28
29static
30void ppid_record(struct lttng_ctx_field *field,
31 struct lib_ring_buffer_ctx *ctx,
32 struct ltt_channel *chan)
33{
34 pid_t ppid;
35
36 rcu_read_lock();
37 ppid = task_tgid_nr(current->real_parent);
38 rcu_read_unlock();
39 lib_ring_buffer_align_ctx(ctx, ltt_alignof(ppid));
40 chan->ops->event_write(ctx, &ppid, sizeof(ppid));
41}
42
43int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx)
44{
45 struct lttng_ctx_field *field;
46
47 field = lttng_append_context(ctx);
48 if (!field)
49 return -ENOMEM;
50 if (lttng_find_context(*ctx, "ppid")) {
51 lttng_remove_context_field(ctx, field);
52 return -EEXIST;
53 }
54 field->event_field.name = "ppid";
55 field->event_field.type.atype = atype_integer;
56 field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
57 field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
58 field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
59 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
60 field->event_field.type.u.basic.integer.base = 10;
61 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
62 field->get_size = ppid_get_size;
63 field->record = ppid_record;
64 wrapper_vmalloc_sync_all();
65 return 0;
66}
67EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
68
69MODULE_LICENSE("GPL and additional rights");
70MODULE_AUTHOR("Mathieu Desnoyers");
71MODULE_DESCRIPTION("Linux Trace Toolkit PPID Context");
diff --git a/drivers/staging/lttng/lttng-context-prio.c b/drivers/staging/lttng/lttng-context-prio.c
deleted file mode 100644
index 1ee3a54daf2c..000000000000
--- a/drivers/staging/lttng/lttng-context-prio.c
+++ /dev/null
@@ -1,89 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng priority context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include "ltt-events.h"
14#include "wrapper/ringbuffer/frontend_types.h"
15#include "wrapper/vmalloc.h"
16#include "wrapper/kallsyms.h"
17#include "ltt-tracer.h"
18
19static
20int (*wrapper_task_prio_sym)(struct task_struct *t);
21
22int wrapper_task_prio_init(void)
23{
24 wrapper_task_prio_sym = (void *) kallsyms_lookup_funcptr("task_prio");
25 if (!wrapper_task_prio_sym) {
26 printk(KERN_WARNING "LTTng: task_prio symbol lookup failed.\n");
27 return -EINVAL;
28 }
29 return 0;
30}
31
32static
33size_t prio_get_size(size_t offset)
34{
35 size_t size = 0;
36
37 size += lib_ring_buffer_align(offset, ltt_alignof(int));
38 size += sizeof(int);
39 return size;
40}
41
42static
43void prio_record(struct lttng_ctx_field *field,
44 struct lib_ring_buffer_ctx *ctx,
45 struct ltt_channel *chan)
46{
47 int prio;
48
49 prio = wrapper_task_prio_sym(current);
50 lib_ring_buffer_align_ctx(ctx, ltt_alignof(prio));
51 chan->ops->event_write(ctx, &prio, sizeof(prio));
52}
53
54int lttng_add_prio_to_ctx(struct lttng_ctx **ctx)
55{
56 struct lttng_ctx_field *field;
57 int ret;
58
59 if (!wrapper_task_prio_sym) {
60 ret = wrapper_task_prio_init();
61 if (ret)
62 return ret;
63 }
64
65 field = lttng_append_context(ctx);
66 if (!field)
67 return -ENOMEM;
68 if (lttng_find_context(*ctx, "prio")) {
69 lttng_remove_context_field(ctx, field);
70 return -EEXIST;
71 }
72 field->event_field.name = "prio";
73 field->event_field.type.atype = atype_integer;
74 field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
75 field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
76 field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
77 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
78 field->event_field.type.u.basic.integer.base = 10;
79 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
80 field->get_size = prio_get_size;
81 field->record = prio_record;
82 wrapper_vmalloc_sync_all();
83 return 0;
84}
85EXPORT_SYMBOL_GPL(lttng_add_prio_to_ctx);
86
87MODULE_LICENSE("GPL and additional rights");
88MODULE_AUTHOR("Mathieu Desnoyers");
89MODULE_DESCRIPTION("Linux Trace Toolkit Priority Context");
diff --git a/drivers/staging/lttng/lttng-context-procname.c b/drivers/staging/lttng/lttng-context-procname.c
deleted file mode 100644
index c6bc6468004f..000000000000
--- a/drivers/staging/lttng/lttng-context-procname.c
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng procname context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include "ltt-events.h"
14#include "wrapper/ringbuffer/frontend_types.h"
15#include "wrapper/vmalloc.h"
16#include "ltt-tracer.h"
17
18static
19size_t procname_get_size(size_t offset)
20{
21 size_t size = 0;
22
23 size += sizeof(current->comm);
24 return size;
25}
26
/*
 * Racy read of procname. We simply copy its whole array size.
 * Races with /proc/<task>/procname write only.
 * Otherwise having to take a mutex for each event is cumbersome and
 * could lead to crash in IRQ context and deadlock of the lockdep tracer.
 */
static
void procname_record(struct lttng_ctx_field *field,
		 struct lib_ring_buffer_ctx *ctx,
		 struct ltt_channel *chan)
{
	/* Copy the full fixed-size comm buffer, terminator and all. */
	chan->ops->event_write(ctx, current->comm, sizeof(current->comm));
}
40
41int lttng_add_procname_to_ctx(struct lttng_ctx **ctx)
42{
43 struct lttng_ctx_field *field;
44
45 field = lttng_append_context(ctx);
46 if (!field)
47 return -ENOMEM;
48 if (lttng_find_context(*ctx, "procname")) {
49 lttng_remove_context_field(ctx, field);
50 return -EEXIST;
51 }
52 field->event_field.name = "procname";
53 field->event_field.type.atype = atype_array;
54 field->event_field.type.u.array.elem_type.atype = atype_integer;
55 field->event_field.type.u.array.elem_type.u.basic.integer.size = sizeof(char) * CHAR_BIT;
56 field->event_field.type.u.array.elem_type.u.basic.integer.alignment = ltt_alignof(char) * CHAR_BIT;
57 field->event_field.type.u.array.elem_type.u.basic.integer.signedness = is_signed_type(char);
58 field->event_field.type.u.array.elem_type.u.basic.integer.reverse_byte_order = 0;
59 field->event_field.type.u.array.elem_type.u.basic.integer.base = 10;
60 field->event_field.type.u.array.elem_type.u.basic.integer.encoding = lttng_encode_UTF8;
61 field->event_field.type.u.array.length = sizeof(current->comm);
62
63 field->get_size = procname_get_size;
64 field->record = procname_record;
65 wrapper_vmalloc_sync_all();
66 return 0;
67}
68EXPORT_SYMBOL_GPL(lttng_add_procname_to_ctx);
69
70MODULE_LICENSE("GPL and additional rights");
71MODULE_AUTHOR("Mathieu Desnoyers");
72MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
diff --git a/drivers/staging/lttng/lttng-context-tid.c b/drivers/staging/lttng/lttng-context-tid.c
deleted file mode 100644
index d5ccdb635c40..000000000000
--- a/drivers/staging/lttng/lttng-context-tid.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng TID context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include "ltt-events.h"
14#include "wrapper/ringbuffer/frontend_types.h"
15#include "wrapper/vmalloc.h"
16#include "ltt-tracer.h"
17
18static
19size_t tid_get_size(size_t offset)
20{
21 size_t size = 0;
22
23 size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
24 size += sizeof(pid_t);
25 return size;
26}
27
28static
29void tid_record(struct lttng_ctx_field *field,
30 struct lib_ring_buffer_ctx *ctx,
31 struct ltt_channel *chan)
32{
33 pid_t tid;
34
35 tid = task_pid_nr(current);
36 lib_ring_buffer_align_ctx(ctx, ltt_alignof(tid));
37 chan->ops->event_write(ctx, &tid, sizeof(tid));
38}
39
40int lttng_add_tid_to_ctx(struct lttng_ctx **ctx)
41{
42 struct lttng_ctx_field *field;
43
44 field = lttng_append_context(ctx);
45 if (!field)
46 return -ENOMEM;
47 if (lttng_find_context(*ctx, "tid")) {
48 lttng_remove_context_field(ctx, field);
49 return -EEXIST;
50 }
51 field->event_field.name = "tid";
52 field->event_field.type.atype = atype_integer;
53 field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
54 field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
55 field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
56 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
57 field->event_field.type.u.basic.integer.base = 10;
58 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
59 field->get_size = tid_get_size;
60 field->record = tid_record;
61 wrapper_vmalloc_sync_all();
62 return 0;
63}
64EXPORT_SYMBOL_GPL(lttng_add_tid_to_ctx);
65
66MODULE_LICENSE("GPL and additional rights");
67MODULE_AUTHOR("Mathieu Desnoyers");
68MODULE_DESCRIPTION("Linux Trace Toolkit TID Context");
diff --git a/drivers/staging/lttng/lttng-context-vpid.c b/drivers/staging/lttng/lttng-context-vpid.c
deleted file mode 100644
index 3f16e03a6d99..000000000000
--- a/drivers/staging/lttng/lttng-context-vpid.c
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng vPID context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include "ltt-events.h"
14#include "wrapper/ringbuffer/frontend_types.h"
15#include "wrapper/vmalloc.h"
16#include "ltt-tracer.h"
17
18static
19size_t vpid_get_size(size_t offset)
20{
21 size_t size = 0;
22
23 size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
24 size += sizeof(pid_t);
25 return size;
26}
27
28static
29void vpid_record(struct lttng_ctx_field *field,
30 struct lib_ring_buffer_ctx *ctx,
31 struct ltt_channel *chan)
32{
33 pid_t vpid;
34
35 /*
36 * nsproxy can be NULL when scheduled out of exit.
37 */
38 if (!current->nsproxy)
39 vpid = 0;
40 else
41 vpid = task_tgid_vnr(current);
42 lib_ring_buffer_align_ctx(ctx, ltt_alignof(vpid));
43 chan->ops->event_write(ctx, &vpid, sizeof(vpid));
44}
45
46int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx)
47{
48 struct lttng_ctx_field *field;
49
50 field = lttng_append_context(ctx);
51 if (!field)
52 return -ENOMEM;
53 if (lttng_find_context(*ctx, "vpid")) {
54 lttng_remove_context_field(ctx, field);
55 return -EEXIST;
56 }
57 field->event_field.name = "vpid";
58 field->event_field.type.atype = atype_integer;
59 field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
60 field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
61 field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
62 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
63 field->event_field.type.u.basic.integer.base = 10;
64 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
65 field->get_size = vpid_get_size;
66 field->record = vpid_record;
67 wrapper_vmalloc_sync_all();
68 return 0;
69}
70EXPORT_SYMBOL_GPL(lttng_add_vpid_to_ctx);
71
72MODULE_LICENSE("GPL and additional rights");
73MODULE_AUTHOR("Mathieu Desnoyers");
74MODULE_DESCRIPTION("Linux Trace Toolkit vPID Context");
diff --git a/drivers/staging/lttng/lttng-context-vppid.c b/drivers/staging/lttng/lttng-context-vppid.c
deleted file mode 100644
index f01b02068d67..000000000000
--- a/drivers/staging/lttng/lttng-context-vppid.c
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng vPPID context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include <linux/syscalls.h>
14#include "ltt-events.h"
15#include "wrapper/ringbuffer/frontend_types.h"
16#include "wrapper/vmalloc.h"
17#include "ltt-tracer.h"
18
19static
20size_t vppid_get_size(size_t offset)
21{
22 size_t size = 0;
23
24 size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
25 size += sizeof(pid_t);
26 return size;
27}
28
29static
30void vppid_record(struct lttng_ctx_field *field,
31 struct lib_ring_buffer_ctx *ctx,
32 struct ltt_channel *chan)
33{
34 struct task_struct *parent;
35 pid_t vppid;
36
37 /*
38 * nsproxy can be NULL when scheduled out of exit.
39 */
40 rcu_read_lock();
41 parent = rcu_dereference(current->real_parent);
42 if (!parent->nsproxy)
43 vppid = 0;
44 else
45 vppid = task_tgid_vnr(parent);
46 rcu_read_unlock();
47 lib_ring_buffer_align_ctx(ctx, ltt_alignof(vppid));
48 chan->ops->event_write(ctx, &vppid, sizeof(vppid));
49}
50
51int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx)
52{
53 struct lttng_ctx_field *field;
54
55 field = lttng_append_context(ctx);
56 if (!field)
57 return -ENOMEM;
58 if (lttng_find_context(*ctx, "vppid")) {
59 lttng_remove_context_field(ctx, field);
60 return -EEXIST;
61 }
62 field->event_field.name = "vppid";
63 field->event_field.type.atype = atype_integer;
64 field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
65 field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
66 field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
67 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
68 field->event_field.type.u.basic.integer.base = 10;
69 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
70 field->get_size = vppid_get_size;
71 field->record = vppid_record;
72 wrapper_vmalloc_sync_all();
73 return 0;
74}
75EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);
76
77MODULE_LICENSE("GPL and additional rights");
78MODULE_AUTHOR("Mathieu Desnoyers");
79MODULE_DESCRIPTION("Linux Trace Toolkit vPPID Context");
diff --git a/drivers/staging/lttng/lttng-context-vtid.c b/drivers/staging/lttng/lttng-context-vtid.c
deleted file mode 100644
index 264bbb3011da..000000000000
--- a/drivers/staging/lttng/lttng-context-vtid.c
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng vTID context.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sched.h>
13#include "ltt-events.h"
14#include "wrapper/ringbuffer/frontend_types.h"
15#include "wrapper/vmalloc.h"
16#include "ltt-tracer.h"
17
18static
19size_t vtid_get_size(size_t offset)
20{
21 size_t size = 0;
22
23 size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
24 size += sizeof(pid_t);
25 return size;
26}
27
28static
29void vtid_record(struct lttng_ctx_field *field,
30 struct lib_ring_buffer_ctx *ctx,
31 struct ltt_channel *chan)
32{
33 pid_t vtid;
34
35 /*
36 * nsproxy can be NULL when scheduled out of exit.
37 */
38 if (!current->nsproxy)
39 vtid = 0;
40 else
41 vtid = task_pid_vnr(current);
42 lib_ring_buffer_align_ctx(ctx, ltt_alignof(vtid));
43 chan->ops->event_write(ctx, &vtid, sizeof(vtid));
44}
45
46int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx)
47{
48 struct lttng_ctx_field *field;
49
50 field = lttng_append_context(ctx);
51 if (!field)
52 return -ENOMEM;
53 if (lttng_find_context(*ctx, "vtid")) {
54 lttng_remove_context_field(ctx, field);
55 return -EEXIST;
56 }
57 field->event_field.name = "vtid";
58 field->event_field.type.atype = atype_integer;
59 field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
60 field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
61 field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
62 field->event_field.type.u.basic.integer.reverse_byte_order = 0;
63 field->event_field.type.u.basic.integer.base = 10;
64 field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
65 field->get_size = vtid_get_size;
66 field->record = vtid_record;
67 wrapper_vmalloc_sync_all();
68 return 0;
69}
70EXPORT_SYMBOL_GPL(lttng_add_vtid_to_ctx);
71
72MODULE_LICENSE("GPL and additional rights");
73MODULE_AUTHOR("Mathieu Desnoyers");
74MODULE_DESCRIPTION("Linux Trace Toolkit vTID Context");
diff --git a/drivers/staging/lttng/lttng-syscalls.c b/drivers/staging/lttng/lttng-syscalls.c
deleted file mode 100644
index 16624a7f76df..000000000000
--- a/drivers/staging/lttng/lttng-syscalls.c
+++ /dev/null
@@ -1,438 +0,0 @@
1/*
2 * lttng-syscalls.c
3 *
4 * Copyright 2010-2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng syscall probes.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/compat.h>
14#include <asm/ptrace.h>
15#include <asm/syscall.h>
16
17#include "ltt-events.h"
18
#ifndef CONFIG_COMPAT
/* Without compat support, no task can be a 32-bit compat task. */
static inline int is_compat_task(void)
{
	return 0;
}
#endif
25
26static
27void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
28
29/*
30 * Take care of NOARGS not supported by mainline.
31 */
32#define DECLARE_EVENT_CLASS_NOARGS(name, tstruct, assign, print)
33#define DEFINE_EVENT_NOARGS(template, name)
34#define TRACE_EVENT_NOARGS(name, struct, assign, print)
35
36/*
37 * Create LTTng tracepoint probes.
38 */
39#define LTTNG_PACKAGE_BUILD
40#define CREATE_TRACE_POINTS
41#define TP_MODULE_OVERRIDE
42#define TRACE_INCLUDE_PATH ../instrumentation/syscalls/headers
43
44#define PARAMS(args...) args
45
46#undef TRACE_SYSTEM
47
48/* Hijack probe callback for system calls */
49#undef TP_PROBE_CB
50#define TP_PROBE_CB(_template) &syscall_entry_probe
51#define SC_TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
52 TRACE_EVENT(_name, PARAMS(_proto), PARAMS(_args),\
53 PARAMS(_struct), PARAMS(_assign), PARAMS(_printk))
54#define SC_DECLARE_EVENT_CLASS_NOARGS(_name, _struct, _assign, _printk) \
55 DECLARE_EVENT_CLASS_NOARGS(_name, PARAMS(_struct), PARAMS(_assign),\
56 PARAMS(_printk))
57#define SC_DEFINE_EVENT_NOARGS(_template, _name) \
58 DEFINE_EVENT_NOARGS(_template, _name)
59#define TRACE_SYSTEM syscalls_integers
60#include "instrumentation/syscalls/headers/syscalls_integers.h"
61#undef TRACE_SYSTEM
62#define TRACE_SYSTEM syscalls_pointers
63#include "instrumentation/syscalls/headers/syscalls_pointers.h"
64#undef TRACE_SYSTEM
65#undef SC_TRACE_EVENT
66#undef SC_DECLARE_EVENT_CLASS_NOARGS
67#undef SC_DEFINE_EVENT_NOARGS
68
69#define TRACE_SYSTEM syscalls_unknown
70#include "instrumentation/syscalls/headers/syscalls_unknown.h"
71#undef TRACE_SYSTEM
72
73/* For compat syscalls */
74#undef _TRACE_SYSCALLS_integers_H
75#undef _TRACE_SYSCALLS_pointers_H
76
77/* Hijack probe callback for system calls */
78#undef TP_PROBE_CB
79#define TP_PROBE_CB(_template) &syscall_entry_probe
80#define SC_TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
81 TRACE_EVENT(compat_##_name, PARAMS(_proto), PARAMS(_args), \
82 PARAMS(_struct), PARAMS(_assign), \
83 PARAMS(_printk))
84#define SC_DECLARE_EVENT_CLASS_NOARGS(_name, _struct, _assign, _printk) \
85 DECLARE_EVENT_CLASS_NOARGS(compat_##_name, PARAMS(_struct), \
86 PARAMS(_assign), PARAMS(_printk))
87#define SC_DEFINE_EVENT_NOARGS(_template, _name) \
88 DEFINE_EVENT_NOARGS(compat_##_template, compat_##_name)
89#define TRACE_SYSTEM compat_syscalls_integers
90#include "instrumentation/syscalls/headers/compat_syscalls_integers.h"
91#undef TRACE_SYSTEM
92#define TRACE_SYSTEM compat_syscalls_pointers
93#include "instrumentation/syscalls/headers/compat_syscalls_pointers.h"
94#undef TRACE_SYSTEM
95#undef SC_TRACE_EVENT
96#undef SC_DECLARE_EVENT_CLASS_NOARGS
97#undef SC_DEFINE_EVENT_NOARGS
98#undef TP_PROBE_CB
99
100#undef TP_MODULE_OVERRIDE
101#undef LTTNG_PACKAGE_BUILD
102#undef CREATE_TRACE_POINTS
103
/*
 * One table entry per syscall number, filled by the TRACE_SYSCALL_TABLE
 * macro expansions below.  Holes (syscalls without instrumentation)
 * have a NULL desc.
 */
struct trace_syscall_entry {
	void *func;		/* generated __event_probe__##template */
	const struct lttng_event_desc *desc;	/* event descriptor, NULL for holes */
	const struct lttng_event_field *fields;	/* generated field descriptions */
	unsigned int nrargs;	/* number of syscall arguments */
};
110
111#define CREATE_SYSCALL_TABLE
112
113#undef TRACE_SYSCALL_TABLE
114#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
115 [ _nr ] = { \
116 .func = __event_probe__##_template, \
117 .nrargs = (_nrargs), \
118 .fields = __event_fields___##_template, \
119 .desc = &__event_desc___##_name, \
120 },
121
122static const struct trace_syscall_entry sc_table[] = {
123#include "instrumentation/syscalls/headers/syscalls_integers.h"
124#include "instrumentation/syscalls/headers/syscalls_pointers.h"
125};
126
127#undef TRACE_SYSCALL_TABLE
128#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
129 [ _nr ] = { \
130 .func = __event_probe__##compat_##_template, \
131 .nrargs = (_nrargs), \
132 .fields = __event_fields___##compat_##_template,\
133 .desc = &__event_desc___##compat_##_name, \
134 },
135
136/* Create compatibility syscall table */
137const struct trace_syscall_entry compat_sc_table[] = {
138#include "instrumentation/syscalls/headers/compat_syscalls_integers.h"
139#include "instrumentation/syscalls/headers/compat_syscalls_pointers.h"
140};
141
142#undef CREATE_SYSCALL_TABLE
143
/*
 * Trace a syscall that has no dedicated event (id out of range or table
 * hole): record the raw id plus its first UNKNOWN_SYSCALL_NRARGS
 * argument words through the generated "unknown" probe.
 */
static void syscall_entry_unknown(struct ltt_event *event,
	struct pt_regs *regs, unsigned int id)
{
	unsigned long args[UNKNOWN_SYSCALL_NRARGS];

	syscall_get_arguments(current, regs, 0, UNKNOWN_SYSCALL_NRARGS, args);
	/* Compat tasks go through the compat-specific unknown probe. */
	if (unlikely(is_compat_task()))
		__event_probe__compat_sys_unknown(event, id, args);
	else
		__event_probe__sys_unknown(event, id, args);
}
155
/*
 * Probe attached to the "sys_enter" tracepoint for all system calls.
 * __data is the ltt_channel that enabled syscall tracing.  Looks up the
 * event for this syscall id and dispatches to the macro-generated probe
 * through a function pointer typed for the syscall's argument count.
 */
void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
{
	struct ltt_channel *chan = __data;
	struct ltt_event *event, *unknown_event;
	const struct trace_syscall_entry *table, *entry;
	size_t table_len;

	/* Compat (32-bit) tasks use their own table and catch-all event. */
	if (unlikely(is_compat_task())) {
		table = compat_sc_table;
		table_len = ARRAY_SIZE(compat_sc_table);
		unknown_event = chan->sc_compat_unknown;
	} else {
		table = sc_table;
		table_len = ARRAY_SIZE(sc_table);
		unknown_event = chan->sc_unknown;
	}
	/*
	 * Out-of-range ids (negative ids also end up here, since the
	 * signed id converts to a large unsigned value against the
	 * size_t length) are traced through the "unknown" event.
	 */
	if (unlikely(id >= table_len)) {
		syscall_entry_unknown(unknown_event, regs, id);
		return;
	}
	if (unlikely(is_compat_task()))
		event = chan->compat_sc_table[id];
	else
		event = chan->sc_table[id];
	/* Table hole: no event was created for this syscall. */
	if (unlikely(!event)) {
		syscall_entry_unknown(unknown_event, regs, id);
		return;
	}
	entry = &table[id];
	WARN_ON_ONCE(!entry);

	/*
	 * Fetch the syscall arguments from pt_regs and invoke the
	 * generated probe with the matching arity.
	 */
	switch (entry->nrargs) {
	case 0:
	{
		void (*fptr)(void *__data) = entry->func;

		fptr(event);
		break;
	}
	case 1:
	{
		void (*fptr)(void *__data, unsigned long arg0) = entry->func;
		unsigned long args[1];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0]);
		break;
	}
	case 2:
	{
		void (*fptr)(void *__data,
			unsigned long arg0,
			unsigned long arg1) = entry->func;
		unsigned long args[2];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1]);
		break;
	}
	case 3:
	{
		void (*fptr)(void *__data,
			unsigned long arg0,
			unsigned long arg1,
			unsigned long arg2) = entry->func;
		unsigned long args[3];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1], args[2]);
		break;
	}
	case 4:
	{
		void (*fptr)(void *__data,
			unsigned long arg0,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3) = entry->func;
		unsigned long args[4];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1], args[2], args[3]);
		break;
	}
	case 5:
	{
		void (*fptr)(void *__data,
			unsigned long arg0,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3,
			unsigned long arg4) = entry->func;
		unsigned long args[5];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1], args[2], args[3], args[4]);
		break;
	}
	case 6:
	{
		void (*fptr)(void *__data,
			unsigned long arg0,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3,
			unsigned long arg4,
			unsigned long arg5) = entry->func;
		unsigned long args[6];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1], args[2],
			args[3], args[4], args[5]);
		break;
	}
	default:
		/* More than 6 arguments is unexpected: silently skip. */
		break;
	}
}
274
/* noinline to diminish caller stack size */
/*
 * NOTE(review): the comment above says "noinline", but the function is
 * only marked static, with no noinline attribute -- confirm whether the
 * compiler should be explicitly prevented from inlining it.
 *
 * Create one ltt_event per instrumented syscall in @table and store it
 * in @chan_table at the same index.  Entries with a NULL desc (unknown
 * syscalls) and entries already populated by a previous, failed
 * registration attempt are skipped.  Returns 0 on success, -EINVAL on
 * event-creation failure.
 */
static
int fill_table(const struct trace_syscall_entry *table, size_t table_len,
	struct ltt_event **chan_table, struct ltt_channel *chan, void *filter)
{
	const struct lttng_event_desc *desc;
	unsigned int i;

	/* Allocate events for each syscall, insert into table */
	for (i = 0; i < table_len; i++) {
		struct lttng_kernel_event ev;
		desc = table[i].desc;

		if (!desc) {
			/* Unknown syscall */
			continue;
		}
		/*
		 * Skip those already populated by previous failed
		 * register for this channel.
		 */
		if (chan_table[i])
			continue;
		memset(&ev, 0, sizeof(ev));
		/* Copy is manually NUL-terminated: strncpy does not guarantee it. */
		strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
		ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
		ev.instrumentation = LTTNG_KERNEL_NOOP;
		chan_table[i] = ltt_event_create(chan, &ev, filter,
						desc);
		if (!chan_table[i]) {
			/*
			 * If something goes wrong in event registration
			 * after the first one, we have no choice but to
			 * leave the previous events in there, until
			 * deleted by session teardown.
			 */
			return -EINVAL;
		}
	}
	return 0;
}
316
/*
 * Enable syscall tracing on @chan: allocate the syscall-id -> event
 * lookup tables, create the catch-all "unknown", "compat unknown" and
 * "exit" events, populate the per-syscall events, then hook the
 * sys_enter and sys_exit tracepoints.  Returns 0 on success, a negative
 * errno otherwise.  On failure, already-created events and tables are
 * left in place (reclaimed at session teardown), which also lets a
 * retry skip the parts that already succeeded.
 */
int lttng_syscalls_register(struct ltt_channel *chan, void *filter)
{
	struct lttng_kernel_event ev;
	int ret;

	wrapper_vmalloc_sync_all();

	if (!chan->sc_table) {
		/* create syscall table mapping syscall to events */
		chan->sc_table = kzalloc(sizeof(struct ltt_event *)
					* ARRAY_SIZE(sc_table), GFP_KERNEL);
		if (!chan->sc_table)
			return -ENOMEM;
	}

#ifdef CONFIG_COMPAT
	if (!chan->compat_sc_table) {
		/* create syscall table mapping compat syscall to events */
		chan->compat_sc_table = kzalloc(sizeof(struct ltt_event *)
					* ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
		if (!chan->compat_sc_table)
			return -ENOMEM;
	}
#endif
	/* Catch-all event for syscall ids without instrumentation. */
	if (!chan->sc_unknown) {
		const struct lttng_event_desc *desc =
			&__event_desc___sys_unknown;

		memset(&ev, 0, sizeof(ev));
		strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
		ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
		ev.instrumentation = LTTNG_KERNEL_NOOP;
		chan->sc_unknown = ltt_event_create(chan, &ev, filter,
						desc);
		if (!chan->sc_unknown) {
			return -EINVAL;
		}
	}

	/* Same catch-all for unknown compat syscalls. */
	if (!chan->sc_compat_unknown) {
		const struct lttng_event_desc *desc =
			&__event_desc___compat_sys_unknown;

		memset(&ev, 0, sizeof(ev));
		strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
		ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
		ev.instrumentation = LTTNG_KERNEL_NOOP;
		chan->sc_compat_unknown = ltt_event_create(chan, &ev, filter,
						desc);
		if (!chan->sc_compat_unknown) {
			return -EINVAL;
		}
	}

	/* Single event recording returns of all system calls. */
	if (!chan->sc_exit) {
		const struct lttng_event_desc *desc =
			&__event_desc___exit_syscall;

		memset(&ev, 0, sizeof(ev));
		strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
		ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
		ev.instrumentation = LTTNG_KERNEL_NOOP;
		chan->sc_exit = ltt_event_create(chan, &ev, filter,
						desc);
		if (!chan->sc_exit) {
			return -EINVAL;
		}
	}

	ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
			chan->sc_table, chan, filter);
	if (ret)
		return ret;
#ifdef CONFIG_COMPAT
	ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
			chan->compat_sc_table, chan, filter);
	if (ret)
		return ret;
#endif
	ret = tracepoint_probe_register("sys_enter",
			(void *) syscall_entry_probe, chan);
	if (ret)
		return ret;
	/*
	 * We change the name of sys_exit tracepoint due to namespace
	 * conflict with sys_exit syscall entry.
	 */
	ret = tracepoint_probe_register("sys_exit",
			(void *) __event_probe__exit_syscall,
			chan->sc_exit);
	if (ret) {
		/* Roll back the sys_enter hook so nothing is left half-armed. */
		WARN_ON_ONCE(tracepoint_probe_unregister("sys_enter",
			(void *) syscall_entry_probe, chan));
	}
	return ret;
}
413
/*
 * Only called at session destruction.
 */
/*
 * Disable syscall tracing on @chan: unhook the sys_exit and sys_enter
 * tracepoints, then free the lookup tables.  The ltt_event objects in
 * the tables are not freed here (see comment below).  Returns 0 on
 * success or if syscall tracing was never enabled, otherwise the
 * tracepoint_probe_unregister() error.
 */
int lttng_syscalls_unregister(struct ltt_channel *chan)
{
	int ret;

	if (!chan->sc_table)
		return 0;
	ret = tracepoint_probe_unregister("sys_exit",
			(void *) __event_probe__exit_syscall,
			chan->sc_exit);
	if (ret)
		return ret;
	ret = tracepoint_probe_unregister("sys_enter",
			(void *) syscall_entry_probe, chan);
	if (ret)
		return ret;
	/* ltt_event destroy will be performed by ltt_session_destroy() */
	kfree(chan->sc_table);
#ifdef CONFIG_COMPAT
	kfree(chan->compat_sc_table);
#endif
	return 0;
}
diff --git a/drivers/staging/lttng/probes/Makefile b/drivers/staging/lttng/probes/Makefile
deleted file mode 100644
index bdc1179ec8a8..000000000000
--- a/drivers/staging/lttng/probes/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Makefile for the LTT probes.
3#
4
5ccflags-y += -I$(PWD)/probes
6obj-m += lttng-types.o
7
8obj-m += lttng-probe-lttng.o
9
10obj-m += lttng-probe-sched.o
11obj-m += lttng-probe-irq.o
12
13ifneq ($(CONFIG_KVM),)
14obj-m += lttng-probe-kvm.o
15endif
16
17ifneq ($(CONFIG_BLOCK),)
18ifneq ($(CONFIG_EVENT_TRACING),) # need blk_cmd_buf_len
19obj-m += $(shell \
20 if [ $(VERSION) -ge 3 \
21 -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 38 \) ] ; then \
22 echo "lttng-probe-block.o" ; fi;)
23endif
24endif
25
26ifneq ($(CONFIG_KPROBES),)
27obj-m += lttng-kprobes.o
28endif
29
30
31ifneq ($(CONFIG_KRETPROBES),)
32obj-m += lttng-kretprobes.o
33endif
34
35ifneq ($(CONFIG_DYNAMIC_FTRACE),)
36obj-m += lttng-ftrace.o
37endif
diff --git a/drivers/staging/lttng/probes/define_trace.h b/drivers/staging/lttng/probes/define_trace.h
deleted file mode 100644
index 3c9a46784abc..000000000000
--- a/drivers/staging/lttng/probes/define_trace.h
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 * define_trace.h
3 *
4 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
5 * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10/*
11 * Trace files that want to automate creationg of all tracepoints defined
12 * in their file should include this file. The following are macros that the
13 * trace file may define:
14 *
15 * TRACE_SYSTEM defines the system the tracepoint is for
16 *
17 * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
18 * This macro may be defined to tell define_trace.h what file to include.
19 * Note, leave off the ".h".
20 *
21 * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
22 * then this macro can define the path to use. Note, the path is relative to
23 * define_trace.h, not the file including it. Full path names for out of tree
24 * modules must be used.
25 */
26
27#ifdef CREATE_TRACE_POINTS
28
29/* Prevent recursion */
30#undef CREATE_TRACE_POINTS
31
32#include <linux/stringify.h>
33/*
34 * module.h includes tracepoints, and because ftrace.h
35 * pulls in module.h:
36 * trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
37 * linux/ftrace.h -> linux/module.h
38 * we must include module.h here before we play with any of
39 * the TRACE_EVENT() macros, otherwise the tracepoints included
40 * by module.h may break the build.
41 */
42#include <linux/module.h>
43
44#undef TRACE_EVENT
45#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
46 DEFINE_TRACE(name)
47
48#undef TRACE_EVENT_CONDITION
49#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
50 TRACE_EVENT(name, \
51 PARAMS(proto), \
52 PARAMS(args), \
53 PARAMS(tstruct), \
54 PARAMS(assign), \
55 PARAMS(print))
56
57#undef TRACE_EVENT_FN
58#define TRACE_EVENT_FN(name, proto, args, tstruct, \
59 assign, print, reg, unreg) \
60 DEFINE_TRACE_FN(name, reg, unreg)
61
62#undef DEFINE_EVENT
63#define DEFINE_EVENT(template, name, proto, args) \
64 DEFINE_TRACE(name)
65
66#undef DEFINE_EVENT_PRINT
67#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
68 DEFINE_TRACE(name)
69
70#undef DEFINE_EVENT_CONDITION
71#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
72 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
73
74#undef DECLARE_TRACE
75#define DECLARE_TRACE(name, proto, args) \
76 DEFINE_TRACE(name)
77
78#undef TRACE_INCLUDE
79#undef __TRACE_INCLUDE
80
81#ifndef TRACE_INCLUDE_FILE
82# define TRACE_INCLUDE_FILE TRACE_SYSTEM
83# define UNDEF_TRACE_INCLUDE_FILE
84#endif
85
86#ifndef TRACE_INCLUDE_PATH
87# define __TRACE_INCLUDE(system) <trace/events/system.h>
88# define UNDEF_TRACE_INCLUDE_PATH
89#else
90# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
91#endif
92
93# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
94
95/* Let the trace headers be reread */
96#define TRACE_HEADER_MULTI_READ
97
98#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
99
100/* Make all open coded DECLARE_TRACE nops */
101#undef DECLARE_TRACE
102#define DECLARE_TRACE(name, proto, args)
103
104#ifdef LTTNG_PACKAGE_BUILD
105#include "lttng-events.h"
106#endif
107
108#undef TRACE_EVENT
109#undef TRACE_EVENT_FN
110#undef TRACE_EVENT_CONDITION
111#undef DECLARE_EVENT_CLASS
112#undef DEFINE_EVENT
113#undef DEFINE_EVENT_PRINT
114#undef DEFINE_EVENT_CONDITION
115#undef TRACE_HEADER_MULTI_READ
116#undef DECLARE_TRACE
117
118/* Only undef what we defined in this file */
119#ifdef UNDEF_TRACE_INCLUDE_FILE
120# undef TRACE_INCLUDE_FILE
121# undef UNDEF_TRACE_INCLUDE_FILE
122#endif
123
124#ifdef UNDEF_TRACE_INCLUDE_PATH
125# undef TRACE_INCLUDE_PATH
126# undef UNDEF_TRACE_INCLUDE_PATH
127#endif
128
129/* We may be processing more files */
130#define CREATE_TRACE_POINTS
131
132#endif /* CREATE_TRACE_POINTS */
diff --git a/drivers/staging/lttng/probes/lttng-events-reset.h b/drivers/staging/lttng/probes/lttng-events-reset.h
deleted file mode 100644
index c8a1046d90b9..000000000000
--- a/drivers/staging/lttng/probes/lttng-events-reset.h
+++ /dev/null
@@ -1,84 +0,0 @@
1/*
2 * lttng-events-reset.h
3 *
4 * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Dual LGPL v2.1/GPL v2 license.
7 */
8
9/* Reset macros used within TRACE_EVENT to "nothing" */
10
11#undef __field_full
12#define __field_full(_type, _item, _order, _base)
13
14#undef __array_enc_ext
15#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)
16
17#undef __dynamic_array_enc_ext
18#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)
19
20#undef __dynamic_array_len
21#define __dynamic_array_len(_type, _item, _length)
22
23#undef __string
24#define __string(_item, _src)
25
26#undef tp_assign
27#define tp_assign(dest, src)
28
29#undef tp_memcpy
30#define tp_memcpy(dest, src, len)
31
32#undef tp_memcpy_dyn
33#define tp_memcpy_dyn(dest, src, len)
34
35#undef tp_strcpy
36#define tp_strcpy(dest, src)
37
38#undef __get_str
39#define __get_str(field)
40
41#undef __get_dynamic_array
42#define __get_dynamic_array(field)
43
44#undef __get_dynamic_array_len
45#define __get_dynamic_array_len(field)
46
47#undef TP_PROTO
48#define TP_PROTO(args...)
49
50#undef TP_ARGS
51#define TP_ARGS(args...)
52
53#undef TP_STRUCT__entry
54#define TP_STRUCT__entry(args...)
55
56#undef TP_fast_assign
57#define TP_fast_assign(args...)
58
59#undef __perf_count
60#define __perf_count(args...)
61
62#undef __perf_addr
63#define __perf_addr(args...)
64
65#undef TP_perf_assign
66#define TP_perf_assign(args...)
67
68#undef TP_printk
69#define TP_printk(args...)
70
71#undef DECLARE_EVENT_CLASS
72#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print)
73
74#undef DECLARE_EVENT_CLASS_NOARGS
75#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print)
76
77#undef DEFINE_EVENT
78#define DEFINE_EVENT(_template, _name, _proto, _args)
79
80#undef DEFINE_EVENT_NOARGS
81#define DEFINE_EVENT_NOARGS(_template, _name)
82
83#undef TRACE_EVENT_FLAGS
84#define TRACE_EVENT_FLAGS(name, value)
diff --git a/drivers/staging/lttng/probes/lttng-events.h b/drivers/staging/lttng/probes/lttng-events.h
deleted file mode 100644
index d486994f3a0c..000000000000
--- a/drivers/staging/lttng/probes/lttng-events.h
+++ /dev/null
@@ -1,703 +0,0 @@
1/*
2 * lttng-events.h
3 *
4 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
5 * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/debugfs.h>
11#include "lttng.h"
12#include "lttng-types.h"
13#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
14#include "../wrapper/ringbuffer/frontend_types.h"
15#include "../ltt-events.h"
16#include "../ltt-tracer-core.h"
17
18/*
19 * Macro declarations used for all stages.
20 */
21
22/*
23 * DECLARE_EVENT_CLASS can be used to add a generic function
24 * handlers for events. That is, if all events have the same
25 * parameters and just have distinct trace points.
26 * Each tracepoint can be defined with DEFINE_EVENT and that
27 * will map the DECLARE_EVENT_CLASS to the tracepoint.
28 *
29 * TRACE_EVENT is a one to one mapping between tracepoint and template.
30 */
31
32#undef TRACE_EVENT
33#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
34 DECLARE_EVENT_CLASS(name, \
35 PARAMS(proto), \
36 PARAMS(args), \
37 PARAMS(tstruct), \
38 PARAMS(assign), \
39 PARAMS(print)) \
40 DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))
41
42#undef TRACE_EVENT_NOARGS
43#define TRACE_EVENT_NOARGS(name, tstruct, assign, print) \
44 DECLARE_EVENT_CLASS_NOARGS(name, \
45 PARAMS(tstruct), \
46 PARAMS(assign), \
47 PARAMS(print)) \
48 DEFINE_EVENT_NOARGS(name, name)
49
50
51#undef DEFINE_EVENT_PRINT
52#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
53 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
54
55/* Callbacks are meaningless to LTTng. */
56#undef TRACE_EVENT_FN
57#define TRACE_EVENT_FN(name, proto, args, tstruct, \
58 assign, print, reg, unreg) \
59 TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
60 PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
61
62/*
63 * Stage 1 of the trace events.
64 *
65 * Create dummy trace calls for each events, verifying that the LTTng module
66 * TRACE_EVENT headers match the kernel arguments. Will be optimized out by the
67 * compiler.
68 */
69
70#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
71
72#undef TP_PROTO
73#define TP_PROTO(args...) args
74
75#undef TP_ARGS
76#define TP_ARGS(args...) args
77
78#undef DEFINE_EVENT
79#define DEFINE_EVENT(_template, _name, _proto, _args) \
80void trace_##_name(_proto);
81
82#undef DEFINE_EVENT_NOARGS
83#define DEFINE_EVENT_NOARGS(_template, _name) \
84void trace_##_name(void *__data);
85
86#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
87
88/*
89 * Stage 2 of the trace events.
90 *
91 * Create event field type metadata section.
92 * Each event produce an array of fields.
93 */
94
95#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
96
97/* Named field types must be defined in lttng-types.h */
98
99#undef __field_full
100#define __field_full(_type, _item, _order, _base) \
101 { \
102 .name = #_item, \
103 .type = __type_integer(_type, _order, _base, none), \
104 },
105
106#undef __field
107#define __field(_type, _item) \
108 __field_full(_type, _item, __BYTE_ORDER, 10)
109
110#undef __field_ext
111#define __field_ext(_type, _item, _filter_type) \
112 __field(_type, _item)
113
114#undef __field_hex
115#define __field_hex(_type, _item) \
116 __field_full(_type, _item, __BYTE_ORDER, 16)
117
118#undef __field_network
119#define __field_network(_type, _item) \
120 __field_full(_type, _item, __BIG_ENDIAN, 10)
121
122#undef __field_network_hex
123#define __field_network_hex(_type, _item) \
124 __field_full(_type, _item, __BIG_ENDIAN, 16)
125
126#undef __array_enc_ext
127#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
128 { \
129 .name = #_item, \
130 .type = \
131 { \
132 .atype = atype_array, \
133 .u.array = \
134 { \
135 .length = _length, \
136 .elem_type = __type_integer(_type, _order, _base, _encoding), \
137 }, \
138 }, \
139 },
140
141#undef __array
142#define __array(_type, _item, _length) \
143 __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, none)
144
145#undef __array_text
146#define __array_text(_type, _item, _length) \
147 __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, UTF8)
148
149#undef __array_hex
150#define __array_hex(_type, _item, _length) \
151 __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 16, none)
152
153#undef __dynamic_array_enc_ext
154#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
155 { \
156 .name = #_item, \
157 .type = \
158 { \
159 .atype = atype_sequence, \
160 .u.sequence = \
161 { \
162 .length_type = __type_integer(u32, __BYTE_ORDER, 10, none), \
163 .elem_type = __type_integer(_type, _order, _base, _encoding), \
164 }, \
165 }, \
166 },
167
168#undef __dynamic_array
169#define __dynamic_array(_type, _item, _length) \
170 __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, none)
171
172#undef __dynamic_array_text
173#define __dynamic_array_text(_type, _item, _length) \
174 __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, UTF8)
175
176#undef __dynamic_array_hex
177#define __dynamic_array_hex(_type, _item, _length) \
178 __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 16, none)
179
180#undef __string
181#define __string(_item, _src) \
182 { \
183 .name = #_item, \
184 .type = \
185 { \
186 .atype = atype_string, \
187 .u.basic.string.encoding = lttng_encode_UTF8, \
188 }, \
189 },
190
191#undef __string_from_user
192#define __string_from_user(_item, _src) \
193 __string(_item, _src)
194
195#undef TP_STRUCT__entry
196#define TP_STRUCT__entry(args...) args /* Only one used in this phase */
197
198#undef DECLARE_EVENT_CLASS_NOARGS
199#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
200 static const struct lttng_event_field __event_fields___##_name[] = { \
201 _tstruct \
202 };
203
204#undef DECLARE_EVENT_CLASS
205#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
206 DECLARE_EVENT_CLASS_NOARGS(_name, PARAMS(_tstruct), PARAMS(_assign), \
207 PARAMS(_print))
208
209#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
210
211/*
212 * Stage 3 of the trace events.
213 *
214 * Create probe callback prototypes.
215 */
216
217#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
218
219#undef TP_PROTO
220#define TP_PROTO(args...) args
221
222#undef DECLARE_EVENT_CLASS
223#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
224static void __event_probe__##_name(void *__data, _proto);
225
226#undef DECLARE_EVENT_CLASS_NOARGS
227#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
228static void __event_probe__##_name(void *__data);
229
230#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
231
232/*
233 * Stage 3.9 of the trace events.
234 *
235 * Create event descriptions.
236 */
237
238/* Named field types must be defined in lttng-types.h */
239
240#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
241
242#ifndef TP_PROBE_CB
243#define TP_PROBE_CB(_template) &__event_probe__##_template
244#endif
245
246#undef DEFINE_EVENT_NOARGS
247#define DEFINE_EVENT_NOARGS(_template, _name) \
248static const struct lttng_event_desc __event_desc___##_name = { \
249 .fields = __event_fields___##_template, \
250 .name = #_name, \
251 .probe_callback = (void *) TP_PROBE_CB(_template), \
252 .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
253 .owner = THIS_MODULE, \
254};
255
256#undef DEFINE_EVENT
257#define DEFINE_EVENT(_template, _name, _proto, _args) \
258 DEFINE_EVENT_NOARGS(_template, _name)
259
260#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
261
262
263/*
264 * Stage 4 of the trace events.
265 *
266 * Create an array of event description pointers.
267 */
268
269/* Named field types must be defined in lttng-types.h */
270
271#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
272
273#undef DEFINE_EVENT_NOARGS
274#define DEFINE_EVENT_NOARGS(_template, _name) \
275 &__event_desc___##_name,
276
277#undef DEFINE_EVENT
278#define DEFINE_EVENT(_template, _name, _proto, _args) \
279 DEFINE_EVENT_NOARGS(_template, _name)
280
281#define TP_ID1(_token, _system) _token##_system
282#define TP_ID(_token, _system) TP_ID1(_token, _system)
283
284static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
285#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
286};
287
288#undef TP_ID1
289#undef TP_ID
290
291
292/*
293 * Stage 5 of the trace events.
294 *
295 * Create a toplevel descriptor for the whole probe.
296 */
297
298#define TP_ID1(_token, _system) _token##_system
299#define TP_ID(_token, _system) TP_ID1(_token, _system)
300
301/* non-const because list head will be modified when registered. */
302static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
303 .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
304 .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
305};
306
307#undef TP_ID1
308#undef TP_ID
309
310/*
311 * Stage 6 of the trace events.
312 *
313 * Create static inline function that calculates event size.
314 */
315
316#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
317
318/* Named field types must be defined in lttng-types.h */
319
320#undef __field_full
321#define __field_full(_type, _item, _order, _base) \
322 __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
323 __event_len += sizeof(_type);
324
325#undef __array_enc_ext
326#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
327 __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
328 __event_len += sizeof(_type) * (_length);
329
330#undef __dynamic_array_enc_ext
331#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
332 __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(u32)); \
333 __event_len += sizeof(u32); \
334 __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
335 __dynamic_len[__dynamic_len_idx] = (_length); \
336 __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
337 __dynamic_len_idx++;
338
339#undef __string
340#define __string(_item, _src) \
341 __event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1;
342
343/*
344 * strlen_user includes \0. If returns 0, it faulted, so we set size to
345 * 1 (\0 only).
346 */
347#undef __string_from_user
348#define __string_from_user(_item, _src) \
349 __event_len += __dynamic_len[__dynamic_len_idx++] = \
350 max_t(size_t, strlen_user(_src), 1);
351
352#undef TP_PROTO
353#define TP_PROTO(args...) args
354
355#undef TP_STRUCT__entry
356#define TP_STRUCT__entry(args...) args
357
358#undef DECLARE_EVENT_CLASS
359#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
360static inline size_t __event_get_size__##_name(size_t *__dynamic_len, _proto) \
361{ \
362 size_t __event_len = 0; \
363 unsigned int __dynamic_len_idx = 0; \
364 \
365 if (0) \
366 (void) __dynamic_len_idx; /* don't warn if unused */ \
367 _tstruct \
368 return __event_len; \
369}
370
371#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
372
373/*
374 * Stage 7 of the trace events.
375 *
376 * Create static inline function that calculates event payload alignment.
377 */
378
379#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
380
381/* Named field types must be defined in lttng-types.h */
382
383#undef __field_full
384#define __field_full(_type, _item, _order, _base) \
385 __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
386
387#undef __array_enc_ext
388#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
389 __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
390
391#undef __dynamic_array_enc_ext
392#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
393 __event_align = max_t(size_t, __event_align, ltt_alignof(u32)); \
394 __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
395
396#undef __string
397#define __string(_item, _src)
398
399#undef __string_from_user
400#define __string_from_user(_item, _src)
401
402#undef TP_PROTO
403#define TP_PROTO(args...) args
404
405#undef TP_STRUCT__entry
406#define TP_STRUCT__entry(args...) args
407
408#undef DECLARE_EVENT_CLASS
409#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
410static inline size_t __event_get_align__##_name(_proto) \
411{ \
412 size_t __event_align = 1; \
413 _tstruct \
414 return __event_align; \
415}
416
417#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
418
419
420/*
421 * Stage 8 of the trace events.
422 *
423 * Create structure declaration that allows the "assign" macros to access the
424 * field types.
425 */
426
427#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
428
429/* Named field types must be defined in lttng-types.h */
430
431#undef __field_full
432#define __field_full(_type, _item, _order, _base) _type _item;
433
434#undef __array_enc_ext
435#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
436 _type _item;
437
438#undef __dynamic_array_enc_ext
439#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
440 _type _item;
441
442#undef __string
443#define __string(_item, _src) char _item;
444
445#undef __string_from_user
446#define __string_from_user(_item, _src) \
447 __string(_item, _src)
448
449#undef TP_STRUCT__entry
450#define TP_STRUCT__entry(args...) args
451
452#undef DECLARE_EVENT_CLASS
453#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
454struct __event_typemap__##_name { \
455 _tstruct \
456};
457
458#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
459
460
461/*
462 * Stage 9 of the trace events.
463 *
464 * Create the probe function : call even size calculation and write event data
465 * into the buffer.
466 *
467 * We use both the field and assignment macros to write the fields in the order
468 * defined in the field declaration. The field declarations control the
469 * execution order, jumping to the appropriate assignment block.
470 */
471
472#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
473
474#undef __field_full
475#define __field_full(_type, _item, _order, _base) \
476 goto __assign_##_item; \
477__end_field_##_item:
478
479#undef __array_enc_ext
480#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
481 goto __assign_##_item; \
482__end_field_##_item:
483
484#undef __dynamic_array_enc_ext
485#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
486 goto __assign_##_item##_1; \
487__end_field_##_item##_1: \
488 goto __assign_##_item##_2; \
489__end_field_##_item##_2:
490
491#undef __string
492#define __string(_item, _src) \
493 goto __assign_##_item; \
494__end_field_##_item:
495
496#undef __string_from_user
497#define __string_from_user(_item, _src) \
498 __string(_item, _src)
499
500/*
501 * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to
502 * strcpy().
503 */
504#undef tp_assign
505#define tp_assign(dest, src) \
506__assign_##dest: \
507 { \
508 __typeof__(__typemap.dest) __tmp = (src); \
509 lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__tmp)); \
510 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
511 } \
512 goto __end_field_##dest;
513
514#undef tp_memcpy
515#define tp_memcpy(dest, src, len) \
516__assign_##dest: \
517 if (0) \
518 (void) __typemap.dest; \
519 lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
520 __chan->ops->event_write(&__ctx, src, len); \
521 goto __end_field_##dest;
522
523#undef tp_memcpy_dyn
524#define tp_memcpy_dyn(dest, src) \
525__assign_##dest##_1: \
526 { \
527 u32 __tmpl = __dynamic_len[__dynamic_len_idx]; \
528 lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(u32)); \
529 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(u32)); \
530 } \
531 goto __end_field_##dest##_1; \
532__assign_##dest##_2: \
533 lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
534 __chan->ops->event_write(&__ctx, src, \
535 sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
536 goto __end_field_##dest##_2;
537
538#undef tp_memcpy_from_user
539#define tp_memcpy_from_user(dest, src, len) \
540 __assign_##dest: \
541 if (0) \
542 (void) __typemap.dest; \
543 lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
544 __chan->ops->event_write_from_user(&__ctx, src, len); \
545 goto __end_field_##dest;
546
547/*
548 * The string length including the final \0.
549 */
550#undef tp_copy_string_from_user
551#define tp_copy_string_from_user(dest, src) \
552 __assign_##dest: \
553 { \
554 size_t __ustrlen; \
555 \
556 if (0) \
557 (void) __typemap.dest; \
558 lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest));\
559 __ustrlen = __get_dynamic_array_len(dest); \
560 if (likely(__ustrlen > 1)) { \
561 __chan->ops->event_write_from_user(&__ctx, src, \
562 __ustrlen - 1); \
563 } \
564 __chan->ops->event_memset(&__ctx, 0, 1); \
565 } \
566 goto __end_field_##dest;
567#undef tp_strcpy
568#define tp_strcpy(dest, src) \
569 tp_memcpy(dest, src, __get_dynamic_array_len(dest))
570
571/* Named field types must be defined in lttng-types.h */
572
573#undef __get_str
574#define __get_str(field) field
575
576#undef __get_dynamic_array
577#define __get_dynamic_array(field) field
578
579/* Beware: this get len actually consumes the len value */
580#undef __get_dynamic_array_len
581#define __get_dynamic_array_len(field) __dynamic_len[__dynamic_len_idx++]
582
583#undef TP_PROTO
584#define TP_PROTO(args...) args
585
586#undef TP_ARGS
587#define TP_ARGS(args...) args
588
589#undef TP_STRUCT__entry
590#define TP_STRUCT__entry(args...) args
591
592#undef TP_fast_assign
593#define TP_fast_assign(args...) args
594
595#undef DECLARE_EVENT_CLASS
596#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
597static void __event_probe__##_name(void *__data, _proto) \
598{ \
599 struct ltt_event *__event = __data; \
600 struct ltt_channel *__chan = __event->chan; \
601 struct lib_ring_buffer_ctx __ctx; \
602 size_t __event_len, __event_align; \
603 size_t __dynamic_len_idx = 0; \
604 size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
605 struct __event_typemap__##_name __typemap; \
606 int __ret; \
607 \
608 if (0) \
609 (void) __dynamic_len_idx; /* don't warn if unused */ \
610 if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
611 return; \
612 if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
613 return; \
614 if (unlikely(!ACCESS_ONCE(__event->enabled))) \
615 return; \
616 __event_len = __event_get_size__##_name(__dynamic_len, _args); \
617 __event_align = __event_get_align__##_name(_args); \
618 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
619 __event_align, -1); \
620 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
621 if (__ret < 0) \
622 return; \
623 /* Control code (field ordering) */ \
624 _tstruct \
625 __chan->ops->event_commit(&__ctx); \
626 return; \
627 /* Copy code, steered by control code */ \
628 _assign \
629}
630
631#undef DECLARE_EVENT_CLASS_NOARGS
632#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
633static void __event_probe__##_name(void *__data) \
634{ \
635 struct ltt_event *__event = __data; \
636 struct ltt_channel *__chan = __event->chan; \
637 struct lib_ring_buffer_ctx __ctx; \
638 size_t __event_len, __event_align; \
639 int __ret; \
640 \
641 if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
642 return; \
643 if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
644 return; \
645 if (unlikely(!ACCESS_ONCE(__event->enabled))) \
646 return; \
647 __event_len = 0; \
648 __event_align = 1; \
649 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
650 __event_align, -1); \
651 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
652 if (__ret < 0) \
653 return; \
654 /* Control code (field ordering) */ \
655 _tstruct \
656 __chan->ops->event_commit(&__ctx); \
657 return; \
658 /* Copy code, steered by control code */ \
659 _assign \
660}
661
662#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
663
664/*
665 * Stage 10 of the trace events.
666 *
667 * Register/unregister probes at module load/unload.
668 */
669
670#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
671
672#define TP_ID1(_token, _system) _token##_system
673#define TP_ID(_token, _system) TP_ID1(_token, _system)
674#define module_init_eval1(_token, _system) module_init(_token##_system)
675#define module_init_eval(_token, _system) module_init_eval1(_token, _system)
676#define module_exit_eval1(_token, _system) module_exit(_token##_system)
677#define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
678
679#ifndef TP_MODULE_OVERRIDE
680static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
681{
682 wrapper_vmalloc_sync_all();
683 return ltt_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
684}
685
686module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
687
688static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
689{
690 ltt_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
691}
692
693module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
694#endif
695
696#undef module_init_eval
697#undef module_exit_eval
698#undef TP_ID1
699#undef TP_ID
700
701#undef TP_PROTO
702#undef TP_ARGS
703#undef TRACE_EVENT_FLAGS
diff --git a/drivers/staging/lttng/probes/lttng-ftrace.c b/drivers/staging/lttng/probes/lttng-ftrace.c
deleted file mode 100644
index 1aa71831e86f..000000000000
--- a/drivers/staging/lttng/probes/lttng-ftrace.c
+++ /dev/null
@@ -1,188 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng function tracer integration module.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10/*
11 * Ftrace function tracer does not seem to provide synchronization between probe
12 * teardown and callback execution. Therefore, we make this module permanently
13 * loaded (unloadable).
14 *
15 * TODO: Move to register_ftrace_function() (which is exported for
16 * modules) for Linux >= 3.0. It is faster (only enables the selected
17 * functions), and will stay there.
18 */
19
20#include <linux/module.h>
21#include <linux/ftrace.h>
22#include <linux/slab.h>
23#include "../ltt-events.h"
24#include "../wrapper/ringbuffer/frontend_types.h"
25#include "../wrapper/ftrace.h"
26#include "../wrapper/vmalloc.h"
27#include "../ltt-tracer.h"
28
29static
30void lttng_ftrace_handler(unsigned long ip, unsigned long parent_ip, void **data)
31{
32 struct ltt_event *event = *data;
33 struct ltt_channel *chan = event->chan;
34 struct lib_ring_buffer_ctx ctx;
35 struct {
36 unsigned long ip;
37 unsigned long parent_ip;
38 } payload;
39 int ret;
40
41 if (unlikely(!ACCESS_ONCE(chan->session->active)))
42 return;
43 if (unlikely(!ACCESS_ONCE(chan->enabled)))
44 return;
45 if (unlikely(!ACCESS_ONCE(event->enabled)))
46 return;
47
48 lib_ring_buffer_ctx_init(&ctx, chan->chan, event,
49 sizeof(payload), ltt_alignof(payload), -1);
50 ret = chan->ops->event_reserve(&ctx, event->id);
51 if (ret < 0)
52 return;
53 payload.ip = ip;
54 payload.parent_ip = parent_ip;
55 lib_ring_buffer_align_ctx(&ctx, ltt_alignof(payload));
56 chan->ops->event_write(&ctx, &payload, sizeof(payload));
57 chan->ops->event_commit(&ctx);
58 return;
59}
60
61/*
62 * Create event description
63 */
64static
65int lttng_create_ftrace_event(const char *name, struct ltt_event *event)
66{
67 struct lttng_event_field *fields;
68 struct lttng_event_desc *desc;
69 int ret;
70
71 desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
72 if (!desc)
73 return -ENOMEM;
74 desc->name = kstrdup(name, GFP_KERNEL);
75 if (!desc->name) {
76 ret = -ENOMEM;
77 goto error_str;
78 }
79 desc->nr_fields = 2;
80 desc->fields = fields =
81 kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
82 if (!desc->fields) {
83 ret = -ENOMEM;
84 goto error_fields;
85 }
86 fields[0].name = "ip";
87 fields[0].type.atype = atype_integer;
88 fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
89 fields[0].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
90 fields[0].type.u.basic.integer.signedness = is_signed_type(unsigned long);
91 fields[0].type.u.basic.integer.reverse_byte_order = 0;
92 fields[0].type.u.basic.integer.base = 16;
93 fields[0].type.u.basic.integer.encoding = lttng_encode_none;
94
95 fields[1].name = "parent_ip";
96 fields[1].type.atype = atype_integer;
97 fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
98 fields[1].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
99 fields[1].type.u.basic.integer.signedness = is_signed_type(unsigned long);
100 fields[1].type.u.basic.integer.reverse_byte_order = 0;
101 fields[1].type.u.basic.integer.base = 16;
102 fields[1].type.u.basic.integer.encoding = lttng_encode_none;
103
104 desc->owner = THIS_MODULE;
105 event->desc = desc;
106
107 return 0;
108
109error_fields:
110 kfree(desc->name);
111error_str:
112 kfree(desc);
113 return ret;
114}
115
116static
117struct ftrace_probe_ops lttng_ftrace_ops = {
118 .func = lttng_ftrace_handler,
119};
120
121int lttng_ftrace_register(const char *name,
122 const char *symbol_name,
123 struct ltt_event *event)
124{
125 int ret;
126
127 ret = lttng_create_ftrace_event(name, event);
128 if (ret)
129 goto error;
130
131 event->u.ftrace.symbol_name = kstrdup(symbol_name, GFP_KERNEL);
132 if (!event->u.ftrace.symbol_name)
133 goto name_error;
134
135 /* Ensure the memory we just allocated don't trigger page faults */
136 wrapper_vmalloc_sync_all();
137
138 ret = wrapper_register_ftrace_function_probe(event->u.ftrace.symbol_name,
139 &lttng_ftrace_ops, event);
140 if (ret < 0)
141 goto register_error;
142 return 0;
143
144register_error:
145 kfree(event->u.ftrace.symbol_name);
146name_error:
147 kfree(event->desc->name);
148 kfree(event->desc);
149error:
150 return ret;
151}
152EXPORT_SYMBOL_GPL(lttng_ftrace_register);
153
154void lttng_ftrace_unregister(struct ltt_event *event)
155{
156 wrapper_unregister_ftrace_function_probe(event->u.ftrace.symbol_name,
157 &lttng_ftrace_ops, event);
158}
159EXPORT_SYMBOL_GPL(lttng_ftrace_unregister);
160
161void lttng_ftrace_destroy_private(struct ltt_event *event)
162{
163 kfree(event->u.ftrace.symbol_name);
164 kfree(event->desc->fields);
165 kfree(event->desc->name);
166 kfree(event->desc);
167}
168EXPORT_SYMBOL_GPL(lttng_ftrace_destroy_private);
169
170int lttng_ftrace_init(void)
171{
172 wrapper_vmalloc_sync_all();
173 return 0;
174}
175module_init(lttng_ftrace_init)
176
177/*
178 * Ftrace takes care of waiting for a grace period (RCU sched) at probe
179 * unregistration, and disables preemption around probe call.
180 */
181void lttng_ftrace_exit(void)
182{
183}
184module_exit(lttng_ftrace_exit)
185
186MODULE_LICENSE("GPL and additional rights");
187MODULE_AUTHOR("Mathieu Desnoyers");
188MODULE_DESCRIPTION("Linux Trace Toolkit Ftrace Support");
diff --git a/drivers/staging/lttng/probes/lttng-kprobes.c b/drivers/staging/lttng/probes/lttng-kprobes.c
deleted file mode 100644
index 784002aed096..000000000000
--- a/drivers/staging/lttng/probes/lttng-kprobes.c
+++ /dev/null
@@ -1,164 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng kprobes integration module.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/kprobes.h>
12#include <linux/slab.h>
13#include "../ltt-events.h"
14#include "../wrapper/ringbuffer/frontend_types.h"
15#include "../wrapper/vmalloc.h"
16#include "../ltt-tracer.h"
17
18static
19int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
20{
21 struct ltt_event *event =
22 container_of(p, struct ltt_event, u.kprobe.kp);
23 struct ltt_channel *chan = event->chan;
24 struct lib_ring_buffer_ctx ctx;
25 int ret;
26 unsigned long data = (unsigned long) p->addr;
27
28 if (unlikely(!ACCESS_ONCE(chan->session->active)))
29 return 0;
30 if (unlikely(!ACCESS_ONCE(chan->enabled)))
31 return 0;
32 if (unlikely(!ACCESS_ONCE(event->enabled)))
33 return 0;
34
35 lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(data),
36 ltt_alignof(data), -1);
37 ret = chan->ops->event_reserve(&ctx, event->id);
38 if (ret < 0)
39 return 0;
40 lib_ring_buffer_align_ctx(&ctx, ltt_alignof(data));
41 chan->ops->event_write(&ctx, &data, sizeof(data));
42 chan->ops->event_commit(&ctx);
43 return 0;
44}
45
46/*
47 * Create event description
48 */
49static
50int lttng_create_kprobe_event(const char *name, struct ltt_event *event)
51{
52 struct lttng_event_field *field;
53 struct lttng_event_desc *desc;
54 int ret;
55
56 desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
57 if (!desc)
58 return -ENOMEM;
59 desc->name = kstrdup(name, GFP_KERNEL);
60 if (!desc->name) {
61 ret = -ENOMEM;
62 goto error_str;
63 }
64 desc->nr_fields = 1;
65 desc->fields = field =
66 kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
67 if (!field) {
68 ret = -ENOMEM;
69 goto error_field;
70 }
71 field->name = "ip";
72 field->type.atype = atype_integer;
73 field->type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
74 field->type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
75 field->type.u.basic.integer.signedness = is_signed_type(unsigned long);
76 field->type.u.basic.integer.reverse_byte_order = 0;
77 field->type.u.basic.integer.base = 16;
78 field->type.u.basic.integer.encoding = lttng_encode_none;
79 desc->owner = THIS_MODULE;
80 event->desc = desc;
81
82 return 0;
83
84error_field:
85 kfree(desc->name);
86error_str:
87 kfree(desc);
88 return ret;
89}
90
91int lttng_kprobes_register(const char *name,
92 const char *symbol_name,
93 uint64_t offset,
94 uint64_t addr,
95 struct ltt_event *event)
96{
97 int ret;
98
99 /* Kprobes expects a NULL symbol name if unused */
100 if (symbol_name[0] == '\0')
101 symbol_name = NULL;
102
103 ret = lttng_create_kprobe_event(name, event);
104 if (ret)
105 goto error;
106 memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
107 event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
108 if (symbol_name) {
109 event->u.kprobe.symbol_name =
110 kzalloc(LTTNG_SYM_NAME_LEN * sizeof(char),
111 GFP_KERNEL);
112 if (!event->u.kprobe.symbol_name) {
113 ret = -ENOMEM;
114 goto name_error;
115 }
116 memcpy(event->u.kprobe.symbol_name, symbol_name,
117 LTTNG_SYM_NAME_LEN * sizeof(char));
118 event->u.kprobe.kp.symbol_name =
119 event->u.kprobe.symbol_name;
120 }
121 event->u.kprobe.kp.offset = offset;
122 event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
123
124 /*
125 * Ensure the memory we just allocated don't trigger page faults.
126 * Well.. kprobes itself puts the page fault handler on the blacklist,
127 * but we can never be too careful.
128 */
129 wrapper_vmalloc_sync_all();
130
131 ret = register_kprobe(&event->u.kprobe.kp);
132 if (ret)
133 goto register_error;
134 return 0;
135
136register_error:
137 kfree(event->u.kprobe.symbol_name);
138name_error:
139 kfree(event->desc->fields);
140 kfree(event->desc->name);
141 kfree(event->desc);
142error:
143 return ret;
144}
145EXPORT_SYMBOL_GPL(lttng_kprobes_register);
146
147void lttng_kprobes_unregister(struct ltt_event *event)
148{
149 unregister_kprobe(&event->u.kprobe.kp);
150}
151EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
152
153void lttng_kprobes_destroy_private(struct ltt_event *event)
154{
155 kfree(event->u.kprobe.symbol_name);
156 kfree(event->desc->fields);
157 kfree(event->desc->name);
158 kfree(event->desc);
159}
160EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
161
162MODULE_LICENSE("GPL and additional rights");
163MODULE_AUTHOR("Mathieu Desnoyers");
164MODULE_DESCRIPTION("Linux Trace Toolkit Kprobes Support");
diff --git a/drivers/staging/lttng/probes/lttng-kretprobes.c b/drivers/staging/lttng/probes/lttng-kretprobes.c
deleted file mode 100644
index 6b291018f715..000000000000
--- a/drivers/staging/lttng/probes/lttng-kretprobes.c
+++ /dev/null
@@ -1,277 +0,0 @@
1/*
2 * (C) Copyright 2009-2011 -
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * LTTng kretprobes integration module.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/module.h>
11#include <linux/kprobes.h>
12#include <linux/slab.h>
13#include <linux/kref.h>
14#include "../ltt-events.h"
15#include "../wrapper/ringbuffer/frontend_types.h"
16#include "../wrapper/vmalloc.h"
17#include "../ltt-tracer.h"
18
/* Index into lttng_krp::event[]: which end of the function is traced. */
enum lttng_kretprobe_type {
	EVENT_ENTRY = 0,
	EVENT_RETURN = 1,
};

/*
 * State shared between the entry and return events of one kretprobe.
 * kref_register is initialized to 2: the kretprobe is unregistered only
 * once both events dropped their reference.  kref_alloc likewise keeps
 * the shared allocations alive until both events are destroyed.
 */
struct lttng_krp {
	struct kretprobe krp;
	struct ltt_event *event[2];	/* ENTRY and RETURN */
	struct kref kref_register;
	struct kref kref_alloc;
};
30
31static
32int _lttng_kretprobes_handler(struct kretprobe_instance *krpi,
33 struct pt_regs *regs,
34 enum lttng_kretprobe_type type)
35{
36 struct lttng_krp *lttng_krp =
37 container_of(krpi->rp, struct lttng_krp, krp);
38 struct ltt_event *event =
39 lttng_krp->event[type];
40 struct ltt_channel *chan = event->chan;
41 struct lib_ring_buffer_ctx ctx;
42 int ret;
43 struct {
44 unsigned long ip;
45 unsigned long parent_ip;
46 } payload;
47
48 if (unlikely(!ACCESS_ONCE(chan->session->active)))
49 return 0;
50 if (unlikely(!ACCESS_ONCE(chan->enabled)))
51 return 0;
52 if (unlikely(!ACCESS_ONCE(event->enabled)))
53 return 0;
54
55 payload.ip = (unsigned long) krpi->rp->kp.addr;
56 payload.parent_ip = (unsigned long) krpi->ret_addr;
57
58 lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(payload),
59 ltt_alignof(payload), -1);
60 ret = chan->ops->event_reserve(&ctx, event->id);
61 if (ret < 0)
62 return 0;
63 lib_ring_buffer_align_ctx(&ctx, ltt_alignof(payload));
64 chan->ops->event_write(&ctx, &payload, sizeof(payload));
65 chan->ops->event_commit(&ctx);
66 return 0;
67}
68
69static
70int lttng_kretprobes_handler_entry(struct kretprobe_instance *krpi,
71 struct pt_regs *regs)
72{
73 return _lttng_kretprobes_handler(krpi, regs, EVENT_ENTRY);
74}
75
76static
77int lttng_kretprobes_handler_return(struct kretprobe_instance *krpi,
78 struct pt_regs *regs)
79{
80 return _lttng_kretprobes_handler(krpi, regs, EVENT_RETURN);
81}
82
83/*
84 * Create event description
85 */
86static
87int lttng_create_kprobe_event(const char *name, struct ltt_event *event,
88 enum lttng_kretprobe_type type)
89{
90 struct lttng_event_field *fields;
91 struct lttng_event_desc *desc;
92 int ret;
93 char *alloc_name;
94 size_t name_len;
95 const char *suffix = NULL;
96
97 desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
98 if (!desc)
99 return -ENOMEM;
100 name_len = strlen(name);
101 switch (type) {
102 case EVENT_ENTRY:
103 suffix = "_entry";
104 break;
105 case EVENT_RETURN:
106 suffix = "_return";
107 break;
108 }
109 name_len += strlen(suffix);
110 alloc_name = kmalloc(name_len + 1, GFP_KERNEL);
111 if (!alloc_name) {
112 ret = -ENOMEM;
113 goto error_str;
114 }
115 strcpy(alloc_name, name);
116 strcat(alloc_name, suffix);
117 desc->name = alloc_name;
118 desc->nr_fields = 2;
119 desc->fields = fields =
120 kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
121 if (!desc->fields) {
122 ret = -ENOMEM;
123 goto error_fields;
124 }
125 fields[0].name = "ip";
126 fields[0].type.atype = atype_integer;
127 fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
128 fields[0].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
129 fields[0].type.u.basic.integer.signedness = is_signed_type(unsigned long);
130 fields[0].type.u.basic.integer.reverse_byte_order = 0;
131 fields[0].type.u.basic.integer.base = 16;
132 fields[0].type.u.basic.integer.encoding = lttng_encode_none;
133
134 fields[1].name = "parent_ip";
135 fields[1].type.atype = atype_integer;
136 fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
137 fields[1].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
138 fields[1].type.u.basic.integer.signedness = is_signed_type(unsigned long);
139 fields[1].type.u.basic.integer.reverse_byte_order = 0;
140 fields[1].type.u.basic.integer.base = 16;
141 fields[1].type.u.basic.integer.encoding = lttng_encode_none;
142
143 desc->owner = THIS_MODULE;
144 event->desc = desc;
145
146 return 0;
147
148error_fields:
149 kfree(desc->name);
150error_str:
151 kfree(desc);
152 return ret;
153}
154
155int lttng_kretprobes_register(const char *name,
156 const char *symbol_name,
157 uint64_t offset,
158 uint64_t addr,
159 struct ltt_event *event_entry,
160 struct ltt_event *event_return)
161{
162 int ret;
163 struct lttng_krp *lttng_krp;
164
165 /* Kprobes expects a NULL symbol name if unused */
166 if (symbol_name[0] == '\0')
167 symbol_name = NULL;
168
169 ret = lttng_create_kprobe_event(name, event_entry, EVENT_ENTRY);
170 if (ret)
171 goto error;
172 ret = lttng_create_kprobe_event(name, event_return, EVENT_RETURN);
173 if (ret)
174 goto event_return_error;
175 lttng_krp = kzalloc(sizeof(*lttng_krp), GFP_KERNEL);
176 if (!lttng_krp)
177 goto krp_error;
178 lttng_krp->krp.entry_handler = lttng_kretprobes_handler_entry;
179 lttng_krp->krp.handler = lttng_kretprobes_handler_return;
180 if (symbol_name) {
181 char *alloc_symbol;
182
183 alloc_symbol = kstrdup(symbol_name, GFP_KERNEL);
184 if (!alloc_symbol) {
185 ret = -ENOMEM;
186 goto name_error;
187 }
188 lttng_krp->krp.kp.symbol_name =
189 alloc_symbol;
190 event_entry->u.kretprobe.symbol_name =
191 alloc_symbol;
192 event_return->u.kretprobe.symbol_name =
193 alloc_symbol;
194 }
195 lttng_krp->krp.kp.offset = offset;
196 lttng_krp->krp.kp.addr = (void *) (unsigned long) addr;
197
198 /* Allow probe handler to find event structures */
199 lttng_krp->event[EVENT_ENTRY] = event_entry;
200 lttng_krp->event[EVENT_RETURN] = event_return;
201 event_entry->u.kretprobe.lttng_krp = lttng_krp;
202 event_return->u.kretprobe.lttng_krp = lttng_krp;
203
204 /*
205 * Both events must be unregistered before the kretprobe is
206 * unregistered. Same for memory allocation.
207 */
208 kref_init(&lttng_krp->kref_alloc);
209 kref_get(&lttng_krp->kref_alloc); /* inc refcount to 2 */
210 kref_init(&lttng_krp->kref_register);
211 kref_get(&lttng_krp->kref_register); /* inc refcount to 2 */
212
213 /*
214 * Ensure the memory we just allocated don't trigger page faults.
215 * Well.. kprobes itself puts the page fault handler on the blacklist,
216 * but we can never be too careful.
217 */
218 wrapper_vmalloc_sync_all();
219
220 ret = register_kretprobe(&lttng_krp->krp);
221 if (ret)
222 goto register_error;
223 return 0;
224
225register_error:
226 kfree(lttng_krp->krp.kp.symbol_name);
227name_error:
228 kfree(lttng_krp);
229krp_error:
230 kfree(event_return->desc->fields);
231 kfree(event_return->desc->name);
232 kfree(event_return->desc);
233event_return_error:
234 kfree(event_entry->desc->fields);
235 kfree(event_entry->desc->name);
236 kfree(event_entry->desc);
237error:
238 return ret;
239}
240EXPORT_SYMBOL_GPL(lttng_kretprobes_register);
241
242static
243void _lttng_kretprobes_unregister_release(struct kref *kref)
244{
245 struct lttng_krp *lttng_krp =
246 container_of(kref, struct lttng_krp, kref_register);
247 unregister_kretprobe(&lttng_krp->krp);
248}
249
250void lttng_kretprobes_unregister(struct ltt_event *event)
251{
252 kref_put(&event->u.kretprobe.lttng_krp->kref_register,
253 _lttng_kretprobes_unregister_release);
254}
255EXPORT_SYMBOL_GPL(lttng_kretprobes_unregister);
256
257static
258void _lttng_kretprobes_release(struct kref *kref)
259{
260 struct lttng_krp *lttng_krp =
261 container_of(kref, struct lttng_krp, kref_alloc);
262 kfree(lttng_krp->krp.kp.symbol_name);
263}
264
265void lttng_kretprobes_destroy_private(struct ltt_event *event)
266{
267 kfree(event->desc->fields);
268 kfree(event->desc->name);
269 kfree(event->desc);
270 kref_put(&event->u.kretprobe.lttng_krp->kref_alloc,
271 _lttng_kretprobes_release);
272}
273EXPORT_SYMBOL_GPL(lttng_kretprobes_destroy_private);
274
275MODULE_LICENSE("GPL and additional rights");
276MODULE_AUTHOR("Mathieu Desnoyers");
277MODULE_DESCRIPTION("Linux Trace Toolkit Kretprobes Support");
diff --git a/drivers/staging/lttng/probes/lttng-probe-block.c b/drivers/staging/lttng/probes/lttng-probe-block.c
deleted file mode 100644
index 9eeebfc3a600..000000000000
--- a/drivers/staging/lttng/probes/lttng-probe-block.c
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * probes/lttng-probe-block.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng block probes.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/blktrace_api.h>
13
14/*
15 * Create the tracepoint static inlines from the kernel to validate that our
16 * trace event macros match the kernel we run on.
17 */
18#include <trace/events/block.h>
19
20/*
21 * Create LTTng tracepoint probes.
22 */
23#define LTTNG_PACKAGE_BUILD
24#define CREATE_TRACE_POINTS
25#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
26
27#include "../instrumentation/events/lttng-module/block.h"
28
29MODULE_LICENSE("GPL and additional rights");
30MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
31MODULE_DESCRIPTION("LTTng block probes");
diff --git a/drivers/staging/lttng/probes/lttng-probe-irq.c b/drivers/staging/lttng/probes/lttng-probe-irq.c
deleted file mode 100644
index 4a6a322d1447..000000000000
--- a/drivers/staging/lttng/probes/lttng-probe-irq.c
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * probes/lttng-probe-irq.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng irq probes.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/interrupt.h>
13
14/*
15 * Create the tracepoint static inlines from the kernel to validate that our
16 * trace event macros match the kernel we run on.
17 */
18#include <trace/events/irq.h>
19
20/*
21 * Create LTTng tracepoint probes.
22 */
23#define LTTNG_PACKAGE_BUILD
24#define CREATE_TRACE_POINTS
25#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
26
27#include "../instrumentation/events/lttng-module/irq.h"
28
29MODULE_LICENSE("GPL and additional rights");
30MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
31MODULE_DESCRIPTION("LTTng irq probes");
diff --git a/drivers/staging/lttng/probes/lttng-probe-kvm.c b/drivers/staging/lttng/probes/lttng-probe-kvm.c
deleted file mode 100644
index 9efc6dd16880..000000000000
--- a/drivers/staging/lttng/probes/lttng-probe-kvm.c
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * probes/lttng-probe-kvm.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng kvm probes.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/kvm_host.h>
13
14/*
15 * Create the tracepoint static inlines from the kernel to validate that our
16 * trace event macros match the kernel we run on.
17 */
18#include <trace/events/kvm.h>
19
20/*
21 * Create LTTng tracepoint probes.
22 */
23#define LTTNG_PACKAGE_BUILD
24#define CREATE_TRACE_POINTS
25#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
26
27#include "../instrumentation/events/lttng-module/kvm.h"
28
29MODULE_LICENSE("GPL and additional rights");
30MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
31MODULE_DESCRIPTION("LTTng kvm probes");
diff --git a/drivers/staging/lttng/probes/lttng-probe-lttng.c b/drivers/staging/lttng/probes/lttng-probe-lttng.c
deleted file mode 100644
index 62aab6c75a76..000000000000
--- a/drivers/staging/lttng/probes/lttng-probe-lttng.c
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * probes/lttng-probe-core.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng core probes.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12
13/*
14 * Create LTTng tracepoint probes.
15 */
16#define LTTNG_PACKAGE_BUILD
17#define CREATE_TRACE_POINTS
18#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
19
20#include "../instrumentation/events/lttng-module/lttng.h"
21
22MODULE_LICENSE("GPL and additional rights");
23MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
24MODULE_DESCRIPTION("LTTng core probes");
diff --git a/drivers/staging/lttng/probes/lttng-probe-sched.c b/drivers/staging/lttng/probes/lttng-probe-sched.c
deleted file mode 100644
index 18c1521de1db..000000000000
--- a/drivers/staging/lttng/probes/lttng-probe-sched.c
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * probes/lttng-probe-sched.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng sched probes.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12
13/*
14 * Create the tracepoint static inlines from the kernel to validate that our
15 * trace event macros match the kernel we run on.
16 */
17#include <trace/events/sched.h>
18
19/*
20 * Create LTTng tracepoint probes.
21 */
22#define LTTNG_PACKAGE_BUILD
23#define CREATE_TRACE_POINTS
24#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
25
26#include "../instrumentation/events/lttng-module/sched.h"
27
28MODULE_LICENSE("GPL and additional rights");
29MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
30MODULE_DESCRIPTION("LTTng sched probes");
diff --git a/drivers/staging/lttng/probes/lttng-type-list.h b/drivers/staging/lttng/probes/lttng-type-list.h
deleted file mode 100644
index 7b953dbc5efe..000000000000
--- a/drivers/staging/lttng/probes/lttng-type-list.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * lttng-type-list.h
3 *
4 * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Dual LGPL v2.1/GPL v2 license.
7 */
8
9/* Type list, used to create metadata */
10
11/* Enumerations */
12TRACE_EVENT_ENUM(hrtimer_mode,
13 V(HRTIMER_MODE_ABS),
14 V(HRTIMER_MODE_REL),
15 V(HRTIMER_MODE_PINNED),
16 V(HRTIMER_MODE_ABS_PINNED),
17 V(HRTIMER_MODE_REL_PINNED),
18 R(HRTIMER_MODE_UNDEFINED, 0x04, 0x20), /* Example (to remove) */
19)
20
21TRACE_EVENT_TYPE(hrtimer_mode, enum, unsigned char)
diff --git a/drivers/staging/lttng/probes/lttng-types.c b/drivers/staging/lttng/probes/lttng-types.c
deleted file mode 100644
index 93a9ae5436fb..000000000000
--- a/drivers/staging/lttng/probes/lttng-types.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * probes/lttng-types.c
3 *
4 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng types.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
14#include "../ltt-events.h"
15#include "lttng-types.h"
16#include <linux/hrtimer.h>
17
18#define STAGE_EXPORT_ENUMS
19#include "lttng-types.h"
20#include "lttng-type-list.h"
21#undef STAGE_EXPORT_ENUMS
22
23struct lttng_enum lttng_enums[] = {
24#define STAGE_EXPORT_TYPES
25#include "lttng-types.h"
26#include "lttng-type-list.h"
27#undef STAGE_EXPORT_TYPES
28};
29
/*
 * Module init: pre-fault the module's vmalloc'd memory so later tracing
 * never takes a page fault.
 */
static int lttng_types_init(void)
{
	wrapper_vmalloc_sync_all();
	/* TODO */
	return 0;
}

module_init(lttng_types_init);
40
/* Module exit: nothing to tear down. */
static void lttng_types_exit(void)
{
}

module_exit(lttng_types_exit);
46
47MODULE_LICENSE("GPL and additional rights");
48MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
49MODULE_DESCRIPTION("LTTng types");
diff --git a/drivers/staging/lttng/probes/lttng-types.h b/drivers/staging/lttng/probes/lttng-types.h
deleted file mode 100644
index 10620280baf6..000000000000
--- a/drivers/staging/lttng/probes/lttng-types.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Protect against multiple inclusion of structure declarations, but run the
3 * stages below each time.
4 */
5#ifndef _LTTNG_PROBES_LTTNG_TYPES_H
6#define _LTTNG_PROBES_LTTNG_TYPES_H
7
8/*
9 * probes/lttng-types.h
10 *
11 * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
12 *
13 * LTTng types.
14 *
15 * Dual LGPL v2.1/GPL v2 license.
16 */
17
18#include <linux/seq_file.h>
19#include "lttng.h"
20#include "../ltt-events.h"
21#include "../ltt-tracer.h"
22#include "../ltt-endian.h"
23
24#endif /* _LTTNG_PROBES_LTTNG_TYPES_H */
25
26/* Export enumerations */
27
28#ifdef STAGE_EXPORT_ENUMS
29
30#undef TRACE_EVENT_TYPE
31#define TRACE_EVENT_TYPE(_name, _abstract_type, args...)
32
33#undef TRACE_EVENT_ENUM
34#define TRACE_EVENT_ENUM(_name, _entries...) \
35 const struct lttng_enum_entry __trace_event_enum_##_name[] = { \
36 PARAMS(_entries) \
37 };
38
39/* Enumeration entry (single value) */
40#undef V
41#define V(_string) { _string, _string, #_string}
42
43/* Enumeration entry (range) */
44#undef R
45#define R(_string, _range_start, _range_end) \
46 { _range_start, _range_end, #_string }
47
48#endif /* STAGE_EXPORT_ENUMS */
49
50
51/* Export named types */
52
53#ifdef STAGE_EXPORT_TYPES
54
55#undef TRACE_EVENT_TYPE___enum
56#define TRACE_EVENT_TYPE___enum(_name, _container_type) \
57 { \
58 .name = #_name, \
59 .container_type = __type_integer(_container_type, __BYTE_ORDER, 10, none), \
60 .entries = __trace_event_enum_##_name, \
61 .len = ARRAY_SIZE(__trace_event_enum_##_name), \
62 },
63
64/* Local declaration */
65#undef TRACE_EVENT_TYPE
66#define TRACE_EVENT_TYPE(_name, _abstract_type, args...) \
67 TRACE_EVENT_TYPE___##_abstract_type(_name, args)
68
69#undef TRACE_EVENT_ENUM
70#define TRACE_EVENT_ENUM(_name, _entries...)
71
72#endif /* STAGE_EXPORT_TYPES */
diff --git a/drivers/staging/lttng/probes/lttng.h b/drivers/staging/lttng/probes/lttng.h
deleted file mode 100644
index e16fc2dd8ce5..000000000000
--- a/drivers/staging/lttng/probes/lttng.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _LTTNG_PROBES_LTTNG_H
2#define _LTTNG_PROBES_LTTNG_H
3
4/*
5 * lttng.h
6 *
7 * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Dual LGPL v2.1/GPL v2 license.
10 */
11
12#undef PARAMS
13#define PARAMS(args...) args
14
15#endif /* _LTTNG_PROBES_LTTNG_H */
diff --git a/drivers/staging/lttng/wrapper/ftrace.h b/drivers/staging/lttng/wrapper/ftrace.h
deleted file mode 100644
index ace33c54100d..000000000000
--- a/drivers/staging/lttng/wrapper/ftrace.h
+++ /dev/null
@@ -1,70 +0,0 @@
1#ifndef _LTT_WRAPPER_FTRACE_H
2#define _LTT_WRAPPER_FTRACE_H
3
4/*
5 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
6 *
7 * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
8 * available, else we need to have a kernel that exports this function to GPL
9 * modules.
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include <linux/ftrace.h>
15
16#ifdef CONFIG_KALLSYMS
17
18#include <linux/kallsyms.h>
19#include "kallsyms.h"
20
21static inline
22int wrapper_register_ftrace_function_probe(char *glob,
23 struct ftrace_probe_ops *ops, void *data)
24{
25 int (*register_ftrace_function_probe_sym)(char *glob,
26 struct ftrace_probe_ops *ops, void *data);
27
28 register_ftrace_function_probe_sym = (void *) kallsyms_lookup_funcptr("register_ftrace_function_probe");
29 if (register_ftrace_function_probe_sym) {
30 return register_ftrace_function_probe_sym(glob, ops, data);
31 } else {
32 printk(KERN_WARNING "LTTng: register_ftrace_function_probe symbol lookup failed.\n");
33 return -EINVAL;
34 }
35}
36
37static inline
38void wrapper_unregister_ftrace_function_probe(char *glob,
39 struct ftrace_probe_ops *ops, void *data)
40{
41 void (*unregister_ftrace_function_probe_sym)(char *glob,
42 struct ftrace_probe_ops *ops, void *data);
43
44 unregister_ftrace_function_probe_sym = (void *) kallsyms_lookup_funcptr("unregister_ftrace_function_probe");
45 if (unregister_ftrace_function_probe_sym) {
46 unregister_ftrace_function_probe_sym(glob, ops, data);
47 } else {
48 printk(KERN_WARNING "LTTng: unregister_ftrace_function_probe symbol lookup failed.\n");
49 WARN_ON(1);
50 }
51}
52
53#else
54
/* Without KALLSYMS, call the (exported) ftrace probe API directly. */
static inline
int wrapper_register_ftrace_function_probe(char *glob,
		struct ftrace_probe_ops *ops, void *data)
{
	return register_ftrace_function_probe(glob, ops, data);
}

static inline
void wrapper_unregister_ftrace_function_probe(char *glob,
		struct ftrace_probe_ops *ops, void *data)
{
	/*
	 * Fix: no "return" of the call's (void) result -- returning a
	 * void expression from a void function is a GNU/C++ extension,
	 * not standard C.
	 */
	unregister_ftrace_function_probe(glob, ops, data);
}
68#endif
69
70#endif /* _LTT_WRAPPER_FTRACE_H */
diff --git a/drivers/staging/lttng/wrapper/inline_memcpy.h b/drivers/staging/lttng/wrapper/inline_memcpy.h
deleted file mode 100644
index 33150cdb5b5e..000000000000
--- a/drivers/staging/lttng/wrapper/inline_memcpy.h
+++ /dev/null
@@ -1,11 +0,0 @@
1/*
2 * wrapper/inline_memcpy.h
3 *
4 * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Dual LGPL v2.1/GPL v2 license.
7 */
8
9#if !defined(__HAVE_ARCH_INLINE_MEMCPY) && !defined(inline_memcpy)
10#define inline_memcpy memcpy
11#endif
diff --git a/drivers/staging/lttng/wrapper/kallsyms.h b/drivers/staging/lttng/wrapper/kallsyms.h
deleted file mode 100644
index a7b8ab136ed1..000000000000
--- a/drivers/staging/lttng/wrapper/kallsyms.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _LTT_WRAPPER_KALLSYMS_H
2#define _LTT_WRAPPER_KALLSYMS_H
3
4#include <linux/kallsyms.h>
5
6/*
7 * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
8 *
9 * wrapper around kallsyms_lookup_name. Implements arch-dependent code for
10 * arches where the address of the start of the function body is different
11 * from the pointer which can be used to call the function, e.g. ARM THUMB2.
12 *
13 * Dual LGPL v2.1/GPL v2 license.
14 */
15
/*
 * Look up a function's address by name and turn it into a callable
 * function pointer.  On ARM Thumb-2 kernels the low bit must be set so
 * an indirect call stays in Thumb mode.
 */
static inline
unsigned long kallsyms_lookup_funcptr(const char *name)
{
	unsigned long addr = kallsyms_lookup_name(name);

#if defined(CONFIG_ARM) && defined(CONFIG_THUMB2_KERNEL)
	if (addr)
		addr |= 1; /* set bit 0 in address for thumb mode */
#endif
	return addr;
}
30#endif /* _LTT_WRAPPER_KALLSYMS_H */
diff --git a/drivers/staging/lttng/wrapper/perf.h b/drivers/staging/lttng/wrapper/perf.h
deleted file mode 100644
index 9a6dbfc03c26..000000000000
--- a/drivers/staging/lttng/wrapper/perf.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef _LTT_WRAPPER_PERF_H
2#define _LTT_WRAPPER_PERF_H
3
4/*
5 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/perf_event.h>
11
12#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
13static inline struct perf_event *
14wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
15 int cpu,
16 struct task_struct *task,
17 perf_overflow_handler_t callback)
18{
19 return perf_event_create_kernel_counter(attr, cpu, task, callback, NULL);
20}
21#else
22static inline struct perf_event *
23wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
24 int cpu,
25 struct task_struct *task,
26 perf_overflow_handler_t callback)
27{
28 return perf_event_create_kernel_counter(attr, cpu, task, callback);
29}
30#endif
31
32#endif /* _LTT_WRAPPER_PERF_H */
diff --git a/drivers/staging/lttng/wrapper/poll.h b/drivers/staging/lttng/wrapper/poll.h
deleted file mode 100644
index 9c2d18f4a729..000000000000
--- a/drivers/staging/lttng/wrapper/poll.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef _LTTNG_WRAPPER_POLL_H
2#define _LTTNG_WRAPPER_POLL_H
3
4/*
5 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/poll.h>
11
12#define poll_wait_set_exclusive(poll_table)
13
14#endif /* _LTTNG_WRAPPER_POLL_H */
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/api.h b/drivers/staging/lttng/wrapper/ringbuffer/api.h
deleted file mode 100644
index 182bee284016..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/api.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/api.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/backend.h b/drivers/staging/lttng/wrapper/ringbuffer/backend.h
deleted file mode 100644
index bfdd39def1a9..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/backend.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/backend.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h b/drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h
deleted file mode 100644
index 00d45e42a60e..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include "../../wrapper/inline_memcpy.h"
2#include "../../lib/ringbuffer/backend_internal.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/backend_types.h b/drivers/staging/lttng/wrapper/ringbuffer/backend_types.h
deleted file mode 100644
index c59effda7de3..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/backend_types.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/backend_types.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/config.h b/drivers/staging/lttng/wrapper/ringbuffer/config.h
deleted file mode 100644
index 0ce7a9dac558..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/config.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/config.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/frontend.h b/drivers/staging/lttng/wrapper/ringbuffer/frontend.h
deleted file mode 100644
index 7c6c07052afb..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/frontend.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/frontend.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h b/drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h
deleted file mode 100644
index b03c501cc3f0..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/frontend_api.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h b/drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h
deleted file mode 100644
index 18991014585d..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/frontend_internal.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h b/drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h
deleted file mode 100644
index 0c23244b5a2f..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/frontend_types.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/iterator.h b/drivers/staging/lttng/wrapper/ringbuffer/iterator.h
deleted file mode 100644
index 76e9edbb321b..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/iterator.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/iterator.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/nohz.h b/drivers/staging/lttng/wrapper/ringbuffer/nohz.h
deleted file mode 100644
index 9fbb84d2370d..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/nohz.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/nohz.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/vatomic.h b/drivers/staging/lttng/wrapper/ringbuffer/vatomic.h
deleted file mode 100644
index d57844520f62..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/vatomic.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/vatomic.h"
diff --git a/drivers/staging/lttng/wrapper/ringbuffer/vfs.h b/drivers/staging/lttng/wrapper/ringbuffer/vfs.h
deleted file mode 100644
index f8e9ed949da0..000000000000
--- a/drivers/staging/lttng/wrapper/ringbuffer/vfs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../lib/ringbuffer/vfs.h"
diff --git a/drivers/staging/lttng/wrapper/spinlock.h b/drivers/staging/lttng/wrapper/spinlock.h
deleted file mode 100644
index 8b1ad9925595..000000000000
--- a/drivers/staging/lttng/wrapper/spinlock.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef _LTT_WRAPPER_SPINLOCK_H
2#define _LTT_WRAPPER_SPINLOCK_H
3
4/*
5 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/version.h>
11
12#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
13
14#include <linux/string.h>
15
16#define raw_spin_lock_init(lock) \
17 do { \
18 raw_spinlock_t __lock = __RAW_SPIN_LOCK_UNLOCKED; \
19 memcpy(lock, &__lock, sizeof(lock)); \
20 } while (0)
21
22#define raw_spin_is_locked(lock) __raw_spin_is_locked(lock)
23
24
25#endif
26#endif /* _LTT_WRAPPER_SPINLOCK_H */
diff --git a/drivers/staging/lttng/wrapper/splice.c b/drivers/staging/lttng/wrapper/splice.c
deleted file mode 100644
index ba224eea62fb..000000000000
--- a/drivers/staging/lttng/wrapper/splice.c
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
3 *
4 * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
5 * available, else we need to have a kernel that exports this function to GPL
6 * modules.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#ifdef CONFIG_KALLSYMS
12
13#include <linux/kallsyms.h>
14#include <linux/fs.h>
15#include <linux/splice.h>
16#include "kallsyms.h"
17
18static
19ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
20 struct splice_pipe_desc *spd);
21
22ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
23 struct splice_pipe_desc *spd)
24{
25 if (!splice_to_pipe_sym)
26 splice_to_pipe_sym = (void *) kallsyms_lookup_funcptr("splice_to_pipe");
27 if (splice_to_pipe_sym) {
28 return splice_to_pipe_sym(pipe, spd);
29 } else {
30 printk(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
31 return -ENOSYS;
32 }
33}
34
35#else
36
37#include <linux/fs.h>
38#include <linux/splice.h>
39
40ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
41 struct splice_pipe_desc *spd)
42{
43 return splice_to_pipe(pipe, spd);
44}
45
46#endif
diff --git a/drivers/staging/lttng/wrapper/splice.h b/drivers/staging/lttng/wrapper/splice.h
deleted file mode 100644
index f75309a866fb..000000000000
--- a/drivers/staging/lttng/wrapper/splice.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef _LTT_WRAPPER_SPLICE_H
2#define _LTT_WRAPPER_SPLICE_H
3
4/*
5 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
6 *
7 * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
8 * available, else we need to have a kernel that exports this function to GPL
9 * modules.
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include <linux/splice.h>
15
16ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
17 struct splice_pipe_desc *spd);
18
19#ifndef PIPE_DEF_BUFFERS
20#define PIPE_DEF_BUFFERS 16
21#endif
22
23#endif /* _LTT_WRAPPER_SPLICE_H */
diff --git a/drivers/staging/lttng/wrapper/trace-clock.h b/drivers/staging/lttng/wrapper/trace-clock.h
deleted file mode 100644
index 8b77428a527b..000000000000
--- a/drivers/staging/lttng/wrapper/trace-clock.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
3 *
4 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
5 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#ifndef _LTT_TRACE_CLOCK_H
11#define _LTT_TRACE_CLOCK_H
12
13#ifdef CONFIG_HAVE_TRACE_CLOCK
14#include <linux/trace-clock.h>
15#else /* CONFIG_HAVE_TRACE_CLOCK */
16
17#include <linux/hardirq.h>
18#include <linux/ktime.h>
19#include <linux/time.h>
20#include <linux/hrtimer.h>
21
22static inline u64 trace_clock_monotonic_wrapper(void)
23{
24 ktime_t ktime;
25
26 /*
27 * Refuse to trace from NMIs with this wrapper, because an NMI could
28 * nest over the xtime write seqlock and deadlock.
29 */
30 if (in_nmi())
31 return (u64) -EIO;
32
33 ktime = ktime_get();
34 return (u64) ktime.tv64;
35}
36
37static inline u32 trace_clock_read32(void)
38{
39 return (u32) trace_clock_monotonic_wrapper();
40}
41
42static inline u64 trace_clock_read64(void)
43{
44 return (u64) trace_clock_monotonic_wrapper();
45}
46
47static inline u64 trace_clock_frequency(void)
48{
49 return (u64)NSEC_PER_SEC;
50}
51
52static inline u32 trace_clock_freq_scale(void)
53{
54 return 1;
55}
56
57static inline int get_trace_clock(void)
58{
59 printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock.\n");
60 printk(KERN_WARNING " * NMIs will not be traced,\n");
61 printk(KERN_WARNING " * expect significant performance degradation compared to the\n");
62 printk(KERN_WARNING " LTTng trace clocks.\n");
63 printk(KERN_WARNING "Integration of the LTTng 0.x trace clocks into LTTng 2.0 is planned\n");
64 printk(KERN_WARNING "in a near future.\n");
65
66 return 0;
67}
68
69static inline void put_trace_clock(void)
70{
71}
72
73#endif /* CONFIG_HAVE_TRACE_CLOCK */
74
75#endif /* _LTT_TRACE_CLOCK_H */
diff --git a/drivers/staging/lttng/wrapper/uuid.h b/drivers/staging/lttng/wrapper/uuid.h
deleted file mode 100644
index bfa67ff3c1db..000000000000
--- a/drivers/staging/lttng/wrapper/uuid.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef _LTT_WRAPPER_UUID_H
2#define _LTT_WRAPPER_UUID_H
3
4/*
5 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
6 *
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10#include <linux/version.h>
11
12#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
13#include <linux/uuid.h>
14#else
15
16#include <linux/random.h>
17
18typedef struct {
19 __u8 b[16];
20} uuid_le;
21
22static inline
23void uuid_le_gen(uuid_le *u)
24{
25 generate_random_uuid(u->b);
26}
27
28#endif
29#endif /* _LTT_WRAPPER_UUID_H */
diff --git a/drivers/staging/lttng/wrapper/vmalloc.h b/drivers/staging/lttng/wrapper/vmalloc.h
deleted file mode 100644
index 765f2ad9e225..000000000000
--- a/drivers/staging/lttng/wrapper/vmalloc.h
+++ /dev/null
@@ -1,49 +0,0 @@
1#ifndef _LTT_WRAPPER_VMALLOC_H
2#define _LTT_WRAPPER_VMALLOC_H
3
4/*
5 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
6 *
7 * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
8 * available, else we need to have a kernel that exports this function to GPL
9 * modules.
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#ifdef CONFIG_KALLSYMS
15
16#include <linux/kallsyms.h>
17#include "kallsyms.h"
18
19static inline
20void wrapper_vmalloc_sync_all(void)
21{
22 void (*vmalloc_sync_all_sym)(void);
23
24 vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
25 if (vmalloc_sync_all_sym) {
26 vmalloc_sync_all_sym();
27 } else {
28#ifdef CONFIG_X86
29 /*
30 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
31 * trigger recursive page faults.
32 */
33 printk(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
34 printk(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
35#endif
36 }
37}
38#else
39
40#include <linux/vmalloc.h>
41
42static inline
43void wrapper_vmalloc_sync_all(void)
44{
45 return vmalloc_sync_all();
46}
47#endif
48
49#endif /* _LTT_WRAPPER_VMALLOC_H */