-rw-r--r--   include/uapi/linux/bpf.h         62
-rw-r--r--   tools/include/uapi/linux/bpf.h   62
2 files changed, 62 insertions, 62 deletions
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 23b334bba1a6..8daef7326bb7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -828,12 +828,12 @@ union bpf_attr {
  *
  * Also, be aware that the newer helper
  * **bpf_perf_event_read_value**\ () is recommended over
- * **bpf_perf_event_read*\ () in general. The latter has some ABI
+ * **bpf_perf_event_read**\ () in general. The latter has some ABI
  * quirks where error and counter value are used as a return code
  * (which is wrong to do since ranges may overlap). This issue is
- * fixed with bpf_perf_event_read_value(), which at the same time
- * provides more features over the **bpf_perf_event_read**\ ()
- * interface. Please refer to the description of
+ * fixed with **bpf_perf_event_read_value**\ (), which at the same
+ * time provides more features over the **bpf_perf_event_read**\
+ * () interface. Please refer to the description of
  * **bpf_perf_event_read_value**\ () for details.
  * Return
  * The value of the perf event counter read from the map, or a
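The hunk above only touches RST markup, but the recommendation it documents is worth illustrating. Below is a minimal sketch of a tracing program that uses bpf_perf_event_read_value(), so the error code and the counter value come back separately instead of being overloaded into one return value as with bpf_perf_event_read(). It assumes libbpf's bpf_helpers.h; the map name (counters), the program name, and the attach point (do_sys_openat2) are illustrative, and user space is expected to perf_event_open() the counters and store their file descriptors into the map beforehand.

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: read a perf counter with bpf_perf_event_read_value(),
 * which reports errors separately from the counter value.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
        __uint(max_entries, 128);       /* >= number of possible CPUs on the target */
} counters SEC(".maps");

SEC("kprobe/do_sys_openat2")            /* example attach point */
int read_counter(struct pt_regs *ctx)
{
        struct bpf_perf_event_value val = {};
        long err;

        /* Error and counter value are returned separately, unlike
         * bpf_perf_event_read(), whose single return value overloads both.
         */
        err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
                                        &val, sizeof(val));
        if (err)
                return 0;

        bpf_printk("counter=%llu enabled=%llu running=%llu",
                   val.counter, val.enabled, val.running);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";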
@@ -1770,33 +1770,33 @@ union bpf_attr {
  *
  * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
  * Description
  * Return a user or a kernel stack in bpf program provided buffer.
  * To achieve this, the helper needs *ctx*, which is a pointer
  * to the context on which the tracing program is executed.
  * To store the stacktrace, the bpf program provides *buf* with
  * a nonnegative *size*.
  *
  * The last argument, *flags*, holds the number of stack frames to
  * skip (from 0 to 255), masked with
  * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
  * the following flags:
  *
  * **BPF_F_USER_STACK**
  * Collect a user space stack instead of a kernel stack.
  * **BPF_F_USER_BUILD_ID**
  * Collect buildid+offset instead of ips for user stack,
  * only valid if **BPF_F_USER_STACK** is also specified.
  *
  * **bpf_get_stack**\ () can collect up to
  * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
  * to sufficient large buffer size. Note that
  * this limit can be controlled with the **sysctl** program, and
  * that it should be manually increased in order to profile long
  * user stacks (such as stacks for Java programs). To do so, use:
  *
  * ::
  *
  * # sysctl kernel.perf_event_max_stack=<new value>
  *
  * Return
  * a non-negative value equal to or less than size on success, or a
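For reference, here is a minimal sketch of how a tracing program calls bpf_get_stack() as described in the hunk above: the low byte of *flags* (masked by BPF_F_SKIP_FIELD_MASK) selects how many frames to skip, and BPF_F_USER_STACK requests the user-space stack. It assumes libbpf's bpf_helpers.h; the attach point, map, and function names are illustrative.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch of bpf_get_stack() usage, following the description above.
 * Names below are illustrative, not part of the UAPI.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_DEPTH 127                   /* <= kernel.perf_event_max_stack */

struct stack_buf {
        __u64 ips[MAX_DEPTH];           /* instruction pointers */
};

/* The stack dump is too large for the 512-byte BPF stack, so stage it
 * in a per-CPU scratch map.
 */
struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct stack_buf);
} scratch SEC(".maps");

SEC("kprobe/do_sys_openat2")            /* example attach point */
int dump_user_stack(struct pt_regs *ctx)
{
        __u32 zero = 0;
        struct stack_buf *buf = bpf_map_lookup_elem(&scratch, &zero);
        long len;

        if (!buf)
                return 0;

        /* Low 8 bits of flags = frames to skip (0 here); BPF_F_USER_STACK
         * selects the user-space stack instead of the kernel one.
         */
        len = bpf_get_stack(ctx, buf->ips, sizeof(buf->ips), BPF_F_USER_STACK);
        if (len < 0)
                return 0;               /* negative error, e.g. no user stack */

        bpf_printk("captured %ld bytes of user stack", len);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";

If deep user stacks (for example, Java) come back truncated, raise the limit with sysctl kernel.perf_event_max_stack, as the documentation above notes.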
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 23b334bba1a6..8daef7326bb7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -828,12 +828,12 @@ union bpf_attr {
  *
  * Also, be aware that the newer helper
  * **bpf_perf_event_read_value**\ () is recommended over
- * **bpf_perf_event_read*\ () in general. The latter has some ABI
+ * **bpf_perf_event_read**\ () in general. The latter has some ABI
  * quirks where error and counter value are used as a return code
  * (which is wrong to do since ranges may overlap). This issue is
- * fixed with bpf_perf_event_read_value(), which at the same time
- * provides more features over the **bpf_perf_event_read**\ ()
- * interface. Please refer to the description of
+ * fixed with **bpf_perf_event_read_value**\ (), which at the same
+ * time provides more features over the **bpf_perf_event_read**\
+ * () interface. Please refer to the description of
  * **bpf_perf_event_read_value**\ () for details.
  * Return
  * The value of the perf event counter read from the map, or a
@@ -1770,33 +1770,33 @@ union bpf_attr {
  *
  * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
  * Description
  * Return a user or a kernel stack in bpf program provided buffer.
  * To achieve this, the helper needs *ctx*, which is a pointer
  * to the context on which the tracing program is executed.
  * To store the stacktrace, the bpf program provides *buf* with
  * a nonnegative *size*.
  *
  * The last argument, *flags*, holds the number of stack frames to
  * skip (from 0 to 255), masked with
  * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
  * the following flags:
  *
  * **BPF_F_USER_STACK**
  * Collect a user space stack instead of a kernel stack.
  * **BPF_F_USER_BUILD_ID**
  * Collect buildid+offset instead of ips for user stack,
  * only valid if **BPF_F_USER_STACK** is also specified.
  *
  * **bpf_get_stack**\ () can collect up to
  * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
  * to sufficient large buffer size. Note that
  * this limit can be controlled with the **sysctl** program, and
  * that it should be manually increased in order to profile long
  * user stacks (such as stacks for Java programs). To do so, use:
  *
  * ::
  *
  * # sysctl kernel.perf_event_max_stack=<new value>
  *
  * Return
  * a non-negative value equal to or less than size on success, or a