author    Alexei Starovoitov <ast@kernel.org>    2019-02-25 17:28:39 -0500
committer Daniel Borkmann <daniel@iogearbox.net> 2019-02-27 11:22:50 -0500
commit    492ecee892c2a4ba6a14903d5d586ff750b7e805
tree      6161a74e75b41fdb94944cd5451e8589da3cab7c /include/linux/bpf.h
parent    143bdc2e27b44d2559596424bfb017d578be33eb
bpf: enable program stats
JITed BPF programs are indistinguishable from kernel functions, but unlike kernel code, BPF code can change often. The typical "perf record" + "perf report" approach to profiling and tuning works just as well for BPF programs as for kernel code, but kernel code doesn't need to be monitored whereas BPF programs do. Users load and run a large number of BPF programs. These BPF stats allow tools to monitor the usage of BPF on a server.

The monitoring tools will turn the sysctl kernel.bpf_stats_enabled on and off for a few seconds to sample the average cost of the programs. Data aggregated over hours and days provides insight into the cost of BPF, and alarms can trigger if a given program suddenly becomes more expensive.

The cost of the two sched_clock() calls per program invocation adds ~20 nsec. Fast BPF progs (like selftests/bpf/progs/test_pkt_access.c) slow down from ~10 nsec to ~30 nsec. A static_key minimizes the cost of the stats collection. There is no measurable difference before/after this patch with kernel.bpf_stats_enabled=0.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
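Not part of this header-only diff (the real accounting hooks into the BPF_PROG_RUN() dispatch path behind the static_key mentioned above): a minimal sketch, assuming a made-up helper named run_prog_with_stats() and an unconditional stats path, of how the two sched_clock() calls and the u64_stats_sync protection around the new counters fit together.

/*
 * Illustrative sketch only -- run_prog_with_stats() is a hypothetical
 * helper, not part of this patch; in the patch the timing branch is
 * guarded by a static_key tied to kernel.bpf_stats_enabled.
 */
#include <linux/bpf.h>
#include <linux/filter.h>		/* BPF_PROG_RUN() */
#include <linux/sched/clock.h>		/* sched_clock() */
#include <linux/u64_stats_sync.h>

static u32 run_prog_with_stats(const struct bpf_prog *prog, const void *ctx)
{
	struct bpf_prog_stats *stats;
	u64 start = sched_clock();	/* first of the two sched_clock() calls */
	u32 ret;

	ret = BPF_PROG_RUN(prog, ctx);	/* JITed or interpreted program body */

	/* Preemption is disabled on BPF dispatch paths, so this_cpu_ptr() is safe. */
	stats = this_cpu_ptr(prog->aux->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->cnt++;
	stats->nsecs += sched_clock() - start;	/* second sched_clock() call */
	u64_stats_update_end(&stats->syncp);

	return ret;
}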
Diffstat (limited to 'include/linux/bpf.h')
 include/linux/bpf.h | 9 +++++++++
 1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index de18227b3d95..a2132e09dc1c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -16,6 +16,7 @@
 #include <linux/rbtree_latch.h>
 #include <linux/numa.h>
 #include <linux/wait.h>
+#include <linux/u64_stats_sync.h>
 
 struct bpf_verifier_env;
 struct perf_event;
@@ -340,6 +341,12 @@ enum bpf_cgroup_storage_type {
 
 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
 
+struct bpf_prog_stats {
+	u64 cnt;
+	u64 nsecs;
+	struct u64_stats_sync syncp;
+};
+
 struct bpf_prog_aux {
 	atomic_t refcnt;
 	u32 used_map_cnt;
@@ -389,6 +396,7 @@ struct bpf_prog_aux {
 	 * main prog always has linfo_idx == 0
 	 */
 	u32 linfo_idx;
+	struct bpf_prog_stats __percpu *stats;
 	union {
 		struct work_struct work;
 		struct rcu_head rcu;
@@ -559,6 +567,7 @@ void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
 extern int sysctl_unprivileged_bpf_disabled;
+extern int sysctl_bpf_stats_enabled;
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
 int bpf_prog_new_fd(struct bpf_prog *prog);
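Because the new counters live behind a __percpu pointer, a reader has to sum the per-CPU copies under the u64_stats_sync seqcount. Below is a minimal reader sketch, assuming a hypothetical bpf_prog_sum_stats() helper (how the series actually exposes the totals to userspace is outside this diff); it only shows the u64_stats_fetch_begin()/u64_stats_fetch_retry() pattern around the new struct.

#include <linux/bpf.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical reader: sum the per-CPU bpf_prog_stats into totals. */
static void bpf_prog_sum_stats(const struct bpf_prog *prog,
			       u64 *run_cnt, u64 *run_time_ns)
{
	u64 cnt = 0, nsecs = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st = per_cpu_ptr(prog->aux->stats, cpu);
		unsigned int start;
		u64 tcnt, tnsecs;

		do {
			start = u64_stats_fetch_begin(&st->syncp);
			tcnt = st->cnt;
			tnsecs = st->nsecs;
		} while (u64_stats_fetch_retry(&st->syncp, start));

		cnt += tcnt;
		nsecs += tnsecs;
	}

	*run_cnt = cnt;
	*run_time_ns = nsecs;
}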