path: root/kernel/bpf/syscall.c
author	Alexei Starovoitov <ast@kernel.org>	2019-02-25 17:28:39 -0500
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-02-27 11:22:50 -0500
commit	492ecee892c2a4ba6a14903d5d586ff750b7e805 (patch)
tree	6161a74e75b41fdb94944cd5451e8589da3cab7c /kernel/bpf/syscall.c
parent	143bdc2e27b44d2559596424bfb017d578be33eb (diff)
bpf: enable program stats
JITed BPF programs are indistinguishable from kernel functions, but unlike kernel code, BPF code can change often. The typical "perf record" + "perf report" approach to profiling and tuning kernel code works just as well for BPF programs, but kernel code doesn't need to be monitored, whereas BPF programs do. Users load and run large numbers of BPF programs. These BPF stats allow tools to monitor the usage of BPF on the server. The monitoring tools will turn the sysctl kernel.bpf_stats_enabled on and off for a few seconds to sample the average cost of the programs. Data aggregated over hours and days will provide insight into the cost of BPF, and alarms can trigger in case a given program suddenly gets more expensive.

The cost of two sched_clock() calls per program invocation adds ~20 nsec: fast BPF progs (like selftests/bpf/progs/test_pkt_access.c) slow down from ~10 nsec to ~30 nsec. A static_key minimizes the cost of the stats collection. There is no measurable difference before/after this patch with kernel.bpf_stats_enabled=0.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
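For context, the collection side the message describes (two sched_clock() reads gated behind a static_key) lives in this commit's change to include/linux/filter.h, not in the syscall.c diff shown below. A rough sketch of that fast path, simplified from the actual macro (details may differ from the tree):

/* Sketch of the stats-collecting run path (simplified). When the
 * static key is off, the only overhead is a patched-out branch.
 */
#define BPF_PROG_RUN(prog, ctx)	({					\
	u32 ret;							\
	if (static_branch_unlikely(&bpf_stats_enabled_key)) {		\
		struct bpf_prog_stats *stats;				\
		u64 start = sched_clock();				\
									\
		ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi);		\
		stats = this_cpu_ptr(prog->aux->stats);			\
		u64_stats_update_begin(&stats->syncp);			\
		stats->cnt++;						\
		stats->nsecs += sched_clock() - start;			\
		u64_stats_update_end(&stats->syncp);			\
	} else {							\
		ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi);		\
	}								\
	ret; })

The per-cpu u64_stats_update_begin()/u64_stats_update_end() writer pairs with the u64_stats_fetch_begin_irq() retry loop in bpf_prog_get_stats() below, so readers get a consistent 64-bit snapshot even on 32-bit kernels without adding a lock to the run path.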
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--	kernel/bpf/syscall.c	34
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ec7c552af76b..31cf66fc3f5c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1283,24 +1283,54 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static void bpf_prog_get_stats(const struct bpf_prog *prog,
+			       struct bpf_prog_stats *stats)
+{
+	u64 nsecs = 0, cnt = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		const struct bpf_prog_stats *st;
+		unsigned int start;
+		u64 tnsecs, tcnt;
+
+		st = per_cpu_ptr(prog->aux->stats, cpu);
+		do {
+			start = u64_stats_fetch_begin_irq(&st->syncp);
+			tnsecs = st->nsecs;
+			tcnt = st->cnt;
+		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
+		nsecs += tnsecs;
+		cnt += tcnt;
+	}
+	stats->nsecs = nsecs;
+	stats->cnt = cnt;
+}
+
 #ifdef CONFIG_PROC_FS
 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
 {
 	const struct bpf_prog *prog = filp->private_data;
 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
+	struct bpf_prog_stats stats;
 
+	bpf_prog_get_stats(prog, &stats);
 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
 	seq_printf(m,
 		   "prog_type:\t%u\n"
 		   "prog_jited:\t%u\n"
 		   "prog_tag:\t%s\n"
 		   "memlock:\t%llu\n"
-		   "prog_id:\t%u\n",
+		   "prog_id:\t%u\n"
+		   "run_time_ns:\t%llu\n"
+		   "run_cnt:\t%llu\n",
 		   prog->type,
 		   prog->jited,
 		   prog_tag,
 		   prog->pages * 1ULL << PAGE_SHIFT,
-		   prog->aux->id);
+		   prog->aux->id,
+		   stats.nsecs,
+		   stats.cnt);
 }
 #endif
 
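To illustrate how a monitoring tool of the kind the commit message describes might consume the new fdinfo fields, here is a minimal userspace sketch. It assumes prog_fd is a file descriptor for an already-loaded BPF program (loading is out of scope here); the field names match the run_time_ns/run_cnt lines added above.

/* Minimal sketch: read run_time_ns/run_cnt from a BPF prog fd's fdinfo
 * and compute the average cost per invocation. Returns -1 if fdinfo
 * can't be read or the program has never run with stats enabled.
 */
#include <stdio.h>

static long long prog_avg_run_ns(int prog_fd)
{
	unsigned long long nsecs = 0, cnt = 0;
	char path[64], line[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", prog_fd);
	f = fopen(path, "r");
	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		/* each sscanf only matches its own line; others leave
		 * the variable untouched */
		sscanf(line, "run_time_ns: %llu", &nsecs);
		sscanf(line, "run_cnt: %llu", &cnt);
	}
	fclose(f);
	return cnt ? (long long)(nsecs / cnt) : -1;
}

A sampling tool would take two such readings a few seconds apart with kernel.bpf_stats_enabled=1 and divide the deltas, rather than relying on lifetime totals that may span periods when stats collection was off.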