-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 1
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 27
-rw-r--r--  include/linux/bpf.h | 3
-rw-r--r--  include/linux/bpf_verifier.h | 2
-rw-r--r--  include/linux/btf.h | 5
-rw-r--r--  include/linux/module.h | 4
-rw-r--r--  include/linux/skmsg.h | 9
-rw-r--r--  include/linux/socket.h | 1
-rw-r--r--  include/linux/trace_events.h | 8
-rw-r--r--  include/net/tls.h | 9
-rw-r--r--  include/uapi/linux/bpf.h | 13
-rw-r--r--  include/uapi/linux/btf.h | 20
-rw-r--r--  kernel/bpf/arraymap.c | 1
-rw-r--r--  kernel/bpf/btf.c | 393
-rw-r--r--  kernel/bpf/cpumap.c | 2
-rw-r--r--  kernel/bpf/local_storage.c | 84
-rw-r--r--  kernel/bpf/lpm_trie.c | 1
-rw-r--r--  kernel/bpf/syscall.c | 80
-rw-r--r--  kernel/bpf/verifier.c | 241
-rw-r--r--  kernel/module.c | 5
-rw-r--r--  kernel/trace/bpf_trace.c | 99
-rw-r--r--  net/core/filter.c | 30
-rw-r--r--  net/core/skmsg.c | 23
-rw-r--r--  net/ipv4/tcp_bpf.c | 15
-rw-r--r--  net/tls/tls_main.c | 14
-rw-r--r--  net/tls/tls_sw.c | 43
-rw-r--r--  net/xdp/xsk.c | 16
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-map.rst | 59
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 105
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool.rst | 4
-rw-r--r--  tools/bpf/bpftool/Makefile | 2
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 31
-rw-r--r--  tools/bpf/bpftool/btf_dumper.c | 63
-rw-r--r--  tools/bpf/bpftool/cfg.c | 36
-rw-r--r--  tools/bpf/bpftool/cfg.h | 38
-rw-r--r--  tools/bpf/bpftool/cgroup.c | 2
-rw-r--r--  tools/bpf/bpftool/common.c | 69
-rw-r--r--  tools/bpf/bpftool/jit_disasm.c | 1
-rw-r--r--  tools/bpf/bpftool/json_writer.c | 7
-rw-r--r--  tools/bpf/bpftool/json_writer.h | 1
-rw-r--r--  tools/bpf/bpftool/main.c | 42
-rw-r--r--  tools/bpf/bpftool/main.h | 42
-rw-r--r--  tools/bpf/bpftool/map.c | 34
-rw-r--r--  tools/bpf/bpftool/map_perf_ring.c | 2
-rw-r--r--  tools/bpf/bpftool/net.c | 2
-rw-r--r--  tools/bpf/bpftool/netlink_dumper.c | 2
-rw-r--r--  tools/bpf/bpftool/netlink_dumper.h | 2
-rw-r--r--  tools/bpf/bpftool/perf.c | 2
-rw-r--r--  tools/bpf/bpftool/prog.c | 45
-rw-r--r--  tools/bpf/bpftool/tracelog.c | 23
-rw-r--r--  tools/bpf/bpftool/xlated_dumper.c | 43
-rw-r--r--  tools/bpf/bpftool/xlated_dumper.h | 38
-rw-r--r--  tools/include/uapi/linux/bpf.h | 13
-rw-r--r--  tools/include/uapi/linux/btf.h | 20
-rw-r--r--  tools/lib/bpf/bpf_prog_linfo.c | 6
-rw-r--r--  tools/lib/bpf/libbpf.c | 1
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/connect4_prog.c | 6
-rw-r--r--  tools/testing/selftests/bpf/connect6_prog.c | 6
-rw-r--r--  tools/testing/selftests/bpf/netcnt_prog.c | 6
-rw-r--r--  tools/testing/selftests/bpf/test_btf.c | 708
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 8
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 391
64 files changed, 2331 insertions(+), 680 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 0a7371a86139..1542df00b23c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -932,6 +932,7 @@ skip_init_ctx:
932 prog->jited_len = image_size; 932 prog->jited_len = image_size;
933 933
934 if (!prog->is_func || extra_pass) { 934 if (!prog->is_func || extra_pass) {
935 bpf_prog_fill_jited_linfo(prog, ctx.offset);
935out_off: 936out_off:
936 kfree(ctx.offset); 937 kfree(ctx.offset);
937 kfree(jit_data); 938 kfree(jit_data);
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 5fda4f7bf15d..65428e79b2f3 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1575,6 +1575,7 @@ skip_init_ctx:
1575 prog->jited_len = image_size; 1575 prog->jited_len = image_size;
1576 1576
1577 if (!prog->is_func || extra_pass) { 1577 if (!prog->is_func || extra_pass) {
1578 bpf_prog_fill_jited_linfo(prog, ctx.offset);
1578out_off: 1579out_off:
1579 kfree(ctx.offset); 1580 kfree(ctx.offset);
1580 kfree(jit_data); 1581 kfree(jit_data);
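Both JIT hunks above pass the per-instruction offset table the JIT already keeps in ctx.offset (BPF instruction index to offset in the emitted image) into bpf_prog_fill_jited_linfo() once the final image is laid out. The sketch below is only an illustration of the translation this enables; all names are hypothetical and the real helper in the BPF core may differ in details such as off-by-one handling of the offset table.

    /* Illustrative only: map each line-info entry to an address inside the
     * jited image using the insn-index -> jit-offset table recorded by the
     * JIT. Names here are hypothetical stand-ins.
     */
    #include <stdint.h>

    struct line_info { uint32_t insn_off; };    /* stand-in for bpf_line_info */

    static void fill_jited_line_info(uintptr_t *jited_linfo, uintptr_t image,
                                     const uint32_t *insn_to_jit_off,
                                     const struct line_info *linfo,
                                     uint32_t nr_linfo, uint32_t insn_start)
    {
            for (uint32_t i = 0; i < nr_linfo; i++)
                    jited_linfo[i] = image +
                            insn_to_jit_off[linfo[i].insn_off - insn_start];
    }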
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 662cbc21d909..e23ca90289f7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3052,26 +3052,19 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3052{ 3052{
3053 const struct bpf_insn *insn = &meta->insn; 3053 const struct bpf_insn *insn = &meta->insn;
3054 u64 imm = insn->imm; /* sign extend */ 3054 u64 imm = insn->imm; /* sign extend */
3055 u8 dst_gpr = insn->dst_reg * 2;
3055 swreg tmp_reg; 3056 swreg tmp_reg;
3056 3057
3057 if (!imm) { 3058 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3058 meta->skip = true; 3059 emit_alu(nfp_prog, imm_b(nfp_prog),
3059 return 0; 3060 reg_a(dst_gpr), ALU_OP_AND, tmp_reg);
3060 } 3061 /* Upper word of the mask can only be 0 or ~0 from sign extension,
3061 3062 * so either ignore it or OR the whole thing in.
3062 if (imm & ~0U) { 3063 */
3063 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); 3064 if (imm >> 32)
3064 emit_alu(nfp_prog, reg_none(),
3065 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
3066 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3067 }
3068
3069 if (imm >> 32) {
3070 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3071 emit_alu(nfp_prog, reg_none(), 3065 emit_alu(nfp_prog, reg_none(),
3072 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg); 3066 reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
3073 emit_br(nfp_prog, BR_BNE, insn->off, 0); 3067 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3074 }
3075 3068
3076 return 0; 3069 return 0;
3077} 3070}
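The rewritten jset_imm() leans on the fact stated in the new comment: the eBPF immediate is a sign-extended 32-bit value, so the upper word of the 64-bit mask is always either 0 or ~0 and can be folded in with a single OR when it is non-zero. A small stand-alone check of that property (plain user-space C, not driver code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int32_t imms[] = { 1, -1, 0x123, -0x123, INT32_MAX, INT32_MIN };

            for (unsigned i = 0; i < sizeof(imms) / sizeof(imms[0]); i++) {
                    uint64_t imm = (int64_t)imms[i];        /* sign extend */
                    uint32_t hi = imm >> 32;

                    /* upper word is 0 or ~0, never anything in between */
                    assert(hi == 0 || hi == UINT32_MAX);
            }
            return 0;
    }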
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0c992b86eb2c..e734f163bd0b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,6 +23,7 @@ struct bpf_prog;
23struct bpf_map; 23struct bpf_map;
24struct sock; 24struct sock;
25struct seq_file; 25struct seq_file;
26struct btf;
26struct btf_type; 27struct btf_type;
27 28
28/* map is generic key/value storage optionally accesible by eBPF programs */ 29/* map is generic key/value storage optionally accesible by eBPF programs */
@@ -52,6 +53,7 @@ struct bpf_map_ops {
52 void (*map_seq_show_elem)(struct bpf_map *map, void *key, 53 void (*map_seq_show_elem)(struct bpf_map *map, void *key,
53 struct seq_file *m); 54 struct seq_file *m);
54 int (*map_check_btf)(const struct bpf_map *map, 55 int (*map_check_btf)(const struct bpf_map *map,
56 const struct btf *btf,
55 const struct btf_type *key_type, 57 const struct btf_type *key_type,
56 const struct btf_type *value_type); 58 const struct btf_type *value_type);
57}; 59};
@@ -126,6 +128,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
126} 128}
127 129
128int map_check_no_btf(const struct bpf_map *map, 130int map_check_no_btf(const struct bpf_map *map,
131 const struct btf *btf,
129 const struct btf_type *key_type, 132 const struct btf_type *key_type,
130 const struct btf_type *value_type); 133 const struct btf_type *value_type);
131 134
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c736945be7c5..c233efc106c6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -38,6 +38,7 @@ enum bpf_reg_liveness {
38 REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ 38 REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
39 REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ 39 REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
40 REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */ 40 REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
41 REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */
41}; 42};
42 43
43struct bpf_reg_state { 44struct bpf_reg_state {
@@ -224,6 +225,7 @@ struct bpf_verifier_env {
224 bool allow_ptr_leaks; 225 bool allow_ptr_leaks;
225 bool seen_direct_write; 226 bool seen_direct_write;
226 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ 227 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
228 const struct bpf_line_info *prev_linfo;
227 struct bpf_verifier_log log; 229 struct bpf_verifier_log log;
228 struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; 230 struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
229 u32 subprog_cnt; 231 u32 subprog_cnt;
diff --git a/include/linux/btf.h b/include/linux/btf.h
index b98405a56383..12502e25e767 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -7,6 +7,7 @@
7#include <linux/types.h> 7#include <linux/types.h>
8 8
9struct btf; 9struct btf;
10struct btf_member;
10struct btf_type; 11struct btf_type;
11union bpf_attr; 12union bpf_attr;
12 13
@@ -46,7 +47,9 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
46 struct seq_file *m); 47 struct seq_file *m);
47int btf_get_fd_by_id(u32 id); 48int btf_get_fd_by_id(u32 id);
48u32 btf_id(const struct btf *btf); 49u32 btf_id(const struct btf *btf);
49bool btf_name_offset_valid(const struct btf *btf, u32 offset); 50bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
51 const struct btf_member *m,
52 u32 expected_offset, u32 expected_size);
50 53
51#ifdef CONFIG_BPF_SYSCALL 54#ifdef CONFIG_BPF_SYSCALL
52const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); 55const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
diff --git a/include/linux/module.h b/include/linux/module.h
index fce6b4335e36..5f147dd5e709 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -432,6 +432,10 @@ struct module {
432 unsigned int num_tracepoints; 432 unsigned int num_tracepoints;
433 tracepoint_ptr_t *tracepoints_ptrs; 433 tracepoint_ptr_t *tracepoints_ptrs;
434#endif 434#endif
435#ifdef CONFIG_BPF_EVENTS
436 unsigned int num_bpf_raw_events;
437 struct bpf_raw_event_map *bpf_raw_events;
438#endif
435#ifdef HAVE_JUMP_LABEL 439#ifdef HAVE_JUMP_LABEL
436 struct jump_entry *jump_entries; 440 struct jump_entry *jump_entries;
437 unsigned int num_jump_entries; 441 unsigned int num_jump_entries;
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 2a11e9d91dfa..178a3933a71b 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -36,6 +36,7 @@ struct sk_msg_sg {
36 struct scatterlist data[MAX_MSG_FRAGS + 1]; 36 struct scatterlist data[MAX_MSG_FRAGS + 1];
37}; 37};
38 38
39/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
39struct sk_msg { 40struct sk_msg {
40 struct sk_msg_sg sg; 41 struct sk_msg_sg sg;
41 void *data; 42 void *data;
@@ -416,6 +417,14 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
416 sk_psock_drop(sk, psock); 417 sk_psock_drop(sk, psock);
417} 418}
418 419
420static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
421{
422 if (psock->parser.enabled)
423 psock->parser.saved_data_ready(sk);
424 else
425 sk->sk_data_ready(sk);
426}
427
419static inline void psock_set_prog(struct bpf_prog **pprog, 428static inline void psock_set_prog(struct bpf_prog **pprog,
420 struct bpf_prog *prog) 429 struct bpf_prog *prog)
421{ 430{
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8b571e9b9f76..84c48a3c0227 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -286,6 +286,7 @@ struct ucred {
286#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ 286#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
287#define MSG_MORE 0x8000 /* Sender will send more */ 287#define MSG_MORE 0x8000 /* Sender will send more */
288#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ 288#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
289#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */
289#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ 290#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
290#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ 291#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
291#define MSG_EOF MSG_FIN 292#define MSG_EOF MSG_FIN
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 4130a5497d40..8a62731673f7 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -471,7 +471,8 @@ void perf_event_detach_bpf_prog(struct perf_event *event);
471int perf_event_query_prog_array(struct perf_event *event, void __user *info); 471int perf_event_query_prog_array(struct perf_event *event, void __user *info);
472int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); 472int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
473int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); 473int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
474struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name); 474struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
475void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
475int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 476int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
476 u32 *fd_type, const char **buf, 477 u32 *fd_type, const char **buf,
477 u64 *probe_offset, u64 *probe_addr); 478 u64 *probe_offset, u64 *probe_addr);
@@ -502,10 +503,13 @@ static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf
502{ 503{
503 return -EOPNOTSUPP; 504 return -EOPNOTSUPP;
504} 505}
505static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name) 506static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
506{ 507{
507 return NULL; 508 return NULL;
508} 509}
510static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
511{
512}
509static inline int bpf_get_perf_event_info(const struct perf_event *event, 513static inline int bpf_get_perf_event_info(const struct perf_event *event,
510 u32 *prog_id, u32 *fd_type, 514 u32 *prog_id, u32 *fd_type,
511 const char **buf, u64 *probe_offset, 515 const char **buf, u64 *probe_offset,
diff --git a/include/net/tls.h b/include/net/tls.h
index 3cbcd12303fd..2a6ac8d642af 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -460,6 +460,15 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
460 return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx; 460 return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
461} 461}
462 462
463static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
464{
465 struct tls_context *ctx = tls_get_ctx(sk);
466
467 if (!ctx)
468 return false;
469 return !!tls_sw_ctx_tx(ctx);
470}
471
463static inline struct tls_offload_context_rx * 472static inline struct tls_offload_context_rx *
464tls_offload_ctx_rx(const struct tls_context *tls_ctx) 473tls_offload_ctx_rx(const struct tls_context *tls_ctx)
465{ 474{
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index aa582cd5bfcf..91c43884f295 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -133,6 +133,14 @@ enum bpf_map_type {
133 BPF_MAP_TYPE_STACK, 133 BPF_MAP_TYPE_STACK,
134}; 134};
135 135
136/* Note that tracing related programs such as
137 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
138 * are not subject to a stable API since kernel internal data
139 * structures can change from release to release and may
140 * therefore break existing tracing BPF programs. Tracing BPF
141 * programs correspond to /a/ specific kernel which is to be
142 * analyzed, and not /a/ specific kernel /and/ all future ones.
143 */
136enum bpf_prog_type { 144enum bpf_prog_type {
137 BPF_PROG_TYPE_UNSPEC, 145 BPF_PROG_TYPE_UNSPEC,
138 BPF_PROG_TYPE_SOCKET_FILTER, 146 BPF_PROG_TYPE_SOCKET_FILTER,
@@ -343,7 +351,7 @@ union bpf_attr {
343 __u32 log_level; /* verbosity level of verifier */ 351 __u32 log_level; /* verbosity level of verifier */
344 __u32 log_size; /* size of user buffer */ 352 __u32 log_size; /* size of user buffer */
345 __aligned_u64 log_buf; /* user supplied buffer */ 353 __aligned_u64 log_buf; /* user supplied buffer */
346 __u32 kern_version; /* checked when prog_type=kprobe */ 354 __u32 kern_version; /* not used */
347 __u32 prog_flags; 355 __u32 prog_flags;
348 char prog_name[BPF_OBJ_NAME_LEN]; 356 char prog_name[BPF_OBJ_NAME_LEN];
349 __u32 prog_ifindex; /* ifindex of netdev to prep for */ 357 __u32 prog_ifindex; /* ifindex of netdev to prep for */
@@ -2657,6 +2665,7 @@ struct sk_msg_md {
2657 __u32 local_ip6[4]; /* Stored in network byte order */ 2665 __u32 local_ip6[4]; /* Stored in network byte order */
2658 __u32 remote_port; /* Stored in network byte order */ 2666 __u32 remote_port; /* Stored in network byte order */
2659 __u32 local_port; /* stored in host byte order */ 2667 __u32 local_port; /* stored in host byte order */
2668 __u32 size; /* Total size of sk_msg */
2660}; 2669};
2661 2670
2662struct sk_reuseport_md { 2671struct sk_reuseport_md {
@@ -2717,6 +2726,8 @@ struct bpf_prog_info {
2717 __u32 nr_jited_line_info; 2726 __u32 nr_jited_line_info;
2718 __u32 line_info_rec_size; 2727 __u32 line_info_rec_size;
2719 __u32 jited_line_info_rec_size; 2728 __u32 jited_line_info_rec_size;
2729 __u32 nr_prog_tags;
2730 __aligned_u64 prog_tags;
2720} __attribute__((aligned(8))); 2731} __attribute__((aligned(8)));
2721 2732
2722struct bpf_map_info { 2733struct bpf_map_info {
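struct sk_msg_md gains a size field exposing the total size of the message to SK_MSG programs. Below is a minimal sketch of a verdict program reading it; the SEC() definition and the 4 KiB threshold are illustrative assumptions, not part of this patch.

    #include <linux/bpf.h>

    #define SEC(name) __attribute__((section(name), used))

    SEC("sk_msg")
    int msg_size_filter(struct sk_msg_md *msg)
    {
            /* drop anything larger than 4 KiB, let the rest through */
            if (msg->size > 4096)
                    return SK_DROP;
            return SK_PASS;
    }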
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 14f66948fc95..7b7475ef2f17 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -34,7 +34,9 @@ struct btf_type {
34 * bits 0-15: vlen (e.g. # of struct's members) 34 * bits 0-15: vlen (e.g. # of struct's members)
35 * bits 16-23: unused 35 * bits 16-23: unused
36 * bits 24-27: kind (e.g. int, ptr, array...etc) 36 * bits 24-27: kind (e.g. int, ptr, array...etc)
37 * bits 28-31: unused 37 * bits 28-30: unused
38 * bit 31: kind_flag, currently used by
39 * struct, union and fwd
38 */ 40 */
39 __u32 info; 41 __u32 info;
40 /* "size" is used by INT, ENUM, STRUCT and UNION. 42 /* "size" is used by INT, ENUM, STRUCT and UNION.
@@ -52,6 +54,7 @@ struct btf_type {
52 54
53#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f) 55#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
54#define BTF_INFO_VLEN(info) ((info) & 0xffff) 56#define BTF_INFO_VLEN(info) ((info) & 0xffff)
57#define BTF_INFO_KFLAG(info) ((info) >> 31)
55 58
56#define BTF_KIND_UNKN 0 /* Unknown */ 59#define BTF_KIND_UNKN 0 /* Unknown */
57#define BTF_KIND_INT 1 /* Integer */ 60#define BTF_KIND_INT 1 /* Integer */
@@ -110,9 +113,22 @@ struct btf_array {
110struct btf_member { 113struct btf_member {
111 __u32 name_off; 114 __u32 name_off;
112 __u32 type; 115 __u32 type;
113 __u32 offset; /* offset in bits */ 116 /* If the type info kind_flag is set, the btf_member offset
117 * contains both member bitfield size and bit offset. The
118 * bitfield size is set for bitfield members. If the type
119 * info kind_flag is not set, the offset contains only bit
120 * offset.
121 */
122 __u32 offset;
114}; 123};
115 124
125/* If the struct/union type info kind_flag is set, the
126 * following two macros are used to access bitfield_size
127 * and bit_offset from btf_member.offset.
128 */
129#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
130#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
131
116/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param". 132/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
117 * The exact number of btf_param is stored in the vlen (of the 133 * The exact number of btf_param is stored in the vlen (of the
118 * info in "struct btf_type"). 134 * info in "struct btf_type").
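With kind_flag set on a struct or union, btf_member.offset is split into a bitfield size (upper 8 bits) and a bit offset (lower 24 bits); without it, the field is a plain bit offset. A self-contained decoding helper built from the macros introduced above:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the new UAPI definitions above. */
    #define BTF_INFO_KFLAG(info)            ((info) >> 31)
    #define BTF_MEMBER_BITFIELD_SIZE(val)   ((val) >> 24)
    #define BTF_MEMBER_BIT_OFFSET(val)      ((val) & 0xffffff)

    static void print_member_offset(uint32_t type_info, uint32_t member_offset)
    {
            if (BTF_INFO_KFLAG(type_info))
                    printf("bitfield_size=%u bit_offset=%u\n",
                           BTF_MEMBER_BITFIELD_SIZE(member_offset),
                           BTF_MEMBER_BIT_OFFSET(member_offset));
            else
                    printf("bit_offset=%u\n", member_offset);
    }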
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 24583da9ffd1..25632a75d630 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -382,6 +382,7 @@ static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
382} 382}
383 383
384static int array_map_check_btf(const struct bpf_map *map, 384static int array_map_check_btf(const struct bpf_map *map,
385 const struct btf *btf,
385 const struct btf_type *key_type, 386 const struct btf_type *key_type,
386 const struct btf_type *value_type) 387 const struct btf_type *value_type)
387{ 388{
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index bf34933cc413..715f9fcf4712 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -164,7 +164,7 @@
164#define BITS_ROUNDUP_BYTES(bits) \ 164#define BITS_ROUNDUP_BYTES(bits) \
165 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits)) 165 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
166 166
167#define BTF_INFO_MASK 0x0f00ffff 167#define BTF_INFO_MASK 0x8f00ffff
168#define BTF_INT_MASK 0x0fffffff 168#define BTF_INT_MASK 0x0fffffff
169#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE) 169#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
170#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET) 170#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
@@ -274,6 +274,10 @@ struct btf_kind_operations {
274 const struct btf_type *struct_type, 274 const struct btf_type *struct_type,
275 const struct btf_member *member, 275 const struct btf_member *member,
276 const struct btf_type *member_type); 276 const struct btf_type *member_type);
277 int (*check_kflag_member)(struct btf_verifier_env *env,
278 const struct btf_type *struct_type,
279 const struct btf_member *member,
280 const struct btf_type *member_type);
277 void (*log_details)(struct btf_verifier_env *env, 281 void (*log_details)(struct btf_verifier_env *env,
278 const struct btf_type *t); 282 const struct btf_type *t);
279 void (*seq_show)(const struct btf *btf, const struct btf_type *t, 283 void (*seq_show)(const struct btf *btf, const struct btf_type *t,
@@ -419,6 +423,25 @@ static u16 btf_type_vlen(const struct btf_type *t)
419 return BTF_INFO_VLEN(t->info); 423 return BTF_INFO_VLEN(t->info);
420} 424}
421 425
426static bool btf_type_kflag(const struct btf_type *t)
427{
428 return BTF_INFO_KFLAG(t->info);
429}
430
431static u32 btf_member_bit_offset(const struct btf_type *struct_type,
432 const struct btf_member *member)
433{
434 return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
435 : member->offset;
436}
437
438static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
439 const struct btf_member *member)
440{
441 return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
442 : 0;
443}
444
422static u32 btf_type_int(const struct btf_type *t) 445static u32 btf_type_int(const struct btf_type *t)
423{ 446{
424 return *(u32 *)(t + 1); 447 return *(u32 *)(t + 1);
@@ -474,7 +497,7 @@ static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
474 return !*src; 497 return !*src;
475} 498}
476 499
477const char *btf_name_by_offset(const struct btf *btf, u32 offset) 500static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
478{ 501{
479 if (!offset) 502 if (!offset)
480 return "(anon)"; 503 return "(anon)";
@@ -484,6 +507,14 @@ const char *btf_name_by_offset(const struct btf *btf, u32 offset)
484 return "(invalid-name-offset)"; 507 return "(invalid-name-offset)";
485} 508}
486 509
510const char *btf_name_by_offset(const struct btf *btf, u32 offset)
511{
512 if (offset < btf->hdr.str_len)
513 return &btf->strings[offset];
514
515 return NULL;
516}
517
487const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) 518const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
488{ 519{
489 if (type_id > btf->nr_types) 520 if (type_id > btf->nr_types)
@@ -514,6 +545,47 @@ static bool btf_type_int_is_regular(const struct btf_type *t)
514 return true; 545 return true;
515} 546}
516 547
548/*
549 * Check that given struct member is a regular int with expected
550 * offset and size.
551 */
552bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
553 const struct btf_member *m,
554 u32 expected_offset, u32 expected_size)
555{
556 const struct btf_type *t;
557 u32 id, int_data;
558 u8 nr_bits;
559
560 id = m->type;
561 t = btf_type_id_size(btf, &id, NULL);
562 if (!t || !btf_type_is_int(t))
563 return false;
564
565 int_data = btf_type_int(t);
566 nr_bits = BTF_INT_BITS(int_data);
567 if (btf_type_kflag(s)) {
568 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
569 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
570
571 /* if kflag set, int should be a regular int and
572 * bit offset should be at byte boundary.
573 */
574 return !bitfield_size &&
575 BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
576 BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
577 }
578
579 if (BTF_INT_OFFSET(int_data) ||
580 BITS_PER_BYTE_MASKED(m->offset) ||
581 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
582 BITS_PER_BYTE_MASKED(nr_bits) ||
583 BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
584 return false;
585
586 return true;
587}
588
517__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log, 589__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
518 const char *fmt, ...) 590 const char *fmt, ...)
519{ 591{
@@ -554,7 +626,7 @@ __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
554 __btf_verifier_log(log, "[%u] %s %s%s", 626 __btf_verifier_log(log, "[%u] %s %s%s",
555 env->log_type_id, 627 env->log_type_id,
556 btf_kind_str[kind], 628 btf_kind_str[kind],
557 btf_name_by_offset(btf, t->name_off), 629 __btf_name_by_offset(btf, t->name_off),
558 log_details ? " " : ""); 630 log_details ? " " : "");
559 631
560 if (log_details) 632 if (log_details)
@@ -597,9 +669,17 @@ static void btf_verifier_log_member(struct btf_verifier_env *env,
597 if (env->phase != CHECK_META) 669 if (env->phase != CHECK_META)
598 btf_verifier_log_type(env, struct_type, NULL); 670 btf_verifier_log_type(env, struct_type, NULL);
599 671
600 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u", 672 if (btf_type_kflag(struct_type))
601 btf_name_by_offset(btf, member->name_off), 673 __btf_verifier_log(log,
602 member->type, member->offset); 674 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
675 __btf_name_by_offset(btf, member->name_off),
676 member->type,
677 BTF_MEMBER_BITFIELD_SIZE(member->offset),
678 BTF_MEMBER_BIT_OFFSET(member->offset));
679 else
680 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
681 __btf_name_by_offset(btf, member->name_off),
682 member->type, member->offset);
603 683
604 if (fmt && *fmt) { 684 if (fmt && *fmt) {
605 __btf_verifier_log(log, " "); 685 __btf_verifier_log(log, " ");
@@ -915,6 +995,38 @@ static int btf_df_check_member(struct btf_verifier_env *env,
915 return -EINVAL; 995 return -EINVAL;
916} 996}
917 997
998static int btf_df_check_kflag_member(struct btf_verifier_env *env,
999 const struct btf_type *struct_type,
1000 const struct btf_member *member,
1001 const struct btf_type *member_type)
1002{
1003 btf_verifier_log_basic(env, struct_type,
1004 "Unsupported check_kflag_member");
1005 return -EINVAL;
1006}
1007
1008/* Used for ptr, array and struct/union type members.
1009 * int, enum and modifier types have their specific callback functions.
1010 */
1011static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1012 const struct btf_type *struct_type,
1013 const struct btf_member *member,
1014 const struct btf_type *member_type)
1015{
1016 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1017 btf_verifier_log_member(env, struct_type, member,
1018 "Invalid member bitfield_size");
1019 return -EINVAL;
1020 }
1021
1022 /* bitfield size is 0, so member->offset represents bit offset only.
1023 * It is safe to call non kflag check_member variants.
1024 */
1025 return btf_type_ops(member_type)->check_member(env, struct_type,
1026 member,
1027 member_type);
1028}
1029
918static int btf_df_resolve(struct btf_verifier_env *env, 1030static int btf_df_resolve(struct btf_verifier_env *env,
919 const struct resolve_vertex *v) 1031 const struct resolve_vertex *v)
920{ 1032{
@@ -967,6 +1079,62 @@ static int btf_int_check_member(struct btf_verifier_env *env,
967 return 0; 1079 return 0;
968} 1080}
969 1081
1082static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1083 const struct btf_type *struct_type,
1084 const struct btf_member *member,
1085 const struct btf_type *member_type)
1086{
1087 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1088 u32 int_data = btf_type_int(member_type);
1089 u32 struct_size = struct_type->size;
1090 u32 nr_copy_bits;
1091
1092 /* a regular int type is required for the kflag int member */
1093 if (!btf_type_int_is_regular(member_type)) {
1094 btf_verifier_log_member(env, struct_type, member,
1095 "Invalid member base type");
1096 return -EINVAL;
1097 }
1098
1099 /* check sanity of bitfield size */
1100 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
1101 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
1102 nr_int_data_bits = BTF_INT_BITS(int_data);
1103 if (!nr_bits) {
1104 /* Not a bitfield member, member offset must be at byte
1105 * boundary.
1106 */
1107 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1108 btf_verifier_log_member(env, struct_type, member,
1109 "Invalid member offset");
1110 return -EINVAL;
1111 }
1112
1113 nr_bits = nr_int_data_bits;
1114 } else if (nr_bits > nr_int_data_bits) {
1115 btf_verifier_log_member(env, struct_type, member,
1116 "Invalid member bitfield_size");
1117 return -EINVAL;
1118 }
1119
1120 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1121 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
1122 if (nr_copy_bits > BITS_PER_U64) {
1123 btf_verifier_log_member(env, struct_type, member,
1124 "nr_copy_bits exceeds 64");
1125 return -EINVAL;
1126 }
1127
1128 if (struct_size < bytes_offset ||
1129 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1130 btf_verifier_log_member(env, struct_type, member,
1131 "Member exceeds struct_size");
1132 return -EINVAL;
1133 }
1134
1135 return 0;
1136}
1137
970static s32 btf_int_check_meta(struct btf_verifier_env *env, 1138static s32 btf_int_check_meta(struct btf_verifier_env *env,
971 const struct btf_type *t, 1139 const struct btf_type *t,
972 u32 meta_left) 1140 u32 meta_left)
@@ -986,6 +1154,11 @@ static s32 btf_int_check_meta(struct btf_verifier_env *env,
986 return -EINVAL; 1154 return -EINVAL;
987 } 1155 }
988 1156
1157 if (btf_type_kflag(t)) {
1158 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1159 return -EINVAL;
1160 }
1161
989 int_data = btf_type_int(t); 1162 int_data = btf_type_int(t);
990 if (int_data & ~BTF_INT_MASK) { 1163 if (int_data & ~BTF_INT_MASK) {
991 btf_verifier_log_basic(env, t, "Invalid int_data:%x", 1164 btf_verifier_log_basic(env, t, "Invalid int_data:%x",
@@ -1038,26 +1211,16 @@ static void btf_int_log(struct btf_verifier_env *env,
1038 btf_int_encoding_str(BTF_INT_ENCODING(int_data))); 1211 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1039} 1212}
1040 1213
1041static void btf_int_bits_seq_show(const struct btf *btf, 1214static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1042 const struct btf_type *t, 1215 u8 nr_bits, struct seq_file *m)
1043 void *data, u8 bits_offset,
1044 struct seq_file *m)
1045{ 1216{
1046 u16 left_shift_bits, right_shift_bits; 1217 u16 left_shift_bits, right_shift_bits;
1047 u32 int_data = btf_type_int(t);
1048 u8 nr_bits = BTF_INT_BITS(int_data);
1049 u8 total_bits_offset;
1050 u8 nr_copy_bytes; 1218 u8 nr_copy_bytes;
1051 u8 nr_copy_bits; 1219 u8 nr_copy_bits;
1052 u64 print_num; 1220 u64 print_num;
1053 1221
1054 /* 1222 data += BITS_ROUNDDOWN_BYTES(bits_offset);
1055 * bits_offset is at most 7. 1223 bits_offset = BITS_PER_BYTE_MASKED(bits_offset);
1056 * BTF_INT_OFFSET() cannot exceed 64 bits.
1057 */
1058 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1059 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1060 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1061 nr_copy_bits = nr_bits + bits_offset; 1224 nr_copy_bits = nr_bits + bits_offset;
1062 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); 1225 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1063 1226
@@ -1077,6 +1240,24 @@ static void btf_int_bits_seq_show(const struct btf *btf,
1077 seq_printf(m, "0x%llx", print_num); 1240 seq_printf(m, "0x%llx", print_num);
1078} 1241}
1079 1242
1243
1244static void btf_int_bits_seq_show(const struct btf *btf,
1245 const struct btf_type *t,
1246 void *data, u8 bits_offset,
1247 struct seq_file *m)
1248{
1249 u32 int_data = btf_type_int(t);
1250 u8 nr_bits = BTF_INT_BITS(int_data);
1251 u8 total_bits_offset;
1252
1253 /*
1254 * bits_offset is at most 7.
1255 * BTF_INT_OFFSET() cannot exceed 64 bits.
1256 */
1257 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1258 btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m);
1259}
1260
1080static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, 1261static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1081 u32 type_id, void *data, u8 bits_offset, 1262 u32 type_id, void *data, u8 bits_offset,
1082 struct seq_file *m) 1263 struct seq_file *m)
@@ -1126,6 +1307,7 @@ static const struct btf_kind_operations int_ops = {
1126 .check_meta = btf_int_check_meta, 1307 .check_meta = btf_int_check_meta,
1127 .resolve = btf_df_resolve, 1308 .resolve = btf_df_resolve,
1128 .check_member = btf_int_check_member, 1309 .check_member = btf_int_check_member,
1310 .check_kflag_member = btf_int_check_kflag_member,
1129 .log_details = btf_int_log, 1311 .log_details = btf_int_log,
1130 .seq_show = btf_int_seq_show, 1312 .seq_show = btf_int_seq_show,
1131}; 1313};
@@ -1155,6 +1337,31 @@ static int btf_modifier_check_member(struct btf_verifier_env *env,
1155 resolved_type); 1337 resolved_type);
1156} 1338}
1157 1339
1340static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1341 const struct btf_type *struct_type,
1342 const struct btf_member *member,
1343 const struct btf_type *member_type)
1344{
1345 const struct btf_type *resolved_type;
1346 u32 resolved_type_id = member->type;
1347 struct btf_member resolved_member;
1348 struct btf *btf = env->btf;
1349
1350 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1351 if (!resolved_type) {
1352 btf_verifier_log_member(env, struct_type, member,
1353 "Invalid member");
1354 return -EINVAL;
1355 }
1356
1357 resolved_member = *member;
1358 resolved_member.type = resolved_type_id;
1359
1360 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1361 &resolved_member,
1362 resolved_type);
1363}
1364
1158static int btf_ptr_check_member(struct btf_verifier_env *env, 1365static int btf_ptr_check_member(struct btf_verifier_env *env,
1159 const struct btf_type *struct_type, 1366 const struct btf_type *struct_type,
1160 const struct btf_member *member, 1367 const struct btf_member *member,
@@ -1190,6 +1397,11 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1190 return -EINVAL; 1397 return -EINVAL;
1191 } 1398 }
1192 1399
1400 if (btf_type_kflag(t)) {
1401 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1402 return -EINVAL;
1403 }
1404
1193 if (!BTF_TYPE_ID_VALID(t->type)) { 1405 if (!BTF_TYPE_ID_VALID(t->type)) {
1194 btf_verifier_log_type(env, t, "Invalid type_id"); 1406 btf_verifier_log_type(env, t, "Invalid type_id");
1195 return -EINVAL; 1407 return -EINVAL;
@@ -1343,6 +1555,7 @@ static struct btf_kind_operations modifier_ops = {
1343 .check_meta = btf_ref_type_check_meta, 1555 .check_meta = btf_ref_type_check_meta,
1344 .resolve = btf_modifier_resolve, 1556 .resolve = btf_modifier_resolve,
1345 .check_member = btf_modifier_check_member, 1557 .check_member = btf_modifier_check_member,
1558 .check_kflag_member = btf_modifier_check_kflag_member,
1346 .log_details = btf_ref_type_log, 1559 .log_details = btf_ref_type_log,
1347 .seq_show = btf_modifier_seq_show, 1560 .seq_show = btf_modifier_seq_show,
1348}; 1561};
@@ -1351,6 +1564,7 @@ static struct btf_kind_operations ptr_ops = {
1351 .check_meta = btf_ref_type_check_meta, 1564 .check_meta = btf_ref_type_check_meta,
1352 .resolve = btf_ptr_resolve, 1565 .resolve = btf_ptr_resolve,
1353 .check_member = btf_ptr_check_member, 1566 .check_member = btf_ptr_check_member,
1567 .check_kflag_member = btf_generic_check_kflag_member,
1354 .log_details = btf_ref_type_log, 1568 .log_details = btf_ref_type_log,
1355 .seq_show = btf_ptr_seq_show, 1569 .seq_show = btf_ptr_seq_show,
1356}; 1570};
@@ -1381,11 +1595,18 @@ static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1381 return 0; 1595 return 0;
1382} 1596}
1383 1597
1598static void btf_fwd_type_log(struct btf_verifier_env *env,
1599 const struct btf_type *t)
1600{
1601 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
1602}
1603
1384static struct btf_kind_operations fwd_ops = { 1604static struct btf_kind_operations fwd_ops = {
1385 .check_meta = btf_fwd_check_meta, 1605 .check_meta = btf_fwd_check_meta,
1386 .resolve = btf_df_resolve, 1606 .resolve = btf_df_resolve,
1387 .check_member = btf_df_check_member, 1607 .check_member = btf_df_check_member,
1388 .log_details = btf_ref_type_log, 1608 .check_kflag_member = btf_df_check_kflag_member,
1609 .log_details = btf_fwd_type_log,
1389 .seq_show = btf_df_seq_show, 1610 .seq_show = btf_df_seq_show,
1390}; 1611};
1391 1612
@@ -1443,6 +1664,11 @@ static s32 btf_array_check_meta(struct btf_verifier_env *env,
1443 return -EINVAL; 1664 return -EINVAL;
1444 } 1665 }
1445 1666
1667 if (btf_type_kflag(t)) {
1668 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1669 return -EINVAL;
1670 }
1671
1446 if (t->size) { 1672 if (t->size) {
1447 btf_verifier_log_type(env, t, "size != 0"); 1673 btf_verifier_log_type(env, t, "size != 0");
1448 return -EINVAL; 1674 return -EINVAL;
@@ -1566,6 +1792,7 @@ static struct btf_kind_operations array_ops = {
1566 .check_meta = btf_array_check_meta, 1792 .check_meta = btf_array_check_meta,
1567 .resolve = btf_array_resolve, 1793 .resolve = btf_array_resolve,
1568 .check_member = btf_array_check_member, 1794 .check_member = btf_array_check_member,
1795 .check_kflag_member = btf_generic_check_kflag_member,
1569 .log_details = btf_array_log, 1796 .log_details = btf_array_log,
1570 .seq_show = btf_array_seq_show, 1797 .seq_show = btf_array_seq_show,
1571}; 1798};
@@ -1604,6 +1831,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1604 u32 meta_needed, last_offset; 1831 u32 meta_needed, last_offset;
1605 struct btf *btf = env->btf; 1832 struct btf *btf = env->btf;
1606 u32 struct_size = t->size; 1833 u32 struct_size = t->size;
1834 u32 offset;
1607 u16 i; 1835 u16 i;
1608 1836
1609 meta_needed = btf_type_vlen(t) * sizeof(*member); 1837 meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1645,7 +1873,8 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1645 return -EINVAL; 1873 return -EINVAL;
1646 } 1874 }
1647 1875
1648 if (is_union && member->offset) { 1876 offset = btf_member_bit_offset(t, member);
1877 if (is_union && offset) {
1649 btf_verifier_log_member(env, t, member, 1878 btf_verifier_log_member(env, t, member,
1650 "Invalid member bits_offset"); 1879 "Invalid member bits_offset");
1651 return -EINVAL; 1880 return -EINVAL;
@@ -1655,20 +1884,20 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1655 * ">" instead of ">=" because the last member could be 1884 * ">" instead of ">=" because the last member could be
1656 * "char a[0];" 1885 * "char a[0];"
1657 */ 1886 */
1658 if (last_offset > member->offset) { 1887 if (last_offset > offset) {
1659 btf_verifier_log_member(env, t, member, 1888 btf_verifier_log_member(env, t, member,
1660 "Invalid member bits_offset"); 1889 "Invalid member bits_offset");
1661 return -EINVAL; 1890 return -EINVAL;
1662 } 1891 }
1663 1892
1664 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) { 1893 if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
1665 btf_verifier_log_member(env, t, member, 1894 btf_verifier_log_member(env, t, member,
1666 "Member bits_offset exceeds its struct size"); 1895 "Member bits_offset exceeds its struct size");
1667 return -EINVAL; 1896 return -EINVAL;
1668 } 1897 }
1669 1898
1670 btf_verifier_log_member(env, t, member, NULL); 1899 btf_verifier_log_member(env, t, member, NULL);
1671 last_offset = member->offset; 1900 last_offset = offset;
1672 } 1901 }
1673 1902
1674 return meta_needed; 1903 return meta_needed;
@@ -1698,9 +1927,14 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
1698 1927
1699 last_member_type = btf_type_by_id(env->btf, 1928 last_member_type = btf_type_by_id(env->btf,
1700 last_member_type_id); 1929 last_member_type_id);
1701 err = btf_type_ops(last_member_type)->check_member(env, v->t, 1930 if (btf_type_kflag(v->t))
1702 last_member, 1931 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
1703 last_member_type); 1932 last_member,
1933 last_member_type);
1934 else
1935 err = btf_type_ops(last_member_type)->check_member(env, v->t,
1936 last_member,
1937 last_member_type);
1704 if (err) 1938 if (err)
1705 return err; 1939 return err;
1706 } 1940 }
@@ -1722,9 +1956,14 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
1722 return env_stack_push(env, member_type, member_type_id); 1956 return env_stack_push(env, member_type, member_type_id);
1723 } 1957 }
1724 1958
1725 err = btf_type_ops(member_type)->check_member(env, v->t, 1959 if (btf_type_kflag(v->t))
1726 member, 1960 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
1727 member_type); 1961 member,
1962 member_type);
1963 else
1964 err = btf_type_ops(member_type)->check_member(env, v->t,
1965 member,
1966 member_type);
1728 if (err) 1967 if (err)
1729 return err; 1968 return err;
1730 } 1969 }
@@ -1752,17 +1991,26 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
1752 for_each_member(i, t, member) { 1991 for_each_member(i, t, member) {
1753 const struct btf_type *member_type = btf_type_by_id(btf, 1992 const struct btf_type *member_type = btf_type_by_id(btf,
1754 member->type); 1993 member->type);
1755 u32 member_offset = member->offset;
1756 u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
1757 u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
1758 const struct btf_kind_operations *ops; 1994 const struct btf_kind_operations *ops;
1995 u32 member_offset, bitfield_size;
1996 u32 bytes_offset;
1997 u8 bits8_offset;
1759 1998
1760 if (i) 1999 if (i)
1761 seq_puts(m, seq); 2000 seq_puts(m, seq);
1762 2001
1763 ops = btf_type_ops(member_type); 2002 member_offset = btf_member_bit_offset(t, member);
1764 ops->seq_show(btf, member_type, member->type, 2003 bitfield_size = btf_member_bitfield_size(t, member);
1765 data + bytes_offset, bits8_offset, m); 2004 if (bitfield_size) {
2005 btf_bitfield_seq_show(data, member_offset,
2006 bitfield_size, m);
2007 } else {
2008 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2009 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2010 ops = btf_type_ops(member_type);
2011 ops->seq_show(btf, member_type, member->type,
2012 data + bytes_offset, bits8_offset, m);
2013 }
1766 } 2014 }
1767 seq_puts(m, "}"); 2015 seq_puts(m, "}");
1768} 2016}
@@ -1771,6 +2019,7 @@ static struct btf_kind_operations struct_ops = {
1771 .check_meta = btf_struct_check_meta, 2019 .check_meta = btf_struct_check_meta,
1772 .resolve = btf_struct_resolve, 2020 .resolve = btf_struct_resolve,
1773 .check_member = btf_struct_check_member, 2021 .check_member = btf_struct_check_member,
2022 .check_kflag_member = btf_generic_check_kflag_member,
1774 .log_details = btf_struct_log, 2023 .log_details = btf_struct_log,
1775 .seq_show = btf_struct_seq_show, 2024 .seq_show = btf_struct_seq_show,
1776}; 2025};
@@ -1800,6 +2049,41 @@ static int btf_enum_check_member(struct btf_verifier_env *env,
1800 return 0; 2049 return 0;
1801} 2050}
1802 2051
2052static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
2053 const struct btf_type *struct_type,
2054 const struct btf_member *member,
2055 const struct btf_type *member_type)
2056{
2057 u32 struct_bits_off, nr_bits, bytes_end, struct_size;
2058 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
2059
2060 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2061 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2062 if (!nr_bits) {
2063 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2064 btf_verifier_log_member(env, struct_type, member,
2065 "Member is not byte aligned");
2066 return -EINVAL;
2067 }
2068
2069 nr_bits = int_bitsize;
2070 } else if (nr_bits > int_bitsize) {
2071 btf_verifier_log_member(env, struct_type, member,
2072 "Invalid member bitfield_size");
2073 return -EINVAL;
2074 }
2075
2076 struct_size = struct_type->size;
2077 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
2078 if (struct_size < bytes_end) {
2079 btf_verifier_log_member(env, struct_type, member,
2080 "Member exceeds struct_size");
2081 return -EINVAL;
2082 }
2083
2084 return 0;
2085}
2086
1803static s32 btf_enum_check_meta(struct btf_verifier_env *env, 2087static s32 btf_enum_check_meta(struct btf_verifier_env *env,
1804 const struct btf_type *t, 2088 const struct btf_type *t,
1805 u32 meta_left) 2089 u32 meta_left)
@@ -1819,6 +2103,11 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
1819 return -EINVAL; 2103 return -EINVAL;
1820 } 2104 }
1821 2105
2106 if (btf_type_kflag(t)) {
2107 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2108 return -EINVAL;
2109 }
2110
1822 if (t->size != sizeof(int)) { 2111 if (t->size != sizeof(int)) {
1823 btf_verifier_log_type(env, t, "Expected size:%zu", 2112 btf_verifier_log_type(env, t, "Expected size:%zu",
1824 sizeof(int)); 2113 sizeof(int));
@@ -1850,7 +2139,7 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
1850 2139
1851 2140
1852 btf_verifier_log(env, "\t%s val=%d\n", 2141 btf_verifier_log(env, "\t%s val=%d\n",
1853 btf_name_by_offset(btf, enums[i].name_off), 2142 __btf_name_by_offset(btf, enums[i].name_off),
1854 enums[i].val); 2143 enums[i].val);
1855 } 2144 }
1856 2145
@@ -1874,7 +2163,8 @@ static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
1874 for (i = 0; i < nr_enums; i++) { 2163 for (i = 0; i < nr_enums; i++) {
1875 if (v == enums[i].val) { 2164 if (v == enums[i].val) {
1876 seq_printf(m, "%s", 2165 seq_printf(m, "%s",
1877 btf_name_by_offset(btf, enums[i].name_off)); 2166 __btf_name_by_offset(btf,
2167 enums[i].name_off));
1878 return; 2168 return;
1879 } 2169 }
1880 } 2170 }
@@ -1886,6 +2176,7 @@ static struct btf_kind_operations enum_ops = {
1886 .check_meta = btf_enum_check_meta, 2176 .check_meta = btf_enum_check_meta,
1887 .resolve = btf_df_resolve, 2177 .resolve = btf_df_resolve,
1888 .check_member = btf_enum_check_member, 2178 .check_member = btf_enum_check_member,
2179 .check_kflag_member = btf_enum_check_kflag_member,
1889 .log_details = btf_enum_log, 2180 .log_details = btf_enum_log,
1890 .seq_show = btf_enum_seq_show, 2181 .seq_show = btf_enum_seq_show,
1891}; 2182};
@@ -1908,6 +2199,11 @@ static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
1908 return -EINVAL; 2199 return -EINVAL;
1909 } 2200 }
1910 2201
2202 if (btf_type_kflag(t)) {
2203 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2204 return -EINVAL;
2205 }
2206
1911 btf_verifier_log_type(env, t, NULL); 2207 btf_verifier_log_type(env, t, NULL);
1912 2208
1913 return meta_needed; 2209 return meta_needed;
@@ -1932,20 +2228,20 @@ static void btf_func_proto_log(struct btf_verifier_env *env,
1932 } 2228 }
1933 2229
1934 btf_verifier_log(env, "%u %s", args[0].type, 2230 btf_verifier_log(env, "%u %s", args[0].type,
1935 btf_name_by_offset(env->btf, 2231 __btf_name_by_offset(env->btf,
1936 args[0].name_off)); 2232 args[0].name_off));
1937 for (i = 1; i < nr_args - 1; i++) 2233 for (i = 1; i < nr_args - 1; i++)
1938 btf_verifier_log(env, ", %u %s", args[i].type, 2234 btf_verifier_log(env, ", %u %s", args[i].type,
1939 btf_name_by_offset(env->btf, 2235 __btf_name_by_offset(env->btf,
1940 args[i].name_off)); 2236 args[i].name_off));
1941 2237
1942 if (nr_args > 1) { 2238 if (nr_args > 1) {
1943 const struct btf_param *last_arg = &args[nr_args - 1]; 2239 const struct btf_param *last_arg = &args[nr_args - 1];
1944 2240
1945 if (last_arg->type) 2241 if (last_arg->type)
1946 btf_verifier_log(env, ", %u %s", last_arg->type, 2242 btf_verifier_log(env, ", %u %s", last_arg->type,
1947 btf_name_by_offset(env->btf, 2243 __btf_name_by_offset(env->btf,
1948 last_arg->name_off)); 2244 last_arg->name_off));
1949 else 2245 else
1950 btf_verifier_log(env, ", vararg"); 2246 btf_verifier_log(env, ", vararg");
1951 } 2247 }
@@ -1967,6 +2263,7 @@ static struct btf_kind_operations func_proto_ops = {
1967 * Hence, there is no btf_func_check_member(). 2263 * Hence, there is no btf_func_check_member().
1968 */ 2264 */
1969 .check_member = btf_df_check_member, 2265 .check_member = btf_df_check_member,
2266 .check_kflag_member = btf_df_check_kflag_member,
1970 .log_details = btf_func_proto_log, 2267 .log_details = btf_func_proto_log,
1971 .seq_show = btf_df_seq_show, 2268 .seq_show = btf_df_seq_show,
1972}; 2269};
@@ -1986,6 +2283,11 @@ static s32 btf_func_check_meta(struct btf_verifier_env *env,
1986 return -EINVAL; 2283 return -EINVAL;
1987 } 2284 }
1988 2285
2286 if (btf_type_kflag(t)) {
2287 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2288 return -EINVAL;
2289 }
2290
1989 btf_verifier_log_type(env, t, NULL); 2291 btf_verifier_log_type(env, t, NULL);
1990 2292
1991 return 0; 2293 return 0;
@@ -1995,6 +2297,7 @@ static struct btf_kind_operations func_ops = {
1995 .check_meta = btf_func_check_meta, 2297 .check_meta = btf_func_check_meta,
1996 .resolve = btf_df_resolve, 2298 .resolve = btf_df_resolve,
1997 .check_member = btf_df_check_member, 2299 .check_member = btf_df_check_member,
2300 .check_kflag_member = btf_df_check_kflag_member,
1998 .log_details = btf_ref_type_log, 2301 .log_details = btf_ref_type_log,
1999 .seq_show = btf_df_seq_show, 2302 .seq_show = btf_df_seq_show,
2000}; 2303};
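The new kflag checks bound every bitfield member so that the sub-byte part of its bit offset plus its bitfield size fits in 64 bits ("nr_copy_bits exceeds 64" above), which is what lets btf_bitfield_seq_show() copy at most eight bytes before shifting and masking. A user-space analogue of that arithmetic, assuming little-endian layout (the kernel handles both endiannesses); this is an illustration, not the kernel implementation:

    #include <stdint.h>
    #include <string.h>

    /* Extract nr_bits starting at bits_offset from a byte buffer, assuming
     * (bits_offset % 8) + nr_bits <= 64 as enforced by the checks above.
     */
    static uint64_t extract_bitfield(const uint8_t *data, uint32_t bits_offset,
                                     uint8_t nr_bits)
    {
            uint32_t bytes_offset = bits_offset / 8;
            uint8_t bit_offset = bits_offset % 8;
            uint8_t nr_copy_bytes = (bit_offset + nr_bits + 7) / 8;
            uint64_t val = 0;

            memcpy(&val, data + bytes_offset, nr_copy_bytes);
            val >>= bit_offset;
            if (nr_bits < 64)
                    val &= (1ULL << nr_bits) - 1;
            return val;
    }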
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 24aac0d0f412..8974b3755670 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -183,7 +183,7 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
183 * is not at a fixed memory location, with mixed length 183 * is not at a fixed memory location, with mixed length
184 * packets, which is bad for cache-line hotness. 184 * packets, which is bad for cache-line hotness.
185 */ 185 */
186 frame_size = SKB_DATA_ALIGN(xdpf->len) + xdpf->headroom + 186 frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
187 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 187 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
188 188
189 pkt_data_start = xdpf->data - xdpf->headroom; 189 pkt_data_start = xdpf->data - xdpf->headroom;
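The buffer handed to build_skb() starts at pkt_data_start, i.e. it covers the XDP headroom as well as the packet data, so rounding is now applied to the sum; this keeps the skb_shared_info placed at the end of the buffer cache-line aligned even when the headroom is not a multiple of the cache line. A quick numeric comparison of the two formulas, using 64 bytes as an assumed SMP_CACHE_BYTES and ignoring the unchanged skb_shared_info term:

    #include <stdio.h>

    #define CACHE 64                        /* assumed SMP_CACHE_BYTES */
    #define ALIGN_UP(x)     (((x) + CACHE - 1) & ~(CACHE - 1))

    int main(void)
    {
            unsigned int len = 100, headroom = 32;

            printf("old: %u\n", ALIGN_UP(len) + headroom);  /* 160, unaligned */
            printf("new: %u\n", ALIGN_UP(len + headroom));  /* 192, aligned   */
            return 0;
    }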
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index b65017dead44..07a34ef562a0 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -1,11 +1,13 @@
1//SPDX-License-Identifier: GPL-2.0 1//SPDX-License-Identifier: GPL-2.0
2#include <linux/bpf-cgroup.h> 2#include <linux/bpf-cgroup.h>
3#include <linux/bpf.h> 3#include <linux/bpf.h>
4#include <linux/btf.h>
4#include <linux/bug.h> 5#include <linux/bug.h>
5#include <linux/filter.h> 6#include <linux/filter.h>
6#include <linux/mm.h> 7#include <linux/mm.h>
7#include <linux/rbtree.h> 8#include <linux/rbtree.h>
8#include <linux/slab.h> 9#include <linux/slab.h>
10#include <uapi/linux/btf.h>
9 11
10DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); 12DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
11 13
@@ -308,6 +310,85 @@ static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
308 return -EINVAL; 310 return -EINVAL;
309} 311}
310 312
313static int cgroup_storage_check_btf(const struct bpf_map *map,
314 const struct btf *btf,
315 const struct btf_type *key_type,
316 const struct btf_type *value_type)
317{
318 struct btf_member *m;
319 u32 offset, size;
320
321 /* Key is expected to be of struct bpf_cgroup_storage_key type,
322 * which is:
323 * struct bpf_cgroup_storage_key {
324 * __u64 cgroup_inode_id;
325 * __u32 attach_type;
326 * };
327 */
328
329 /*
330 * Key_type must be a structure with two fields.
331 */
332 if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
333 BTF_INFO_VLEN(key_type->info) != 2)
334 return -EINVAL;
335
336 /*
337 * The first field must be a 64 bit integer at 0 offset.
338 */
339 m = (struct btf_member *)(key_type + 1);
340 size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
341 if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
342 return -EINVAL;
343
344 /*
345 * The second field must be a 32 bit integer at 64 bit offset.
346 */
347 m++;
348 offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
349 size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
350 if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
351 return -EINVAL;
352
353 return 0;
354}
355
356static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
357 struct seq_file *m)
358{
359 enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
360 struct bpf_cgroup_storage_key *key = _key;
361 struct bpf_cgroup_storage *storage;
362 int cpu;
363
364 rcu_read_lock();
365 storage = cgroup_storage_lookup(map_to_storage(map), key, false);
366 if (!storage) {
367 rcu_read_unlock();
368 return;
369 }
370
371 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
372 stype = cgroup_storage_type(map);
373 if (stype == BPF_CGROUP_STORAGE_SHARED) {
374 seq_puts(m, ": ");
375 btf_type_seq_show(map->btf, map->btf_value_type_id,
376 &READ_ONCE(storage->buf)->data[0], m);
377 seq_puts(m, "\n");
378 } else {
379 seq_puts(m, ": {\n");
380 for_each_possible_cpu(cpu) {
381 seq_printf(m, "\tcpu%d: ", cpu);
382 btf_type_seq_show(map->btf, map->btf_value_type_id,
383 per_cpu_ptr(storage->percpu_buf, cpu),
384 m);
385 seq_puts(m, "\n");
386 }
387 seq_puts(m, "}\n");
388 }
389 rcu_read_unlock();
390}
391
311const struct bpf_map_ops cgroup_storage_map_ops = { 392const struct bpf_map_ops cgroup_storage_map_ops = {
312 .map_alloc = cgroup_storage_map_alloc, 393 .map_alloc = cgroup_storage_map_alloc,
313 .map_free = cgroup_storage_map_free, 394 .map_free = cgroup_storage_map_free,
@@ -315,7 +396,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
315 .map_lookup_elem = cgroup_storage_lookup_elem, 396 .map_lookup_elem = cgroup_storage_lookup_elem,
316 .map_update_elem = cgroup_storage_update_elem, 397 .map_update_elem = cgroup_storage_update_elem,
317 .map_delete_elem = cgroup_storage_delete_elem, 398 .map_delete_elem = cgroup_storage_delete_elem,
318 .map_check_btf = map_check_no_btf, 399 .map_check_btf = cgroup_storage_check_btf,
400 .map_seq_show_elem = cgroup_storage_seq_show_elem,
319}; 401};
320 402
321int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) 403int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
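cgroup_storage_check_btf() only accepts a key whose BTF matches the struct bpf_cgroup_storage_key layout spelled out in the comment above: a regular 64-bit integer at offset 0 followed by a regular 32-bit integer at offset 8, and nothing else. A compile-time restatement of that layout contract (user-space C, field types mirrored from the UAPI struct):

    #include <stddef.h>
    #include <stdint.h>

    struct bpf_cgroup_storage_key {
            uint64_t cgroup_inode_id;       /* __u64 in the UAPI header */
            uint32_t attach_type;           /* __u32 attach type */
    };

    _Static_assert(offsetof(struct bpf_cgroup_storage_key, cgroup_inode_id) == 0,
                   "first member must sit at byte offset 0");
    _Static_assert(offsetof(struct bpf_cgroup_storage_key, attach_type) == 8,
                   "second member must sit at byte offset 8");
    _Static_assert(sizeof(((struct bpf_cgroup_storage_key *)0)->cgroup_inode_id) == 8 &&
                   sizeof(((struct bpf_cgroup_storage_key *)0)->attach_type) == 4,
                   "member sizes as checked by btf_member_is_reg_int()");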
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index bfd4882e1106..abf1002080df 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -728,6 +728,7 @@ free_stack:
728} 728}
729 729
730static int trie_check_btf(const struct bpf_map *map, 730static int trie_check_btf(const struct bpf_map *map,
731 const struct btf *btf,
731 const struct btf_type *key_type, 732 const struct btf_type *key_type,
732 const struct btf_type *value_type) 733 const struct btf_type *value_type)
733{ 734{
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5745c7837621..0607db304def 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -456,6 +456,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
456} 456}
457 457
458int map_check_no_btf(const struct bpf_map *map, 458int map_check_no_btf(const struct bpf_map *map,
459 const struct btf *btf,
459 const struct btf_type *key_type, 460 const struct btf_type *key_type,
460 const struct btf_type *value_type) 461 const struct btf_type *value_type)
461{ 462{
@@ -478,7 +479,7 @@ static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
478 return -EINVAL; 479 return -EINVAL;
479 480
480 if (map->ops->map_check_btf) 481 if (map->ops->map_check_btf)
481 ret = map->ops->map_check_btf(map, key_type, value_type); 482 ret = map->ops->map_check_btf(map, btf, key_type, value_type);
482 483
483 return ret; 484 return ret;
484} 485}
@@ -1472,11 +1473,6 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
1472 1473
1473 if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS) 1474 if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
1474 return -E2BIG; 1475 return -E2BIG;
1475
1476 if (type == BPF_PROG_TYPE_KPROBE &&
1477 attr->kern_version != LINUX_VERSION_CODE)
1478 return -EINVAL;
1479
1480 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 1476 if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
1481 type != BPF_PROG_TYPE_CGROUP_SKB && 1477 type != BPF_PROG_TYPE_CGROUP_SKB &&
1482 !capable(CAP_SYS_ADMIN)) 1478 !capable(CAP_SYS_ADMIN))
@@ -1608,6 +1604,7 @@ static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
1608 bpf_probe_unregister(raw_tp->btp, raw_tp->prog); 1604 bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
1609 bpf_prog_put(raw_tp->prog); 1605 bpf_prog_put(raw_tp->prog);
1610 } 1606 }
1607 bpf_put_raw_tracepoint(raw_tp->btp);
1611 kfree(raw_tp); 1608 kfree(raw_tp);
1612 return 0; 1609 return 0;
1613} 1610}
@@ -1633,13 +1630,15 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
1633 return -EFAULT; 1630 return -EFAULT;
1634 tp_name[sizeof(tp_name) - 1] = 0; 1631 tp_name[sizeof(tp_name) - 1] = 0;
1635 1632
1636 btp = bpf_find_raw_tracepoint(tp_name); 1633 btp = bpf_get_raw_tracepoint(tp_name);
1637 if (!btp) 1634 if (!btp)
1638 return -ENOENT; 1635 return -ENOENT;
1639 1636
1640 raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER); 1637 raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
1641 if (!raw_tp) 1638 if (!raw_tp) {
1642 return -ENOMEM; 1639 err = -ENOMEM;
1640 goto out_put_btp;
1641 }
1643 raw_tp->btp = btp; 1642 raw_tp->btp = btp;
1644 1643
1645 prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd, 1644 prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
@@ -1667,6 +1666,8 @@ out_put_prog:
1667 bpf_prog_put(prog); 1666 bpf_prog_put(prog);
1668out_free_tp: 1667out_free_tp:
1669 kfree(raw_tp); 1668 kfree(raw_tp);
1669out_put_btp:
1670 bpf_put_raw_tracepoint(btp);
1670 return err; 1671 return err;
1671} 1672}
1672 1673
@@ -2031,13 +2032,6 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
2031 insns[i + 1].imm = 0; 2032 insns[i + 1].imm = 0;
2032 continue; 2033 continue;
2033 } 2034 }
2034
2035 if (!bpf_dump_raw_ok() &&
2036 imm == (unsigned long)prog->aux) {
2037 insns[i].imm = 0;
2038 insns[i + 1].imm = 0;
2039 continue;
2040 }
2041 } 2035 }
2042 2036
2043 return insns; 2037 return insns;
@@ -2271,33 +2265,25 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2271 ulen = info.nr_func_info; 2265 ulen = info.nr_func_info;
2272 info.nr_func_info = prog->aux->func_info_cnt; 2266 info.nr_func_info = prog->aux->func_info_cnt;
2273 if (info.nr_func_info && ulen) { 2267 if (info.nr_func_info && ulen) {
2274 if (bpf_dump_raw_ok()) { 2268 char __user *user_finfo;
2275 char __user *user_finfo;
2276 2269
2277 user_finfo = u64_to_user_ptr(info.func_info); 2270 user_finfo = u64_to_user_ptr(info.func_info);
2278 ulen = min_t(u32, info.nr_func_info, ulen); 2271 ulen = min_t(u32, info.nr_func_info, ulen);
2279 if (copy_to_user(user_finfo, prog->aux->func_info, 2272 if (copy_to_user(user_finfo, prog->aux->func_info,
2280 info.func_info_rec_size * ulen)) 2273 info.func_info_rec_size * ulen))
2281 return -EFAULT; 2274 return -EFAULT;
2282 } else {
2283 info.func_info = 0;
2284 }
2285 } 2275 }
2286 2276
2287 ulen = info.nr_line_info; 2277 ulen = info.nr_line_info;
2288 info.nr_line_info = prog->aux->nr_linfo; 2278 info.nr_line_info = prog->aux->nr_linfo;
2289 if (info.nr_line_info && ulen) { 2279 if (info.nr_line_info && ulen) {
2290 if (bpf_dump_raw_ok()) { 2280 __u8 __user *user_linfo;
2291 __u8 __user *user_linfo;
2292 2281
2293 user_linfo = u64_to_user_ptr(info.line_info); 2282 user_linfo = u64_to_user_ptr(info.line_info);
2294 ulen = min_t(u32, info.nr_line_info, ulen); 2283 ulen = min_t(u32, info.nr_line_info, ulen);
2295 if (copy_to_user(user_linfo, prog->aux->linfo, 2284 if (copy_to_user(user_linfo, prog->aux->linfo,
2296 info.line_info_rec_size * ulen)) 2285 info.line_info_rec_size * ulen))
2297 return -EFAULT; 2286 return -EFAULT;
2298 } else {
2299 info.line_info = 0;
2300 }
2301 } 2287 }
2302 2288
2303 ulen = info.nr_jited_line_info; 2289 ulen = info.nr_jited_line_info;
@@ -2322,6 +2308,28 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
2322 } 2308 }
2323 } 2309 }
2324 2310
2311 ulen = info.nr_prog_tags;
2312 info.nr_prog_tags = prog->aux->func_cnt ? : 1;
2313 if (ulen) {
2314 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
2315 u32 i;
2316
2317 user_prog_tags = u64_to_user_ptr(info.prog_tags);
2318 ulen = min_t(u32, info.nr_prog_tags, ulen);
2319 if (prog->aux->func_cnt) {
2320 for (i = 0; i < ulen; i++) {
2321 if (copy_to_user(user_prog_tags[i],
2322 prog->aux->func[i]->tag,
2323 BPF_TAG_SIZE))
2324 return -EFAULT;
2325 }
2326 } else {
2327 if (copy_to_user(user_prog_tags[0],
2328 prog->tag, BPF_TAG_SIZE))
2329 return -EFAULT;
2330 }
2331 }
2332
2325done: 2333done:
2326 if (copy_to_user(uinfo, &info, info_len) || 2334 if (copy_to_user(uinfo, &info, info_len) ||
2327 put_user(info_len, &uattr->info.info_len)) 2335 put_user(info_len, &uattr->info.info_len))
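Illustrative aside (not part of the patch): with func_info/line_info no longer gated on bpf_dump_raw_ok() and the new nr_prog_tags/prog_tags fields filled in, user space can fetch one 8-byte tag per sub-program via BPF_OBJ_GET_INFO_BY_FD. A rough sketch of the usual two-call pattern, assuming libbpf's bpf_obj_get_info_by_fd() (header path may differ by setup) and an already-obtained prog_fd:

```c
#include <stdio.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>	/* bpf_obj_get_info_by_fd() from libbpf */

/* Print the 8-byte tag of every sub-program of an already-loaded program. */
int print_prog_tags(int prog_fd)
{
	__u8 tags[64][BPF_TAG_SIZE];	/* room for up to 64 sub-programs */
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	__u32 nr, i, j;

	/* First call with nr_prog_tags == 0: the kernel only reports the count. */
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
		return -1;
	nr = info.nr_prog_tags < 64 ? info.nr_prog_tags : 64;

	/* Second call: provide a buffer. All other nr_* fields stay zero so
	 * nothing else is copied out.
	 */
	memset(&info, 0, sizeof(info));
	info.nr_prog_tags = nr;
	info.prog_tags = (__u64)(unsigned long)tags;
	len = sizeof(info);
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
		return -1;

	for (i = 0; i < nr; i++) {
		for (j = 0; j < BPF_TAG_SIZE; j++)
			printf("%02x", tags[i][j]);
		printf("\n");
	}
	return 0;
}
```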
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5b3c0a95244f..71d86e3024ae 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -26,6 +26,7 @@
26#include <linux/bsearch.h> 26#include <linux/bsearch.h>
27#include <linux/sort.h> 27#include <linux/sort.h>
28#include <linux/perf_event.h> 28#include <linux/perf_event.h>
29#include <linux/ctype.h>
29 30
30#include "disasm.h" 31#include "disasm.h"
31 32
@@ -216,6 +217,27 @@ struct bpf_call_arg_meta {
216 217
217static DEFINE_MUTEX(bpf_verifier_lock); 218static DEFINE_MUTEX(bpf_verifier_lock);
218 219
220static const struct bpf_line_info *
221find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
222{
223 const struct bpf_line_info *linfo;
224 const struct bpf_prog *prog;
225 u32 i, nr_linfo;
226
227 prog = env->prog;
228 nr_linfo = prog->aux->nr_linfo;
229
230 if (!nr_linfo || insn_off >= prog->len)
231 return NULL;
232
233 linfo = prog->aux->linfo;
234 for (i = 1; i < nr_linfo; i++)
235 if (insn_off < linfo[i].insn_off)
236 break;
237
238 return &linfo[i - 1];
239}
240
219void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, 241void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
220 va_list args) 242 va_list args)
221{ 243{
@@ -266,6 +288,42 @@ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
266 va_end(args); 288 va_end(args);
267} 289}
268 290
291static const char *ltrim(const char *s)
292{
293 while (isspace(*s))
294 s++;
295
296 return s;
297}
298
299__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
300 u32 insn_off,
301 const char *prefix_fmt, ...)
302{
303 const struct bpf_line_info *linfo;
304
305 if (!bpf_verifier_log_needed(&env->log))
306 return;
307
308 linfo = find_linfo(env, insn_off);
309 if (!linfo || linfo == env->prev_linfo)
310 return;
311
312 if (prefix_fmt) {
313 va_list args;
314
315 va_start(args, prefix_fmt);
316 bpf_verifier_vlog(&env->log, prefix_fmt, args);
317 va_end(args);
318 }
319
320 verbose(env, "%s\n",
321 ltrim(btf_name_by_offset(env->prog->aux->btf,
322 linfo->line_off)));
323
324 env->prev_linfo = linfo;
325}
326
269static bool type_is_pkt_pointer(enum bpf_reg_type type) 327static bool type_is_pkt_pointer(enum bpf_reg_type type)
270{ 328{
271 return type == PTR_TO_PACKET || 329 return type == PTR_TO_PACKET ||
@@ -339,12 +397,14 @@ static char slot_type_char[] = {
339static void print_liveness(struct bpf_verifier_env *env, 397static void print_liveness(struct bpf_verifier_env *env,
340 enum bpf_reg_liveness live) 398 enum bpf_reg_liveness live)
341{ 399{
342 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN)) 400 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
343 verbose(env, "_"); 401 verbose(env, "_");
344 if (live & REG_LIVE_READ) 402 if (live & REG_LIVE_READ)
345 verbose(env, "r"); 403 verbose(env, "r");
346 if (live & REG_LIVE_WRITTEN) 404 if (live & REG_LIVE_WRITTEN)
347 verbose(env, "w"); 405 verbose(env, "w");
406 if (live & REG_LIVE_DONE)
407 verbose(env, "D");
348} 408}
349 409
350static struct bpf_func_state *func(struct bpf_verifier_env *env, 410static struct bpf_func_state *func(struct bpf_verifier_env *env,
@@ -1074,6 +1134,12 @@ static int mark_reg_read(struct bpf_verifier_env *env,
1074 /* if read wasn't screened by an earlier write ... */ 1134 /* if read wasn't screened by an earlier write ... */
1075 if (writes && state->live & REG_LIVE_WRITTEN) 1135 if (writes && state->live & REG_LIVE_WRITTEN)
1076 break; 1136 break;
1137 if (parent->live & REG_LIVE_DONE) {
1138 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
1139 reg_type_str[parent->type],
1140 parent->var_off.value, parent->off);
1141 return -EFAULT;
1142 }
1077 /* ... then we depend on parent's value */ 1143 /* ... then we depend on parent's value */
1078 parent->live |= REG_LIVE_READ; 1144 parent->live |= REG_LIVE_READ;
1079 state = parent; 1145 state = parent;
@@ -1220,6 +1286,10 @@ static int check_stack_write(struct bpf_verifier_env *env,
1220 1286
1221 /* regular write of data into stack destroys any spilled ptr */ 1287 /* regular write of data into stack destroys any spilled ptr */
1222 state->stack[spi].spilled_ptr.type = NOT_INIT; 1288 state->stack[spi].spilled_ptr.type = NOT_INIT;
1289 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
1290 if (state->stack[spi].slot_type[0] == STACK_SPILL)
1291 for (i = 0; i < BPF_REG_SIZE; i++)
1292 state->stack[spi].slot_type[i] = STACK_MISC;
1223 1293
1224 /* only mark the slot as written if all 8 bytes were written 1294 /* only mark the slot as written if all 8 bytes were written
1225 * otherwise read propagation may incorrectly stop too soon 1295 * otherwise read propagation may incorrectly stop too soon
@@ -1237,6 +1307,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
1237 register_is_null(&cur->regs[value_regno])) 1307 register_is_null(&cur->regs[value_regno]))
1238 type = STACK_ZERO; 1308 type = STACK_ZERO;
1239 1309
1310 /* Mark slots affected by this stack write. */
1240 for (i = 0; i < size; i++) 1311 for (i = 0; i < size; i++)
1241 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 1312 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
1242 type; 1313 type;
@@ -3788,6 +3859,12 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
3788 if (tnum_is_const(reg->var_off)) 3859 if (tnum_is_const(reg->var_off))
3789 return !tnum_equals_const(reg->var_off, val); 3860 return !tnum_equals_const(reg->var_off, val);
3790 break; 3861 break;
3862 case BPF_JSET:
3863 if ((~reg->var_off.mask & reg->var_off.value) & val)
3864 return 1;
3865 if (!((reg->var_off.mask | reg->var_off.value) & val))
3866 return 0;
3867 break;
3791 case BPF_JGT: 3868 case BPF_JGT:
3792 if (reg->umin_value > val) 3869 if (reg->umin_value > val)
3793 return 1; 3870 return 1;
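Illustrative aside (not part of the patch): the two JSET checks added above follow directly from tnum semantics — if a bit known to be 1 overlaps val, the branch is always taken; if no possibly-set bit overlaps val, it is never taken. A minimal user-space sketch of that decision, using a simplified stand-in for the kernel's struct tnum:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct tnum: "value" holds the bits
 * known to be 1, "mask" holds the bits whose value is unknown.
 */
struct tnum { uint64_t value; uint64_t mask; };

/* Mirrors the BPF_JSET case in is_branch_taken(): 1 = "if (reg & val)" is
 * always true, 0 = never true, -1 = depends on unknown bits.
 */
static int jset_branch_taken(struct tnum reg, uint64_t val)
{
	if ((~reg.mask & reg.value) & val)	/* a known-1 bit overlaps val */
		return 1;
	if (!((reg.mask | reg.value) & val))	/* no possibly-1 bit overlaps val */
		return 0;
	return -1;
}

int main(void)
{
	/* bit 2 known set, bits 4-7 unknown, everything else known clear */
	struct tnum reg = { .value = 0x4, .mask = 0xf0 };

	printf("%d\n", jset_branch_taken(reg, 0x04));	/* 1: always taken */
	printf("%d\n", jset_branch_taken(reg, 0x08));	/* 0: never taken  */
	printf("%d\n", jset_branch_taken(reg, 0x10));	/* -1: can't prune */
	return 0;
}
```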
@@ -3872,6 +3949,13 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
3872 */ 3949 */
3873 __mark_reg_known(false_reg, val); 3950 __mark_reg_known(false_reg, val);
3874 break; 3951 break;
3952 case BPF_JSET:
3953 false_reg->var_off = tnum_and(false_reg->var_off,
3954 tnum_const(~val));
3955 if (is_power_of_2(val))
3956 true_reg->var_off = tnum_or(true_reg->var_off,
3957 tnum_const(val));
3958 break;
3875 case BPF_JGT: 3959 case BPF_JGT:
3876 false_reg->umax_value = min(false_reg->umax_value, val); 3960 false_reg->umax_value = min(false_reg->umax_value, val);
3877 true_reg->umin_value = max(true_reg->umin_value, val + 1); 3961 true_reg->umin_value = max(true_reg->umin_value, val + 1);
@@ -3944,6 +4028,13 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
3944 */ 4028 */
3945 __mark_reg_known(false_reg, val); 4029 __mark_reg_known(false_reg, val);
3946 break; 4030 break;
4031 case BPF_JSET:
4032 false_reg->var_off = tnum_and(false_reg->var_off,
4033 tnum_const(~val));
4034 if (is_power_of_2(val))
4035 true_reg->var_off = tnum_or(true_reg->var_off,
4036 tnum_const(val));
4037 break;
3947 case BPF_JGT: 4038 case BPF_JGT:
3948 true_reg->umax_value = min(true_reg->umax_value, val - 1); 4039 true_reg->umax_value = min(true_reg->umax_value, val - 1);
3949 false_reg->umin_value = max(false_reg->umin_value, val); 4040 false_reg->umin_value = max(false_reg->umin_value, val);
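Illustrative aside (not part of the patch): the matching reg_set_min_max()/reg_set_min_max_inv() hunks refine var_off on each branch — the false branch learns that every bit of val is 0, while the true branch only learns which bit is 1 when val is a power of two (with a multi-bit val, any one of the bits could have made the test true). A sketch of those refinements in the same simplified tnum model, redeclared here so the snippet stands alone:

```c
#include <stdint.h>
#include <stdio.h>

struct tnum { uint64_t value; uint64_t mask; };	/* as in the previous sketch */

/* tnum_and(a, tnum_const(c)) specialised to a constant operand. */
static struct tnum tnum_and_const(struct tnum a, uint64_t c)
{
	return (struct tnum){ .value = a.value & c, .mask = a.mask & c };
}

/* tnum_or(a, tnum_const(c)): OR-ing a constant makes those bits known-1. */
static struct tnum tnum_or_const(struct tnum a, uint64_t c)
{
	return (struct tnum){ .value = a.value | c, .mask = a.mask & ~c };
}

/* "if (reg & val)" was false: every bit of val must be 0 in reg. */
static struct tnum jset_false_branch(struct tnum reg, uint64_t val)
{
	return tnum_and_const(reg, ~val);
}

/* "if (reg & val)" was true: only a single-bit val tells us which bit is 1. */
static struct tnum jset_true_branch(struct tnum reg, uint64_t val)
{
	if (val && !(val & (val - 1)))	/* is_power_of_2(val) */
		reg = tnum_or_const(reg, val);
	return reg;
}

int main(void)
{
	struct tnum reg = { .value = 0x0, .mask = 0xff };	/* low byte unknown */
	struct tnum f = jset_false_branch(reg, 0x0c);	/* bits 2-3 now known 0 */
	struct tnum t = jset_true_branch(reg, 0x10);	/* bit 4 now known 1 */

	printf("false: value=%#llx mask=%#llx\n",
	       (unsigned long long)f.value, (unsigned long long)f.mask);
	printf("true : value=%#llx mask=%#llx\n",
	       (unsigned long long)t.value, (unsigned long long)t.mask);
	return 0;
}
```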
@@ -4561,6 +4652,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
4561 return 0; 4652 return 0;
4562 4653
4563 if (w < 0 || w >= env->prog->len) { 4654 if (w < 0 || w >= env->prog->len) {
4655 verbose_linfo(env, t, "%d: ", t);
4564 verbose(env, "jump out of range from insn %d to %d\n", t, w); 4656 verbose(env, "jump out of range from insn %d to %d\n", t, w);
4565 return -EINVAL; 4657 return -EINVAL;
4566 } 4658 }
@@ -4578,6 +4670,8 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
4578 insn_stack[cur_stack++] = w; 4670 insn_stack[cur_stack++] = w;
4579 return 1; 4671 return 1;
4580 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 4672 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
4673 verbose_linfo(env, t, "%d: ", t);
4674 verbose_linfo(env, w, "%d: ", w);
4581 verbose(env, "back-edge from insn %d to %d\n", t, w); 4675 verbose(env, "back-edge from insn %d to %d\n", t, w);
4582 return -EINVAL; 4676 return -EINVAL;
4583 } else if (insn_state[w] == EXPLORED) { 4677 } else if (insn_state[w] == EXPLORED) {
@@ -4600,10 +4694,6 @@ static int check_cfg(struct bpf_verifier_env *env)
4600 int ret = 0; 4694 int ret = 0;
4601 int i, t; 4695 int i, t;
4602 4696
4603 ret = check_subprogs(env);
4604 if (ret < 0)
4605 return ret;
4606
4607 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 4697 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4608 if (!insn_state) 4698 if (!insn_state)
4609 return -ENOMEM; 4699 return -ENOMEM;
@@ -4910,8 +5000,16 @@ static int check_btf_line(struct bpf_verifier_env *env,
4910 goto err_free; 5000 goto err_free;
4911 } 5001 }
4912 5002
4913 if (!btf_name_offset_valid(btf, linfo[i].line_off) || 5003 if (!prog->insnsi[linfo[i].insn_off].code) {
4914 !btf_name_offset_valid(btf, linfo[i].file_name_off)) { 5004 verbose(env,
5005 "Invalid insn code at line_info[%u].insn_off\n",
5006 i);
5007 err = -EINVAL;
5008 goto err_free;
5009 }
5010
5011 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
5012 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
4915 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 5013 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
4916 err = -EINVAL; 5014 err = -EINVAL;
4917 goto err_free; 5015 goto err_free;
@@ -5021,6 +5119,102 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
5021 return false; 5119 return false;
5022} 5120}
5023 5121
5122static void clean_func_state(struct bpf_verifier_env *env,
5123 struct bpf_func_state *st)
5124{
5125 enum bpf_reg_liveness live;
5126 int i, j;
5127
5128 for (i = 0; i < BPF_REG_FP; i++) {
5129 live = st->regs[i].live;
5130 /* liveness must not touch this register anymore */
5131 st->regs[i].live |= REG_LIVE_DONE;
5132 if (!(live & REG_LIVE_READ))
5133 /* since the register is unused, clear its state
5134 * to make further comparison simpler
5135 */
5136 __mark_reg_not_init(&st->regs[i]);
5137 }
5138
5139 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
5140 live = st->stack[i].spilled_ptr.live;
5141 /* liveness must not touch this stack slot anymore */
5142 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
5143 if (!(live & REG_LIVE_READ)) {
5144 __mark_reg_not_init(&st->stack[i].spilled_ptr);
5145 for (j = 0; j < BPF_REG_SIZE; j++)
5146 st->stack[i].slot_type[j] = STACK_INVALID;
5147 }
5148 }
5149}
5150
5151static void clean_verifier_state(struct bpf_verifier_env *env,
5152 struct bpf_verifier_state *st)
5153{
5154 int i;
5155
5156 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
5157 /* all regs in this state in all frames were already marked */
5158 return;
5159
5160 for (i = 0; i <= st->curframe; i++)
5161 clean_func_state(env, st->frame[i]);
5162}
5163
5164/* the parentage chains form a tree.
5165 * the verifier states are added to state lists at given insn and
5166 * pushed into state stack for future exploration.
5167 * when the verifier reaches bpf_exit insn some of the verifier states
5168 * stored in the state lists have their final liveness state already,
5169 * but a lot of states will get revised from liveness point of view when
5170 * the verifier explores other branches.
5171 * Example:
5172 * 1: r0 = 1
5173 * 2: if r1 == 100 goto pc+1
5174 * 3: r0 = 2
5175 * 4: exit
5176 * when the verifier reaches exit insn the register r0 in the state list of
5177 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
5178 * of insn 2 and goes exploring further. At the insn 4 it will walk the
5179 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
5180 *
5181 * Since the verifier pushes the branch states as it sees them while exploring
5182 * the program, encountering the branch instruction for the second time means
5183 * that all states below this branch were already explored and
5184 * their final liveness marks are already propagated.
5185 * Hence, when the verifier completes the search of the state list in is_state_visited()
5186 * we can call this clean_live_states() function to mark all liveness states
5187 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
5188 * will not be used.
5189 * This function also clears the registers and stack slots that were never
5190 * read (!REG_LIVE_READ) to simplify state merging.
5191 *
5192 * An important note here: walking the same branch instruction in the callee
5193 * doesn't mean that the states are DONE. The verifier has to compare
5194 * the callsites as well.
5195 */
5196static void clean_live_states(struct bpf_verifier_env *env, int insn,
5197 struct bpf_verifier_state *cur)
5198{
5199 struct bpf_verifier_state_list *sl;
5200 int i;
5201
5202 sl = env->explored_states[insn];
5203 if (!sl)
5204 return;
5205
5206 while (sl != STATE_LIST_MARK) {
5207 if (sl->state.curframe != cur->curframe)
5208 goto next;
5209 for (i = 0; i <= cur->curframe; i++)
5210 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
5211 goto next;
5212 clean_verifier_state(env, &sl->state);
5213next:
5214 sl = sl->next;
5215 }
5216}
5217
5024/* Returns true if (rold safe implies rcur safe) */ 5218/* Returns true if (rold safe implies rcur safe) */
5025static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 5219static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
5026 struct idpair *idmap) 5220 struct idpair *idmap)
@@ -5134,12 +5328,6 @@ static bool stacksafe(struct bpf_func_state *old,
5134{ 5328{
5135 int i, spi; 5329 int i, spi;
5136 5330
5137 /* if explored stack has more populated slots than current stack
5138 * such stacks are not equivalent
5139 */
5140 if (old->allocated_stack > cur->allocated_stack)
5141 return false;
5142
5143 /* walk slots of the explored stack and ignore any additional 5331 /* walk slots of the explored stack and ignore any additional
5144 * slots in the current stack, since explored(safe) state 5332 * slots in the current stack, since explored(safe) state
5145 * didn't use them 5333 * didn't use them
@@ -5147,12 +5335,21 @@ static bool stacksafe(struct bpf_func_state *old,
5147 for (i = 0; i < old->allocated_stack; i++) { 5335 for (i = 0; i < old->allocated_stack; i++) {
5148 spi = i / BPF_REG_SIZE; 5336 spi = i / BPF_REG_SIZE;
5149 5337
5150 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) 5338 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
5339 i += BPF_REG_SIZE - 1;
5151 /* explored state didn't use this */ 5340 /* explored state didn't use this */
5152 continue; 5341 continue;
5342 }
5153 5343
5154 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 5344 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
5155 continue; 5345 continue;
5346
5347 /* explored stack has more populated slots than current stack
5348 * and these slots were used
5349 */
5350 if (i >= cur->allocated_stack)
5351 return false;
5352
5156 /* if old state was safe with misc data in the stack 5353 /* if old state was safe with misc data in the stack
5157 * it will be safe with zero-initialized stack. 5354 * it will be safe with zero-initialized stack.
5158 * The opposite is not true 5355 * The opposite is not true
@@ -5336,6 +5533,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
5336 */ 5533 */
5337 return 0; 5534 return 0;
5338 5535
5536 clean_live_states(env, insn_idx, cur);
5537
5339 while (sl != STATE_LIST_MARK) { 5538 while (sl != STATE_LIST_MARK) {
5340 if (states_equal(env, &sl->state, cur)) { 5539 if (states_equal(env, &sl->state, cur)) {
5341 /* reached equivalent register/stack state, 5540 /* reached equivalent register/stack state,
@@ -5455,6 +5654,8 @@ static int do_check(struct bpf_verifier_env *env)
5455 int insn_processed = 0; 5654 int insn_processed = 0;
5456 bool do_print_state = false; 5655 bool do_print_state = false;
5457 5656
5657 env->prev_linfo = NULL;
5658
5458 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 5659 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
5459 if (!state) 5660 if (!state)
5460 return -ENOMEM; 5661 return -ENOMEM;
@@ -5528,6 +5729,7 @@ static int do_check(struct bpf_verifier_env *env)
5528 .private_data = env, 5729 .private_data = env,
5529 }; 5730 };
5530 5731
5732 verbose_linfo(env, insn_idx, "; ");
5531 verbose(env, "%d: ", insn_idx); 5733 verbose(env, "%d: ", insn_idx);
5532 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 5734 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
5533 } 5735 }
@@ -6762,7 +6964,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
6762 6964
6763 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 6965 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
6764 6966
6765 ret = check_cfg(env); 6967 ret = check_subprogs(env);
6766 if (ret < 0) 6968 if (ret < 0)
6767 goto skip_full_check; 6969 goto skip_full_check;
6768 6970
@@ -6770,6 +6972,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
6770 if (ret < 0) 6972 if (ret < 0)
6771 goto skip_full_check; 6973 goto skip_full_check;
6772 6974
6975 ret = check_cfg(env);
6976 if (ret < 0)
6977 goto skip_full_check;
6978
6773 ret = do_check(env); 6979 ret = do_check(env);
6774 if (env->cur_state) { 6980 if (env->cur_state) {
6775 free_verifier_state(env->cur_state, true); 6981 free_verifier_state(env->cur_state, true);
@@ -6784,10 +6990,11 @@ skip_full_check:
6784 free_states(env); 6990 free_states(env);
6785 6991
6786 if (ret == 0) 6992 if (ret == 0)
6787 sanitize_dead_code(env); 6993 ret = check_max_stack_depth(env);
6788 6994
6995 /* instruction rewrites happen after this point */
6789 if (ret == 0) 6996 if (ret == 0)
6790 ret = check_max_stack_depth(env); 6997 sanitize_dead_code(env);
6791 6998
6792 if (ret == 0) 6999 if (ret == 0)
6793 /* program is valid, convert *(u32*)(ctx + off) accesses */ 7000 /* program is valid, convert *(u32*)(ctx + off) accesses */
diff --git a/kernel/module.c b/kernel/module.c
index 49a405891587..06ec68f08387 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3093,6 +3093,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
3093 sizeof(*mod->tracepoints_ptrs), 3093 sizeof(*mod->tracepoints_ptrs),
3094 &mod->num_tracepoints); 3094 &mod->num_tracepoints);
3095#endif 3095#endif
3096#ifdef CONFIG_BPF_EVENTS
3097 mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
3098 sizeof(*mod->bpf_raw_events),
3099 &mod->num_bpf_raw_events);
3100#endif
3096#ifdef HAVE_JUMP_LABEL 3101#ifdef HAVE_JUMP_LABEL
3097 mod->jump_entries = section_objs(info, "__jump_table", 3102 mod->jump_entries = section_objs(info, "__jump_table",
3098 sizeof(*mod->jump_entries), 3103 sizeof(*mod->jump_entries),
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9864a35c8bb5..9ddb6fddb4e0 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -17,6 +17,43 @@
17#include "trace_probe.h" 17#include "trace_probe.h"
18#include "trace.h" 18#include "trace.h"
19 19
20#ifdef CONFIG_MODULES
21struct bpf_trace_module {
22 struct module *module;
23 struct list_head list;
24};
25
26static LIST_HEAD(bpf_trace_modules);
27static DEFINE_MUTEX(bpf_module_mutex);
28
29static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
30{
31 struct bpf_raw_event_map *btp, *ret = NULL;
32 struct bpf_trace_module *btm;
33 unsigned int i;
34
35 mutex_lock(&bpf_module_mutex);
36 list_for_each_entry(btm, &bpf_trace_modules, list) {
37 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
38 btp = &btm->module->bpf_raw_events[i];
39 if (!strcmp(btp->tp->name, name)) {
40 if (try_module_get(btm->module))
41 ret = btp;
42 goto out;
43 }
44 }
45 }
46out:
47 mutex_unlock(&bpf_module_mutex);
48 return ret;
49}
50#else
51static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
52{
53 return NULL;
54}
55#endif /* CONFIG_MODULES */
56
20u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 57u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
21u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 58u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
22 59
@@ -1076,7 +1113,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1076extern struct bpf_raw_event_map __start__bpf_raw_tp[]; 1113extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1077extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; 1114extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1078 1115
1079struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name) 1116struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1080{ 1117{
1081 struct bpf_raw_event_map *btp = __start__bpf_raw_tp; 1118 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1082 1119
@@ -1084,7 +1121,16 @@ struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
1084 if (!strcmp(btp->tp->name, name)) 1121 if (!strcmp(btp->tp->name, name))
1085 return btp; 1122 return btp;
1086 } 1123 }
1087 return NULL; 1124
1125 return bpf_get_raw_tracepoint_module(name);
1126}
1127
1128void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1129{
1130 struct module *mod = __module_address((unsigned long)btp);
1131
1132 if (mod)
1133 module_put(mod);
1088} 1134}
1089 1135
1090static __always_inline 1136static __always_inline
@@ -1222,3 +1268,52 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1222 1268
1223 return err; 1269 return err;
1224} 1270}
1271
1272#ifdef CONFIG_MODULES
1273int bpf_event_notify(struct notifier_block *nb, unsigned long op, void *module)
1274{
1275 struct bpf_trace_module *btm, *tmp;
1276 struct module *mod = module;
1277
1278 if (mod->num_bpf_raw_events == 0 ||
1279 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1280 return 0;
1281
1282 mutex_lock(&bpf_module_mutex);
1283
1284 switch (op) {
1285 case MODULE_STATE_COMING:
1286 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1287 if (btm) {
1288 btm->module = module;
1289 list_add(&btm->list, &bpf_trace_modules);
1290 }
1291 break;
1292 case MODULE_STATE_GOING:
1293 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1294 if (btm->module == module) {
1295 list_del(&btm->list);
1296 kfree(btm);
1297 break;
1298 }
1299 }
1300 break;
1301 }
1302
1303 mutex_unlock(&bpf_module_mutex);
1304
1305 return 0;
1306}
1307
1308static struct notifier_block bpf_module_nb = {
1309 .notifier_call = bpf_event_notify,
1310};
1311
1312int __init bpf_event_init(void)
1313{
1314 register_module_notifier(&bpf_module_nb);
1315 return 0;
1316}
1317
1318fs_initcall(bpf_event_init);
1319#endif /* CONFIG_MODULES */
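Illustrative aside (not part of the patch): bpf_get_raw_tracepoint() now falls back to the __bpf_raw_tp_map sections of loaded modules and takes a module reference, which bpf_put_raw_tracepoint() drops on detach. From user space the interface is unchanged — BPF_RAW_TRACEPOINT_OPEN is keyed purely by tracepoint name. A minimal sketch of the open call via the raw bpf(2) syscall, assuming prog_fd refers to a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program:

```c
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Attach an already-loaded raw-tracepoint program to a tracepoint by name.
 * The kernel searches built-in tracepoints first, then module-provided
 * __bpf_raw_tp_map entries; releasing the returned fd drops the module ref.
 */
int raw_tp_open(const char *tp_name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (__u64)(unsigned long)tp_name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
```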
diff --git a/net/core/filter.c b/net/core/filter.c
index f9348806e843..447dd1bad31f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6313,6 +6313,9 @@ static bool sk_msg_is_valid_access(int off, int size,
6313 if (type == BPF_WRITE) 6313 if (type == BPF_WRITE)
6314 return false; 6314 return false;
6315 6315
6316 if (off % size != 0)
6317 return false;
6318
6316 switch (off) { 6319 switch (off) {
6317 case offsetof(struct sk_msg_md, data): 6320 case offsetof(struct sk_msg_md, data):
6318 info->reg_type = PTR_TO_PACKET; 6321 info->reg_type = PTR_TO_PACKET;
@@ -6324,16 +6327,20 @@ static bool sk_msg_is_valid_access(int off, int size,
6324 if (size != sizeof(__u64)) 6327 if (size != sizeof(__u64))
6325 return false; 6328 return false;
6326 break; 6329 break;
6327 default: 6330 case bpf_ctx_range(struct sk_msg_md, family):
6331 case bpf_ctx_range(struct sk_msg_md, remote_ip4):
6332 case bpf_ctx_range(struct sk_msg_md, local_ip4):
6333 case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
6334 case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
6335 case bpf_ctx_range(struct sk_msg_md, remote_port):
6336 case bpf_ctx_range(struct sk_msg_md, local_port):
6337 case bpf_ctx_range(struct sk_msg_md, size):
6328 if (size != sizeof(__u32)) 6338 if (size != sizeof(__u32))
6329 return false; 6339 return false;
6330 } 6340 break;
6331 6341 default:
6332 if (off < 0 || off >= sizeof(struct sk_msg_md))
6333 return false;
6334 if (off % size != 0)
6335 return false; 6342 return false;
6336 6343 }
6337 return true; 6344 return true;
6338} 6345}
6339 6346
@@ -7418,6 +7425,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
7418 int off; 7425 int off;
7419#endif 7426#endif
7420 7427
7428 /* convert ctx uses the fact sg element is first in struct */
7429 BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);
7430
7421 switch (si->off) { 7431 switch (si->off) {
7422 case offsetof(struct sk_msg_md, data): 7432 case offsetof(struct sk_msg_md, data):
7423 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data), 7433 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
@@ -7530,6 +7540,12 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
7530 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, 7540 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7531 offsetof(struct sock_common, skc_num)); 7541 offsetof(struct sock_common, skc_num));
7532 break; 7542 break;
7543
7544 case offsetof(struct sk_msg_md, size):
7545 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
7546 si->dst_reg, si->src_reg,
7547 offsetof(struct sk_msg_sg, size));
7548 break;
7533 } 7549 }
7534 7550
7535 return insn - insn_buf; 7551 return insn - insn_buf;
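Illustrative aside (not part of the patch): the filter.c hunks above whitelist the sk_msg_md fields explicitly, enforce naturally aligned loads, and wire up the new size field from sk_msg_sg.size. A minimal SK_MSG program sketch that reads it; the __section macro is a local convenience and the 4 KiB cutoff is arbitrary:

```c
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>

#define __section(NAME) __attribute__((section(NAME), used))

/* Pass small messages, drop anything larger than 4 KiB. msg->size is the
 * new field: the total number of bytes in the sk_msg.
 */
__section("sk_msg")
int msg_size_filter(struct sk_msg_md *msg)
{
	__u32 total = msg->size;	/* must be an aligned 4-byte load */

	return total > 4096 ? SK_DROP : SK_PASS;
}

char _license[] __section("license") = "GPL";
```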
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 56a99d0c9aa0..86c9726fced8 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -403,7 +403,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
403 msg->skb = skb; 403 msg->skb = skb;
404 404
405 sk_psock_queue_msg(psock, msg); 405 sk_psock_queue_msg(psock, msg);
406 sk->sk_data_ready(sk); 406 sk_psock_data_ready(sk, psock);
407 return copied; 407 return copied;
408} 408}
409 409
@@ -572,6 +572,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
572{ 572{
573 rcu_assign_sk_user_data(sk, NULL); 573 rcu_assign_sk_user_data(sk, NULL);
574 sk_psock_cork_free(psock); 574 sk_psock_cork_free(psock);
575 sk_psock_zap_ingress(psock);
575 sk_psock_restore_proto(sk, psock); 576 sk_psock_restore_proto(sk, psock);
576 577
577 write_lock_bh(&sk->sk_callback_lock); 578 write_lock_bh(&sk->sk_callback_lock);
@@ -669,6 +670,22 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
669 bool ingress; 670 bool ingress;
670 671
671 switch (verdict) { 672 switch (verdict) {
673 case __SK_PASS:
674 sk_other = psock->sk;
675 if (sock_flag(sk_other, SOCK_DEAD) ||
676 !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
677 goto out_free;
678 }
679 if (atomic_read(&sk_other->sk_rmem_alloc) <=
680 sk_other->sk_rcvbuf) {
681 struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
682
683 tcp->bpf.flags |= BPF_F_INGRESS;
684 skb_queue_tail(&psock->ingress_skb, skb);
685 schedule_work(&psock->work);
686 break;
687 }
688 goto out_free;
672 case __SK_REDIRECT: 689 case __SK_REDIRECT:
673 sk_other = tcp_skb_bpf_redirect_fetch(skb); 690 sk_other = tcp_skb_bpf_redirect_fetch(skb);
674 if (unlikely(!sk_other)) 691 if (unlikely(!sk_other))
@@ -735,7 +752,7 @@ static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
735} 752}
736 753
737/* Called with socket lock held. */ 754/* Called with socket lock held. */
738static void sk_psock_data_ready(struct sock *sk) 755static void sk_psock_strp_data_ready(struct sock *sk)
739{ 756{
740 struct sk_psock *psock; 757 struct sk_psock *psock;
741 758
@@ -783,7 +800,7 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
783 return; 800 return;
784 801
785 parser->saved_data_ready = sk->sk_data_ready; 802 parser->saved_data_ready = sk->sk_data_ready;
786 sk->sk_data_ready = sk_psock_data_ready; 803 sk->sk_data_ready = sk_psock_strp_data_ready;
787 sk->sk_write_space = sk_psock_write_space; 804 sk->sk_write_space = sk_psock_write_space;
788 parser->enabled = true; 805 parser->enabled = true;
789} 806}
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index a47c1cdf90fc..1bb7321a256d 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -8,6 +8,7 @@
8#include <linux/wait.h> 8#include <linux/wait.h>
9 9
10#include <net/inet_common.h> 10#include <net/inet_common.h>
11#include <net/tls.h>
11 12
12static bool tcp_bpf_stream_read(const struct sock *sk) 13static bool tcp_bpf_stream_read(const struct sock *sk)
13{ 14{
@@ -198,7 +199,7 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
198 msg->sg.start = i; 199 msg->sg.start = i;
199 msg->sg.size -= apply_bytes; 200 msg->sg.size -= apply_bytes;
200 sk_psock_queue_msg(psock, tmp); 201 sk_psock_queue_msg(psock, tmp);
201 sk->sk_data_ready(sk); 202 sk_psock_data_ready(sk, psock);
202 } else { 203 } else {
203 sk_msg_free(sk, tmp); 204 sk_msg_free(sk, tmp);
204 kfree(tmp); 205 kfree(tmp);
@@ -218,6 +219,8 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
218 u32 off; 219 u32 off;
219 220
220 while (1) { 221 while (1) {
222 bool has_tx_ulp;
223
221 sge = sk_msg_elem(msg, msg->sg.start); 224 sge = sk_msg_elem(msg, msg->sg.start);
222 size = (apply && apply_bytes < sge->length) ? 225 size = (apply && apply_bytes < sge->length) ?
223 apply_bytes : sge->length; 226 apply_bytes : sge->length;
@@ -226,7 +229,15 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
226 229
227 tcp_rate_check_app_limited(sk); 230 tcp_rate_check_app_limited(sk);
228retry: 231retry:
229 ret = do_tcp_sendpages(sk, page, off, size, flags); 232 has_tx_ulp = tls_sw_has_ctx_tx(sk);
233 if (has_tx_ulp) {
234 flags |= MSG_SENDPAGE_NOPOLICY;
235 ret = kernel_sendpage_locked(sk,
236 page, off, size, flags);
237 } else {
238 ret = do_tcp_sendpages(sk, page, off, size, flags);
239 }
240
230 if (ret <= 0) 241 if (ret <= 0)
231 return ret; 242 return ret;
232 if (apply) 243 if (apply)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 28887cf628b8..78cb4a584080 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -55,6 +55,8 @@ enum {
55 55
56static struct proto *saved_tcpv6_prot; 56static struct proto *saved_tcpv6_prot;
57static DEFINE_MUTEX(tcpv6_prot_mutex); 57static DEFINE_MUTEX(tcpv6_prot_mutex);
58static struct proto *saved_tcpv4_prot;
59static DEFINE_MUTEX(tcpv4_prot_mutex);
58static LIST_HEAD(device_list); 60static LIST_HEAD(device_list);
59static DEFINE_SPINLOCK(device_spinlock); 61static DEFINE_SPINLOCK(device_spinlock);
60static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG]; 62static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
@@ -700,6 +702,16 @@ static int tls_init(struct sock *sk)
700 mutex_unlock(&tcpv6_prot_mutex); 702 mutex_unlock(&tcpv6_prot_mutex);
701 } 703 }
702 704
705 if (ip_ver == TLSV4 &&
706 unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
707 mutex_lock(&tcpv4_prot_mutex);
708 if (likely(sk->sk_prot != saved_tcpv4_prot)) {
709 build_protos(tls_prots[TLSV4], sk->sk_prot);
710 smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
711 }
712 mutex_unlock(&tcpv4_prot_mutex);
713 }
714
703 ctx->tx_conf = TLS_BASE; 715 ctx->tx_conf = TLS_BASE;
704 ctx->rx_conf = TLS_BASE; 716 ctx->rx_conf = TLS_BASE;
705 update_sk_prot(sk, ctx); 717 update_sk_prot(sk, ctx);
@@ -731,8 +743,6 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
731 743
732static int __init tls_register(void) 744static int __init tls_register(void)
733{ 745{
734 build_protos(tls_prots[TLSV4], &tcp_prot);
735
736 tls_sw_proto_ops = inet_stream_ops; 746 tls_sw_proto_ops = inet_stream_ops;
737 tls_sw_proto_ops.splice_read = tls_sw_splice_read; 747 tls_sw_proto_ops.splice_read = tls_sw_splice_read;
738 748
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d4ecc66464e6..5aee9ae5ca53 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -686,12 +686,13 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
686 struct sk_psock *psock; 686 struct sk_psock *psock;
687 struct sock *sk_redir; 687 struct sock *sk_redir;
688 struct tls_rec *rec; 688 struct tls_rec *rec;
689 bool enospc, policy;
689 int err = 0, send; 690 int err = 0, send;
690 u32 delta = 0; 691 u32 delta = 0;
691 bool enospc;
692 692
693 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
693 psock = sk_psock_get(sk); 694 psock = sk_psock_get(sk);
694 if (!psock) 695 if (!psock || !policy)
695 return tls_push_record(sk, flags, record_type); 696 return tls_push_record(sk, flags, record_type);
696more_data: 697more_data:
697 enospc = sk_msg_full(msg); 698 enospc = sk_msg_full(msg);
@@ -1017,8 +1018,8 @@ send_end:
1017 return copied ? copied : ret; 1018 return copied ? copied : ret;
1018} 1019}
1019 1020
1020int tls_sw_sendpage(struct sock *sk, struct page *page, 1021int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1021 int offset, size_t size, int flags) 1022 int offset, size_t size, int flags)
1022{ 1023{
1023 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1024 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1024 struct tls_context *tls_ctx = tls_get_ctx(sk); 1025 struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -1033,15 +1034,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
1033 int ret = 0; 1034 int ret = 0;
1034 bool eor; 1035 bool eor;
1035 1036
1036 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1037 MSG_SENDPAGE_NOTLAST))
1038 return -ENOTSUPP;
1039
1040 /* No MSG_EOR from splice, only look at MSG_MORE */
1041 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST)); 1037 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
1042
1043 lock_sock(sk);
1044
1045 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1038 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1046 1039
1047 /* Wait till there is any pending write on socket */ 1040 /* Wait till there is any pending write on socket */
@@ -1145,10 +1138,34 @@ wait_for_memory:
1145 } 1138 }
1146sendpage_end: 1139sendpage_end:
1147 ret = sk_stream_error(sk, flags, ret); 1140 ret = sk_stream_error(sk, flags, ret);
1148 release_sock(sk);
1149 return copied ? copied : ret; 1141 return copied ? copied : ret;
1150} 1142}
1151 1143
1144int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1145 int offset, size_t size, int flags)
1146{
1147 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1148 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1149 return -ENOTSUPP;
1150
1151 return tls_sw_do_sendpage(sk, page, offset, size, flags);
1152}
1153
1154int tls_sw_sendpage(struct sock *sk, struct page *page,
1155 int offset, size_t size, int flags)
1156{
1157 int ret;
1158
1159 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1160 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1161 return -ENOTSUPP;
1162
1163 lock_sock(sk);
1164 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1165 release_sock(sk);
1166 return ret;
1167}
1168
1152static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock, 1169static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1153 int flags, long timeo, int *err) 1170 int flags, long timeo, int *err)
1154{ 1171{
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 07156f43d295..a03268454a27 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -366,6 +366,7 @@ static int xsk_release(struct socket *sock)
366 366
367 xskq_destroy(xs->rx); 367 xskq_destroy(xs->rx);
368 xskq_destroy(xs->tx); 368 xskq_destroy(xs->tx);
369 xdp_put_umem(xs->umem);
369 370
370 sock_orphan(sk); 371 sock_orphan(sk);
371 sock->sk = NULL; 372 sock->sk = NULL;
@@ -713,18 +714,6 @@ static const struct proto_ops xsk_proto_ops = {
713 .sendpage = sock_no_sendpage, 714 .sendpage = sock_no_sendpage,
714}; 715};
715 716
716static void xsk_destruct(struct sock *sk)
717{
718 struct xdp_sock *xs = xdp_sk(sk);
719
720 if (!sock_flag(sk, SOCK_DEAD))
721 return;
722
723 xdp_put_umem(xs->umem);
724
725 sk_refcnt_debug_dec(sk);
726}
727
728static int xsk_create(struct net *net, struct socket *sock, int protocol, 717static int xsk_create(struct net *net, struct socket *sock, int protocol,
729 int kern) 718 int kern)
730{ 719{
@@ -751,9 +740,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
751 740
752 sk->sk_family = PF_XDP; 741 sk->sk_family = PF_XDP;
753 742
754 sk->sk_destruct = xsk_destruct;
755 sk_refcnt_debug_inc(sk);
756
757 sock_set_flag(sk, SOCK_RCU_FREE); 743 sock_set_flag(sk, SOCK_RCU_FREE);
758 744
759 xs = xdp_sk(sk); 745 xs = xdp_sk(sk);
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 5318dcb2085e..64b001b4f777 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -128,6 +128,10 @@ OPTIONS
128 -f, --bpffs 128 -f, --bpffs
129 Show file names of pinned maps. 129 Show file names of pinned maps.
130 130
131 -n, --nomount
132 Do not automatically attempt to mount any virtual file system
133 (such as tracefs or BPF virtual file system) when necessary.
134
131EXAMPLES 135EXAMPLES
132======== 136========
133**# bpftool map show** 137**# bpftool map show**
@@ -170,6 +174,61 @@ The following three commands are equivalent:
170| **# bpftool map pin id 10 /sys/fs/bpf/map** 174| **# bpftool map pin id 10 /sys/fs/bpf/map**
171| **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00** 175| **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00**
172 176
177Note that map update can also be used to change the program references
178held by a program array map. This can be used, for example, to change the
179programs used for tail-call jumps at runtime, without having to reload the
180entry-point program. Below is an example of this use case: we load a program
181defining a prog array map, whose main function contains a tail call
182to other programs that can be used either to "process" packets or to "debug"
183processing. Note that the prog array map MUST be pinned into the BPF virtual
184file system for the map update to work successfully, as the kernel flushes prog
185array maps when they have no more references from user space (and the update
186would be lost as soon as bpftool exits).
187
188|
189| **# bpftool prog loadall tail_calls.o /sys/fs/bpf/foo type xdp**
190| **# bpftool prog --bpffs**
191
192::
193
194 545: xdp name main_func tag 674b4b5597193dc3 gpl
195 loaded_at 2018-12-12T15:02:58+0000 uid 0
196 xlated 240B jited 257B memlock 4096B map_ids 294
197 pinned /sys/fs/bpf/foo/xdp
198 546: xdp name bpf_func_process tag e369a529024751fc gpl
199 loaded_at 2018-12-12T15:02:58+0000 uid 0
200 xlated 200B jited 164B memlock 4096B
201 pinned /sys/fs/bpf/foo/process
202 547: xdp name bpf_func_debug tag 0b597868bc7f0976 gpl
203 loaded_at 2018-12-12T15:02:58+0000 uid 0
204 xlated 200B jited 164B memlock 4096B
205 pinned /sys/fs/bpf/foo/debug
206
207**# bpftool map**
208
209::
210
211 294: prog_array name jmp_table flags 0x0
212 key 4B value 4B max_entries 1 memlock 4096B
213 owner_prog_type xdp owner jited
214
215|
216| **# bpftool map pin id 294 /sys/fs/bpf/bar**
217| **# bpftool map dump pinned /sys/fs/bpf/bar**
218
219::
220
221 Found 0 elements
222
223|
224| **# bpftool map update pinned /sys/fs/bpf/bar key 0 0 0 0 value pinned /sys/fs/bpf/foo/debug**
225| **# bpftool map dump pinned /sys/fs/bpf/bar**
226
227::
228
229 key: 00 00 00 00 value: 22 02 00 00
230 Found 1 element
231
173SEE ALSO 232SEE ALSO
174======== 233========
175 **bpf**\ (2), 234 **bpf**\ (2),
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index bb1aeb98b6da..58c8369b77dd 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -158,83 +158,98 @@ OPTIONS
158 When showing BPF programs, show file names of pinned 158 When showing BPF programs, show file names of pinned
159 programs. 159 programs.
160 160
161 -m, --mapcompat
162 Allow loading maps with unknown map definitions.
163
164 -n, --nomount
165 Do not automatically attempt to mount any virtual file system
166 (such as tracefs or BPF virtual file system) when necessary.
167
161EXAMPLES 168EXAMPLES
162======== 169========
163**# bpftool prog show** 170**# bpftool prog show**
171
164:: 172::
165 173
166 10: xdp name some_prog tag 005a3d2123620c8b gpl 174 10: xdp name some_prog tag 005a3d2123620c8b gpl
167 loaded_at Sep 29/20:11 uid 0 175 loaded_at 2017-09-29T20:11:00+0000 uid 0
168 xlated 528B jited 370B memlock 4096B map_ids 10 176 xlated 528B jited 370B memlock 4096B map_ids 10
169 177
170**# bpftool --json --pretty prog show** 178**# bpftool --json --pretty prog show**
171 179
172:: 180::
173 181
174 { 182 [{
175 "programs": [{ 183 "id": 10,
176 "id": 10, 184 "type": "xdp",
177 "type": "xdp", 185 "tag": "005a3d2123620c8b",
178 "tag": "005a3d2123620c8b", 186 "gpl_compatible": true,
179 "gpl_compatible": true, 187 "loaded_at": 1506715860,
180 "loaded_at": "Sep 29/20:11", 188 "uid": 0,
181 "uid": 0, 189 "bytes_xlated": 528,
182 "bytes_xlated": 528, 190 "jited": true,
183 "jited": true, 191 "bytes_jited": 370,
184 "bytes_jited": 370, 192 "bytes_memlock": 4096,
185 "bytes_memlock": 4096, 193 "map_ids": [10
186 "map_ids": [10 194 ]
187 ] 195 }
188 } 196 ]
189 ]
190 }
191 197
192| 198|
193| **# bpftool prog dump xlated id 10 file /tmp/t** 199| **# bpftool prog dump xlated id 10 file /tmp/t**
194| **# ls -l /tmp/t** 200| **# ls -l /tmp/t**
195| -rw------- 1 root root 560 Jul 22 01:42 /tmp/t
196 201
197**# bpftool prog dum jited tag 005a3d2123620c8b** 202::
203
204 -rw------- 1 root root 560 Jul 22 01:42 /tmp/t
205
206**# bpftool prog dump jited tag 005a3d2123620c8b**
198 207
199:: 208::
200 209
201 push %rbp 210 0: push %rbp
202 mov %rsp,%rbp 211 1: mov %rsp,%rbp
203 sub $0x228,%rsp 212 2: sub $0x228,%rsp
204 sub $0x28,%rbp 213 3: sub $0x28,%rbp
205 mov %rbx,0x0(%rbp) 214 4: mov %rbx,0x0(%rbp)
206 215
207| 216|
208| **# mount -t bpf none /sys/fs/bpf/** 217| **# mount -t bpf none /sys/fs/bpf/**
209| **# bpftool prog pin id 10 /sys/fs/bpf/prog** 218| **# bpftool prog pin id 10 /sys/fs/bpf/prog**
210| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2** 219| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2**
211| **# ls -l /sys/fs/bpf/** 220| **# ls -l /sys/fs/bpf/**
212| -rw------- 1 root root 0 Jul 22 01:43 prog
213| -rw------- 1 root root 0 Jul 22 01:44 prog2
214 221
215**# bpftool prog dum jited pinned /sys/fs/bpf/prog opcodes** 222::
223
224 -rw------- 1 root root 0 Jul 22 01:43 prog
225 -rw------- 1 root root 0 Jul 22 01:44 prog2
226
227**# bpftool prog dump jited pinned /sys/fs/bpf/prog opcodes**
216 228
217:: 229::
218 230
219 push %rbp 231 0: push %rbp
220 55 232 55
221 mov %rsp,%rbp 233 1: mov %rsp,%rbp
222 48 89 e5 234 48 89 e5
223 sub $0x228,%rsp 235 4: sub $0x228,%rsp
224 48 81 ec 28 02 00 00 236 48 81 ec 28 02 00 00
225 sub $0x28,%rbp 237 b: sub $0x28,%rbp
226 48 83 ed 28 238 48 83 ed 28
227 mov %rbx,0x0(%rbp) 239 f: mov %rbx,0x0(%rbp)
228 48 89 5d 00 240 48 89 5d 00
229 241
230| 242|
231| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7** 243| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7**
232| **# bpftool prog show pinned /sys/fs/bpf/xdp1** 244| **# bpftool prog show pinned /sys/fs/bpf/xdp1**
233| 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl 245
234| loaded_at 2018-06-25T16:17:31-0700 uid 0 246::
235| xlated 488B jited 336B memlock 4096B map_ids 7 247
236| **# rm /sys/fs/bpf/xdp1** 248 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl
237| 249 loaded_at 2018-06-25T16:17:31-0700 uid 0
250 xlated 488B jited 336B memlock 4096B map_ids 7
251
252**# rm /sys/fs/bpf/xdp1**
238 253
239SEE ALSO 254SEE ALSO
240======== 255========
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 129b7a9c0f9b..e1677e81ed59 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -60,6 +60,10 @@ OPTIONS
60 -m, --mapcompat 60 -m, --mapcompat
61 Allow loading maps with unknown map definitions. 61 Allow loading maps with unknown map definitions.
62 62
63 -n, --nomount
64 Do not automatically attempt to mount any virtual file system
65 (such as tracefs or BPF virtual file system) when necessary.
66
63 67
64SEE ALSO 68SEE ALSO
65======== 69========
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 1bea6b979082..492f0f24e2d3 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -35,8 +35,6 @@ $(LIBBPF)-clean:
35prefix ?= /usr/local 35prefix ?= /usr/local
36bash_compdir ?= /usr/share/bash-completion/completions 36bash_compdir ?= /usr/share/bash-completion/completions
37 37
38CC = gcc
39
40CFLAGS += -O2 38CFLAGS += -O2
41CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow -Wno-missing-field-initializers 39CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow -Wno-missing-field-initializers
42CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \ 40CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index a57febd6abb1..e4e4fab1b8c7 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -1,37 +1,8 @@
1# bpftool(8) bash completion -*- shell-script -*- 1# bpftool(8) bash completion -*- shell-script -*-
2# 2#
3# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
3# Copyright (C) 2017-2018 Netronome Systems, Inc. 4# Copyright (C) 2017-2018 Netronome Systems, Inc.
4# 5#
5# This software is dual licensed under the GNU General License
6# Version 2, June 1991 as shown in the file COPYING in the top-level
7# directory of this source tree or the BSD 2-Clause License provided
8# below. You have the option to license this software under the
9# complete terms of either license.
10#
11# The BSD 2-Clause License:
12#
13# Redistribution and use in source and binary forms, with or
14# without modification, are permitted provided that the following
15# conditions are met:
16#
17# 1. Redistributions of source code must retain the above
18# copyright notice, this list of conditions and the following
19# disclaimer.
20#
21# 2. Redistributions in binary form must reproduce the above
22# copyright notice, this list of conditions and the following
23# disclaimer in the documentation and/or other materials
24# provided with the distribution.
25#
26# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33# SOFTWARE.
34#
35# Author: Quentin Monnet <quentin.monnet@netronome.com> 6# Author: Quentin Monnet <quentin.monnet@netronome.com>
36 7
37# Takes a list of words in argument; each one of them is added to COMPREPLY if 8# Takes a list of words in argument; each one of them is added to COMPREPLY if
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 2392ccdc918f..3f0629edbca5 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* Copyright (c) 2018 Facebook */ 2/* Copyright (c) 2018 Facebook */
3 3
4#include <ctype.h> 4#include <ctype.h>
@@ -73,20 +73,17 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
73 return ret; 73 return ret;
74} 74}
75 75
76static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset, 76static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
77 const void *data, json_writer_t *jw, 77 const void *data, json_writer_t *jw,
78 bool is_plain_text) 78 bool is_plain_text)
79{ 79{
80 int left_shift_bits, right_shift_bits; 80 int left_shift_bits, right_shift_bits;
81 int nr_bits = BTF_INT_BITS(int_type);
82 int total_bits_offset;
83 int bytes_to_copy; 81 int bytes_to_copy;
84 int bits_to_copy; 82 int bits_to_copy;
85 __u64 print_num; 83 __u64 print_num;
86 84
87 total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); 85 data += BITS_ROUNDDOWN_BYTES(bit_offset);
88 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 86 bit_offset = BITS_PER_BYTE_MASKED(bit_offset);
89 bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
90 bits_to_copy = bit_offset + nr_bits; 87 bits_to_copy = bit_offset + nr_bits;
91 bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); 88 bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
92 89
@@ -109,6 +106,22 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
109 jsonw_printf(jw, "%llu", print_num); 106 jsonw_printf(jw, "%llu", print_num);
110} 107}
111 108
109
110static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
111 const void *data, json_writer_t *jw,
112 bool is_plain_text)
113{
114 int nr_bits = BTF_INT_BITS(int_type);
115 int total_bits_offset;
116
117 /* bits_offset is at most 7.
118 * BTF_INT_OFFSET() cannot exceed 64 bits.
119 */
120 total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
121 btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw,
122 is_plain_text);
123}
124
112static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset, 125static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
113 const void *data, json_writer_t *jw, 126 const void *data, json_writer_t *jw,
114 bool is_plain_text) 127 bool is_plain_text)
@@ -180,6 +193,7 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
180 const struct btf_type *t; 193 const struct btf_type *t;
181 struct btf_member *m; 194 struct btf_member *m;
182 const void *data_off; 195 const void *data_off;
196 int kind_flag;
183 int ret = 0; 197 int ret = 0;
184 int i, vlen; 198 int i, vlen;
185 199
@@ -187,18 +201,32 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
187 if (!t) 201 if (!t)
188 return -EINVAL; 202 return -EINVAL;
189 203
204 kind_flag = BTF_INFO_KFLAG(t->info);
190 vlen = BTF_INFO_VLEN(t->info); 205 vlen = BTF_INFO_VLEN(t->info);
191 jsonw_start_object(d->jw); 206 jsonw_start_object(d->jw);
192 m = (struct btf_member *)(t + 1); 207 m = (struct btf_member *)(t + 1);
193 208
194 for (i = 0; i < vlen; i++) { 209 for (i = 0; i < vlen; i++) {
195 data_off = data + BITS_ROUNDDOWN_BYTES(m[i].offset); 210 __u32 bit_offset = m[i].offset;
211 __u32 bitfield_size = 0;
212
213 if (kind_flag) {
214 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(bit_offset);
215 bit_offset = BTF_MEMBER_BIT_OFFSET(bit_offset);
216 }
217
196 jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); 218 jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
197 ret = btf_dumper_do_type(d, m[i].type, 219 if (bitfield_size) {
198 BITS_PER_BYTE_MASKED(m[i].offset), 220 btf_dumper_bitfield(bitfield_size, bit_offset,
199 data_off); 221 data, d->jw, d->is_plain_text);
200 if (ret) 222 } else {
201 break; 223 data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
224 ret = btf_dumper_do_type(d, m[i].type,
225 BITS_PER_BYTE_MASKED(bit_offset),
226 data_off);
227 if (ret)
228 break;
229 }
202 } 230 }
203 231
204 jsonw_end_object(d->jw); 232 jsonw_end_object(d->jw);
@@ -285,6 +313,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
285 313
286 switch (BTF_INFO_KIND(t->info)) { 314 switch (BTF_INFO_KIND(t->info)) {
287 case BTF_KIND_INT: 315 case BTF_KIND_INT:
316 case BTF_KIND_TYPEDEF:
288 BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off)); 317 BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
289 break; 318 break;
290 case BTF_KIND_STRUCT: 319 case BTF_KIND_STRUCT:
@@ -308,10 +337,11 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
308 BTF_PRINT_TYPE(t->type); 337 BTF_PRINT_TYPE(t->type);
309 BTF_PRINT_ARG("* "); 338 BTF_PRINT_ARG("* ");
310 break; 339 break;
311 case BTF_KIND_UNKN:
312 case BTF_KIND_FWD: 340 case BTF_KIND_FWD:
313 case BTF_KIND_TYPEDEF: 341 BTF_PRINT_ARG("%s %s ",
314 return -1; 342 BTF_INFO_KFLAG(t->info) ? "union" : "struct",
343 btf__name_by_offset(btf, t->name_off));
344 break;
315 case BTF_KIND_VOLATILE: 345 case BTF_KIND_VOLATILE:
316 BTF_PRINT_ARG("volatile "); 346 BTF_PRINT_ARG("volatile ");
317 BTF_PRINT_TYPE(t->type); 347 BTF_PRINT_TYPE(t->type);
@@ -335,6 +365,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
335 if (pos == -1) 365 if (pos == -1)
336 return -1; 366 return -1;
337 break; 367 break;
368 case BTF_KIND_UNKN:
338 default: 369 default:
339 return -1; 370 return -1;
340 } 371 }
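The bitfield handling added to btf_dumper.c above reduces to a small piece of arithmetic: round the member's bit offset down to a byte boundary, copy just enough bytes into a 64-bit scratch value, then shift and mask. The following is a minimal standalone sketch of that extraction, not code from the patch: the BITS_* helpers are re-derived here for illustration, it assumes a little-endian host (the real dumper also handles big endian), and it assumes bit_offset + nr_bits stays within 64 bits.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE			8
#define BITS_PER_BYTE_MASKED(bits)	((bits) & (BITS_PER_BYTE - 1))
#define BITS_ROUNDDOWN_BYTES(bits)	((bits) / BITS_PER_BYTE)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))

/* Extract an nr_bits-wide bitfield starting at bit_offset from data. */
static uint64_t extract_bitfield(const uint8_t *data, int bit_offset,
				 int nr_bits)
{
	uint64_t val = 0;
	int bytes_to_copy;

	data += BITS_ROUNDDOWN_BYTES(bit_offset);	/* skip whole bytes */
	bit_offset = BITS_PER_BYTE_MASKED(bit_offset);	/* now 0..7 */
	bytes_to_copy = BITS_ROUNDUP_BYTES(bit_offset + nr_bits);

	memcpy(&val, data, bytes_to_copy);		/* little-endian host */
	val >>= bit_offset;				/* drop leading bits */
	if (nr_bits < 64)
		val &= (1ULL << nr_bits) - 1;		/* keep nr_bits bits */
	return val;
}

int main(void)
{
	/* struct { unsigned a:4; unsigned b:4; } x = { .a = 3, .b = 0xa }; */
	uint8_t raw[4] = { 0xa3, 0, 0, 0 };

	printf("a=%llu b=%llu\n",
	       (unsigned long long)extract_bitfield(raw, 0, 4),
	       (unsigned long long)extract_bitfield(raw, 4, 4));
	return 0;
}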
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index f30b3a4a840b..31f0db41513f 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -1,39 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* 2/* Copyright (C) 2018 Netronome Systems, Inc. */
3 * Copyright (C) 2018 Netronome Systems, Inc.
4 *
5 * This software is dual licensed under the GNU General License Version 2,
6 * June 1991 as shown in the file COPYING in the top-level directory of this
7 * source tree or the BSD 2-Clause License provided below. You have the
8 * option to license this software under the complete terms of either license.
9 *
10 * The BSD 2-Clause License:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * 1. Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * 2. Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37 3
38#include <linux/list.h> 4#include <linux/list.h>
39#include <stdlib.h> 5#include <stdlib.h>
diff --git a/tools/bpf/bpftool/cfg.h b/tools/bpf/bpftool/cfg.h
index 2cc9bd990b13..e144257ea6d2 100644
--- a/tools/bpf/bpftool/cfg.h
+++ b/tools/bpf/bpftool/cfg.h
@@ -1,39 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
2/* 2/* Copyright (C) 2018 Netronome Systems, Inc. */
3 * Copyright (C) 2018 Netronome Systems, Inc.
4 *
5 * This software is dual licensed under the GNU General License Version 2,
6 * June 1991 as shown in the file COPYING in the top-level directory of this
7 * source tree or the BSD 2-Clause License provided below. You have the
8 * option to license this software under the complete terms of either license.
9 *
10 * The BSD 2-Clause License:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * 1. Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * 2. Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37 3
38#ifndef __BPF_TOOL_CFG_H 4#ifndef __BPF_TOOL_CFG_H
39#define __BPF_TOOL_CFG_H 5#define __BPF_TOOL_CFG_H
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index ee7a9765c6b3..4b5c8da2a7c0 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2// Copyright (C) 2017 Facebook 2// Copyright (C) 2017 Facebook
3// Author: Roman Gushchin <guro@fb.com> 3// Author: Roman Gushchin <guro@fb.com>
4 4
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 172d3761d9ab..897483457bf0 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -1,35 +1,5 @@
1/* 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 * Copyright (C) 2017-2018 Netronome Systems, Inc. 2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 *
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33 3
34#include <ctype.h> 4#include <ctype.h>
35#include <errno.h> 5#include <errno.h>
@@ -58,7 +28,7 @@
58#define BPF_FS_MAGIC 0xcafe4a11 28#define BPF_FS_MAGIC 0xcafe4a11
59#endif 29#endif
60 30
61void p_err(const char *fmt, ...) 31void __printf(1, 2) p_err(const char *fmt, ...)
62{ 32{
63 va_list ap; 33 va_list ap;
64 34
@@ -76,7 +46,7 @@ void p_err(const char *fmt, ...)
76 va_end(ap); 46 va_end(ap);
77} 47}
78 48
79void p_info(const char *fmt, ...) 49void __printf(1, 2) p_info(const char *fmt, ...)
80{ 50{
81 va_list ap; 51 va_list ap;
82 52
@@ -106,7 +76,8 @@ void set_max_rlimit(void)
106 setrlimit(RLIMIT_MEMLOCK, &rinf); 76 setrlimit(RLIMIT_MEMLOCK, &rinf);
107} 77}
108 78
109static int mnt_bpffs(const char *target, char *buff, size_t bufflen) 79static int
80mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
110{ 81{
111 bool bind_done = false; 82 bool bind_done = false;
112 83
@@ -128,15 +99,29 @@ static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
128 bind_done = true; 99 bind_done = true;
129 } 100 }
130 101
131 if (mount("bpf", target, "bpf", 0, "mode=0700")) { 102 if (mount(type, target, type, 0, "mode=0700")) {
132 snprintf(buff, bufflen, "mount -t bpf bpf %s failed: %s", 103 snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
133 target, strerror(errno)); 104 type, type, target, strerror(errno));
134 return -1; 105 return -1;
135 } 106 }
136 107
137 return 0; 108 return 0;
138} 109}
139 110
111int mount_tracefs(const char *target)
112{
113 char err_str[ERR_MAX_LEN];
114 int err;
115
116 err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
117 if (err) {
118 err_str[ERR_MAX_LEN - 1] = '\0';
119 p_err("can't mount tracefs: %s", err_str);
120 }
121
122 return err;
123}
124
140int open_obj_pinned(char *path, bool quiet) 125int open_obj_pinned(char *path, bool quiet)
141{ 126{
142 int fd; 127 int fd;
@@ -192,7 +177,13 @@ int mount_bpffs_for_pin(const char *name)
192 /* nothing to do if already mounted */ 177 /* nothing to do if already mounted */
193 goto out_free; 178 goto out_free;
194 179
195 err = mnt_bpffs(dir, err_str, ERR_MAX_LEN); 180 if (block_mount) {
181 p_err("no BPF file system found, not mounting it due to --nomount option");
182 err = -1;
183 goto out_free;
184 }
185
186 err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
196 if (err) { 187 if (err) {
197 err_str[ERR_MAX_LEN - 1] = '\0'; 188 err_str[ERR_MAX_LEN - 1] = '\0';
198 p_err("can't mount BPF file system to pin the object (%s): %s", 189 p_err("can't mount BPF file system to pin the object (%s): %s",
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index f381f8628ce9..3ef3093560ba 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
1/* 2/*
2 * Based on: 3 * Based on:
3 * 4 *
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index c6eef76322ae..bff7ee026680 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
1/* 2/*
2 * Simple streaming JSON writer 3 * Simple streaming JSON writer
3 * 4 *
@@ -19,6 +20,7 @@
19#include <malloc.h> 20#include <malloc.h>
20#include <inttypes.h> 21#include <inttypes.h>
21#include <stdint.h> 22#include <stdint.h>
23#include <linux/compiler.h>
22 24
23#include "json_writer.h" 25#include "json_writer.h"
24 26
@@ -156,7 +158,8 @@ void jsonw_name(json_writer_t *self, const char *name)
156 putc(' ', self->out); 158 putc(' ', self->out);
157} 159}
158 160
159void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap) 161void __printf(2, 0)
162jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
160{ 163{
161 jsonw_eor(self); 164 jsonw_eor(self);
162 putc('"', self->out); 165 putc('"', self->out);
@@ -164,7 +167,7 @@ void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
164 putc('"', self->out); 167 putc('"', self->out);
165} 168}
166 169
167void jsonw_printf(json_writer_t *self, const char *fmt, ...) 170void __printf(2, 3) jsonw_printf(json_writer_t *self, const char *fmt, ...)
168{ 171{
169 va_list ap; 172 va_list ap;
170 173
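The __printf(n, m) annotations added to p_err(), p_info(), jsonw_vprintf_enquote() and jsonw_printf() map to the compiler's format attribute, which lets GCC/Clang type-check the variadic arguments against the format string at each call site. A small self-contained sketch of the same idea, with __printf spelled out directly instead of coming from the tree's compiler headers:

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

static void __printf(1, 2) log_err(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

int main(void)
{
	log_err("loaded %d programs", 3);	/* checked, OK */
	/* log_err("loaded %d programs", "three"); would now trigger a
	 * -Wformat warning at compile time instead of misbehaving at
	 * run time.
	 */
	return 0;
}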
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index 0fa2fb1b6351..c1ab51aed99c 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
1/* 2/*
2 * Simple streaming JSON writer 3 * Simple streaming JSON writer
3 * 4 *
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 5c4c1cd5a7ba..f44a1c2c4ea0 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -1,35 +1,5 @@
1/* 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 * Copyright (C) 2017-2018 Netronome Systems, Inc. 2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 *
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33 3
34#include <ctype.h> 4#include <ctype.h>
35#include <errno.h> 5#include <errno.h>
@@ -54,6 +24,7 @@ json_writer_t *json_wtr;
54bool pretty_output; 24bool pretty_output;
55bool json_output; 25bool json_output;
56bool show_pinned; 26bool show_pinned;
27bool block_mount;
57int bpf_flags; 28int bpf_flags;
58struct pinned_obj_table prog_table; 29struct pinned_obj_table prog_table;
59struct pinned_obj_table map_table; 30struct pinned_obj_table map_table;
@@ -343,6 +314,7 @@ int main(int argc, char **argv)
343 { "version", no_argument, NULL, 'V' }, 314 { "version", no_argument, NULL, 'V' },
344 { "bpffs", no_argument, NULL, 'f' }, 315 { "bpffs", no_argument, NULL, 'f' },
345 { "mapcompat", no_argument, NULL, 'm' }, 316 { "mapcompat", no_argument, NULL, 'm' },
317 { "nomount", no_argument, NULL, 'n' },
346 { 0 } 318 { 0 }
347 }; 319 };
348 int opt, ret; 320 int opt, ret;
@@ -351,13 +323,14 @@ int main(int argc, char **argv)
351 pretty_output = false; 323 pretty_output = false;
352 json_output = false; 324 json_output = false;
353 show_pinned = false; 325 show_pinned = false;
326 block_mount = false;
354 bin_name = argv[0]; 327 bin_name = argv[0];
355 328
356 hash_init(prog_table.table); 329 hash_init(prog_table.table);
357 hash_init(map_table.table); 330 hash_init(map_table.table);
358 331
359 opterr = 0; 332 opterr = 0;
360 while ((opt = getopt_long(argc, argv, "Vhpjfm", 333 while ((opt = getopt_long(argc, argv, "Vhpjfmn",
361 options, NULL)) >= 0) { 334 options, NULL)) >= 0) {
362 switch (opt) { 335 switch (opt) {
363 case 'V': 336 case 'V':
@@ -384,6 +357,9 @@ int main(int argc, char **argv)
384 case 'm': 357 case 'm':
385 bpf_flags = MAPS_RELAX_COMPAT; 358 bpf_flags = MAPS_RELAX_COMPAT;
386 break; 359 break;
360 case 'n':
361 block_mount = true;
362 break;
387 default: 363 default:
388 p_err("unrecognized option '%s'", argv[optind - 1]); 364 p_err("unrecognized option '%s'", argv[optind - 1]);
389 if (json_output) 365 if (json_output)
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 0b37599f8cda..052c91d4dc55 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -1,35 +1,5 @@
1/* 1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
2 * Copyright (C) 2017-2018 Netronome Systems, Inc. 2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 *
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33 3
34#ifndef __BPF_TOOL_H 4#ifndef __BPF_TOOL_H
35#define __BPF_TOOL_H 5#define __BPF_TOOL_H
@@ -74,7 +44,8 @@
74#define HELP_SPEC_PROGRAM \ 44#define HELP_SPEC_PROGRAM \
75 "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }" 45 "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }"
76#define HELP_SPEC_OPTIONS \ 46#define HELP_SPEC_OPTIONS \
77 "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} | {-m|--mapcompat}" 47 "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} |\n" \
48 "\t {-m|--mapcompat} | {-n|--nomount} }"
78#define HELP_SPEC_MAP \ 49#define HELP_SPEC_MAP \
79 "MAP := { id MAP_ID | pinned FILE }" 50 "MAP := { id MAP_ID | pinned FILE }"
80 51
@@ -115,6 +86,7 @@ extern const char *bin_name;
115extern json_writer_t *json_wtr; 86extern json_writer_t *json_wtr;
116extern bool json_output; 87extern bool json_output;
117extern bool show_pinned; 88extern bool show_pinned;
89extern bool block_mount;
118extern int bpf_flags; 90extern int bpf_flags;
119extern struct pinned_obj_table prog_table; 91extern struct pinned_obj_table prog_table;
120extern struct pinned_obj_table map_table; 92extern struct pinned_obj_table map_table;
@@ -128,6 +100,8 @@ void usage(void) __noreturn;
128 100
129void set_max_rlimit(void); 101void set_max_rlimit(void);
130 102
103int mount_tracefs(const char *target);
104
131struct pinned_obj_table { 105struct pinned_obj_table {
132 DECLARE_HASHTABLE(table, 16); 106 DECLARE_HASHTABLE(table, 16);
133}; 107};
@@ -177,8 +151,8 @@ int prog_parse_fd(int *argc, char ***argv);
177int map_parse_fd(int *argc, char ***argv); 151int map_parse_fd(int *argc, char ***argv);
178int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len); 152int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
179 153
180#ifdef HAVE_LIBBFD_SUPPORT
181struct bpf_prog_linfo; 154struct bpf_prog_linfo;
155#ifdef HAVE_LIBBFD_SUPPORT
182void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, 156void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
183 const char *arch, const char *disassembler_options, 157 const char *arch, const char *disassembler_options,
184 const struct btf *btf, 158 const struct btf *btf,
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 8469ea6cf1c8..2037e3dc864b 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1,35 +1,5 @@
1/* 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 * Copyright (C) 2017-2018 Netronome Systems, Inc. 2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 *
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33 3
34#include <assert.h> 4#include <assert.h>
35#include <errno.h> 5#include <errno.h>
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index bdaf4062e26e..0507dfaf7a8f 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* Copyright (C) 2018 Netronome Systems, Inc. */ 2/* Copyright (C) 2018 Netronome Systems, Inc. */
3/* This program is free software; you can redistribute it and/or 3/* This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public 4 * modify it under the terms of version 2 of the GNU General Public
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index d441bb7035ca..db0e7de49d49 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2// Copyright (C) 2018 Facebook 2// Copyright (C) 2018 Facebook
3 3
4#define _GNU_SOURCE 4#define _GNU_SOURCE
diff --git a/tools/bpf/bpftool/netlink_dumper.c b/tools/bpf/bpftool/netlink_dumper.c
index 4e9f4531269f..550a0f537eed 100644
--- a/tools/bpf/bpftool/netlink_dumper.c
+++ b/tools/bpf/bpftool/netlink_dumper.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2// Copyright (C) 2018 Facebook 2// Copyright (C) 2018 Facebook
3 3
4#include <stdlib.h> 4#include <stdlib.h>
diff --git a/tools/bpf/bpftool/netlink_dumper.h b/tools/bpf/bpftool/netlink_dumper.h
index e3516b586a34..774af6c62ef5 100644
--- a/tools/bpf/bpftool/netlink_dumper.h
+++ b/tools/bpf/bpftool/netlink_dumper.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
2// Copyright (C) 2018 Facebook 2// Copyright (C) 2018 Facebook
3 3
4#ifndef _NETLINK_DUMPER_H_ 4#ifndef _NETLINK_DUMPER_H_
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
index b76b77dcfd1f..f2a545e667c4 100644
--- a/tools/bpf/bpftool/perf.c
+++ b/tools/bpf/bpftool/perf.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2// Copyright (C) 2018 Facebook 2// Copyright (C) 2018 Facebook
3// Author: Yonghong Song <yhs@fb.com> 3// Author: Yonghong Song <yhs@fb.com>
4 4
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index b73b4e473948..2d1bb7d6ff51 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1,35 +1,5 @@
1/* 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 * Copyright (C) 2017-2018 Netronome Systems, Inc. 2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 *
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33 3
34#define _GNU_SOURCE 4#define _GNU_SOURCE
35#include <errno.h> 5#include <errno.h>
@@ -62,7 +32,7 @@ static const char * const attach_type_strings[] = {
62 [__MAX_BPF_ATTACH_TYPE] = NULL, 32 [__MAX_BPF_ATTACH_TYPE] = NULL,
63}; 33};
64 34
65enum bpf_attach_type parse_attach_type(const char *str) 35static enum bpf_attach_type parse_attach_type(const char *str)
66{ 36{
67 enum bpf_attach_type type; 37 enum bpf_attach_type type;
68 38
@@ -626,13 +596,6 @@ static int do_dump(int argc, char **argv)
626 goto err_free; 596 goto err_free;
627 } 597 }
628 598
629 if (func_info && !info.func_info) {
630 /* kernel.kptr_restrict is set. No func_info available. */
631 free(func_info);
632 func_info = NULL;
633 nr_finfo = 0;
634 }
635
636 if (linfo && info.nr_line_info != nr_linfo) { 599 if (linfo && info.nr_line_info != nr_linfo) {
637 p_err("incorrect nr_line_info %u vs. expected %u", 600 p_err("incorrect nr_line_info %u vs. expected %u",
638 info.nr_line_info, nr_linfo); 601 info.nr_line_info, nr_linfo);
@@ -835,7 +798,7 @@ struct map_replace {
835 char *name; 798 char *name;
836}; 799};
837 800
838int map_replace_compar(const void *p1, const void *p2) 801static int map_replace_compar(const void *p1, const void *p2)
839{ 802{
840 const struct map_replace *a = p1, *b = p2; 803 const struct map_replace *a = p1, *b = p2;
841 804
diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c
index 1fa8e513f590..e80a5c79b38f 100644
--- a/tools/bpf/bpftool/tracelog.c
+++ b/tools/bpf/bpftool/tracelog.c
@@ -54,7 +54,7 @@ find_tracefs_mnt_single(unsigned long magic, char *mnt, const char *mntpt)
54 return true; 54 return true;
55} 55}
56 56
57static bool find_tracefs_pipe(char *mnt) 57static bool get_tracefs_pipe(char *mnt)
58{ 58{
59 static const char * const known_mnts[] = { 59 static const char * const known_mnts[] = {
60 "/sys/kernel/debug/tracing", 60 "/sys/kernel/debug/tracing",
@@ -88,7 +88,20 @@ static bool find_tracefs_pipe(char *mnt)
88 fclose(fp); 88 fclose(fp);
89 89
90 /* The string from fscanf() might be truncated, check mnt is valid */ 90 /* The string from fscanf() might be truncated, check mnt is valid */
91 if (!found || validate_tracefs_mnt(mnt, TRACEFS_MAGIC)) 91 if (found && validate_tracefs_mnt(mnt, TRACEFS_MAGIC))
92 goto exit_found;
93
94 if (block_mount)
95 return false;
96
97 p_info("could not find tracefs, attempting to mount it now");
98 /* Most of the time, tracefs is automatically mounted by debugfs at
99 * /sys/kernel/debug/tracing when we try to access it. If we could not
100 * find it, it is likely that debugfs is not mounted. Let's give one
101 * attempt at mounting just tracefs at /sys/kernel/tracing.
102 */
103 strcpy(mnt, known_mnts[1]);
104 if (mount_tracefs(mnt))
92 return false; 105 return false;
93 106
94exit_found: 107exit_found:
@@ -115,17 +128,13 @@ int do_tracelog(int argc, char **argv)
115 .sa_handler = exit_tracelog 128 .sa_handler = exit_tracelog
116 }; 129 };
117 char trace_pipe[PATH_MAX]; 130 char trace_pipe[PATH_MAX];
118 bool found_trace_pipe;
119 size_t buff_len = 0; 131 size_t buff_len = 0;
120 132
121 if (json_output) 133 if (json_output)
122 jsonw_start_array(json_wtr); 134 jsonw_start_array(json_wtr);
123 135
124 found_trace_pipe = find_tracefs_pipe(trace_pipe); 136 if (!get_tracefs_pipe(trace_pipe))
125 if (!found_trace_pipe) {
126 p_err("could not find trace pipe, tracefs not mounted?");
127 return -1; 137 return -1;
128 }
129 138
130 trace_pipe_fd = fopen(trace_pipe, "r"); 139 trace_pipe_fd = fopen(trace_pipe, "r");
131 if (!trace_pipe_fd) { 140 if (!trace_pipe_fd) {
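get_tracefs_pipe() above probes the usual tracefs mount points and, as a last resort (unless --nomount was given), mounts tracefs at /sys/kernel/tracing itself. The probe boils down to a statfs() filesystem-magic check; a minimal standalone sketch of that check, with TRACEFS_MAGIC taken from <linux/magic.h>:

#include <stdio.h>
#include <sys/vfs.h>
#include <linux/magic.h>

/* Return 1 if path is backed by tracefs, 0 otherwise. */
static int is_tracefs(const char *path)
{
	struct statfs st;

	if (statfs(path, &st))
		return 0;	/* cannot stat it: treat as not mounted */
	return st.f_type == TRACEFS_MAGIC;
}

int main(void)
{
	printf("tracefs at /sys/kernel/tracing: %s\n",
	       is_tracefs("/sys/kernel/tracing") ? "yes" : "no");
	return 0;
}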
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index aef628dcccb6..7073dbe1ff27 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -1,39 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* 2/* Copyright (C) 2018 Netronome Systems, Inc. */
3 * Copyright (C) 2018 Netronome Systems, Inc.
4 *
5 * This software is dual licensed under the GNU General License Version 2,
6 * June 1991 as shown in the file COPYING in the top-level directory of this
7 * source tree or the BSD 2-Clause License provided below. You have the
8 * option to license this software under the complete terms of either license.
9 *
10 * The BSD 2-Clause License:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * 1. Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * 2. Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37 3
38#define _GNU_SOURCE 4#define _GNU_SOURCE
39#include <stdarg.h> 5#include <stdarg.h>
@@ -115,7 +81,7 @@ struct kernel_sym *kernel_syms_search(struct dump_data *dd,
115 sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL; 81 sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
116} 82}
117 83
118static void print_insn(void *private_data, const char *fmt, ...) 84static void __printf(2, 3) print_insn(void *private_data, const char *fmt, ...)
119{ 85{
120 va_list args; 86 va_list args;
121 87
@@ -124,7 +90,7 @@ static void print_insn(void *private_data, const char *fmt, ...)
124 va_end(args); 90 va_end(args);
125} 91}
126 92
127static void 93static void __printf(2, 3)
128print_insn_for_graph(void *private_data, const char *fmt, ...) 94print_insn_for_graph(void *private_data, const char *fmt, ...)
129{ 95{
130 char buf[64], *p; 96 char buf[64], *p;
@@ -155,7 +121,8 @@ print_insn_for_graph(void *private_data, const char *fmt, ...)
155 printf("%s", buf); 121 printf("%s", buf);
156} 122}
157 123
158static void print_insn_json(void *private_data, const char *fmt, ...) 124static void __printf(2, 3)
125print_insn_json(void *private_data, const char *fmt, ...)
159{ 126{
160 unsigned int l = strlen(fmt); 127 unsigned int l = strlen(fmt);
161 char chomped_fmt[l]; 128 char chomped_fmt[l];
diff --git a/tools/bpf/bpftool/xlated_dumper.h b/tools/bpf/bpftool/xlated_dumper.h
index a24f89df8cb2..54847e174273 100644
--- a/tools/bpf/bpftool/xlated_dumper.h
+++ b/tools/bpf/bpftool/xlated_dumper.h
@@ -1,39 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
2/* 2/* Copyright (C) 2018 Netronome Systems, Inc. */
3 * Copyright (C) 2018 Netronome Systems, Inc.
4 *
5 * This software is dual licensed under the GNU General License Version 2,
6 * June 1991 as shown in the file COPYING in the top-level directory of this
7 * source tree or the BSD 2-Clause License provided below. You have the
8 * option to license this software under the complete terms of either license.
9 *
10 * The BSD 2-Clause License:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * 1. Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * 2. Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37 3
38#ifndef __BPF_TOOL_XLATED_DUMPER_H 4#ifndef __BPF_TOOL_XLATED_DUMPER_H
39#define __BPF_TOOL_XLATED_DUMPER_H 5#define __BPF_TOOL_XLATED_DUMPER_H
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index aa582cd5bfcf..91c43884f295 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -133,6 +133,14 @@ enum bpf_map_type {
133 BPF_MAP_TYPE_STACK, 133 BPF_MAP_TYPE_STACK,
134}; 134};
135 135
136/* Note that tracing related programs such as
137 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
138 * are not subject to a stable API since kernel internal data
139 * structures can change from release to release and may
140 * therefore break existing tracing BPF programs. Tracing BPF
141 * programs correspond to /a/ specific kernel which is to be
142 * analyzed, and not /a/ specific kernel /and/ all future ones.
143 */
136enum bpf_prog_type { 144enum bpf_prog_type {
137 BPF_PROG_TYPE_UNSPEC, 145 BPF_PROG_TYPE_UNSPEC,
138 BPF_PROG_TYPE_SOCKET_FILTER, 146 BPF_PROG_TYPE_SOCKET_FILTER,
@@ -343,7 +351,7 @@ union bpf_attr {
343 __u32 log_level; /* verbosity level of verifier */ 351 __u32 log_level; /* verbosity level of verifier */
344 __u32 log_size; /* size of user buffer */ 352 __u32 log_size; /* size of user buffer */
345 __aligned_u64 log_buf; /* user supplied buffer */ 353 __aligned_u64 log_buf; /* user supplied buffer */
346 __u32 kern_version; /* checked when prog_type=kprobe */ 354 __u32 kern_version; /* not used */
347 __u32 prog_flags; 355 __u32 prog_flags;
348 char prog_name[BPF_OBJ_NAME_LEN]; 356 char prog_name[BPF_OBJ_NAME_LEN];
349 __u32 prog_ifindex; /* ifindex of netdev to prep for */ 357 __u32 prog_ifindex; /* ifindex of netdev to prep for */
@@ -2657,6 +2665,7 @@ struct sk_msg_md {
2657 __u32 local_ip6[4]; /* Stored in network byte order */ 2665 __u32 local_ip6[4]; /* Stored in network byte order */
2658 __u32 remote_port; /* Stored in network byte order */ 2666 __u32 remote_port; /* Stored in network byte order */
2659 __u32 local_port; /* stored in host byte order */ 2667 __u32 local_port; /* stored in host byte order */
2668 __u32 size; /* Total size of sk_msg */
2660}; 2669};
2661 2670
2662struct sk_reuseport_md { 2671struct sk_reuseport_md {
@@ -2717,6 +2726,8 @@ struct bpf_prog_info {
2717 __u32 nr_jited_line_info; 2726 __u32 nr_jited_line_info;
2718 __u32 line_info_rec_size; 2727 __u32 line_info_rec_size;
2719 __u32 jited_line_info_rec_size; 2728 __u32 jited_line_info_rec_size;
2729 __u32 nr_prog_tags;
2730 __aligned_u64 prog_tags;
2720} __attribute__((aligned(8))); 2731} __attribute__((aligned(8)));
2721 2732
2722struct bpf_map_info { 2733struct bpf_map_info {
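The new sk_msg_md.size field above exposes the total size of the message to BPF_PROG_TYPE_SK_MSG programs. A hypothetical program, not part of this series, showing how such a program might consult it; it assumes a selftests-style build where bpf_helpers.h provides SEC():

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sk_msg")
int msg_size_policy(struct sk_msg_md *msg)
{
	/* Example policy only: drop oversized messages, pass the rest. */
	if (msg->size > 64 * 1024)
		return SK_DROP;
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";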
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index 14f66948fc95..7b7475ef2f17 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -34,7 +34,9 @@ struct btf_type {
34 * bits 0-15: vlen (e.g. # of struct's members) 34 * bits 0-15: vlen (e.g. # of struct's members)
35 * bits 16-23: unused 35 * bits 16-23: unused
36 * bits 24-27: kind (e.g. int, ptr, array...etc) 36 * bits 24-27: kind (e.g. int, ptr, array...etc)
37 * bits 28-31: unused 37 * bits 28-30: unused
38 * bit 31: kind_flag, currently used by
39 * struct, union and fwd
38 */ 40 */
39 __u32 info; 41 __u32 info;
40 /* "size" is used by INT, ENUM, STRUCT and UNION. 42 /* "size" is used by INT, ENUM, STRUCT and UNION.
@@ -52,6 +54,7 @@ struct btf_type {
52 54
53#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f) 55#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
54#define BTF_INFO_VLEN(info) ((info) & 0xffff) 56#define BTF_INFO_VLEN(info) ((info) & 0xffff)
57#define BTF_INFO_KFLAG(info) ((info) >> 31)
55 58
56#define BTF_KIND_UNKN 0 /* Unknown */ 59#define BTF_KIND_UNKN 0 /* Unknown */
57#define BTF_KIND_INT 1 /* Integer */ 60#define BTF_KIND_INT 1 /* Integer */
@@ -110,9 +113,22 @@ struct btf_array {
110struct btf_member { 113struct btf_member {
111 __u32 name_off; 114 __u32 name_off;
112 __u32 type; 115 __u32 type;
113 __u32 offset; /* offset in bits */ 116 /* If the type info kind_flag is set, the btf_member offset
117 * contains both member bitfield size and bit offset. The
118 * bitfield size is set for bitfield members. If the type
119 * info kind_flag is not set, the offset contains only bit
120 * offset.
121 */
122 __u32 offset;
114}; 123};
115 124
125/* If the struct/union type info kind_flag is set, the
126 * following two macros are used to access bitfield_size
127 * and bit_offset from btf_member.offset.
128 */
129#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
130#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
131
116/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param". 132/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
117 * The exact number of btf_param is stored in the vlen (of the 133 * The exact number of btf_param is stored in the vlen (of the
118 * info in "struct btf_type"). 134 * info in "struct btf_type").
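With kind_flag set on a struct or union, btf_member.offset is no longer a plain bit offset: the top 8 bits carry the bitfield size and the low 24 bits the bit offset, which is exactly what the new BTF_MEMBER_BITFIELD_SIZE() and BTF_MEMBER_BIT_OFFSET() macros extract. A minimal sketch of how a BTF consumer decodes a member offset under either convention, assuming the patched uapi header is on the include path:

#include <stdio.h>
#include <linux/btf.h>

static void decode_member_offset(const struct btf_type *t,
				 const struct btf_member *m)
{
	if (BTF_INFO_KFLAG(t->info))
		printf("bitfield_size=%u bit_offset=%u\n",
		       BTF_MEMBER_BITFIELD_SIZE(m->offset),
		       BTF_MEMBER_BIT_OFFSET(m->offset));
	else
		printf("bit_offset=%u\n", m->offset);
}

int main(void)
{
	/* kind_flag struct with one 4-bit member at bit offset 32, i.e.
	 * what BTF_MEMBER_OFFSET(4, 32) in test_btf.c would encode.
	 */
	struct btf_type t = {
		.info = (1U << 31) | (BTF_KIND_STRUCT << 24) | 1,
	};
	struct btf_member m = {
		.offset = (4U << 24) | 32,
	};

	decode_member_offset(&t, &m);
	return 0;
}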
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index addd6e9971cc..6978314ea7f6 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -107,11 +107,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
107 107
108 nr_linfo = info->nr_line_info; 108 nr_linfo = info->nr_line_info;
109 109
110 /* 110 if (!nr_linfo)
111 * Test !info->line_info because the kernel may NULL
112 * the ptr if kernel.kptr_restrict is set.
113 */
114 if (!nr_linfo || !info->line_info)
115 return NULL; 111 return NULL;
116 112
117 /* 113 /*
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e2bc75ee1614..169e347c76f6 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -266,6 +266,7 @@ void bpf_program__unload(struct bpf_program *prog)
266 266
267 zclose(prog->btf_fd); 267 zclose(prog->btf_fd);
268 zfree(&prog->func_info); 268 zfree(&prog->func_info);
269 zfree(&prog->line_info);
269} 270}
270 271
271static void bpf_program__exit(struct bpf_program *prog) 272static void bpf_program__exit(struct bpf_program *prog)
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 1b799e30c06d..4a9785043a39 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -27,3 +27,4 @@ test_flow_dissector
27flow_dissector_load 27flow_dissector_load
28test_netcnt 28test_netcnt
29test_section_names 29test_section_names
30test_tcpnotify_user
diff --git a/tools/testing/selftests/bpf/connect4_prog.c b/tools/testing/selftests/bpf/connect4_prog.c
index b8395f3c43e9..1fd244d35ba9 100644
--- a/tools/testing/selftests/bpf/connect4_prog.c
+++ b/tools/testing/selftests/bpf/connect4_prog.c
@@ -35,9 +35,11 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
35 if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM) 35 if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
36 return 0; 36 return 0;
37 else if (ctx->type == SOCK_STREAM) 37 else if (ctx->type == SOCK_STREAM)
38 sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4), 0, 0); 38 sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
39 BPF_F_CURRENT_NETNS, 0);
39 else 40 else
40 sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv4), 0, 0); 41 sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv4),
42 BPF_F_CURRENT_NETNS, 0);
41 43
42 if (!sk) 44 if (!sk)
43 return 0; 45 return 0;
diff --git a/tools/testing/selftests/bpf/connect6_prog.c b/tools/testing/selftests/bpf/connect6_prog.c
index 25f5dc7b7aa0..26397ab7b3c7 100644
--- a/tools/testing/selftests/bpf/connect6_prog.c
+++ b/tools/testing/selftests/bpf/connect6_prog.c
@@ -47,9 +47,11 @@ int connect_v6_prog(struct bpf_sock_addr *ctx)
47 if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM) 47 if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
48 return 0; 48 return 0;
49 else if (ctx->type == SOCK_STREAM) 49 else if (ctx->type == SOCK_STREAM)
50 sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv6), 0, 0); 50 sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv6),
51 BPF_F_CURRENT_NETNS, 0);
51 else 52 else
52 sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv6), 0, 0); 53 sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv6),
54 BPF_F_CURRENT_NETNS, 0);
53 55
54 if (!sk) 56 if (!sk)
55 return 0; 57 return 0;
diff --git a/tools/testing/selftests/bpf/netcnt_prog.c b/tools/testing/selftests/bpf/netcnt_prog.c
index 1198abca1360..9f741e69cebe 100644
--- a/tools/testing/selftests/bpf/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/netcnt_prog.c
@@ -16,12 +16,18 @@ struct bpf_map_def SEC("maps") percpu_netcnt = {
16 .value_size = sizeof(struct percpu_net_cnt), 16 .value_size = sizeof(struct percpu_net_cnt),
17}; 17};
18 18
19BPF_ANNOTATE_KV_PAIR(percpu_netcnt, struct bpf_cgroup_storage_key,
20 struct percpu_net_cnt);
21
19struct bpf_map_def SEC("maps") netcnt = { 22struct bpf_map_def SEC("maps") netcnt = {
20 .type = BPF_MAP_TYPE_CGROUP_STORAGE, 23 .type = BPF_MAP_TYPE_CGROUP_STORAGE,
21 .key_size = sizeof(struct bpf_cgroup_storage_key), 24 .key_size = sizeof(struct bpf_cgroup_storage_key),
22 .value_size = sizeof(struct net_cnt), 25 .value_size = sizeof(struct net_cnt),
23}; 26};
24 27
28BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key,
29 struct net_cnt);
30
25SEC("cgroup/skb") 31SEC("cgroup/skb")
26int bpf_nextcnt(struct __sk_buff *skb) 32int bpf_nextcnt(struct __sk_buff *skb)
27{ 33{
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index f570e0a39959..8bcd38010582 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -65,8 +65,8 @@ static int __base_pr(const char *format, ...)
65 return err; 65 return err;
66} 66}
67 67
68#define BTF_INFO_ENC(kind, root, vlen) \ 68#define BTF_INFO_ENC(kind, kind_flag, vlen) \
69 ((!!(root) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) 69 ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
70 70
71#define BTF_TYPE_ENC(name, info, size_or_type) \ 71#define BTF_TYPE_ENC(name, info, size_or_type) \
72 (name), (info), (size_or_type) 72 (name), (info), (size_or_type)
@@ -86,6 +86,8 @@ static int __base_pr(const char *format, ...)
86#define BTF_MEMBER_ENC(name, type, bits_offset) \ 86#define BTF_MEMBER_ENC(name, type, bits_offset) \
87 (name), (type), (bits_offset) 87 (name), (type), (bits_offset)
88#define BTF_ENUM_ENC(name, val) (name), (val) 88#define BTF_ENUM_ENC(name, val) (name), (val)
89#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
90 ((bitfield_size) << 24 | (bits_offset))
89 91
90#define BTF_TYPEDEF_ENC(name, type) \ 92#define BTF_TYPEDEF_ENC(name, type) \
91 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type) 93 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type)
@@ -2215,6 +2217,496 @@ static struct btf_raw_test raw_tests[] = {
2215 .err_str = "Invalid type_id", 2217 .err_str = "Invalid type_id",
2216}, 2218},
2217 2219
2220{
2221 .descr = "invalid int kind_flag",
2222 .raw_types = {
2223 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2224 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_INT, 1, 0), 4), /* [2] */
2225 BTF_INT_ENC(0, 0, 32),
2226 BTF_END_RAW,
2227 },
2228 BTF_STR_SEC(""),
2229 .map_type = BPF_MAP_TYPE_ARRAY,
2230 .map_name = "int_type_check_btf",
2231 .key_size = sizeof(int),
2232 .value_size = sizeof(int),
2233 .key_type_id = 1,
2234 .value_type_id = 1,
2235 .max_entries = 4,
2236 .btf_load_err = true,
2237 .err_str = "Invalid btf_info kind_flag",
2238},
2239
2240{
2241 .descr = "invalid ptr kind_flag",
2242 .raw_types = {
2243 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2244 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 1, 0), 1), /* [2] */
2245 BTF_END_RAW,
2246 },
2247 BTF_STR_SEC(""),
2248 .map_type = BPF_MAP_TYPE_ARRAY,
2249 .map_name = "ptr_type_check_btf",
2250 .key_size = sizeof(int),
2251 .value_size = sizeof(int),
2252 .key_type_id = 1,
2253 .value_type_id = 1,
2254 .max_entries = 4,
2255 .btf_load_err = true,
2256 .err_str = "Invalid btf_info kind_flag",
2257},
2258
2259{
2260 .descr = "invalid array kind_flag",
2261 .raw_types = {
2262 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2263 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 1, 0), 0), /* [2] */
2264 BTF_ARRAY_ENC(1, 1, 1),
2265 BTF_END_RAW,
2266 },
2267 BTF_STR_SEC(""),
2268 .map_type = BPF_MAP_TYPE_ARRAY,
2269 .map_name = "array_type_check_btf",
2270 .key_size = sizeof(int),
2271 .value_size = sizeof(int),
2272 .key_type_id = 1,
2273 .value_type_id = 1,
2274 .max_entries = 4,
2275 .btf_load_err = true,
2276 .err_str = "Invalid btf_info kind_flag",
2277},
2278
2279{
2280 .descr = "invalid enum kind_flag",
2281 .raw_types = {
2282 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2283 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 1, 1), 4), /* [2] */
2284 BTF_ENUM_ENC(NAME_TBD, 0),
2285 BTF_END_RAW,
2286 },
2287 BTF_STR_SEC("\0A"),
2288 .map_type = BPF_MAP_TYPE_ARRAY,
2289 .map_name = "enum_type_check_btf",
2290 .key_size = sizeof(int),
2291 .value_size = sizeof(int),
2292 .key_type_id = 1,
2293 .value_type_id = 1,
2294 .max_entries = 4,
2295 .btf_load_err = true,
2296 .err_str = "Invalid btf_info kind_flag",
2297},
2298
2299{
2300 .descr = "valid fwd kind_flag",
2301 .raw_types = {
2302 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2303 BTF_TYPE_ENC(NAME_TBD,
2304 BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [2] */
2305 BTF_END_RAW,
2306 },
2307 BTF_STR_SEC("\0A"),
2308 .map_type = BPF_MAP_TYPE_ARRAY,
2309 .map_name = "fwd_type_check_btf",
2310 .key_size = sizeof(int),
2311 .value_size = sizeof(int),
2312 .key_type_id = 1,
2313 .value_type_id = 1,
2314 .max_entries = 4,
2315},
2316
2317{
2318 .descr = "invalid typedef kind_flag",
2319 .raw_types = {
2320 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2321 BTF_TYPE_ENC(NAME_TBD,
2322 BTF_INFO_ENC(BTF_KIND_TYPEDEF, 1, 0), 1), /* [2] */
2323 BTF_END_RAW,
2324 },
2325 BTF_STR_SEC("\0A"),
2326 .map_type = BPF_MAP_TYPE_ARRAY,
2327 .map_name = "typedef_type_check_btf",
2328 .key_size = sizeof(int),
2329 .value_size = sizeof(int),
2330 .key_type_id = 1,
2331 .value_type_id = 1,
2332 .max_entries = 4,
2333 .btf_load_err = true,
2334 .err_str = "Invalid btf_info kind_flag",
2335},
2336
2337{
2338 .descr = "invalid volatile kind_flag",
2339 .raw_types = {
2340 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2341 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 1, 0), 1), /* [2] */
2342 BTF_END_RAW,
2343 },
2344 BTF_STR_SEC(""),
2345 .map_type = BPF_MAP_TYPE_ARRAY,
2346 .map_name = "volatile_type_check_btf",
2347 .key_size = sizeof(int),
2348 .value_size = sizeof(int),
2349 .key_type_id = 1,
2350 .value_type_id = 1,
2351 .max_entries = 4,
2352 .btf_load_err = true,
2353 .err_str = "Invalid btf_info kind_flag",
2354},
2355
2356{
2357 .descr = "invalid const kind_flag",
2358 .raw_types = {
2359 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2360 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 1, 0), 1), /* [2] */
2361 BTF_END_RAW,
2362 },
2363 BTF_STR_SEC(""),
2364 .map_type = BPF_MAP_TYPE_ARRAY,
2365 .map_name = "const_type_check_btf",
2366 .key_size = sizeof(int),
2367 .value_size = sizeof(int),
2368 .key_type_id = 1,
2369 .value_type_id = 1,
2370 .max_entries = 4,
2371 .btf_load_err = true,
2372 .err_str = "Invalid btf_info kind_flag",
2373},
2374
2375{
2376 .descr = "invalid restrict kind_flag",
2377 .raw_types = {
2378 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2379 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 1, 0), 1), /* [2] */
2380 BTF_END_RAW,
2381 },
2382 BTF_STR_SEC(""),
2383 .map_type = BPF_MAP_TYPE_ARRAY,
2384 .map_name = "restrict_type_check_btf",
2385 .key_size = sizeof(int),
2386 .value_size = sizeof(int),
2387 .key_type_id = 1,
2388 .value_type_id = 1,
2389 .max_entries = 4,
2390 .btf_load_err = true,
2391 .err_str = "Invalid btf_info kind_flag",
2392},
2393
2394{
2395 .descr = "invalid func kind_flag",
2396 .raw_types = {
2397 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2398 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 0), 0), /* [2] */
2399 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC, 1, 0), 2), /* [3] */
2400 BTF_END_RAW,
2401 },
2402 BTF_STR_SEC("\0A"),
2403 .map_type = BPF_MAP_TYPE_ARRAY,
2404 .map_name = "func_type_check_btf",
2405 .key_size = sizeof(int),
2406 .value_size = sizeof(int),
2407 .key_type_id = 1,
2408 .value_type_id = 1,
2409 .max_entries = 4,
2410 .btf_load_err = true,
2411 .err_str = "Invalid btf_info kind_flag",
2412},
2413
2414{
2415 .descr = "invalid func_proto kind_flag",
2416 .raw_types = {
2417 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2418 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 1, 0), 0), /* [2] */
2419 BTF_END_RAW,
2420 },
2421 BTF_STR_SEC(""),
2422 .map_type = BPF_MAP_TYPE_ARRAY,
2423 .map_name = "func_proto_type_check_btf",
2424 .key_size = sizeof(int),
2425 .value_size = sizeof(int),
2426 .key_type_id = 1,
2427 .value_type_id = 1,
2428 .max_entries = 4,
2429 .btf_load_err = true,
2430 .err_str = "Invalid btf_info kind_flag",
2431},
2432
2433{
2434 .descr = "valid struct, kind_flag, bitfield_size = 0",
2435 .raw_types = {
2436 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2437 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 8), /* [2] */
2438 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(0, 0)),
2439 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(0, 32)),
2440 BTF_END_RAW,
2441 },
2442 BTF_STR_SEC("\0A\0B"),
2443 .map_type = BPF_MAP_TYPE_ARRAY,
2444 .map_name = "struct_type_check_btf",
2445 .key_size = sizeof(int),
2446 .value_size = sizeof(int),
2447 .key_type_id = 1,
2448 .value_type_id = 1,
2449 .max_entries = 4,
2450},
2451
2452{
2453 .descr = "valid struct, kind_flag, int member, bitfield_size != 0",
2454 .raw_types = {
2455 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2456 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [2] */
2457 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
2458 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 4)),
2459 BTF_END_RAW,
2460 },
2461 BTF_STR_SEC("\0A\0B"),
2462 .map_type = BPF_MAP_TYPE_ARRAY,
2463 .map_name = "struct_type_check_btf",
2464 .key_size = sizeof(int),
2465 .value_size = sizeof(int),
2466 .key_type_id = 1,
2467 .value_type_id = 1,
2468 .max_entries = 4,
2469},
2470
2471{
2472 .descr = "valid union, kind_flag, int member, bitfield_size != 0",
2473 .raw_types = {
2474 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2475 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [2] */
2476 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
2477 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
2478 BTF_END_RAW,
2479 },
2480 BTF_STR_SEC("\0A\0B"),
2481 .map_type = BPF_MAP_TYPE_ARRAY,
2482 .map_name = "union_type_check_btf",
2483 .key_size = sizeof(int),
2484 .value_size = sizeof(int),
2485 .key_type_id = 1,
2486 .value_type_id = 1,
2487 .max_entries = 4,
2488},
2489
2490{
2491 .descr = "valid struct, kind_flag, enum member, bitfield_size != 0",
2492 .raw_types = {
2493 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2494 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
2495 BTF_ENUM_ENC(NAME_TBD, 0),
2496 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4),/* [3] */
2497 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
2498 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 4)),
2499 BTF_END_RAW,
2500 },
2501 BTF_STR_SEC("\0A\0B\0C"),
2502 .map_type = BPF_MAP_TYPE_ARRAY,
2503 .map_name = "struct_type_check_btf",
2504 .key_size = sizeof(int),
2505 .value_size = sizeof(int),
2506 .key_type_id = 1,
2507 .value_type_id = 1,
2508 .max_entries = 4,
2509},
2510
2511{
2512 .descr = "valid union, kind_flag, enum member, bitfield_size != 0",
2513 .raw_types = {
2514 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2515 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
2516 BTF_ENUM_ENC(NAME_TBD, 0),
2517 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [3] */
2518 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
2519 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
2520 BTF_END_RAW,
2521 },
2522 BTF_STR_SEC("\0A\0B\0C"),
2523 .map_type = BPF_MAP_TYPE_ARRAY,
2524 .map_name = "union_type_check_btf",
2525 .key_size = sizeof(int),
2526 .value_size = sizeof(int),
2527 .key_type_id = 1,
2528 .value_type_id = 1,
2529 .max_entries = 4,
2530},
2531
2532{
2533 .descr = "valid struct, kind_flag, typedef member, bitfield_size != 0",
2534 .raw_types = {
2535 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2536 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
2537 BTF_ENUM_ENC(NAME_TBD, 0),
2538 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4),/* [3] */
2539 BTF_MEMBER_ENC(NAME_TBD, 4, BTF_MEMBER_OFFSET(4, 0)),
2540 BTF_MEMBER_ENC(NAME_TBD, 5, BTF_MEMBER_OFFSET(4, 4)),
2541 BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [4] */
2542 BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [5] */
2543 BTF_END_RAW,
2544 },
2545 BTF_STR_SEC("\0A\0B\0C\0D\0E"),
2546 .map_type = BPF_MAP_TYPE_ARRAY,
2547 .map_name = "struct_type_check_btf",
2548 .key_size = sizeof(int),
2549 .value_size = sizeof(int),
2550 .key_type_id = 1,
2551 .value_type_id = 1,
2552 .max_entries = 4,
2553},
2554
2555{
2556 .descr = "valid union, kind_flag, typedef member, bitfield_size != 0",
2557 .raw_types = {
2558 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2559 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
2560 BTF_ENUM_ENC(NAME_TBD, 0),
2561 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [3] */
2562 BTF_MEMBER_ENC(NAME_TBD, 4, BTF_MEMBER_OFFSET(4, 0)),
2563 BTF_MEMBER_ENC(NAME_TBD, 5, BTF_MEMBER_OFFSET(4, 0)),
2564 BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [4] */
2565 BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [5] */
2566 BTF_END_RAW,
2567 },
2568 BTF_STR_SEC("\0A\0B\0C\0D\0E"),
2569 .map_type = BPF_MAP_TYPE_ARRAY,
2570 .map_name = "union_type_check_btf",
2571 .key_size = sizeof(int),
2572 .value_size = sizeof(int),
2573 .key_type_id = 1,
2574 .value_type_id = 1,
2575 .max_entries = 4,
2576},
2577
2578{
2579 .descr = "invalid struct, kind_flag, bitfield_size greater than struct size",
2580 .raw_types = {
2581 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2582 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [2] */
2583 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 0)),
2584 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 20)),
2585 BTF_END_RAW,
2586 },
2587 BTF_STR_SEC("\0A\0B"),
2588 .map_type = BPF_MAP_TYPE_ARRAY,
2589 .map_name = "struct_type_check_btf",
2590 .key_size = sizeof(int),
2591 .value_size = sizeof(int),
2592 .key_type_id = 1,
2593 .value_type_id = 1,
2594 .max_entries = 4,
2595 .btf_load_err = true,
2596 .err_str = "Member exceeds struct_size",
2597},
2598
2599{
2600 .descr = "invalid struct, kind_flag, bitfield base_type int not regular",
2601 .raw_types = {
2602 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2603 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 20, 4), /* [2] */
2604 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [3] */
2605 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(20, 0)),
2606 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(20, 20)),
2607 BTF_END_RAW,
2608 },
2609 BTF_STR_SEC("\0A\0B"),
2610 .map_type = BPF_MAP_TYPE_ARRAY,
2611 .map_name = "struct_type_check_btf",
2612 .key_size = sizeof(int),
2613 .value_size = sizeof(int),
2614 .key_type_id = 1,
2615 .value_type_id = 1,
2616 .max_entries = 4,
2617 .btf_load_err = true,
2618 .err_str = "Invalid member base type",
2619},
2620
2621{
2622 .descr = "invalid struct, kind_flag, base_type int not regular",
2623 .raw_types = {
2624 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2625 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 12, 4), /* [2] */
2626 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [3] */
2627 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(8, 0)),
2628 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(8, 8)),
2629 BTF_END_RAW,
2630 },
2631 BTF_STR_SEC("\0A\0B"),
2632 .map_type = BPF_MAP_TYPE_ARRAY,
2633 .map_name = "struct_type_check_btf",
2634 .key_size = sizeof(int),
2635 .value_size = sizeof(int),
2636 .key_type_id = 1,
2637 .value_type_id = 1,
2638 .max_entries = 4,
2639 .btf_load_err = true,
2640 .err_str = "Invalid member base type",
2641},
2642
2643{
2644 .descr = "invalid union, kind_flag, bitfield_size greater than struct size",
2645 .raw_types = {
2646 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2647 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 2), /* [2] */
2648 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(8, 0)),
2649 BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 0)),
2650 BTF_END_RAW,
2651 },
2652 BTF_STR_SEC("\0A\0B"),
2653 .map_type = BPF_MAP_TYPE_ARRAY,
2654 .map_name = "union_type_check_btf",
2655 .key_size = sizeof(int),
2656 .value_size = sizeof(int),
2657 .key_type_id = 1,
2658 .value_type_id = 1,
2659 .max_entries = 4,
2660 .btf_load_err = true,
2661 .err_str = "Member exceeds struct_size",
2662},
2663
2664{
2665 .descr = "invalid struct, kind_flag, int member, bitfield_size = 0, wrong byte alignment",
2666 .raw_types = {
2667 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2668 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
2669 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 12), /* [3] */
2670 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
2671 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 36)),
2672 BTF_END_RAW,
2673 },
2674 BTF_STR_SEC("\0A\0B"),
2675 .map_type = BPF_MAP_TYPE_ARRAY,
2676 .map_name = "struct_type_check_btf",
2677 .key_size = sizeof(int),
2678 .value_size = sizeof(int),
2679 .key_type_id = 1,
2680 .value_type_id = 1,
2681 .max_entries = 4,
2682 .btf_load_err = true,
2683 .err_str = "Invalid member offset",
2684},
2685
2686{
2687 .descr = "invalid struct, kind_flag, enum member, bitfield_size = 0, wrong byte alignment",
2688 .raw_types = {
2689 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2690 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
2691 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
2692 BTF_ENUM_ENC(NAME_TBD, 0),
2693 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 12), /* [3] */
2694 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
2695 BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 36)),
2696 BTF_END_RAW,
2697 },
2698 BTF_STR_SEC("\0A\0B\0C"),
2699 .map_type = BPF_MAP_TYPE_ARRAY,
2700 .map_name = "struct_type_check_btf",
2701 .key_size = sizeof(int),
2702 .value_size = sizeof(int),
2703 .key_type_id = 1,
2704 .value_type_id = 1,
2705 .max_entries = 4,
2706 .btf_load_err = true,
2707 .err_str = "Invalid member offset",
2708},
2709
2218}; /* struct btf_raw_test raw_tests[] */ 2710}; /* struct btf_raw_test raw_tests[] */
2219 2711
2220static const char *get_next_str(const char *start, const char *end) 2712static const char *get_next_str(const char *start, const char *end)
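
The kind_flag tests above all hinge on how a struct or union member offset is packed once kind_flag is set: the upper 8 bits of the member offset carry the bitfield size and the lower 24 bits carry the bit offset, which is what the test helper BTF_MEMBER_OFFSET(bitfield_size, bits_offset) encodes. Below is a minimal stand-alone sketch of that packing; the macro bodies are restated from the uapi description rather than copied verbatim from include/uapi/linux/btf.h.

#include <stdio.h>

/* illustrative restatement of the kind_flag member-offset encoding */
#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
	((bitfield_size) << 24 | (bits_offset))
#define BTF_MEMBER_BITFIELD_SIZE(val)	((val) >> 24)		/* upper 8 bits */
#define BTF_MEMBER_BIT_OFFSET(val)	((val) & 0xffffff)	/* lower 24 bits */

int main(void)
{
	/* "4-bit bitfield at bit offset 20", as several tests above encode it */
	unsigned int off = BTF_MEMBER_OFFSET(4, 20);

	printf("bitfield_size=%u bit_offset=%u\n",
	       BTF_MEMBER_BITFIELD_SIZE(off),
	       BTF_MEMBER_BIT_OFFSET(off));
	return 0;
}
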
@@ -2916,7 +3408,7 @@ static int do_test_file(unsigned int test_num)
2916 goto done; 3408 goto done;
2917 } 3409 }
2918 rec_size = info.func_info_rec_size; 3410 rec_size = info.func_info_rec_size;
2919 if (CHECK(rec_size < 4, 3411 if (CHECK(rec_size != sizeof(struct bpf_func_info),
2920 "incorrect info.func_info_rec_size (1st) %d\n", rec_size)) { 3412 "incorrect info.func_info_rec_size (1st) %d\n", rec_size)) {
2921 err = -1; 3413 err = -1;
2922 goto done; 3414 goto done;
@@ -3036,7 +3528,8 @@ struct pprint_mapv {
3036 } aenum; 3528 } aenum;
3037}; 3529};
3038 3530
3039static struct btf_raw_test pprint_test_template = { 3531static struct btf_raw_test pprint_test_template[] = {
3532{
3040 .raw_types = { 3533 .raw_types = {
3041	/* unsigned char */				/* [1] */	 3534	/* unsigned char */				/* [1] */
3042 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), 3535 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
@@ -3086,13 +3579,140 @@ static struct btf_raw_test pprint_test_template = {
3086 BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ 3579 BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */
3087 BTF_END_RAW, 3580 BTF_END_RAW,
3088 }, 3581 },
3089 .str_sec = "\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum", 3582 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
3090 .str_sec_size = sizeof("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), 3583 .key_size = sizeof(unsigned int),
3584 .value_size = sizeof(struct pprint_mapv),
3585 .key_type_id = 3, /* unsigned int */
3586 .value_type_id = 16, /* struct pprint_mapv */
3587 .max_entries = 128 * 1024,
3588},
3589
3590{
3591	/* this type will have the same layout as the
3592	 * first .raw_types definition, but the struct type
3593	 * will be encoded with kind_flag set.
3594	 */
3595 .raw_types = {
3596	/* unsigned char */				/* [1] */
3597 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
3598 /* unsigned short */ /* [2] */
3599 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
3600 /* unsigned int */ /* [3] */
3601 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
3602 /* int */ /* [4] */
3603 BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
3604 /* unsigned long long */ /* [5] */
3605 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8),
3606 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [6] */
3607 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [7] */
3608 /* uint8_t[8] */ /* [8] */
3609 BTF_TYPE_ARRAY_ENC(9, 1, 8),
3610 /* typedef unsigned char uint8_t */ /* [9] */
3611 BTF_TYPEDEF_ENC(NAME_TBD, 1),
3612 /* typedef unsigned short uint16_t */ /* [10] */
3613 BTF_TYPEDEF_ENC(NAME_TBD, 2),
3614 /* typedef unsigned int uint32_t */ /* [11] */
3615 BTF_TYPEDEF_ENC(NAME_TBD, 3),
3616 /* typedef int int32_t */ /* [12] */
3617 BTF_TYPEDEF_ENC(NAME_TBD, 4),
3618 /* typedef unsigned long long uint64_t *//* [13] */
3619 BTF_TYPEDEF_ENC(NAME_TBD, 5),
3620 /* union (anon) */ /* [14] */
3621 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8),
3622 BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */
3623 BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */
3624 /* enum (anon) */ /* [15] */
3625 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4),
3626 BTF_ENUM_ENC(NAME_TBD, 0),
3627 BTF_ENUM_ENC(NAME_TBD, 1),
3628 BTF_ENUM_ENC(NAME_TBD, 2),
3629 BTF_ENUM_ENC(NAME_TBD, 3),
3630 /* struct pprint_mapv */ /* [16] */
3631 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32),
3632 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
3633 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
3634 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
3635 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 96)), /* unused_bits2a */
3636 BTF_MEMBER_ENC(NAME_TBD, 7, BTF_MEMBER_OFFSET(28, 98)), /* bits28 */
3637 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */
3638 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
3639 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
3640 BTF_END_RAW,
3641 },
3642 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
3643 .key_size = sizeof(unsigned int),
3644 .value_size = sizeof(struct pprint_mapv),
3645 .key_type_id = 3, /* unsigned int */
3646 .value_type_id = 16, /* struct pprint_mapv */
3647 .max_entries = 128 * 1024,
3648},
3649
3650{
3651	/* this type will have the same layout as the
3652	 * first .raw_types definition. The struct type will
3653	 * be encoded with kind_flag set, the bitfield members
3654	 * will be wrapped in typedef/const/volatile, and the
3655	 * bitfield members will have both int and enum types.
3656	 */
3657 .raw_types = {
3658	/* unsigned char */				/* [1] */
3659 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
3660 /* unsigned short */ /* [2] */
3661 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
3662 /* unsigned int */ /* [3] */
3663 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
3664 /* int */ /* [4] */
3665 BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
3666 /* unsigned long long */ /* [5] */
3667 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8),
3668 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [6] */
3669 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [7] */
3670 /* uint8_t[8] */ /* [8] */
3671 BTF_TYPE_ARRAY_ENC(9, 1, 8),
3672 /* typedef unsigned char uint8_t */ /* [9] */
3673 BTF_TYPEDEF_ENC(NAME_TBD, 1),
3674 /* typedef unsigned short uint16_t */ /* [10] */
3675 BTF_TYPEDEF_ENC(NAME_TBD, 2),
3676 /* typedef unsigned int uint32_t */ /* [11] */
3677 BTF_TYPEDEF_ENC(NAME_TBD, 3),
3678 /* typedef int int32_t */ /* [12] */
3679 BTF_TYPEDEF_ENC(NAME_TBD, 4),
3680 /* typedef unsigned long long uint64_t *//* [13] */
3681 BTF_TYPEDEF_ENC(NAME_TBD, 5),
3682 /* union (anon) */ /* [14] */
3683 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8),
3684 BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */
3685 BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */
3686 /* enum (anon) */ /* [15] */
3687 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4),
3688 BTF_ENUM_ENC(NAME_TBD, 0),
3689 BTF_ENUM_ENC(NAME_TBD, 1),
3690 BTF_ENUM_ENC(NAME_TBD, 2),
3691 BTF_ENUM_ENC(NAME_TBD, 3),
3692 /* struct pprint_mapv */ /* [16] */
3693 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32),
3694 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
3695 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
3696 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
3697 BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 96)), /* unused_bits2a */
3698 BTF_MEMBER_ENC(NAME_TBD, 7, BTF_MEMBER_OFFSET(28, 98)), /* bits28 */
3699 BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */
3700 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
3701 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
3702 /* typedef unsigned int ___int */ /* [17] */
3703 BTF_TYPEDEF_ENC(NAME_TBD, 18),
3704 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */
3705 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */
3706 BTF_END_RAW,
3707 },
3708 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"),
3091 .key_size = sizeof(unsigned int), 3709 .key_size = sizeof(unsigned int),
3092 .value_size = sizeof(struct pprint_mapv), 3710 .value_size = sizeof(struct pprint_mapv),
3093 .key_type_id = 3, /* unsigned int */ 3711 .key_type_id = 3, /* unsigned int */
3094 .value_type_id = 16, /* struct pprint_mapv */ 3712 .value_type_id = 16, /* struct pprint_mapv */
3095 .max_entries = 128 * 1024, 3713 .max_entries = 128 * 1024,
3714},
3715
3096}; 3716};
3097 3717
3098static struct btf_pprint_test_meta { 3718static struct btf_pprint_test_meta {
@@ -3195,9 +3815,9 @@ static int check_line(const char *expected_line, int nexpected_line,
3195} 3815}
3196 3816
3197 3817
3198static int do_test_pprint(void) 3818static int do_test_pprint(int test_num)
3199{ 3819{
3200 const struct btf_raw_test *test = &pprint_test_template; 3820 const struct btf_raw_test *test = &pprint_test_template[test_num];
3201 struct bpf_create_map_attr create_attr = {}; 3821 struct bpf_create_map_attr create_attr = {};
3202 bool ordered_map, lossless_map, percpu_map; 3822 bool ordered_map, lossless_map, percpu_map;
3203 int err, ret, num_cpus, rounded_value_size; 3823 int err, ret, num_cpus, rounded_value_size;
@@ -3213,7 +3833,7 @@ static int do_test_pprint(void)
3213 uint8_t *raw_btf; 3833 uint8_t *raw_btf;
3214 ssize_t nread; 3834 ssize_t nread;
3215 3835
3216 fprintf(stderr, "%s......", test->descr); 3836 fprintf(stderr, "%s(#%d)......", test->descr, test_num);
3217 raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types, 3837 raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
3218 test->str_sec, test->str_sec_size, 3838 test->str_sec, test->str_sec_size,
3219 &raw_btf_size, NULL); 3839 &raw_btf_size, NULL);
@@ -3406,15 +4026,27 @@ static int test_pprint(void)
3406 unsigned int i; 4026 unsigned int i;
3407 int err = 0; 4027 int err = 0;
3408 4028
4029 /* test various maps with the first test template */
3409 for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) { 4030 for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) {
3410 pprint_test_template.descr = pprint_tests_meta[i].descr; 4031 pprint_test_template[0].descr = pprint_tests_meta[i].descr;
3411 pprint_test_template.map_type = pprint_tests_meta[i].map_type; 4032 pprint_test_template[0].map_type = pprint_tests_meta[i].map_type;
3412 pprint_test_template.map_name = pprint_tests_meta[i].map_name; 4033 pprint_test_template[0].map_name = pprint_tests_meta[i].map_name;
3413 pprint_test_template.ordered_map = pprint_tests_meta[i].ordered_map; 4034 pprint_test_template[0].ordered_map = pprint_tests_meta[i].ordered_map;
3414 pprint_test_template.lossless_map = pprint_tests_meta[i].lossless_map; 4035 pprint_test_template[0].lossless_map = pprint_tests_meta[i].lossless_map;
3415 pprint_test_template.percpu_map = pprint_tests_meta[i].percpu_map; 4036 pprint_test_template[0].percpu_map = pprint_tests_meta[i].percpu_map;
3416 4037
3417 err |= count_result(do_test_pprint()); 4038 err |= count_result(do_test_pprint(0));
4039 }
4040
4041	/* test the remaining test templates with the first map */
4042 for (i = 1; i < ARRAY_SIZE(pprint_test_template); i++) {
4043 pprint_test_template[i].descr = pprint_tests_meta[0].descr;
4044 pprint_test_template[i].map_type = pprint_tests_meta[0].map_type;
4045 pprint_test_template[i].map_name = pprint_tests_meta[0].map_name;
4046 pprint_test_template[i].ordered_map = pprint_tests_meta[0].ordered_map;
4047 pprint_test_template[i].lossless_map = pprint_tests_meta[0].lossless_map;
4048 pprint_test_template[i].percpu_map = pprint_tests_meta[0].percpu_map;
4049 err |= count_result(do_test_pprint(i));
3418 } 4050 }
3419 4051
3420 return err; 4052 return err;
@@ -3622,6 +4254,33 @@ static struct prog_info_raw_test {
3622}, 4254},
3623 4255
3624{ 4256{
4257 .descr = "line_info (Zero bpf insn code)",
4258 .raw_types = {
4259 BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
4260 BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8), /* [2] */
4261 BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [3] */
4262 BTF_END_RAW,
4263 },
4264 BTF_STR_SEC("\0int\0unsigned long\0u64\0u64 a=1;\0return a;"),
4265 .insns = {
4266 BPF_LD_IMM64(BPF_REG_0, 1),
4267 BPF_EXIT_INSN(),
4268 },
4269 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4270 .func_info_cnt = 0,
4271 .line_info = {
4272 BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
4273 BPF_LINE_INFO_ENC(1, 0, 0, 2, 9),
4274 BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
4275 BTF_END_RAW,
4276 },
4277 .line_info_rec_size = sizeof(struct bpf_line_info),
4278 .nr_jited_ksyms = 1,
4279 .err_str = "Invalid insn code at line_info[1]",
4280 .expected_prog_load_failure = true,
4281},
4282
4283{
3625 .descr = "line_info (No subprog. zero tailing line_info", 4284 .descr = "line_info (No subprog. zero tailing line_info",
3626 .raw_types = { 4285 .raw_types = {
3627 BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 4286 BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -3912,7 +4571,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
3912 } 4571 }
3913 4572
3914 rec_size = info.func_info_rec_size; 4573 rec_size = info.func_info_rec_size;
3915 if (CHECK(rec_size < 8, 4574 if (CHECK(rec_size != sizeof(struct bpf_func_info),
3916 "incorrect info.func_info_rec_size (1st) %d", rec_size)) { 4575 "incorrect info.func_info_rec_size (1st) %d", rec_size)) {
3917 return -1; 4576 return -1;
3918 } 4577 }
@@ -3941,19 +4600,13 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
3941 err = -1; 4600 err = -1;
3942 goto done; 4601 goto done;
3943 } 4602 }
3944 if (CHECK(info.func_info_rec_size < 8, 4603 if (CHECK(info.func_info_rec_size != rec_size,
3945 "incorrect info.func_info_rec_size (2nd) %d", 4604 "incorrect info.func_info_rec_size (2nd) %d",
3946 info.func_info_rec_size)) { 4605 info.func_info_rec_size)) {
3947 err = -1; 4606 err = -1;
3948 goto done; 4607 goto done;
3949 } 4608 }
3950 4609
3951 if (CHECK(!info.func_info,
3952 "info.func_info == 0. kernel.kptr_restrict is set?")) {
3953 err = -1;
3954 goto done;
3955 }
3956
3957 finfo = func_info; 4610 finfo = func_info;
3958 for (i = 0; i < test->func_info_cnt; i++) { 4611 for (i = 0; i < test->func_info_cnt; i++) {
3959 if (CHECK(finfo->type_id != test->func_info[i][1], 4612 if (CHECK(finfo->type_id != test->func_info[i][1],
@@ -4023,8 +4676,8 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
4023 goto done; 4676 goto done;
4024 } 4677 }
4025 4678
4026 if (CHECK(info.line_info_rec_size < 16 || 4679 if (CHECK(info.line_info_rec_size != sizeof(struct bpf_line_info) ||
4027 info.jited_line_info_rec_size < 8, 4680 info.jited_line_info_rec_size != sizeof(__u64),
4028 "info: line_info_rec_size:%u(userspace expected:%u) jited_line_info_rec_size:%u(userspace expected:%u)", 4681 "info: line_info_rec_size:%u(userspace expected:%u) jited_line_info_rec_size:%u(userspace expected:%u)",
4029 info.line_info_rec_size, rec_size, 4682 info.line_info_rec_size, rec_size,
4030 info.jited_line_info_rec_size, jited_rec_size)) { 4683 info.jited_line_info_rec_size, jited_rec_size)) {
@@ -4077,7 +4730,6 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
4077 * Other fields are not the concern of this test. 4730 * Other fields are not the concern of this test.
4078 */ 4731 */
4079 if (CHECK(err == -1 || 4732 if (CHECK(err == -1 ||
4080 !info.line_info ||
4081 info.nr_line_info != cnt || 4733 info.nr_line_info != cnt ||
4082 (jited_cnt && !info.jited_line_info) || 4734 (jited_cnt && !info.jited_line_info) ||
4083 info.nr_jited_line_info != jited_cnt || 4735 info.nr_jited_line_info != jited_cnt ||
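
The tightened rec_size checks above sit inside the usual two-step bpf_obj_get_info_by_fd() pattern that test_get_finfo() and test_get_linfo() follow: first query the counts and record sizes, then allocate a buffer and fetch the records. Here is a condensed sketch of that pattern for func_info, with error handling trimmed; the helper name get_func_info() is illustrative and not part of the patch.

#include <stdlib.h>
#include <string.h>
#include <linux/bpf.h>		/* struct bpf_prog_info, struct bpf_func_info */
#include <bpf/bpf.h>		/* bpf_obj_get_info_by_fd() */

static struct bpf_func_info *get_func_info(int prog_fd, __u32 *nr)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	struct bpf_func_info *finfo;

	/* 1st call: learn nr_func_info and the kernel's record size */
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
		return NULL;
	/* the checks above now insist on an exact size match */
	if (info.func_info_rec_size != sizeof(struct bpf_func_info))
		return NULL;

	*nr = info.nr_func_info;
	finfo = calloc(*nr, sizeof(struct bpf_func_info));
	if (!finfo)
		return NULL;

	/* 2nd call: hand the kernel a buffer to copy the records into */
	memset(&info, 0, sizeof(info));
	info_len = sizeof(info);
	info.nr_func_info = *nr;
	info.func_info_rec_size = sizeof(struct bpf_func_info);
	info.func_info = (__u64)(unsigned long)finfo;
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
		free(finfo);
		return NULL;
	}

	return finfo;
}
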
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 26f1fdf3e2bf..126fc624290d 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -51,10 +51,10 @@ static struct {
51 struct iphdr iph; 51 struct iphdr iph;
52 struct tcphdr tcp; 52 struct tcphdr tcp;
53} __packed pkt_v4 = { 53} __packed pkt_v4 = {
54 .eth.h_proto = bpf_htons(ETH_P_IP), 54 .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
55 .iph.ihl = 5, 55 .iph.ihl = 5,
56 .iph.protocol = 6, 56 .iph.protocol = 6,
57 .iph.tot_len = bpf_htons(MAGIC_BYTES), 57 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
58 .tcp.urg_ptr = 123, 58 .tcp.urg_ptr = 123,
59}; 59};
60 60
@@ -64,9 +64,9 @@ static struct {
64 struct ipv6hdr iph; 64 struct ipv6hdr iph;
65 struct tcphdr tcp; 65 struct tcphdr tcp;
66} __packed pkt_v6 = { 66} __packed pkt_v6 = {
67 .eth.h_proto = bpf_htons(ETH_P_IPV6), 67 .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
68 .iph.nexthdr = 6, 68 .iph.nexthdr = 6,
69 .iph.payload_len = bpf_htons(MAGIC_BYTES), 69 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
70 .tcp.urg_ptr = 123, 70 .tcp.urg_ptr = 123,
71}; 71};
72 72
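
The test_progs.c hunk swaps bpf_htons() for __bpf_constant_htons() only inside static initializers: the generic macro uses __builtin_constant_p() to fall back to a __builtin_bswap16()-based swap, and older toolchains may not accept or constant-fold that in a static initializer, so the constant-only form is the safe choice there. A little-endian sketch of the distinction follows; the swab helper ___example_swab16() is written out here for illustration rather than taken verbatim from the selftests' bpf_endian.h.

#include <linux/types.h>

/* pure constant expression: safe in a static/designated initializer */
#define ___example_swab16(x) ((__u16)(			\
	(((__u16)(x) & (__u16)0x00ffU) << 8) |		\
	(((__u16)(x) & (__u16)0xff00U) >> 8)))
#define __bpf_constant_htons(x)	___example_swab16(x)

/* may lower to a runtime bswap: fine in code, risky in an initializer */
#define __bpf_htons(x)		__builtin_bswap16(x)

/* generic form: selects one of the two via __builtin_constant_p() */
#define bpf_htons(x)					\
	(__builtin_constant_p(x) ?			\
	 __bpf_constant_htons(x) : __bpf_htons(x))

__u16 eth_p_ip_be = __bpf_constant_htons(0x0800);	/* ETH_P_IP, always OK */

__u16 htons_at_runtime(__u16 host_val)
{
	return bpf_htons(host_val);	/* runtime swap for non-constant values */
}
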
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index c3b799c1ee97..baafe5c76aca 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -49,6 +49,7 @@
49#define MAX_INSNS BPF_MAXINSNS 49#define MAX_INSNS BPF_MAXINSNS
50#define MAX_FIXUPS 8 50#define MAX_FIXUPS 8
51#define MAX_NR_MAPS 13 51#define MAX_NR_MAPS 13
52#define MAX_TEST_RUNS 8
52#define POINTER_VALUE 0xcafe4all 53#define POINTER_VALUE 0xcafe4all
53#define TEST_DATA_LEN 64 54#define TEST_DATA_LEN 64
54 55
@@ -76,7 +77,7 @@ struct bpf_test {
76 int fixup_percpu_cgroup_storage[MAX_FIXUPS]; 77 int fixup_percpu_cgroup_storage[MAX_FIXUPS];
77 const char *errstr; 78 const char *errstr;
78 const char *errstr_unpriv; 79 const char *errstr_unpriv;
79 uint32_t retval, retval_unpriv; 80 uint32_t retval, retval_unpriv, insn_processed;
80 enum { 81 enum {
81 UNDEF, 82 UNDEF,
82 ACCEPT, 83 ACCEPT,
@@ -86,6 +87,14 @@ struct bpf_test {
86 uint8_t flags; 87 uint8_t flags;
87 __u8 data[TEST_DATA_LEN]; 88 __u8 data[TEST_DATA_LEN];
88 void (*fill_helper)(struct bpf_test *self); 89 void (*fill_helper)(struct bpf_test *self);
90 uint8_t runs;
91 struct {
92 uint32_t retval, retval_unpriv;
93 union {
94 __u8 data[TEST_DATA_LEN];
95 __u64 data64[TEST_DATA_LEN / 8];
96 };
97 } retvals[MAX_TEST_RUNS];
89}; 98};
90 99
91/* Note we want this to be 64 bit aligned so that the end of our array is 100/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -1001,15 +1010,45 @@ static struct bpf_test tests[] = {
1001 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 1010 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1002 /* mess up with R1 pointer on stack */ 1011 /* mess up with R1 pointer on stack */
1003 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), 1012 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
1004 /* fill back into R0 should fail */ 1013 /* fill back into R0 is fine for priv.
1014 * R0 now becomes SCALAR_VALUE.
1015 */
1005 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 1016 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1017 /* Load from R0 should fail. */
1018 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
1006 BPF_EXIT_INSN(), 1019 BPF_EXIT_INSN(),
1007 }, 1020 },
1008 .errstr_unpriv = "attempt to corrupt spilled", 1021 .errstr_unpriv = "attempt to corrupt spilled",
1009 .errstr = "corrupted spill", 1022 .errstr = "R0 invalid mem access 'inv",
1010 .result = REJECT, 1023 .result = REJECT,
1011 }, 1024 },
1012 { 1025 {
1026 "check corrupted spill/fill, LSB",
1027 .insns = {
1028 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1029 BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
1030 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1031 BPF_EXIT_INSN(),
1032 },
1033 .errstr_unpriv = "attempt to corrupt spilled",
1034 .result_unpriv = REJECT,
1035 .result = ACCEPT,
1036 .retval = POINTER_VALUE,
1037 },
1038 {
1039 "check corrupted spill/fill, MSB",
1040 .insns = {
1041 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1042 BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
1043 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1044 BPF_EXIT_INSN(),
1045 },
1046 .errstr_unpriv = "attempt to corrupt spilled",
1047 .result_unpriv = REJECT,
1048 .result = ACCEPT,
1049 .retval = POINTER_VALUE,
1050 },
1051 {
1013 "invalid src register in STX", 1052 "invalid src register in STX",
1014 .insns = { 1053 .insns = {
1015 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1), 1054 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
@@ -1813,10 +1852,20 @@ static struct bpf_test tests[] = {
1813 .prog_type = BPF_PROG_TYPE_SK_SKB, 1852 .prog_type = BPF_PROG_TYPE_SK_SKB,
1814 }, 1853 },
1815 { 1854 {
1816 "invalid 64B read of family in SK_MSG", 1855 "valid access size in SK_MSG",
1856 .insns = {
1857 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1858 offsetof(struct sk_msg_md, size)),
1859 BPF_EXIT_INSN(),
1860 },
1861 .result = ACCEPT,
1862 .prog_type = BPF_PROG_TYPE_SK_MSG,
1863 },
1864 {
1865 "invalid 64B read of size in SK_MSG",
1817 .insns = { 1866 .insns = {
1818 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 1867 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1819 offsetof(struct sk_msg_md, family)), 1868 offsetof(struct sk_msg_md, size)),
1820 BPF_EXIT_INSN(), 1869 BPF_EXIT_INSN(),
1821 }, 1870 },
1822 .errstr = "invalid bpf_context access", 1871 .errstr = "invalid bpf_context access",
@@ -1827,10 +1876,10 @@ static struct bpf_test tests[] = {
1827 "invalid read past end of SK_MSG", 1876 "invalid read past end of SK_MSG",
1828 .insns = { 1877 .insns = {
1829 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1878 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1830 offsetof(struct sk_msg_md, local_port) + 4), 1879 offsetof(struct sk_msg_md, size) + 4),
1831 BPF_EXIT_INSN(), 1880 BPF_EXIT_INSN(),
1832 }, 1881 },
1833 .errstr = "R0 !read_ok", 1882 .errstr = "invalid bpf_context access",
1834 .result = REJECT, 1883 .result = REJECT,
1835 .prog_type = BPF_PROG_TYPE_SK_MSG, 1884 .prog_type = BPF_PROG_TYPE_SK_MSG,
1836 }, 1885 },
@@ -13648,6 +13697,28 @@ static struct bpf_test tests[] = {
13648 .result = ACCEPT, 13697 .result = ACCEPT,
13649 }, 13698 },
13650 { 13699 {
13700 "allocated_stack",
13701 .insns = {
13702 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13703 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
13704 BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
13705 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13706 BPF_MOV64_IMM(BPF_REG_0, 0),
13707 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
13708 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
13709 BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
13710 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
13711 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13712 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13713 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13714 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13715 BPF_EXIT_INSN(),
13716 },
13717 .result = ACCEPT,
13718 .result_unpriv = ACCEPT,
13719 .insn_processed = 15,
13720 },
13721 {
13651 "reference tracking in call: free reference in subprog and outside", 13722 "reference tracking in call: free reference in subprog and outside",
13652 .insns = { 13723 .insns = {
13653 BPF_SK_LOOKUP, 13724 BPF_SK_LOOKUP,
@@ -14099,6 +14170,7 @@ static struct bpf_test tests[] = {
14099 .errstr_unpriv = "R1 leaks addr", 14170 .errstr_unpriv = "R1 leaks addr",
14100 .result = REJECT, 14171 .result = REJECT,
14101 }, 14172 },
14173 {
14102 "calls: cross frame pruning", 14174 "calls: cross frame pruning",
14103 .insns = { 14175 .insns = {
14104 /* r8 = !!random(); 14176 /* r8 = !!random();
@@ -14122,10 +14194,199 @@ static struct bpf_test tests[] = {
14122 }, 14194 },
14123 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 14195 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14124 .errstr_unpriv = "function calls to other bpf functions are allowed for root only", 14196 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14197 .result = REJECT,
14198 },
14199 {
14200 "jset: functional",
14201 .insns = {
14202 /* r0 = 0 */
14203 BPF_MOV64_IMM(BPF_REG_0, 0),
14204 /* prep for direct packet access via r2 */
14205 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14206 offsetof(struct __sk_buff, data)),
14207 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14208 offsetof(struct __sk_buff, data_end)),
14209 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
14210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
14211 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
14212 BPF_EXIT_INSN(),
14213
14214 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
14215
14216 /* reg, bit 63 or bit 0 set, taken */
14217 BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
14218 BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
14219 BPF_EXIT_INSN(),
14220
14221 /* reg, bit 62, not taken */
14222 BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
14223 BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
14224 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
14225 BPF_EXIT_INSN(),
14226
14227 /* imm, any bit set, taken */
14228 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
14229 BPF_EXIT_INSN(),
14230
14231 /* imm, bit 31 set, taken */
14232 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
14233 BPF_EXIT_INSN(),
14234
14235 /* all good - return r0 == 2 */
14236 BPF_MOV64_IMM(BPF_REG_0, 2),
14237 BPF_EXIT_INSN(),
14238 },
14239 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14240 .result = ACCEPT,
14241 .runs = 7,
14242 .retvals = {
14243 { .retval = 2,
14244 .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
14245 },
14246 { .retval = 2,
14247 .data64 = { (1ULL << 63) | (1U << 31), }
14248 },
14249 { .retval = 2,
14250 .data64 = { (1ULL << 31) | (1U << 0), }
14251 },
14252 { .retval = 2,
14253 .data64 = { (__u32)-1, }
14254 },
14255 { .retval = 2,
14256 .data64 = { ~0x4000000000000000ULL, }
14257 },
14258 { .retval = 0,
14259 .data64 = { 0, }
14260 },
14261 { .retval = 0,
14262 .data64 = { ~0ULL, }
14263 },
14264 },
14265 },
14266 {
14267 "jset: sign-extend",
14268 .insns = {
14269 /* r0 = 0 */
14270 BPF_MOV64_IMM(BPF_REG_0, 0),
14271 /* prep for direct packet access via r2 */
14272 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14273 offsetof(struct __sk_buff, data)),
14274 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14275 offsetof(struct __sk_buff, data_end)),
14276 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
14277 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
14278 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
14279 BPF_EXIT_INSN(),
14280
14281 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
14282
14283 BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
14284 BPF_EXIT_INSN(),
14285
14286 BPF_MOV64_IMM(BPF_REG_0, 2),
14287 BPF_EXIT_INSN(),
14288 },
14289 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
14290 .result = ACCEPT,
14291 .retval = 2,
14292 .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
14293 },
14294 {
14295 "jset: known const compare",
14296 .insns = {
14297 BPF_MOV64_IMM(BPF_REG_0, 1),
14298 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14299 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14300 BPF_EXIT_INSN(),
14301 },
14302 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14303 .retval_unpriv = 1,
14304 .result_unpriv = ACCEPT,
14305 .retval = 1,
14306 .result = ACCEPT,
14307 },
14308 {
14309 "jset: known const compare bad",
14310 .insns = {
14311 BPF_MOV64_IMM(BPF_REG_0, 0),
14312 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14313 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14314 BPF_EXIT_INSN(),
14315 },
14316 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14317 .errstr_unpriv = "!read_ok",
14125 .result_unpriv = REJECT, 14318 .result_unpriv = REJECT,
14126 .errstr = "!read_ok", 14319 .errstr = "!read_ok",
14127 .result = REJECT, 14320 .result = REJECT,
14128 }, 14321 },
14322 {
14323 "jset: unknown const compare taken",
14324 .insns = {
14325 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14326 BPF_FUNC_get_prandom_u32),
14327 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14328 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
14329 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14330 BPF_EXIT_INSN(),
14331 },
14332 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14333 .errstr_unpriv = "!read_ok",
14334 .result_unpriv = REJECT,
14335 .errstr = "!read_ok",
14336 .result = REJECT,
14337 },
14338 {
14339 "jset: unknown const compare not taken",
14340 .insns = {
14341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14342 BPF_FUNC_get_prandom_u32),
14343 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14344 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14345 BPF_EXIT_INSN(),
14346 },
14347 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14348 .errstr_unpriv = "!read_ok",
14349 .result_unpriv = REJECT,
14350 .errstr = "!read_ok",
14351 .result = REJECT,
14352 },
14353 {
14354 "jset: half-known const compare",
14355 .insns = {
14356 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14357 BPF_FUNC_get_prandom_u32),
14358 BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
14359 BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
14360 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14361 BPF_MOV64_IMM(BPF_REG_0, 0),
14362 BPF_EXIT_INSN(),
14363 },
14364 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14365 .result_unpriv = ACCEPT,
14366 .result = ACCEPT,
14367 },
14368 {
14369 "jset: range",
14370 .insns = {
14371 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14372 BPF_FUNC_get_prandom_u32),
14373 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14374 BPF_MOV64_IMM(BPF_REG_0, 0),
14375 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
14376 BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
14377 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
14378 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14379 BPF_EXIT_INSN(),
14380 BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
14381 BPF_EXIT_INSN(),
14382 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
14383 BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14384 BPF_EXIT_INSN(),
14385 },
14386 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14387 .result_unpriv = ACCEPT,
14388 .result = ACCEPT,
14389 },
14129}; 14390};
14130 14391
14131static int probe_filter_length(const struct bpf_insn *fp) 14392static int probe_filter_length(const struct bpf_insn *fp)
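
The new "jset:" verifier tests all revolve around one instruction: BPF_JMP | BPF_JSET is a bit-test branch, taken if and only if (dst & src) != 0, and for the BPF_K form the 32-bit immediate is sign-extended to 64 bits, which is what the "jset: sign-extend" case exercises. A plain-C model of both forms under those assumptions is sketched below; the helper names are illustrative only.

#include <stdbool.h>
#include <stdint.h>

/* BPF_JMP | BPF_JSET | BPF_X: taken iff (dst & src) != 0 */
static bool jset_reg_taken(uint64_t dst, uint64_t src)
{
	return (dst & src) != 0;
}

/* BPF_JMP | BPF_JSET | BPF_K: the 32-bit immediate is sign-extended,
 * so an imm of 0x80000000 tests bits 31..63, not bit 31 alone.
 */
static bool jset_imm_taken(uint64_t dst, int32_t imm)
{
	return (dst & (uint64_t)(int64_t)imm) != 0;
}

int main(void)
{
	/* first retvals[] pattern of the "jset: functional" test above */
	uint64_t pkt = (1ULL << 63) | (1U << 31) | 1;

	return (jset_reg_taken(pkt, 0x8000000000000001ULL) &&
		jset_imm_taken(pkt, (int32_t)0x80000000)) ? 0 : 1;
}
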
@@ -14408,16 +14669,42 @@ out:
14408 return ret; 14669 return ret;
14409} 14670}
14410 14671
14672static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
14673 void *data, size_t size_data)
14674{
14675 __u8 tmp[TEST_DATA_LEN << 2];
14676 __u32 size_tmp = sizeof(tmp);
14677 uint32_t retval;
14678 int err;
14679
14680 if (unpriv)
14681 set_admin(true);
14682 err = bpf_prog_test_run(fd_prog, 1, data, size_data,
14683 tmp, &size_tmp, &retval, NULL);
14684 if (unpriv)
14685 set_admin(false);
14686 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
14687 printf("Unexpected bpf_prog_test_run error ");
14688 return err;
14689 }
14690 if (!err && retval != expected_val &&
14691 expected_val != POINTER_VALUE) {
14692 printf("FAIL retval %d != %d ", retval, expected_val);
14693 return 1;
14694 }
14695
14696 return 0;
14697}
14698
14411static void do_test_single(struct bpf_test *test, bool unpriv, 14699static void do_test_single(struct bpf_test *test, bool unpriv,
14412 int *passes, int *errors) 14700 int *passes, int *errors)
14413{ 14701{
14414 int fd_prog, expected_ret, alignment_prevented_execution; 14702 int fd_prog, expected_ret, alignment_prevented_execution;
14415 int prog_len, prog_type = test->prog_type; 14703 int prog_len, prog_type = test->prog_type;
14416 struct bpf_insn *prog = test->insns; 14704 struct bpf_insn *prog = test->insns;
14705 int run_errs, run_successes;
14417 int map_fds[MAX_NR_MAPS]; 14706 int map_fds[MAX_NR_MAPS];
14418 const char *expected_err; 14707 const char *expected_err;
14419 uint32_t expected_val;
14420 uint32_t retval;
14421 __u32 pflags; 14708 __u32 pflags;
14422 int i, err; 14709 int i, err;
14423 14710
@@ -14441,8 +14728,6 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
14441 test->result_unpriv : test->result; 14728 test->result_unpriv : test->result;
14442 expected_err = unpriv && test->errstr_unpriv ? 14729 expected_err = unpriv && test->errstr_unpriv ?
14443 test->errstr_unpriv : test->errstr; 14730 test->errstr_unpriv : test->errstr;
14444 expected_val = unpriv && test->retval_unpriv ?
14445 test->retval_unpriv : test->retval;
14446 14731
14447 alignment_prevented_execution = 0; 14732 alignment_prevented_execution = 0;
14448 14733
@@ -14454,10 +14739,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
14454 } 14739 }
14455#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 14740#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14456 if (fd_prog >= 0 && 14741 if (fd_prog >= 0 &&
14457 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) { 14742 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
14458 alignment_prevented_execution = 1; 14743 alignment_prevented_execution = 1;
14459 goto test_ok;
14460 }
14461#endif 14744#endif
14462 } else { 14745 } else {
14463 if (fd_prog >= 0) { 14746 if (fd_prog >= 0) {
@@ -14471,33 +14754,67 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
14471 } 14754 }
14472 } 14755 }
14473 14756
14474 if (fd_prog >= 0) { 14757 if (test->insn_processed) {
14475 __u8 tmp[TEST_DATA_LEN << 2]; 14758 uint32_t insn_processed;
14476 __u32 size_tmp = sizeof(tmp); 14759 char *proc;
14477 14760
14478 if (unpriv) 14761 proc = strstr(bpf_vlog, "processed ");
14479 set_admin(true); 14762 insn_processed = atoi(proc + 10);
14480 err = bpf_prog_test_run(fd_prog, 1, test->data, 14763 if (test->insn_processed != insn_processed) {
14481 sizeof(test->data), tmp, &size_tmp, 14764 printf("FAIL\nUnexpected insn_processed %u vs %u\n",
14482 &retval, NULL); 14765 insn_processed, test->insn_processed);
14483 if (unpriv)
14484 set_admin(false);
14485 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
14486 printf("Unexpected bpf_prog_test_run error\n");
14487 goto fail_log; 14766 goto fail_log;
14488 } 14767 }
14489 if (!err && retval != expected_val && 14768 }
14490 expected_val != POINTER_VALUE) { 14769
14491 printf("FAIL retval %d != %d\n", retval, expected_val); 14770 run_errs = 0;
14492 goto fail_log; 14771 run_successes = 0;
14772 if (!alignment_prevented_execution && fd_prog >= 0) {
14773 uint32_t expected_val;
14774 int i;
14775
14776 if (!test->runs) {
14777 expected_val = unpriv && test->retval_unpriv ?
14778 test->retval_unpriv : test->retval;
14779
14780 err = do_prog_test_run(fd_prog, unpriv, expected_val,
14781 test->data, sizeof(test->data));
14782 if (err)
14783 run_errs++;
14784 else
14785 run_successes++;
14786 }
14787
14788 for (i = 0; i < test->runs; i++) {
14789 if (unpriv && test->retvals[i].retval_unpriv)
14790 expected_val = test->retvals[i].retval_unpriv;
14791 else
14792 expected_val = test->retvals[i].retval;
14793
14794 err = do_prog_test_run(fd_prog, unpriv, expected_val,
14795 test->retvals[i].data,
14796 sizeof(test->retvals[i].data));
14797 if (err) {
14798 printf("(run %d/%d) ", i + 1, test->runs);
14799 run_errs++;
14800 } else {
14801 run_successes++;
14802 }
14493 } 14803 }
14494 } 14804 }
14495#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 14805
14496test_ok: 14806 if (!run_errs) {
14497#endif 14807 (*passes)++;
14498 (*passes)++; 14808 if (run_successes > 1)
14499 printf("OK%s\n", alignment_prevented_execution ? 14809 printf("%d cases ", run_successes);
14500 " (NOTE: not executed due to unknown alignment)" : ""); 14810 printf("OK");
14811 if (alignment_prevented_execution)
14812 printf(" (NOTE: not executed due to unknown alignment)");
14813 printf("\n");
14814 } else {
14815 printf("\n");
14816 goto fail_log;
14817 }
14501close_fds: 14818close_fds:
14502 close(fd_prog); 14819 close(fd_prog);
14503 for (i = 0; i < MAX_NR_MAPS; i++) 14820 for (i = 0; i < MAX_NR_MAPS; i++)