Diffstat (limited to 'kernel/bpf/btf.c')
-rw-r--r--  kernel/bpf/btf.c  2064
1 file changed, 2064 insertions, 0 deletions
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
new file mode 100644
index 000000000000..eb56ac760547
--- /dev/null
+++ b/kernel/bpf/btf.c
@@ -0,0 +1,2064 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018 Facebook */
3
4#include <uapi/linux/btf.h>
5#include <uapi/linux/types.h>
6#include <linux/seq_file.h>
7#include <linux/compiler.h>
8#include <linux/errno.h>
9#include <linux/slab.h>
10#include <linux/anon_inodes.h>
11#include <linux/file.h>
12#include <linux/uaccess.h>
13#include <linux/kernel.h>
14#include <linux/bpf_verifier.h>
15#include <linux/btf.h>
16
17/* BTF (BPF Type Format) is the metadata format which describes
18 * the data types of BPF programs/maps. Hence, it basically focuses
19 * on the C programming language, which modern BPF is
20 * primarily using.
21 *
22 * ELF Section:
23 * ~~~~~~~~~~~
24 * The BTF data is stored under the ".BTF" ELF section
25 *
26 * struct btf_type:
27 * ~~~~~~~~~~~~~~~
28 * Each 'struct btf_type' object describes a C data type.
29 * Depending on the type it is describing, a 'struct btf_type'
30 * object may be followed by more data. For example,
31 * to describe an array, 'struct btf_type' is followed by
32 * 'struct btf_array'.
33 *
34 * 'struct btf_type' and any extra data following it are
35 * 4 bytes aligned.
36 *
37 * Type section:
38 * ~~~~~~~~~~~~~
39 * The BTF type section contains a list of 'struct btf_type' objects.
40 * Each one describes a C type. Recall from the above section
41 * that a 'struct btf_type' object could be immediately followed by extra
42 * data in order to describe some particular C types.
43 *
44 * type_id:
45 * ~~~~~~~
46 * Each btf_type object is identified by a type_id. The type_id
47 * is implied by the location of the btf_type object in
48 * the BTF type section. The first one has type_id 1. The second
49 * one has type_id 2...etc. Hence, an earlier btf_type has
50 * a smaller type_id.
51 *
52 * A btf_type object may refer to another btf_type object by using
53 * type_id (i.e. the "type" in the "struct btf_type").
54 *
55 * NOTE that we cannot assume any reference-order.
56 * A btf_type object can refer to an earlier btf_type object
57 * but it can also refer to a later btf_type object.
58 *
59 * For example, to describe "const void *", a btf_type
60 * object describing "const" may refer to another btf_type
61 * object describing "void *". This type-reference is done
62 * by specifying type_id:
63 *
64 * [1] CONST (anon) type_id=2
65 * [2] PTR (anon) type_id=0
66 *
67 * The above is the btf_verifier debug log:
67 * - Each line starting with "[?]" is a btf_type object
69 * - [?] is the type_id of the btf_type object.
70 * - CONST/PTR is the BTF_KIND_XXX
71 * - "(anon)" is the name of the type. It just
72 * happens that CONST and PTR have no name.
73 * - type_id=XXX is the 'u32 type' in btf_type
74 *
75 * NOTE: "void" has type_id 0
76 *
77 * String section:
78 * ~~~~~~~~~~~~~~
79 * The BTF string section contains the names used by the type section.
80 * Each string is referred to by an "offset" from the beginning of the
81 * string section.
82 *
83 * Each string is '\0' terminated.
84 *
85 * The first character in the string section must be '\0'
86 * which is used to mean 'anonymous'. Some btf_type may not
87 * have a name.
88 */
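
/* For example, the following C type
 *
 *     struct A {
 *             int m;
 *             struct A *a;
 *     };
 *
 * could be described by three btf_type objects (one possible
 * encoding, assuming a 64-bit target), shown here in the same
 * debug-log notation as above:
 *
 * [1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 * [2] STRUCT A size=16 vlen=2
 *         m type_id=1 bits_offset=0
 *         a type_id=3 bits_offset=64
 * [3] PTR (anon) type_id=2
 */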
89
90/* BTF verification:
91 *
92 * To verify BTF data, two passes are needed.
93 *
94 * Pass #1
95 * ~~~~~~~
96 * The first pass is to collect all btf_type objects to
97 * an array: "btf->types".
98 *
99 * Depending on the C type that a btf_type is describing,
100 * a btf_type may be followed by extra data. We don't know
101 * how many btf_types there are, and more importantly we don't
102 * know where each btf_type is located in the type section.
103 *
104 * Without knowing the location of each type_id, most verifications
105 * cannot be done. e.g. an earlier btf_type may refer to a later
106 * btf_type (recall the "const void *" above), so we cannot
107 * check this type-reference in the first pass.
108 *
109 * The first pass still does some verification (e.g. checking
110 * that the name is a valid offset into the string section).
111 *
112 * Pass #2
113 * ~~~~~~~
114 * The main focus is to resolve a btf_type that is referring
115 * to another type.
116 *
117 * We have to ensure that the type being referred to:
118 * 1) does exist in the BTF (i.e. in btf->types[])
119 * 2) does not cause a loop:
120 * struct A {
121 * struct B b;
122 * };
123 *
124 * struct B {
125 * struct A a;
126 * };
127 *
128 * btf_type_needs_resolve() decides if a btf_type needs
129 * to be resolved.
130 *
131 * The needs_resolve type implements the "resolve()" ops which
132 * essentially does a DFS and detects backedges.
133 *
134 * During resolve (or DFS), different C types have different
135 * "RESOLVED" conditions.
136 *
137 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
138 * members because a member always refers to another
139 * type. A struct member can be treated as "RESOLVED" if
140 * it refers to a BTF_KIND_PTR. Otherwise, the
141 * following valid C struct would be rejected:
142 *
143 * struct A {
144 * int m;
145 * struct A *a;
146 * };
147 *
148 * When resolving a BTF_KIND_PTR, we need to keep resolving if
149 * it refers to another BTF_KIND_PTR. Otherwise, we cannot
150 * detect a pointer loop, e.g.:
151 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
152 * ^ |
153 * +-----------------------------------------+
154 *
155 */
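
/* As an illustration, the struct A/struct B loop above would be
 * dumped by the CHECK_META phase roughly as (the sizes are made up,
 * the BTF is invalid either way)
 *
 * [1] STRUCT A size=4 vlen=1
 *         b type_id=2 bits_offset=0
 * [2] STRUCT B size=4 vlen=1
 *         a type_id=1 bits_offset=0
 *
 * and the resolve pass then rejects it with "Loop detected":
 * the DFS started from [1] reaches [1] again through [2].
 */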
156
157#define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
158#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
159#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
160#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
161#define BITS_ROUNDUP_BYTES(bits) \
162 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
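
/* e.g. for 12 bits: BITS_ROUNDDOWN_BYTES(12) == 1 and
 * BITS_PER_BYTE_MASKED(12) == 4, hence BITS_ROUNDUP_BYTES(12) == 2,
 * while BITS_ROUNDUP_BYTES(16) == 2 exactly (no partial byte).
 */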
163
164/* 16MB is enough for 64k structs, each with 16 members, and
165 * a few MB of space for the string section.
166 * The hard limit is S32_MAX.
167 */
168#define BTF_MAX_SIZE (16 * 1024 * 1024)
169/* 64k. We can raise it later. The hard limit is S32_MAX. */
170#define BTF_MAX_NR_TYPES 65535
171
172#define for_each_member(i, struct_type, member) \
173 for (i = 0, member = btf_type_member(struct_type); \
174 i < btf_type_vlen(struct_type); \
175 i++, member++)
176
177#define for_each_member_from(i, from, struct_type, member) \
178 for (i = from, member = btf_type_member(struct_type) + from; \
179 i < btf_type_vlen(struct_type); \
180 i++, member++)
181
182struct btf {
183 union {
184 struct btf_header *hdr;
185 void *data;
186 };
187 struct btf_type **types;
188 u32 *resolved_ids;
189 u32 *resolved_sizes;
190 const char *strings;
191 void *nohdr_data;
192 u32 nr_types;
193 u32 types_size;
194 u32 data_size;
195 refcount_t refcnt;
196};
197
198enum verifier_phase {
199 CHECK_META,
200 CHECK_TYPE,
201};
202
203struct resolve_vertex {
204 const struct btf_type *t;
205 u32 type_id;
206 u16 next_member;
207};
208
209enum visit_state {
210 NOT_VISITED,
211 VISITED,
212 RESOLVED,
213};
214
215enum resolve_mode {
216 RESOLVE_TBD, /* To Be Determined */
217 RESOLVE_PTR, /* Resolving for Pointer */
218 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
219 * or array
220 */
221};
222
223#define MAX_RESOLVE_DEPTH 32
224
225struct btf_verifier_env {
226 struct btf *btf;
227 u8 *visit_states;
228 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
229 struct bpf_verifier_log log;
230 u32 log_type_id;
231 u32 top_stack;
232 enum verifier_phase phase;
233 enum resolve_mode resolve_mode;
234};
235
236static const char * const btf_kind_str[NR_BTF_KINDS] = {
237 [BTF_KIND_UNKN] = "UNKNOWN",
238 [BTF_KIND_INT] = "INT",
239 [BTF_KIND_PTR] = "PTR",
240 [BTF_KIND_ARRAY] = "ARRAY",
241 [BTF_KIND_STRUCT] = "STRUCT",
242 [BTF_KIND_UNION] = "UNION",
243 [BTF_KIND_ENUM] = "ENUM",
244 [BTF_KIND_FWD] = "FWD",
245 [BTF_KIND_TYPEDEF] = "TYPEDEF",
246 [BTF_KIND_VOLATILE] = "VOLATILE",
247 [BTF_KIND_CONST] = "CONST",
248 [BTF_KIND_RESTRICT] = "RESTRICT",
249};
250
251struct btf_kind_operations {
252 s32 (*check_meta)(struct btf_verifier_env *env,
253 const struct btf_type *t,
254 u32 meta_left);
255 int (*resolve)(struct btf_verifier_env *env,
256 const struct resolve_vertex *v);
257 int (*check_member)(struct btf_verifier_env *env,
258 const struct btf_type *struct_type,
259 const struct btf_member *member,
260 const struct btf_type *member_type);
261 void (*log_details)(struct btf_verifier_env *env,
262 const struct btf_type *t);
263 void (*seq_show)(const struct btf *btf, const struct btf_type *t,
264 u32 type_id, void *data, u8 bits_offsets,
265 struct seq_file *m);
266};
267
268static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
269static struct btf_type btf_void;
270
271static bool btf_type_is_modifier(const struct btf_type *t)
272{
273 /* Some of them are not strictly C modifiers
274 * but they are grouped into the same bucket
275 * as far as BTF is concerned:
276 * A type (t) that refers to another
277 * type through t->type AND whose size cannot
278 * be determined without following t->type.
279 *
280 * ptr does not fall into this bucket
281 * because its size is always sizeof(void *).
282 */
283 switch (BTF_INFO_KIND(t->info)) {
284 case BTF_KIND_TYPEDEF:
285 case BTF_KIND_VOLATILE:
286 case BTF_KIND_CONST:
287 case BTF_KIND_RESTRICT:
288 return true;
289 }
290
291 return false;
292}
293
294static bool btf_type_is_void(const struct btf_type *t)
295{
296 /* void => no type and size info.
297 * Hence, FWD is also treated as void.
298 */
299 return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
300}
301
302static bool btf_type_is_void_or_null(const struct btf_type *t)
303{
304 return !t || btf_type_is_void(t);
305}
306
307/* union is only a special case of struct:
308 * all its offsetof(member) == 0
309 */
310static bool btf_type_is_struct(const struct btf_type *t)
311{
312 u8 kind = BTF_INFO_KIND(t->info);
313
314 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
315}
316
317static bool btf_type_is_array(const struct btf_type *t)
318{
319 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
320}
321
322static bool btf_type_is_ptr(const struct btf_type *t)
323{
324 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
325}
326
327static bool btf_type_is_int(const struct btf_type *t)
328{
329 return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
330}
331
332/* What types need to be resolved?
333 *
334 * btf_type_is_modifier() is an obvious one.
335 *
336 * btf_type_is_struct() because its member refers to
337 * another type (through member->type).
338 *
339 * btf_type_is_array() because its element (array->type)
340 * refers to another type. An array can be thought of as a
341 * special case of struct where the same member-type is
342 * repeated array->nelems times.
343 */
344static bool btf_type_needs_resolve(const struct btf_type *t)
345{
346 return btf_type_is_modifier(t) ||
347 btf_type_is_ptr(t) ||
348 btf_type_is_struct(t) ||
349 btf_type_is_array(t);
350}
351
352/* t->size can be used */
353static bool btf_type_has_size(const struct btf_type *t)
354{
355 switch (BTF_INFO_KIND(t->info)) {
356 case BTF_KIND_INT:
357 case BTF_KIND_STRUCT:
358 case BTF_KIND_UNION:
359 case BTF_KIND_ENUM:
360 return true;
361 }
362
363 return false;
364}
365
366static const char *btf_int_encoding_str(u8 encoding)
367{
368 if (encoding == 0)
369 return "(none)";
370 else if (encoding == BTF_INT_SIGNED)
371 return "SIGNED";
372 else if (encoding == BTF_INT_CHAR)
373 return "CHAR";
374 else if (encoding == BTF_INT_BOOL)
375 return "BOOL";
376 else if (encoding == BTF_INT_VARARGS)
377 return "VARARGS";
378 else
379 return "UNKN";
380}
381
382static u16 btf_type_vlen(const struct btf_type *t)
383{
384 return BTF_INFO_VLEN(t->info);
385}
386
387static u32 btf_type_int(const struct btf_type *t)
388{
389 return *(u32 *)(t + 1);
390}
391
392static const struct btf_array *btf_type_array(const struct btf_type *t)
393{
394 return (const struct btf_array *)(t + 1);
395}
396
397static const struct btf_member *btf_type_member(const struct btf_type *t)
398{
399 return (const struct btf_member *)(t + 1);
400}
401
402static const struct btf_enum *btf_type_enum(const struct btf_type *t)
403{
404 return (const struct btf_enum *)(t + 1);
405}
406
407static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
408{
409 return kind_ops[BTF_INFO_KIND(t->info)];
410}
411
412static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
413{
414 return !BTF_STR_TBL_ELF_ID(offset) &&
415 BTF_STR_OFFSET(offset) < btf->hdr->str_len;
416}
417
418static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
419{
420 if (!BTF_STR_OFFSET(offset))
421 return "(anon)";
422 else if (BTF_STR_OFFSET(offset) < btf->hdr->str_len)
423 return &btf->strings[BTF_STR_OFFSET(offset)];
424 else
425 return "(invalid-name-offset)";
426}
427
428static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
429{
430 if (type_id > btf->nr_types)
431 return NULL;
432
433 return btf->types[type_id];
434}
435
436__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
437 const char *fmt, ...)
438{
439 va_list args;
440
441 va_start(args, fmt);
442 bpf_verifier_vlog(log, fmt, args);
443 va_end(args);
444}
445
446__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
447 const char *fmt, ...)
448{
449 struct bpf_verifier_log *log = &env->log;
450 va_list args;
451
452 if (!bpf_verifier_log_needed(log))
453 return;
454
455 va_start(args, fmt);
456 bpf_verifier_vlog(log, fmt, args);
457 va_end(args);
458}
459
460__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
461 const struct btf_type *t,
462 bool log_details,
463 const char *fmt, ...)
464{
465 struct bpf_verifier_log *log = &env->log;
466 u8 kind = BTF_INFO_KIND(t->info);
467 struct btf *btf = env->btf;
468 va_list args;
469
470 if (!bpf_verifier_log_needed(log))
471 return;
472
473 __btf_verifier_log(log, "[%u] %s %s%s",
474 env->log_type_id,
475 btf_kind_str[kind],
476 btf_name_by_offset(btf, t->name),
477 log_details ? " " : "");
478
479 if (log_details)
480 btf_type_ops(t)->log_details(env, t);
481
482 if (fmt && *fmt) {
483 __btf_verifier_log(log, " ");
484 va_start(args, fmt);
485 bpf_verifier_vlog(log, fmt, args);
486 va_end(args);
487 }
488
489 __btf_verifier_log(log, "\n");
490}
491
492#define btf_verifier_log_type(env, t, ...) \
493 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
494#define btf_verifier_log_basic(env, t, ...) \
495 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
496
497__printf(4, 5)
498static void btf_verifier_log_member(struct btf_verifier_env *env,
499 const struct btf_type *struct_type,
500 const struct btf_member *member,
501 const char *fmt, ...)
502{
503 struct bpf_verifier_log *log = &env->log;
504 struct btf *btf = env->btf;
505 va_list args;
506
507 if (!bpf_verifier_log_needed(log))
508 return;
509
510 /* The CHECK_META phase already did a btf dump.
511 *
512 * If a member is logged again, it must have hit an error in
513 * parsing this member. It is useful to print out which
514 * struct this member belongs to.
515 */
516 if (env->phase != CHECK_META)
517 btf_verifier_log_type(env, struct_type, NULL);
518
519 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
520 btf_name_by_offset(btf, member->name),
521 member->type, member->offset);
522
523 if (fmt && *fmt) {
524 __btf_verifier_log(log, " ");
525 va_start(args, fmt);
526 bpf_verifier_vlog(log, fmt, args);
527 va_end(args);
528 }
529
530 __btf_verifier_log(log, "\n");
531}
532
533static void btf_verifier_log_hdr(struct btf_verifier_env *env)
534{
535 struct bpf_verifier_log *log = &env->log;
536 const struct btf *btf = env->btf;
537 const struct btf_header *hdr;
538
539 if (!bpf_verifier_log_needed(log))
540 return;
541
542 hdr = btf->hdr;
543 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
544 __btf_verifier_log(log, "version: %u\n", hdr->version);
545 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
546 __btf_verifier_log(log, "parent_label: %u\n", hdr->parent_label);
547 __btf_verifier_log(log, "parent_name: %u\n", hdr->parent_name);
548 __btf_verifier_log(log, "label_off: %u\n", hdr->label_off);
549 __btf_verifier_log(log, "object_off: %u\n", hdr->object_off);
550 __btf_verifier_log(log, "func_off: %u\n", hdr->func_off);
551 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
552 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
553 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
554 __btf_verifier_log(log, "btf_total_size: %u\n", btf->data_size);
555}
556
557static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
558{
559 struct btf *btf = env->btf;
560
561 /* < 2 because +1 for btf_void which is always in btf->types[0].
562 * btf_void is not accounted for in btf->nr_types because btf_void
563 * does not come from the BTF file.
564 */
565 if (btf->types_size - btf->nr_types < 2) {
566 /* Expand 'types' array */
567
568 struct btf_type **new_types;
569 u32 expand_by, new_size;
570
571 if (btf->types_size == BTF_MAX_NR_TYPES) {
572 btf_verifier_log(env, "Exceeded max num of types");
573 return -E2BIG;
574 }
575
576 expand_by = max_t(u32, btf->types_size >> 2, 16);
577 new_size = min_t(u32, BTF_MAX_NR_TYPES,
578 btf->types_size + expand_by);
579
580 new_types = kvzalloc(new_size * sizeof(*new_types),
581 GFP_KERNEL | __GFP_NOWARN);
582 if (!new_types)
583 return -ENOMEM;
584
585 if (btf->nr_types == 0)
586 new_types[0] = &btf_void;
587 else
588 memcpy(new_types, btf->types,
589 sizeof(*btf->types) * (btf->nr_types + 1));
590
591 kvfree(btf->types);
592 btf->types = new_types;
593 btf->types_size = new_size;
594 }
595
596 btf->types[++(btf->nr_types)] = t;
597
598 return 0;
599}
600
601static void btf_free(struct btf *btf)
602{
603 kvfree(btf->types);
604 kvfree(btf->resolved_sizes);
605 kvfree(btf->resolved_ids);
606 kvfree(btf->data);
607 kfree(btf);
608}
609
610static void btf_get(struct btf *btf)
611{
612 refcount_inc(&btf->refcnt);
613}
614
615void btf_put(struct btf *btf)
616{
617 if (btf && refcount_dec_and_test(&btf->refcnt))
618 btf_free(btf);
619}
620
621static int env_resolve_init(struct btf_verifier_env *env)
622{
623 struct btf *btf = env->btf;
624 u32 nr_types = btf->nr_types;
625 u32 *resolved_sizes = NULL;
626 u32 *resolved_ids = NULL;
627 u8 *visit_states = NULL;
628
629 /* +1 for btf_void */
630 resolved_sizes = kvzalloc((nr_types + 1) * sizeof(*resolved_sizes),
631 GFP_KERNEL | __GFP_NOWARN);
632 if (!resolved_sizes)
633 goto nomem;
634
635 resolved_ids = kvzalloc((nr_types + 1) * sizeof(*resolved_ids),
636 GFP_KERNEL | __GFP_NOWARN);
637 if (!resolved_ids)
638 goto nomem;
639
640 visit_states = kvzalloc((nr_types + 1) * sizeof(*visit_states),
641 GFP_KERNEL | __GFP_NOWARN);
642 if (!visit_states)
643 goto nomem;
644
645 btf->resolved_sizes = resolved_sizes;
646 btf->resolved_ids = resolved_ids;
647 env->visit_states = visit_states;
648
649 return 0;
650
651nomem:
652 kvfree(resolved_sizes);
653 kvfree(resolved_ids);
654 kvfree(visit_states);
655 return -ENOMEM;
656}
657
658static void btf_verifier_env_free(struct btf_verifier_env *env)
659{
660 kvfree(env->visit_states);
661 kfree(env);
662}
663
664static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
665 const struct btf_type *next_type)
666{
667 switch (env->resolve_mode) {
668 case RESOLVE_TBD:
669 /* int, enum or void is a sink */
670 return !btf_type_needs_resolve(next_type);
671 case RESOLVE_PTR:
672 /* int, enum, void, struct or array is a sink for ptr */
673 return !btf_type_is_modifier(next_type) &&
674 !btf_type_is_ptr(next_type);
675 case RESOLVE_STRUCT_OR_ARRAY:
676 /* int, enum, void or ptr is a sink for struct and array */
677 return !btf_type_is_modifier(next_type) &&
678 !btf_type_is_array(next_type) &&
679 !btf_type_is_struct(next_type);
680 default:
681 BUG_ON(1);
682 }
683}
684
685static bool env_type_is_resolved(const struct btf_verifier_env *env,
686 u32 type_id)
687{
688 return env->visit_states[type_id] == RESOLVED;
689}
690
691static int env_stack_push(struct btf_verifier_env *env,
692 const struct btf_type *t, u32 type_id)
693{
694 struct resolve_vertex *v;
695
696 if (env->top_stack == MAX_RESOLVE_DEPTH)
697 return -E2BIG;
698
699 if (env->visit_states[type_id] != NOT_VISITED)
700 return -EEXIST;
701
702 env->visit_states[type_id] = VISITED;
703
704 v = &env->stack[env->top_stack++];
705 v->t = t;
706 v->type_id = type_id;
707 v->next_member = 0;
708
709 if (env->resolve_mode == RESOLVE_TBD) {
710 if (btf_type_is_ptr(t))
711 env->resolve_mode = RESOLVE_PTR;
712 else if (btf_type_is_struct(t) || btf_type_is_array(t))
713 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
714 }
715
716 return 0;
717}
718
719static void env_stack_set_next_member(struct btf_verifier_env *env,
720 u16 next_member)
721{
722 env->stack[env->top_stack - 1].next_member = next_member;
723}
724
725static void env_stack_pop_resolved(struct btf_verifier_env *env,
726 u32 resolved_type_id,
727 u32 resolved_size)
728{
729 u32 type_id = env->stack[--(env->top_stack)].type_id;
730 struct btf *btf = env->btf;
731
732 btf->resolved_sizes[type_id] = resolved_size;
733 btf->resolved_ids[type_id] = resolved_type_id;
734 env->visit_states[type_id] = RESOLVED;
735}
736
737static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
738{
739 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
740}
741
742/* The input param "type_id" must point to a needs_resolve type */
743static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
744 u32 *type_id)
745{
746 *type_id = btf->resolved_ids[*type_id];
747 return btf_type_by_id(btf, *type_id);
748}
749
750const struct btf_type *btf_type_id_size(const struct btf *btf,
751 u32 *type_id, u32 *ret_size)
752{
753 const struct btf_type *size_type;
754 u32 size_type_id = *type_id;
755 u32 size = 0;
756
757 size_type = btf_type_by_id(btf, size_type_id);
758 if (btf_type_is_void_or_null(size_type))
759 return NULL;
760
761 if (btf_type_has_size(size_type)) {
762 size = size_type->size;
763 } else if (btf_type_is_array(size_type)) {
764 size = btf->resolved_sizes[size_type_id];
765 } else if (btf_type_is_ptr(size_type)) {
766 size = sizeof(void *);
767 } else {
768 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
769 return NULL;
770
771 size = btf->resolved_sizes[size_type_id];
772 size_type_id = btf->resolved_ids[size_type_id];
773 size_type = btf_type_by_id(btf, size_type_id);
774 if (btf_type_is_void(size_type))
775 return NULL;
776 }
777
778 *type_id = size_type_id;
779 if (ret_size)
780 *ret_size = size;
781
782 return size_type;
783}
784
785static int btf_df_check_member(struct btf_verifier_env *env,
786 const struct btf_type *struct_type,
787 const struct btf_member *member,
788 const struct btf_type *member_type)
789{
790 btf_verifier_log_basic(env, struct_type,
791 "Unsupported check_member");
792 return -EINVAL;
793}
794
795static int btf_df_resolve(struct btf_verifier_env *env,
796 const struct resolve_vertex *v)
797{
798 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
799 return -EINVAL;
800}
801
802static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
803 u32 type_id, void *data, u8 bits_offsets,
804 struct seq_file *m)
805{
806 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
807}
808
809static int btf_int_check_member(struct btf_verifier_env *env,
810 const struct btf_type *struct_type,
811 const struct btf_member *member,
812 const struct btf_type *member_type)
813{
814 u32 int_data = btf_type_int(member_type);
815 u32 struct_bits_off = member->offset;
816 u32 struct_size = struct_type->size;
817 u32 nr_copy_bits;
818 u32 bytes_offset;
819
820 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
821 btf_verifier_log_member(env, struct_type, member,
822 "bits_offset exceeds U32_MAX");
823 return -EINVAL;
824 }
825
826 struct_bits_off += BTF_INT_OFFSET(int_data);
827 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
828 nr_copy_bits = BTF_INT_BITS(int_data) +
829 BITS_PER_BYTE_MASKED(struct_bits_off);
830
831 if (nr_copy_bits > BITS_PER_U64) {
832 btf_verifier_log_member(env, struct_type, member,
833 "nr_copy_bits exceeds 64");
834 return -EINVAL;
835 }
836
837 if (struct_size < bytes_offset ||
838 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
839 btf_verifier_log_member(env, struct_type, member,
840 "Member exceeds struct_size");
841 return -EINVAL;
842 }
843
844 return 0;
845}
846
847static s32 btf_int_check_meta(struct btf_verifier_env *env,
848 const struct btf_type *t,
849 u32 meta_left)
850{
851 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
852 u16 encoding;
853
854 if (meta_left < meta_needed) {
855 btf_verifier_log_basic(env, t,
856 "meta_left:%u meta_needed:%u",
857 meta_left, meta_needed);
858 return -EINVAL;
859 }
860
861 if (btf_type_vlen(t)) {
862 btf_verifier_log_type(env, t, "vlen != 0");
863 return -EINVAL;
864 }
865
866 int_data = btf_type_int(t);
867 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
868
869 if (nr_bits > BITS_PER_U64) {
870 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
871 BITS_PER_U64);
872 return -EINVAL;
873 }
874
875 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
876 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
877 return -EINVAL;
878 }
879
880 encoding = BTF_INT_ENCODING(int_data);
881 if (encoding &&
882 encoding != BTF_INT_SIGNED &&
883 encoding != BTF_INT_CHAR &&
884 encoding != BTF_INT_BOOL &&
885 encoding != BTF_INT_VARARGS) {
886 btf_verifier_log_type(env, t, "Unsupported encoding");
887 return -ENOTSUPP;
888 }
889
890 btf_verifier_log_type(env, t, NULL);
891
892 return meta_needed;
893}
894
895static void btf_int_log(struct btf_verifier_env *env,
896 const struct btf_type *t)
897{
898 int int_data = btf_type_int(t);
899
900 btf_verifier_log(env,
901 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
902 t->size, BTF_INT_OFFSET(int_data),
903 BTF_INT_BITS(int_data),
904 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
905}
906
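/* A worked example of the bit extraction done below: a 3-bit value
 * at bits_offset 2 within its first byte copies nr_copy_bytes == 1
 * byte into print_num, masks it with (1 << 5) - 1 == 0x1f to drop
 * the three unrelated top bits, then shifts right by 2 so the value
 * starts at bit 0.
 */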
907static void btf_int_bits_seq_show(const struct btf *btf,
908 const struct btf_type *t,
909 void *data, u8 bits_offset,
910 struct seq_file *m)
911{
912 u32 int_data = btf_type_int(t);
913 u16 nr_bits = BTF_INT_BITS(int_data);
914 u16 total_bits_offset;
915 u16 nr_copy_bytes;
916 u16 nr_copy_bits;
917 u8 nr_upper_bits;
918 union {
919 u64 u64_num;
920 u8 u8_nums[8];
921 } print_num;
922
923 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
924 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
925 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
926 nr_copy_bits = nr_bits + bits_offset;
927 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
928
929 print_num.u64_num = 0;
930 memcpy(&print_num.u64_num, data, nr_copy_bytes);
931
932 /* Ditch the higher order bits */
933 nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
934 if (nr_upper_bits) {
935 /* We need to mask out some bits of the upper byte. */
936 u8 mask = (1 << nr_upper_bits) - 1;
937
938 print_num.u8_nums[nr_copy_bytes - 1] &= mask;
939 }
940
941 print_num.u64_num >>= bits_offset;
942
943 seq_printf(m, "0x%llx", print_num.u64_num);
944}
945
946static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
947 u32 type_id, void *data, u8 bits_offset,
948 struct seq_file *m)
949{
950 u32 int_data = btf_type_int(t);
951 u8 encoding = BTF_INT_ENCODING(int_data);
952 bool sign = encoding & BTF_INT_SIGNED;
953 u32 nr_bits = BTF_INT_BITS(int_data);
954
955 if (bits_offset || BTF_INT_OFFSET(int_data) ||
956 BITS_PER_BYTE_MASKED(nr_bits)) {
957 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
958 return;
959 }
960
961 switch (nr_bits) {
962 case 64:
963 if (sign)
964 seq_printf(m, "%lld", *(s64 *)data);
965 else
966 seq_printf(m, "%llu", *(u64 *)data);
967 break;
968 case 32:
969 if (sign)
970 seq_printf(m, "%d", *(s32 *)data);
971 else
972 seq_printf(m, "%u", *(u32 *)data);
973 break;
974 case 16:
975 if (sign)
976 seq_printf(m, "%d", *(s16 *)data);
977 else
978 seq_printf(m, "%u", *(u16 *)data);
979 break;
980 case 8:
981 if (sign)
982 seq_printf(m, "%d", *(s8 *)data);
983 else
984 seq_printf(m, "%u", *(u8 *)data);
985 break;
986 default:
987 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
988 }
989}
990
991static const struct btf_kind_operations int_ops = {
992 .check_meta = btf_int_check_meta,
993 .resolve = btf_df_resolve,
994 .check_member = btf_int_check_member,
995 .log_details = btf_int_log,
996 .seq_show = btf_int_seq_show,
997};
998
999static int btf_modifier_check_member(struct btf_verifier_env *env,
1000 const struct btf_type *struct_type,
1001 const struct btf_member *member,
1002 const struct btf_type *member_type)
1003{
1004 const struct btf_type *resolved_type;
1005 u32 resolved_type_id = member->type;
1006 struct btf_member resolved_member;
1007 struct btf *btf = env->btf;
1008
1009 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1010 if (!resolved_type) {
1011 btf_verifier_log_member(env, struct_type, member,
1012 "Invalid member");
1013 return -EINVAL;
1014 }
1015
1016 resolved_member = *member;
1017 resolved_member.type = resolved_type_id;
1018
1019 return btf_type_ops(resolved_type)->check_member(env, struct_type,
1020 &resolved_member,
1021 resolved_type);
1022}
1023
1024static int btf_ptr_check_member(struct btf_verifier_env *env,
1025 const struct btf_type *struct_type,
1026 const struct btf_member *member,
1027 const struct btf_type *member_type)
1028{
1029 u32 struct_size, struct_bits_off, bytes_offset;
1030
1031 struct_size = struct_type->size;
1032 struct_bits_off = member->offset;
1033 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1034
1035 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1036 btf_verifier_log_member(env, struct_type, member,
1037 "Member is not byte aligned");
1038 return -EINVAL;
1039 }
1040
1041 if (struct_size - bytes_offset < sizeof(void *)) {
1042 btf_verifier_log_member(env, struct_type, member,
1043 "Member exceeds struct_size");
1044 return -EINVAL;
1045 }
1046
1047 return 0;
1048}
1049
1050static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1051 const struct btf_type *t,
1052 u32 meta_left)
1053{
1054 if (btf_type_vlen(t)) {
1055 btf_verifier_log_type(env, t, "vlen != 0");
1056 return -EINVAL;
1057 }
1058
1059 if (BTF_TYPE_PARENT(t->type)) {
1060 btf_verifier_log_type(env, t, "Invalid type_id");
1061 return -EINVAL;
1062 }
1063
1064 btf_verifier_log_type(env, t, NULL);
1065
1066 return 0;
1067}
1068
1069static int btf_modifier_resolve(struct btf_verifier_env *env,
1070 const struct resolve_vertex *v)
1071{
1072 const struct btf_type *t = v->t;
1073 const struct btf_type *next_type;
1074 u32 next_type_id = t->type;
1075 struct btf *btf = env->btf;
1076 u32 next_type_size = 0;
1077
1078 next_type = btf_type_by_id(btf, next_type_id);
1079 if (!next_type) {
1080 btf_verifier_log_type(env, v->t, "Invalid type_id");
1081 return -EINVAL;
1082 }
1083
1084 /* "typedef void new_void", "const void"...etc */
1085 if (btf_type_is_void(next_type))
1086 goto resolved;
1087
1088 if (!env_type_is_resolve_sink(env, next_type) &&
1089 !env_type_is_resolved(env, next_type_id))
1090 return env_stack_push(env, next_type, next_type_id);
1091
1092 /* Figure out the resolved next_type_id with size.
1093 * They will be stored in the current modifier's
1094 * resolved_ids and resolved_sizes such that it can
1095 * save us a few type-following hops when we use it later
1096 * (e.g. in pretty printing).
1097 */
1098 if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
1099 !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
1100 btf_verifier_log_type(env, v->t, "Invalid type_id");
1101 return -EINVAL;
1102 }
1103
1104resolved:
1105 env_stack_pop_resolved(env, next_type_id, next_type_size);
1106
1107 return 0;
1108}
1109
1110static int btf_ptr_resolve(struct btf_verifier_env *env,
1111 const struct resolve_vertex *v)
1112{
1113 const struct btf_type *next_type;
1114 const struct btf_type *t = v->t;
1115 u32 next_type_id = t->type;
1116 struct btf *btf = env->btf;
1117 u32 next_type_size = 0;
1118
1119 next_type = btf_type_by_id(btf, next_type_id);
1120 if (!next_type) {
1121 btf_verifier_log_type(env, v->t, "Invalid type_id");
1122 return -EINVAL;
1123 }
1124
1125 /* "void *" */
1126 if (btf_type_is_void(next_type))
1127 goto resolved;
1128
1129 if (!env_type_is_resolve_sink(env, next_type) &&
1130 !env_type_is_resolved(env, next_type_id))
1131 return env_stack_push(env, next_type, next_type_id);
1132
1133 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1134 * the modifier may have stopped resolving when it was resolved
1135 * to a ptr (last-resolved-ptr).
1136 *
1137 * We now need to continue from the last-resolved-ptr to
1138 * ensure the last-resolved-ptr does not refer back to
1139 * the current ptr (t).
1140 */
1141 if (btf_type_is_modifier(next_type)) {
1142 const struct btf_type *resolved_type;
1143 u32 resolved_type_id;
1144
1145 resolved_type_id = next_type_id;
1146 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1147
1148 if (btf_type_is_ptr(resolved_type) &&
1149 !env_type_is_resolve_sink(env, resolved_type) &&
1150 !env_type_is_resolved(env, resolved_type_id))
1151 return env_stack_push(env, resolved_type,
1152 resolved_type_id);
1153 }
1154
1155 if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
1156 !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
1157 btf_verifier_log_type(env, v->t, "Invalid type_id");
1158 return -EINVAL;
1159 }
1160
1161resolved:
1162 env_stack_pop_resolved(env, next_type_id, 0);
1163
1164 return 0;
1165}
1166
1167static void btf_modifier_seq_show(const struct btf *btf,
1168 const struct btf_type *t,
1169 u32 type_id, void *data,
1170 u8 bits_offset, struct seq_file *m)
1171{
1172 t = btf_type_id_resolve(btf, &type_id);
1173
1174 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1175}
1176
1177static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1178 u32 type_id, void *data, u8 bits_offset,
1179 struct seq_file *m)
1180{
1181 /* It is a hashed value */
1182 seq_printf(m, "%p", *(void **)data);
1183}
1184
1185static void btf_ref_type_log(struct btf_verifier_env *env,
1186 const struct btf_type *t)
1187{
1188 btf_verifier_log(env, "type_id=%u", t->type);
1189}
1190
1191static struct btf_kind_operations modifier_ops = {
1192 .check_meta = btf_ref_type_check_meta,
1193 .resolve = btf_modifier_resolve,
1194 .check_member = btf_modifier_check_member,
1195 .log_details = btf_ref_type_log,
1196 .seq_show = btf_modifier_seq_show,
1197};
1198
1199static struct btf_kind_operations ptr_ops = {
1200 .check_meta = btf_ref_type_check_meta,
1201 .resolve = btf_ptr_resolve,
1202 .check_member = btf_ptr_check_member,
1203 .log_details = btf_ref_type_log,
1204 .seq_show = btf_ptr_seq_show,
1205};
1206
1207static struct btf_kind_operations fwd_ops = {
1208 .check_meta = btf_ref_type_check_meta,
1209 .resolve = btf_df_resolve,
1210 .check_member = btf_df_check_member,
1211 .log_details = btf_ref_type_log,
1212 .seq_show = btf_df_seq_show,
1213};
1214
1215static int btf_array_check_member(struct btf_verifier_env *env,
1216 const struct btf_type *struct_type,
1217 const struct btf_member *member,
1218 const struct btf_type *member_type)
1219{
1220 u32 struct_bits_off = member->offset;
1221 u32 struct_size, bytes_offset;
1222 u32 array_type_id, array_size;
1223 struct btf *btf = env->btf;
1224
1225 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1226 btf_verifier_log_member(env, struct_type, member,
1227 "Member is not byte aligned");
1228 return -EINVAL;
1229 }
1230
1231 array_type_id = member->type;
1232 btf_type_id_size(btf, &array_type_id, &array_size);
1233 struct_size = struct_type->size;
1234 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1235 if (struct_size - bytes_offset < array_size) {
1236 btf_verifier_log_member(env, struct_type, member,
1237 "Member exceeds struct_size");
1238 return -EINVAL;
1239 }
1240
1241 return 0;
1242}
1243
1244static s32 btf_array_check_meta(struct btf_verifier_env *env,
1245 const struct btf_type *t,
1246 u32 meta_left)
1247{
1248 const struct btf_array *array = btf_type_array(t);
1249 u32 meta_needed = sizeof(*array);
1250
1251 if (meta_left < meta_needed) {
1252 btf_verifier_log_basic(env, t,
1253 "meta_left:%u meta_needed:%u",
1254 meta_left, meta_needed);
1255 return -EINVAL;
1256 }
1257
1258 if (btf_type_vlen(t)) {
1259 btf_verifier_log_type(env, t, "vlen != 0");
1260 return -EINVAL;
1261 }
1262
1263 /* We are a little forgiving on array->index_type since
1264 * the kernel is not using it.
1265 */
1266 /* Array elem cannot be in type void,
1267 * so !array->type is not allowed.
1268 */
1269 if (!array->type || BTF_TYPE_PARENT(array->type)) {
1270 btf_verifier_log_type(env, t, "Invalid type_id");
1271 return -EINVAL;
1272 }
1273
1274 btf_verifier_log_type(env, t, NULL);
1275
1276 return meta_needed;
1277}
1278
1279static int btf_array_resolve(struct btf_verifier_env *env,
1280 const struct resolve_vertex *v)
1281{
1282 const struct btf_array *array = btf_type_array(v->t);
1283 const struct btf_type *elem_type;
1284 u32 elem_type_id = array->type;
1285 struct btf *btf = env->btf;
1286 u32 elem_size;
1287
1288 elem_type = btf_type_by_id(btf, elem_type_id);
1289 if (btf_type_is_void_or_null(elem_type)) {
1290 btf_verifier_log_type(env, v->t,
1291 "Invalid elem");
1292 return -EINVAL;
1293 }
1294
1295 if (!env_type_is_resolve_sink(env, elem_type) &&
1296 !env_type_is_resolved(env, elem_type_id))
1297 return env_stack_push(env, elem_type, elem_type_id);
1298
1299 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1300 if (!elem_type) {
1301 btf_verifier_log_type(env, v->t, "Invalid elem");
1302 return -EINVAL;
1303 }
1304
1305 if (btf_type_is_int(elem_type)) {
1306 int int_type_data = btf_type_int(elem_type);
1307 u16 nr_bits = BTF_INT_BITS(int_type_data);
1308 u16 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
1309
1310 /* Put more restrictions on arrays of int. The int cannot
1311 * be a bit field and it must be either u8/u16/u32/u64.
1312 */
1313 if (BITS_PER_BYTE_MASKED(nr_bits) ||
1314 BTF_INT_OFFSET(int_type_data) ||
1315 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
1316 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
1317 btf_verifier_log_type(env, v->t,
1318 "Invalid array of int");
1319 return -EINVAL;
1320 }
1321 }
1322
1323 if (array->nelems && elem_size > U32_MAX / array->nelems) {
1324 btf_verifier_log_type(env, v->t,
1325 "Array size overflows U32_MAX");
1326 return -EINVAL;
1327 }
1328
1329 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
1330
1331 return 0;
1332}
1333
1334static void btf_array_log(struct btf_verifier_env *env,
1335 const struct btf_type *t)
1336{
1337 const struct btf_array *array = btf_type_array(t);
1338
1339 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1340 array->type, array->index_type, array->nelems);
1341}
1342
1343static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1344 u32 type_id, void *data, u8 bits_offset,
1345 struct seq_file *m)
1346{
1347 const struct btf_array *array = btf_type_array(t);
1348 const struct btf_kind_operations *elem_ops;
1349 const struct btf_type *elem_type;
1350 u32 i, elem_size, elem_type_id;
1351
1352 elem_type_id = array->type;
1353 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1354 elem_ops = btf_type_ops(elem_type);
1355 seq_puts(m, "[");
1356 for (i = 0; i < array->nelems; i++) {
1357 if (i)
1358 seq_puts(m, ",");
1359
1360 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
1361 bits_offset, m);
1362 data += elem_size;
1363 }
1364 seq_puts(m, "]");
1365}
1366
1367static struct btf_kind_operations array_ops = {
1368 .check_meta = btf_array_check_meta,
1369 .resolve = btf_array_resolve,
1370 .check_member = btf_array_check_member,
1371 .log_details = btf_array_log,
1372 .seq_show = btf_array_seq_show,
1373};
1374
1375static int btf_struct_check_member(struct btf_verifier_env *env,
1376 const struct btf_type *struct_type,
1377 const struct btf_member *member,
1378 const struct btf_type *member_type)
1379{
1380 u32 struct_bits_off = member->offset;
1381 u32 struct_size, bytes_offset;
1382
1383 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1384 btf_verifier_log_member(env, struct_type, member,
1385 "Member is not byte aligned");
1386 return -EINVAL;
1387 }
1388
1389 struct_size = struct_type->size;
1390 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1391 if (struct_size - bytes_offset < member_type->size) {
1392 btf_verifier_log_member(env, struct_type, member,
1393 "Member exceeds struct_size");
1394 return -EINVAL;
1395 }
1396
1397 return 0;
1398}
1399
1400static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1401 const struct btf_type *t,
1402 u32 meta_left)
1403{
1404 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1405 const struct btf_member *member;
1406 struct btf *btf = env->btf;
1407 u32 struct_size = t->size;
1408 u32 meta_needed;
1409 u16 i;
1410
1411 meta_needed = btf_type_vlen(t) * sizeof(*member);
1412 if (meta_left < meta_needed) {
1413 btf_verifier_log_basic(env, t,
1414 "meta_left:%u meta_needed:%u",
1415 meta_left, meta_needed);
1416 return -EINVAL;
1417 }
1418
1419 btf_verifier_log_type(env, t, NULL);
1420
1421 for_each_member(i, t, member) {
1422 if (!btf_name_offset_valid(btf, member->name)) {
1423 btf_verifier_log_member(env, t, member,
1424 "Invalid member name_offset:%u",
1425 member->name);
1426 return -EINVAL;
1427 }
1428
1429 /* A member cannot be in type void */
1430 if (!member->type || BTF_TYPE_PARENT(member->type)) {
1431 btf_verifier_log_member(env, t, member,
1432 "Invalid type_id");
1433 return -EINVAL;
1434 }
1435
1436 if (is_union && member->offset) {
1437 btf_verifier_log_member(env, t, member,
1438 "Invalid member bits_offset");
1439 return -EINVAL;
1440 }
1441
1442 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
1443 btf_verifier_log_member(env, t, member,
1444 "Memmber bits_offset exceeds its struct size");
1445 return -EINVAL;
1446 }
1447
1448 btf_verifier_log_member(env, t, member, NULL);
1449 }
1450
1451 return meta_needed;
1452}
1453
1454static int btf_struct_resolve(struct btf_verifier_env *env,
1455 const struct resolve_vertex *v)
1456{
1457 const struct btf_member *member;
1458 int err;
1459 u16 i;
1460
1461 /* Before continuing to resolve the next_member,
1462 * ensure the last member is indeed resolved to a
1463 * type with size info.
1464 */
1465 if (v->next_member) {
1466 const struct btf_type *last_member_type;
1467 const struct btf_member *last_member;
1468 u16 last_member_type_id;
1469
1470 last_member = btf_type_member(v->t) + v->next_member - 1;
1471 last_member_type_id = last_member->type;
1472 if (WARN_ON_ONCE(!env_type_is_resolved(env,
1473 last_member_type_id)))
1474 return -EINVAL;
1475
1476 last_member_type = btf_type_by_id(env->btf,
1477 last_member_type_id);
1478 err = btf_type_ops(last_member_type)->check_member(env, v->t,
1479 last_member,
1480 last_member_type);
1481 if (err)
1482 return err;
1483 }
1484
1485 for_each_member_from(i, v->next_member, v->t, member) {
1486 u32 member_type_id = member->type;
1487 const struct btf_type *member_type = btf_type_by_id(env->btf,
1488 member_type_id);
1489
1490 if (btf_type_is_void_or_null(member_type)) {
1491 btf_verifier_log_member(env, v->t, member,
1492 "Invalid member");
1493 return -EINVAL;
1494 }
1495
1496 if (!env_type_is_resolve_sink(env, member_type) &&
1497 !env_type_is_resolved(env, member_type_id)) {
1498 env_stack_set_next_member(env, i + 1);
1499 return env_stack_push(env, member_type, member_type_id);
1500 }
1501
1502 err = btf_type_ops(member_type)->check_member(env, v->t,
1503 member,
1504 member_type);
1505 if (err)
1506 return err;
1507 }
1508
1509 env_stack_pop_resolved(env, 0, 0);
1510
1511 return 0;
1512}
1513
1514static void btf_struct_log(struct btf_verifier_env *env,
1515 const struct btf_type *t)
1516{
1517 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1518}
1519
1520static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
1521 u32 type_id, void *data, u8 bits_offset,
1522 struct seq_file *m)
1523{
1524 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
1525 const struct btf_member *member;
1526 u32 i;
1527
1528 seq_puts(m, "{");
1529 for_each_member(i, t, member) {
1530 const struct btf_type *member_type = btf_type_by_id(btf,
1531 member->type);
1532 u32 member_offset = member->offset;
1533 u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
1534 u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
1535 const struct btf_kind_operations *ops;
1536
1537 if (i)
1538 seq_puts(m, seq);
1539
1540 ops = btf_type_ops(member_type);
1541 ops->seq_show(btf, member_type, member->type,
1542 data + bytes_offset, bits8_offset, m);
1543 }
1544 seq_puts(m, "}");
1545}
1546
1547static struct btf_kind_operations struct_ops = {
1548 .check_meta = btf_struct_check_meta,
1549 .resolve = btf_struct_resolve,
1550 .check_member = btf_struct_check_member,
1551 .log_details = btf_struct_log,
1552 .seq_show = btf_struct_seq_show,
1553};
1554
1555static int btf_enum_check_member(struct btf_verifier_env *env,
1556 const struct btf_type *struct_type,
1557 const struct btf_member *member,
1558 const struct btf_type *member_type)
1559{
1560 u32 struct_bits_off = member->offset;
1561 u32 struct_size, bytes_offset;
1562
1563 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1564 btf_verifier_log_member(env, struct_type, member,
1565 "Member is not byte aligned");
1566 return -EINVAL;
1567 }
1568
1569 struct_size = struct_type->size;
1570 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1571 if (struct_size - bytes_offset < sizeof(int)) {
1572 btf_verifier_log_member(env, struct_type, member,
1573 "Member exceeds struct_size");
1574 return -EINVAL;
1575 }
1576
1577 return 0;
1578}
1579
1580static s32 btf_enum_check_meta(struct btf_verifier_env *env,
1581 const struct btf_type *t,
1582 u32 meta_left)
1583{
1584 const struct btf_enum *enums = btf_type_enum(t);
1585 struct btf *btf = env->btf;
1586 u16 i, nr_enums;
1587 u32 meta_needed;
1588
1589 nr_enums = btf_type_vlen(t);
1590 meta_needed = nr_enums * sizeof(*enums);
1591
1592 if (meta_left < meta_needed) {
1593 btf_verifier_log_basic(env, t,
1594 "meta_left:%u meta_needed:%u",
1595 meta_left, meta_needed);
1596 return -EINVAL;
1597 }
1598
1599 if (t->size != sizeof(int)) {
1600 btf_verifier_log_type(env, t, "Expected size:%zu",
1601 sizeof(int));
1602 return -EINVAL;
1603 }
1604
1605 btf_verifier_log_type(env, t, NULL);
1606
1607 for (i = 0; i < nr_enums; i++) {
1608 if (!btf_name_offset_valid(btf, enums[i].name)) {
1609 btf_verifier_log(env, "\tInvalid name_offset:%u",
1610 enums[i].name);
1611 return -EINVAL;
1612 }
1613
1614 btf_verifier_log(env, "\t%s val=%d\n",
1615 btf_name_by_offset(btf, enums[i].name),
1616 enums[i].val);
1617 }
1618
1619 return meta_needed;
1620}
1621
1622static void btf_enum_log(struct btf_verifier_env *env,
1623 const struct btf_type *t)
1624{
1625 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1626}
1627
1628static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
1629 u32 type_id, void *data, u8 bits_offset,
1630 struct seq_file *m)
1631{
1632 const struct btf_enum *enums = btf_type_enum(t);
1633 u32 i, nr_enums = btf_type_vlen(t);
1634 int v = *(int *)data;
1635
1636 for (i = 0; i < nr_enums; i++) {
1637 if (v == enums[i].val) {
1638 seq_printf(m, "%s",
1639 btf_name_by_offset(btf, enums[i].name));
1640 return;
1641 }
1642 }
1643
1644 seq_printf(m, "%d", v);
1645}
1646
1647static struct btf_kind_operations enum_ops = {
1648 .check_meta = btf_enum_check_meta,
1649 .resolve = btf_df_resolve,
1650 .check_member = btf_enum_check_member,
1651 .log_details = btf_enum_log,
1652 .seq_show = btf_enum_seq_show,
1653};
1654
1655static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
1656 [BTF_KIND_INT] = &int_ops,
1657 [BTF_KIND_PTR] = &ptr_ops,
1658 [BTF_KIND_ARRAY] = &array_ops,
1659 [BTF_KIND_STRUCT] = &struct_ops,
1660 [BTF_KIND_UNION] = &struct_ops,
1661 [BTF_KIND_ENUM] = &enum_ops,
1662 [BTF_KIND_FWD] = &fwd_ops,
1663 [BTF_KIND_TYPEDEF] = &modifier_ops,
1664 [BTF_KIND_VOLATILE] = &modifier_ops,
1665 [BTF_KIND_CONST] = &modifier_ops,
1666 [BTF_KIND_RESTRICT] = &modifier_ops,
1667};
1668
1669static s32 btf_check_meta(struct btf_verifier_env *env,
1670 const struct btf_type *t,
1671 u32 meta_left)
1672{
1673 u32 saved_meta_left = meta_left;
1674 s32 var_meta_size;
1675
1676 if (meta_left < sizeof(*t)) {
1677 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
1678 env->log_type_id, meta_left, sizeof(*t));
1679 return -EINVAL;
1680 }
1681 meta_left -= sizeof(*t);
1682
1683 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
1684 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
1685 btf_verifier_log(env, "[%u] Invalid kind:%u",
1686 env->log_type_id, BTF_INFO_KIND(t->info));
1687 return -EINVAL;
1688 }
1689
1690 if (!btf_name_offset_valid(env->btf, t->name)) {
1691 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
1692 env->log_type_id, t->name);
1693 return -EINVAL;
1694 }
1695
1696 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
1697 if (var_meta_size < 0)
1698 return var_meta_size;
1699
1700 meta_left -= var_meta_size;
1701
1702 return saved_meta_left - meta_left;
1703}
1704
1705static int btf_check_all_metas(struct btf_verifier_env *env)
1706{
1707 struct btf *btf = env->btf;
1708 struct btf_header *hdr;
1709 void *cur, *end;
1710
1711 hdr = btf->hdr;
1712 cur = btf->nohdr_data + hdr->type_off;
1713 end = btf->nohdr_data + hdr->str_off;
1714
1715 env->log_type_id = 1;
1716 while (cur < end) {
1717 struct btf_type *t = cur;
1718 s32 meta_size;
1719
1720 meta_size = btf_check_meta(env, t, end - cur);
1721 if (meta_size < 0)
1722 return meta_size;
1723
1724 btf_add_type(env, t);
1725 cur += meta_size;
1726 env->log_type_id++;
1727 }
1728
1729 return 0;
1730}
1731
1732static int btf_resolve(struct btf_verifier_env *env,
1733 const struct btf_type *t, u32 type_id)
1734{
1735 const struct resolve_vertex *v;
1736 int err = 0;
1737
1738 env->resolve_mode = RESOLVE_TBD;
1739 env_stack_push(env, t, type_id);
1740 while (!err && (v = env_stack_peak(env))) {
1741 env->log_type_id = v->type_id;
1742 err = btf_type_ops(v->t)->resolve(env, v);
1743 }
1744
1745 env->log_type_id = type_id;
1746 if (err == -E2BIG)
1747 btf_verifier_log_type(env, t,
1748 "Exceeded max resolving depth:%u",
1749 MAX_RESOLVE_DEPTH);
1750 else if (err == -EEXIST)
1751 btf_verifier_log_type(env, t, "Loop detected");
1752
1753 return err;
1754}
1755
1756static bool btf_resolve_valid(struct btf_verifier_env *env,
1757 const struct btf_type *t,
1758 u32 type_id)
1759{
1760 struct btf *btf = env->btf;
1761
1762 if (!env_type_is_resolved(env, type_id))
1763 return false;
1764
1765 if (btf_type_is_struct(t))
1766 return !btf->resolved_ids[type_id] &&
1767 !btf->resolved_sizes[type_id];
1768
1769 if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
1770 t = btf_type_id_resolve(btf, &type_id);
1771 return t && !btf_type_is_modifier(t);
1772 }
1773
1774 if (btf_type_is_array(t)) {
1775 const struct btf_array *array = btf_type_array(t);
1776 const struct btf_type *elem_type;
1777 u32 elem_type_id = array->type;
1778 u32 elem_size;
1779
1780 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1781 return elem_type && !btf_type_is_modifier(elem_type) &&
1782 (array->nelems * elem_size ==
1783 btf->resolved_sizes[type_id]);
1784 }
1785
1786 return false;
1787}
1788
1789static int btf_check_all_types(struct btf_verifier_env *env)
1790{
1791 struct btf *btf = env->btf;
1792 u32 type_id;
1793 int err;
1794
1795 err = env_resolve_init(env);
1796 if (err)
1797 return err;
1798
1799 env->phase++;
1800 for (type_id = 1; type_id <= btf->nr_types; type_id++) {
1801 const struct btf_type *t = btf_type_by_id(btf, type_id);
1802
1803 env->log_type_id = type_id;
1804 if (btf_type_needs_resolve(t) &&
1805 !env_type_is_resolved(env, type_id)) {
1806 err = btf_resolve(env, t, type_id);
1807 if (err)
1808 return err;
1809 }
1810
1811 if (btf_type_needs_resolve(t) &&
1812 !btf_resolve_valid(env, t, type_id)) {
1813 btf_verifier_log_type(env, t, "Invalid resolve state");
1814 return -EINVAL;
1815 }
1816 }
1817
1818 return 0;
1819}
1820
1821static int btf_parse_type_sec(struct btf_verifier_env *env)
1822{
1823 int err;
1824
1825 err = btf_check_all_metas(env);
1826 if (err)
1827 return err;
1828
1829 return btf_check_all_types(env);
1830}
1831
1832static int btf_parse_str_sec(struct btf_verifier_env *env)
1833{
1834 const struct btf_header *hdr;
1835 struct btf *btf = env->btf;
1836 const char *start, *end;
1837
1838 hdr = btf->hdr;
1839 start = btf->nohdr_data + hdr->str_off;
1840 end = start + hdr->str_len;
1841
1842 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
1843 start[0] || end[-1]) {
1844 btf_verifier_log(env, "Invalid string section");
1845 return -EINVAL;
1846 }
1847
1848 btf->strings = start;
1849
1850 return 0;
1851}
1852
1853static int btf_parse_hdr(struct btf_verifier_env *env)
1854{
1855 const struct btf_header *hdr;
1856 struct btf *btf = env->btf;
1857 u32 meta_left;
1858
1859 if (btf->data_size < sizeof(*hdr)) {
1860 btf_verifier_log(env, "btf_header not found");
1861 return -EINVAL;
1862 }
1863
1864 btf_verifier_log_hdr(env);
1865
1866 hdr = btf->hdr;
1867 if (hdr->magic != BTF_MAGIC) {
1868 btf_verifier_log(env, "Invalid magic");
1869 return -EINVAL;
1870 }
1871
1872 if (hdr->version != BTF_VERSION) {
1873 btf_verifier_log(env, "Unsupported version");
1874 return -ENOTSUPP;
1875 }
1876
1877 if (hdr->flags) {
1878 btf_verifier_log(env, "Unsupported flags");
1879 return -ENOTSUPP;
1880 }
1881
1882 meta_left = btf->data_size - sizeof(*hdr);
1883 if (!meta_left) {
1884 btf_verifier_log(env, "No data");
1885 return -EINVAL;
1886 }
1887
1888 if (meta_left < hdr->type_off || hdr->str_off <= hdr->type_off ||
1889 /* Type section must align to 4 bytes */
1890 hdr->type_off & (sizeof(u32) - 1)) {
1891 btf_verifier_log(env, "Invalid type_off");
1892 return -EINVAL;
1893 }
1894
1895 if (meta_left < hdr->str_off ||
1896 meta_left - hdr->str_off < hdr->str_len) {
1897 btf_verifier_log(env, "Invalid str_off or str_len");
1898 return -EINVAL;
1899 }
1900
1901 btf->nohdr_data = btf->hdr + 1;
1902
1903 return 0;
1904}
1905
1906static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
1907 u32 log_level, char __user *log_ubuf, u32 log_size)
1908{
1909 struct btf_verifier_env *env = NULL;
1910 struct bpf_verifier_log *log;
1911 struct btf *btf = NULL;
1912 u8 *data;
1913 int err;
1914
1915 if (btf_data_size > BTF_MAX_SIZE)
1916 return ERR_PTR(-E2BIG);
1917
1918 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
1919 if (!env)
1920 return ERR_PTR(-ENOMEM);
1921
1922 log = &env->log;
1923 if (log_level || log_ubuf || log_size) {
1924 /* user requested verbose verifier output
1925 * and supplied buffer to store the verification trace
1926 */
1927 log->level = log_level;
1928 log->ubuf = log_ubuf;
1929 log->len_total = log_size;
1930
1931 /* log attributes have to be sane */
1932 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
1933 !log->level || !log->ubuf) {
1934 err = -EINVAL;
1935 goto errout;
1936 }
1937 }
1938
1939 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
1940 if (!btf) {
1941 err = -ENOMEM;
1942 goto errout;
1943 }
1944
1945 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
1946 if (!data) {
1947 err = -ENOMEM;
1948 goto errout;
1949 }
1950
1951 btf->data = data;
1952 btf->data_size = btf_data_size;
1953
1954 if (copy_from_user(data, btf_data, btf_data_size)) {
1955 err = -EFAULT;
1956 goto errout;
1957 }
1958
1959 env->btf = btf;
1960
1961 err = btf_parse_hdr(env);
1962 if (err)
1963 goto errout;
1964
1965 err = btf_parse_str_sec(env);
1966 if (err)
1967 goto errout;
1968
1969 err = btf_parse_type_sec(env);
1970 if (err)
1971 goto errout;
1972
1973 if (!err && log->level && bpf_verifier_log_full(log)) {
1974 err = -ENOSPC;
1975 goto errout;
1976 }
1977
1978 if (!err) {
1979 btf_verifier_env_free(env);
1980 btf_get(btf);
1981 return btf;
1982 }
1983
1984errout:
1985 btf_verifier_env_free(env);
1986 if (btf)
1987 btf_free(btf);
1988 return ERR_PTR(err);
1989}
1990
1991void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
1992 struct seq_file *m)
1993{
1994 const struct btf_type *t = btf_type_by_id(btf, type_id);
1995
1996 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
1997}
1998
1999static int btf_release(struct inode *inode, struct file *filp)
2000{
2001 btf_put(filp->private_data);
2002 return 0;
2003}
2004
2005const struct file_operations btf_fops = {
2006 .release = btf_release,
2007};
2008
2009int btf_new_fd(const union bpf_attr *attr)
2010{
2011 struct btf *btf;
2012 int fd;
2013
2014 btf = btf_parse(u64_to_user_ptr(attr->btf),
2015 attr->btf_size, attr->btf_log_level,
2016 u64_to_user_ptr(attr->btf_log_buf),
2017 attr->btf_log_size);
2018 if (IS_ERR(btf))
2019 return PTR_ERR(btf);
2020
2021 fd = anon_inode_getfd("btf", &btf_fops, btf,
2022 O_RDONLY | O_CLOEXEC);
2023 if (fd < 0)
2024 btf_put(btf);
2025
2026 return fd;
2027}
2028
2029struct btf *btf_get_by_fd(int fd)
2030{
2031 struct btf *btf;
2032 struct fd f;
2033
2034 f = fdget(fd);
2035
2036 if (!f.file)
2037 return ERR_PTR(-EBADF);
2038
2039 if (f.file->f_op != &btf_fops) {
2040 fdput(f);
2041 return ERR_PTR(-EINVAL);
2042 }
2043
2044 btf = f.file->private_data;
2045 btf_get(btf);
2046 fdput(f);
2047
2048 return btf;
2049}
2050
2051int btf_get_info_by_fd(const struct btf *btf,
2052 const union bpf_attr *attr,
2053 union bpf_attr __user *uattr)
2054{
2055 void __user *udata = u64_to_user_ptr(attr->info.info);
2056 u32 copy_len = min_t(u32, btf->data_size,
2057 attr->info.info_len);
2058
2059 if (copy_to_user(udata, btf->data, copy_len) ||
2060 put_user(btf->data_size, &uattr->info.info_len))
2061 return -EFAULT;
2062
2063 return 0;
2064}