author    Alexei Starovoitov <ast@plumgrid.com>    2014-09-26 03:17:07 -0400
committer David S. Miller <davem@davemloft.net>    2014-09-26 15:05:15 -0400
commit    3c731eba48e1b0650decfc91a839b80f0e05ce8f (patch)
tree      ad7927653cfca896fd60e2e7b2fb12750e46fd2e
parent    17a5267067f3c372fec9ffb798d6eaba6b5e6a4c (diff)
bpf: mini eBPF library, test stubs and verifier testsuite
1. the library includes a trivial set of BPF syscall wrappers:

   int bpf_create_map(enum bpf_map_type map_type, int key_size,
                      int value_size, int max_entries);
   int bpf_update_elem(int fd, void *key, void *value);
   int bpf_lookup_elem(int fd, void *key, void *value);
   int bpf_delete_elem(int fd, void *key);
   int bpf_get_next_key(int fd, void *key, void *next_key);
   int bpf_prog_load(enum bpf_prog_type prog_type,
                     const struct bpf_insn *insns, int insn_len,
                     const char *license);

   bpf_prog_load() stores the verifier log into the global bpf_log_buf[]
   array. The library also provides BPF_*() macros to build instructions.

2. test stubs configure the eBPF infrastructure with 'unspec' map and
   program types. These are fake types used by the user space testsuite only.

3. the verifier testsuite runs valid and invalid programs and expects
   predefined error log messages from the kernel. 40 tests so far.

   $ sudo ./test_verifier
   #0 add+sub+mul OK
   #1 unreachable OK
   #2 unreachable2 OK
   #3 out of range jump OK
   #4 out of range jump2 OK
   #5 test1 ld_imm64 OK
   ...

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
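A minimal usage sketch of the library (an editorial illustration, not part of
this patch; it assumes a program built against samples/bpf/libbpf.h from the
diff below): create a test map the same way test_verifier.c does, load a
two-instruction program that returns 0, and dump the verifier log if the load
is rejected.

    #include <stdio.h>
    #include <linux/bpf.h>
    #include "libbpf.h"

    int main(void)
    {
            struct bpf_insn prog[] = {
                    BPF_MOV64_IMM(BPF_REG_0, 0),    /* r0 = 0 */
                    BPF_EXIT_INSN(),                /* return r0 */
            };
            long long key = 0, value = 0;
            int map_fd, prog_fd;

            /* 'unspec' stub map type, 8-byte keys and values, 1024 entries */
            map_fd = bpf_create_map(BPF_MAP_TYPE_UNSPEC, sizeof(key),
                                    sizeof(value), 1024);
            if (map_fd < 0)
                    return 1;

            prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog,
                                    sizeof(prog), "GPL");
            if (prog_fd < 0)
                    printf("verifier log:\n%s\n", bpf_log_buf);

            return 0;
    }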
-rw-r--r--    kernel/bpf/Makefile           4
-rw-r--r--    kernel/bpf/test_stub.c      116
-rw-r--r--    lib/Kconfig.debug             3
-rw-r--r--    samples/bpf/Makefile         12
-rw-r--r--    samples/bpf/libbpf.c         94
-rw-r--r--    samples/bpf/libbpf.h        172
-rw-r--r--    samples/bpf/test_verifier.c 548
7 files changed, 948 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 3c726b0995b7..45427239f375 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1 +1,5 @@
1obj-y := core.o syscall.o verifier.o
2
3ifdef CONFIG_TEST_BPF
4obj-y += test_stub.o
5endif
diff --git a/kernel/bpf/test_stub.c b/kernel/bpf/test_stub.c
new file mode 100644
index 000000000000..fcaddff4003e
--- /dev/null
+++ b/kernel/bpf/test_stub.c
@@ -0,0 +1,116 @@
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/bpf.h>
12
13/* test stubs for BPF_MAP_TYPE_UNSPEC and for BPF_PROG_TYPE_UNSPEC
14 * to be used by user space verifier testsuite
15 */
16struct bpf_context {
17 u64 arg1;
18 u64 arg2;
19};
20
21static u64 test_func(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
22{
23 return 0;
24}
25
26static struct bpf_func_proto test_funcs[] = {
27 [BPF_FUNC_unspec] = {
28 .func = test_func,
29 .gpl_only = true,
30 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
31 .arg1_type = ARG_CONST_MAP_PTR,
32 .arg2_type = ARG_PTR_TO_MAP_KEY,
33 },
34};
35
36static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id)
37{
38 if (func_id < 0 || func_id >= ARRAY_SIZE(test_funcs))
39 return NULL;
40 return &test_funcs[func_id];
41}
42
43static const struct bpf_context_access {
44 int size;
45 enum bpf_access_type type;
46} test_ctx_access[] = {
47 [offsetof(struct bpf_context, arg1)] = {
48 FIELD_SIZEOF(struct bpf_context, arg1),
49 BPF_READ
50 },
51 [offsetof(struct bpf_context, arg2)] = {
52 FIELD_SIZEOF(struct bpf_context, arg2),
53 BPF_READ
54 },
55};
56
57static bool test_is_valid_access(int off, int size, enum bpf_access_type type)
58{
59 const struct bpf_context_access *access;
60
61 if (off < 0 || off >= ARRAY_SIZE(test_ctx_access))
62 return false;
63
64 access = &test_ctx_access[off];
65 if (access->size == size && (access->type & type))
66 return true;
67
68 return false;
69}
70
71static struct bpf_verifier_ops test_ops = {
72 .get_func_proto = test_func_proto,
73 .is_valid_access = test_is_valid_access,
74};
75
76static struct bpf_prog_type_list tl_prog = {
77 .ops = &test_ops,
78 .type = BPF_PROG_TYPE_UNSPEC,
79};
80
81static struct bpf_map *test_map_alloc(union bpf_attr *attr)
82{
83 struct bpf_map *map;
84
85 map = kzalloc(sizeof(*map), GFP_USER);
86 if (!map)
87 return ERR_PTR(-ENOMEM);
88
89 map->key_size = attr->key_size;
90 map->value_size = attr->value_size;
91 map->max_entries = attr->max_entries;
92 return map;
93}
94
95static void test_map_free(struct bpf_map *map)
96{
97 kfree(map);
98}
99
100static struct bpf_map_ops test_map_ops = {
101 .map_alloc = test_map_alloc,
102 .map_free = test_map_free,
103};
104
105static struct bpf_map_type_list tl_map = {
106 .ops = &test_map_ops,
107 .type = BPF_MAP_TYPE_UNSPEC,
108};
109
110static int __init register_test_ops(void)
111{
112 bpf_register_map_type(&tl_map);
113 bpf_register_prog_type(&tl_prog);
114 return 0;
115}
116late_initcall(register_test_ops);
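Editorial note (not part of the patch): with this stub the only context
accesses the verifier will allow are full-width reads of arg1 and arg2. As a
hypothetical illustration of what test_is_valid_access() accepts and rejects
(R1 holds the context pointer on entry, macros from samples/bpf/libbpf.h
below):

    /* accepted: 8-byte read of bpf_context.arg1 at offset 0 */
    struct bpf_insn ok[] = {
            BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
            BPF_EXIT_INSN(),
    };

    /* rejected: 4-byte read at the same offset; the stub insists on
     * size == FIELD_SIZEOF(struct bpf_context, arg1), i.e. 8 bytes
     */
    struct bpf_insn bad[] = {
            BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
            BPF_EXIT_INSN(),
    };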
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a28590083622..3ac43f34437b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1672,7 +1672,8 @@ config TEST_BPF
1672 against the BPF interpreter or BPF JIT compiler depending on the
1673 current setting. This is in particular useful for BPF JIT compiler
1674 development, but also to run regression tests against changes in
1675 the interpreter code. It also enables test stubs for eBPF maps and
1676 verifier used by user space verifier testsuite.
1677
1678 If unsure, say N.
1679
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
new file mode 100644
index 000000000000..634391797856
--- /dev/null
+++ b/samples/bpf/Makefile
@@ -0,0 +1,12 @@
1# kbuild trick to avoid linker error. Can be omitted if a module is built.
2obj- := dummy.o
3
4# List of programs to build
5hostprogs-y := test_verifier
6
7test_verifier-objs := test_verifier.o libbpf.o
8
9# Tell kbuild to always build the programs
10always := $(hostprogs-y)
11
12HOSTCFLAGS += -I$(objtree)/usr/include
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
new file mode 100644
index 000000000000..ff6504420738
--- /dev/null
+++ b/samples/bpf/libbpf.c
@@ -0,0 +1,94 @@
1/* eBPF mini library */
2#include <stdlib.h>
3#include <stdio.h>
4#include <linux/unistd.h>
5#include <unistd.h>
6#include <string.h>
7#include <linux/netlink.h>
8#include <linux/bpf.h>
9#include <errno.h>
10#include "libbpf.h"
11
12static __u64 ptr_to_u64(void *ptr)
13{
14 return (__u64) (unsigned long) ptr;
15}
16
17int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
18 int max_entries)
19{
20 union bpf_attr attr = {
21 .map_type = map_type,
22 .key_size = key_size,
23 .value_size = value_size,
24 .max_entries = max_entries
25 };
26
27 return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
28}
29
30int bpf_update_elem(int fd, void *key, void *value)
31{
32 union bpf_attr attr = {
33 .map_fd = fd,
34 .key = ptr_to_u64(key),
35 .value = ptr_to_u64(value),
36 };
37
38 return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
39}
40
41int bpf_lookup_elem(int fd, void *key, void *value)
42{
43 union bpf_attr attr = {
44 .map_fd = fd,
45 .key = ptr_to_u64(key),
46 .value = ptr_to_u64(value),
47 };
48
49 return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
50}
51
52int bpf_delete_elem(int fd, void *key)
53{
54 union bpf_attr attr = {
55 .map_fd = fd,
56 .key = ptr_to_u64(key),
57 };
58
59 return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
60}
61
62int bpf_get_next_key(int fd, void *key, void *next_key)
63{
64 union bpf_attr attr = {
65 .map_fd = fd,
66 .key = ptr_to_u64(key),
67 .next_key = ptr_to_u64(next_key),
68 };
69
70 return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
71}
72
73#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))
74
75char bpf_log_buf[LOG_BUF_SIZE];
76
77int bpf_prog_load(enum bpf_prog_type prog_type,
78 const struct bpf_insn *insns, int prog_len,
79 const char *license)
80{
81 union bpf_attr attr = {
82 .prog_type = prog_type,
83 .insns = ptr_to_u64((void *) insns),
84 .insn_cnt = prog_len / sizeof(struct bpf_insn),
85 .license = ptr_to_u64((void *) license),
86 .log_buf = ptr_to_u64(bpf_log_buf),
87 .log_size = LOG_BUF_SIZE,
88 .log_level = 1,
89 };
90
91 bpf_log_buf[0] = 0;
92
93 return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
94}
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
new file mode 100644
index 000000000000..8a31babeca5d
--- /dev/null
+++ b/samples/bpf/libbpf.h
@@ -0,0 +1,172 @@
1/* eBPF mini library */
2#ifndef __LIBBPF_H
3#define __LIBBPF_H
4
5struct bpf_insn;
6
7int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
8 int max_entries);
9int bpf_update_elem(int fd, void *key, void *value);
10int bpf_lookup_elem(int fd, void *key, void *value);
11int bpf_delete_elem(int fd, void *key);
12int bpf_get_next_key(int fd, void *key, void *next_key);
13
14int bpf_prog_load(enum bpf_prog_type prog_type,
15 const struct bpf_insn *insns, int insn_len,
16 const char *license);
17
18#define LOG_BUF_SIZE 8192
19extern char bpf_log_buf[LOG_BUF_SIZE];
20
21/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
22
23#define BPF_ALU64_REG(OP, DST, SRC) \
24 ((struct bpf_insn) { \
25 .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
26 .dst_reg = DST, \
27 .src_reg = SRC, \
28 .off = 0, \
29 .imm = 0 })
30
31#define BPF_ALU32_REG(OP, DST, SRC) \
32 ((struct bpf_insn) { \
33 .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
34 .dst_reg = DST, \
35 .src_reg = SRC, \
36 .off = 0, \
37 .imm = 0 })
38
39/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
40
41#define BPF_ALU64_IMM(OP, DST, IMM) \
42 ((struct bpf_insn) { \
43 .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
44 .dst_reg = DST, \
45 .src_reg = 0, \
46 .off = 0, \
47 .imm = IMM })
48
49#define BPF_ALU32_IMM(OP, DST, IMM) \
50 ((struct bpf_insn) { \
51 .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
52 .dst_reg = DST, \
53 .src_reg = 0, \
54 .off = 0, \
55 .imm = IMM })
56
57/* Short form of mov, dst_reg = src_reg */
58
59#define BPF_MOV64_REG(DST, SRC) \
60 ((struct bpf_insn) { \
61 .code = BPF_ALU64 | BPF_MOV | BPF_X, \
62 .dst_reg = DST, \
63 .src_reg = SRC, \
64 .off = 0, \
65 .imm = 0 })
66
67/* Short form of mov, dst_reg = imm32 */
68
69#define BPF_MOV64_IMM(DST, IMM) \
70 ((struct bpf_insn) { \
71 .code = BPF_ALU64 | BPF_MOV | BPF_K, \
72 .dst_reg = DST, \
73 .src_reg = 0, \
74 .off = 0, \
75 .imm = IMM })
76
77/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
78#define BPF_LD_IMM64(DST, IMM) \
79 BPF_LD_IMM64_RAW(DST, 0, IMM)
80
81#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
82 ((struct bpf_insn) { \
83 .code = BPF_LD | BPF_DW | BPF_IMM, \
84 .dst_reg = DST, \
85 .src_reg = SRC, \
86 .off = 0, \
87 .imm = (__u32) (IMM) }), \
88 ((struct bpf_insn) { \
89 .code = 0, /* zero is reserved opcode */ \
90 .dst_reg = 0, \
91 .src_reg = 0, \
92 .off = 0, \
93 .imm = ((__u64) (IMM)) >> 32 })
94
95#define BPF_PSEUDO_MAP_FD 1
96
97/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
98#define BPF_LD_MAP_FD(DST, MAP_FD) \
99 BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
100
101
102/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
103
104#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
105 ((struct bpf_insn) { \
106 .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
107 .dst_reg = DST, \
108 .src_reg = SRC, \
109 .off = OFF, \
110 .imm = 0 })
111
112/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
113
114#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
115 ((struct bpf_insn) { \
116 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
117 .dst_reg = DST, \
118 .src_reg = SRC, \
119 .off = OFF, \
120 .imm = 0 })
121
122/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
123
124#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
125 ((struct bpf_insn) { \
126 .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
127 .dst_reg = DST, \
128 .src_reg = 0, \
129 .off = OFF, \
130 .imm = IMM })
131
132/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
133
134#define BPF_JMP_REG(OP, DST, SRC, OFF) \
135 ((struct bpf_insn) { \
136 .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
137 .dst_reg = DST, \
138 .src_reg = SRC, \
139 .off = OFF, \
140 .imm = 0 })
141
142/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
143
144#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
145 ((struct bpf_insn) { \
146 .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
147 .dst_reg = DST, \
148 .src_reg = 0, \
149 .off = OFF, \
150 .imm = IMM })
151
152/* Raw code statement block */
153
154#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
155 ((struct bpf_insn) { \
156 .code = CODE, \
157 .dst_reg = DST, \
158 .src_reg = SRC, \
159 .off = OFF, \
160 .imm = IMM })
161
162/* Program exit */
163
164#define BPF_EXIT_INSN() \
165 ((struct bpf_insn) { \
166 .code = BPF_JMP | BPF_EXIT, \
167 .dst_reg = 0, \
168 .src_reg = 0, \
169 .off = 0, \
170 .imm = 0 })
171
172#endif
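Editorial note (not part of the patch): BPF_LD_IMM64 is the one macro above
that expands to two struct bpf_insn initializers; the 64-bit immediate is
split across the imm fields of a BPF_LD | BPF_DW | BPF_IMM instruction and a
second pseudo instruction whose opcode is the reserved value 0. That is why
several ld_imm64 tests in test_verifier.c below feed the verifier only the
first half via BPF_RAW_INSN. For a hypothetical immediate:

    /* BPF_LD_IMM64(BPF_REG_0, 0x1122334455667788ULL) emits the same two
     * instructions as:
     */
    struct bpf_insn ld64[2] = {
            { .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_0,
              .src_reg = 0, .off = 0, .imm = 0x55667788 },  /* low 32 bits */
            { .code = 0, .dst_reg = 0, .src_reg = 0, .off = 0,
              .imm = 0x11223344 },                          /* high 32 bits */
    };

BPF_LD_MAP_FD() reuses the same encoding but sets src_reg to
BPF_PSEUDO_MAP_FD, marking the imm field as a process-local map file
descriptor rather than a literal value.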
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
new file mode 100644
index 000000000000..d10992e2740e
--- /dev/null
+++ b/samples/bpf/test_verifier.c
@@ -0,0 +1,548 @@
1/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
10#include <stdio.h>
11#include <unistd.h>
12#include <linux/bpf.h>
13#include <errno.h>
14#include <linux/unistd.h>
15#include <string.h>
16#include <linux/filter.h>
17#include "libbpf.h"
18
19#define MAX_INSNS 512
20#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
21
22struct bpf_test {
23 const char *descr;
24 struct bpf_insn insns[MAX_INSNS];
25 int fixup[32];
26 const char *errstr;
27 enum {
28 ACCEPT,
29 REJECT
30 } result;
31};
32
33static struct bpf_test tests[] = {
34 {
35 "add+sub+mul",
36 .insns = {
37 BPF_MOV64_IMM(BPF_REG_1, 1),
38 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
39 BPF_MOV64_IMM(BPF_REG_2, 3),
40 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
41 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
42 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
43 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
44 BPF_EXIT_INSN(),
45 },
46 .result = ACCEPT,
47 },
48 {
49 "unreachable",
50 .insns = {
51 BPF_EXIT_INSN(),
52 BPF_EXIT_INSN(),
53 },
54 .errstr = "unreachable",
55 .result = REJECT,
56 },
57 {
58 "unreachable2",
59 .insns = {
60 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
61 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
62 BPF_EXIT_INSN(),
63 },
64 .errstr = "unreachable",
65 .result = REJECT,
66 },
67 {
68 "out of range jump",
69 .insns = {
70 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
71 BPF_EXIT_INSN(),
72 },
73 .errstr = "jump out of range",
74 .result = REJECT,
75 },
76 {
77 "out of range jump2",
78 .insns = {
79 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
80 BPF_EXIT_INSN(),
81 },
82 .errstr = "jump out of range",
83 .result = REJECT,
84 },
85 {
86 "test1 ld_imm64",
87 .insns = {
88 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
89 BPF_LD_IMM64(BPF_REG_0, 0),
90 BPF_LD_IMM64(BPF_REG_0, 0),
91 BPF_LD_IMM64(BPF_REG_0, 1),
92 BPF_LD_IMM64(BPF_REG_0, 1),
93 BPF_MOV64_IMM(BPF_REG_0, 2),
94 BPF_EXIT_INSN(),
95 },
96 .errstr = "invalid BPF_LD_IMM insn",
97 .result = REJECT,
98 },
99 {
100 "test2 ld_imm64",
101 .insns = {
102 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
103 BPF_LD_IMM64(BPF_REG_0, 0),
104 BPF_LD_IMM64(BPF_REG_0, 0),
105 BPF_LD_IMM64(BPF_REG_0, 1),
106 BPF_LD_IMM64(BPF_REG_0, 1),
107 BPF_EXIT_INSN(),
108 },
109 .errstr = "invalid BPF_LD_IMM insn",
110 .result = REJECT,
111 },
112 {
113 "test3 ld_imm64",
114 .insns = {
115 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
116 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
117 BPF_LD_IMM64(BPF_REG_0, 0),
118 BPF_LD_IMM64(BPF_REG_0, 0),
119 BPF_LD_IMM64(BPF_REG_0, 1),
120 BPF_LD_IMM64(BPF_REG_0, 1),
121 BPF_EXIT_INSN(),
122 },
123 .errstr = "invalid bpf_ld_imm64 insn",
124 .result = REJECT,
125 },
126 {
127 "test4 ld_imm64",
128 .insns = {
129 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
130 BPF_EXIT_INSN(),
131 },
132 .errstr = "invalid bpf_ld_imm64 insn",
133 .result = REJECT,
134 },
135 {
136 "test5 ld_imm64",
137 .insns = {
138 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
139 },
140 .errstr = "invalid bpf_ld_imm64 insn",
141 .result = REJECT,
142 },
143 {
144 "no bpf_exit",
145 .insns = {
146 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
147 },
148 .errstr = "jump out of range",
149 .result = REJECT,
150 },
151 {
152 "loop (back-edge)",
153 .insns = {
154 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
155 BPF_EXIT_INSN(),
156 },
157 .errstr = "back-edge",
158 .result = REJECT,
159 },
160 {
161 "loop2 (back-edge)",
162 .insns = {
163 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
164 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
165 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
166 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
167 BPF_EXIT_INSN(),
168 },
169 .errstr = "back-edge",
170 .result = REJECT,
171 },
172 {
173 "conditional loop",
174 .insns = {
175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
176 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
177 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
178 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
179 BPF_EXIT_INSN(),
180 },
181 .errstr = "back-edge",
182 .result = REJECT,
183 },
184 {
185 "read uninitialized register",
186 .insns = {
187 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
188 BPF_EXIT_INSN(),
189 },
190 .errstr = "R2 !read_ok",
191 .result = REJECT,
192 },
193 {
194 "read invalid register",
195 .insns = {
196 BPF_MOV64_REG(BPF_REG_0, -1),
197 BPF_EXIT_INSN(),
198 },
199 .errstr = "R15 is invalid",
200 .result = REJECT,
201 },
202 {
203 "program doesn't init R0 before exit",
204 .insns = {
205 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
206 BPF_EXIT_INSN(),
207 },
208 .errstr = "R0 !read_ok",
209 .result = REJECT,
210 },
211 {
212 "stack out of bounds",
213 .insns = {
214 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
215 BPF_EXIT_INSN(),
216 },
217 .errstr = "invalid stack",
218 .result = REJECT,
219 },
220 {
221 "invalid call insn1",
222 .insns = {
223 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
224 BPF_EXIT_INSN(),
225 },
226 .errstr = "BPF_CALL uses reserved",
227 .result = REJECT,
228 },
229 {
230 "invalid call insn2",
231 .insns = {
232 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
233 BPF_EXIT_INSN(),
234 },
235 .errstr = "BPF_CALL uses reserved",
236 .result = REJECT,
237 },
238 {
239 "invalid function call",
240 .insns = {
241 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
242 BPF_EXIT_INSN(),
243 },
244 .errstr = "invalid func 1234567",
245 .result = REJECT,
246 },
247 {
248 "uninitialized stack1",
249 .insns = {
250 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
252 BPF_LD_MAP_FD(BPF_REG_1, 0),
253 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
254 BPF_EXIT_INSN(),
255 },
256 .fixup = {2},
257 .errstr = "invalid indirect read from stack",
258 .result = REJECT,
259 },
260 {
261 "uninitialized stack2",
262 .insns = {
263 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
264 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
265 BPF_EXIT_INSN(),
266 },
267 .errstr = "invalid read from stack",
268 .result = REJECT,
269 },
270 {
271 "check valid spill/fill",
272 .insns = {
273 /* spill R1(ctx) into stack */
274 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
275
276 /* fill it back into R2 */
277 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
278
279 /* should be able to access R0 = *(R2 + 8) */
280 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8),
281 BPF_EXIT_INSN(),
282 },
283 .result = ACCEPT,
284 },
285 {
286 "check corrupted spill/fill",
287 .insns = {
288 /* spill R1(ctx) into stack */
289 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
290
291 /* mess up with R1 pointer on stack */
292 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
293
294 /* fill back into R0 should fail */
295 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
296
297 BPF_EXIT_INSN(),
298 },
299 .errstr = "corrupted spill",
300 .result = REJECT,
301 },
302 {
303 "invalid src register in STX",
304 .insns = {
305 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
306 BPF_EXIT_INSN(),
307 },
308 .errstr = "R15 is invalid",
309 .result = REJECT,
310 },
311 {
312 "invalid dst register in STX",
313 .insns = {
314 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
315 BPF_EXIT_INSN(),
316 },
317 .errstr = "R14 is invalid",
318 .result = REJECT,
319 },
320 {
321 "invalid dst register in ST",
322 .insns = {
323 BPF_ST_MEM(BPF_B, 14, -1, -1),
324 BPF_EXIT_INSN(),
325 },
326 .errstr = "R14 is invalid",
327 .result = REJECT,
328 },
329 {
330 "invalid src register in LDX",
331 .insns = {
332 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
333 BPF_EXIT_INSN(),
334 },
335 .errstr = "R12 is invalid",
336 .result = REJECT,
337 },
338 {
339 "invalid dst register in LDX",
340 .insns = {
341 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
342 BPF_EXIT_INSN(),
343 },
344 .errstr = "R11 is invalid",
345 .result = REJECT,
346 },
347 {
348 "junk insn",
349 .insns = {
350 BPF_RAW_INSN(0, 0, 0, 0, 0),
351 BPF_EXIT_INSN(),
352 },
353 .errstr = "invalid BPF_LD_IMM",
354 .result = REJECT,
355 },
356 {
357 "junk insn2",
358 .insns = {
359 BPF_RAW_INSN(1, 0, 0, 0, 0),
360 BPF_EXIT_INSN(),
361 },
362 .errstr = "BPF_LDX uses reserved fields",
363 .result = REJECT,
364 },
365 {
366 "junk insn3",
367 .insns = {
368 BPF_RAW_INSN(-1, 0, 0, 0, 0),
369 BPF_EXIT_INSN(),
370 },
371 .errstr = "invalid BPF_ALU opcode f0",
372 .result = REJECT,
373 },
374 {
375 "junk insn4",
376 .insns = {
377 BPF_RAW_INSN(-1, -1, -1, -1, -1),
378 BPF_EXIT_INSN(),
379 },
380 .errstr = "invalid BPF_ALU opcode f0",
381 .result = REJECT,
382 },
383 {
384 "junk insn5",
385 .insns = {
386 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
387 BPF_EXIT_INSN(),
388 },
389 .errstr = "BPF_ALU uses reserved fields",
390 .result = REJECT,
391 },
392 {
393 "misaligned read from stack",
394 .insns = {
395 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
396 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
397 BPF_EXIT_INSN(),
398 },
399 .errstr = "misaligned access",
400 .result = REJECT,
401 },
402 {
403 "invalid map_fd for function call",
404 .insns = {
405 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
406 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
408 BPF_LD_MAP_FD(BPF_REG_1, 0),
409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
410 BPF_EXIT_INSN(),
411 },
412 .errstr = "fd 0 is not pointing to valid bpf_map",
413 .result = REJECT,
414 },
415 {
416 "don't check return value before access",
417 .insns = {
418 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
419 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
421 BPF_LD_MAP_FD(BPF_REG_1, 0),
422 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
423 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
424 BPF_EXIT_INSN(),
425 },
426 .fixup = {3},
427 .errstr = "R0 invalid mem access 'map_value_or_null'",
428 .result = REJECT,
429 },
430 {
431 "access memory with incorrect alignment",
432 .insns = {
433 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
436 BPF_LD_MAP_FD(BPF_REG_1, 0),
437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
438 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
439 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
440 BPF_EXIT_INSN(),
441 },
442 .fixup = {3},
443 .errstr = "misaligned access",
444 .result = REJECT,
445 },
446 {
447 "sometimes access memory with incorrect alignment",
448 .insns = {
449 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
450 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
451 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
452 BPF_LD_MAP_FD(BPF_REG_1, 0),
453 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
454 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
455 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
456 BPF_EXIT_INSN(),
457 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
458 BPF_EXIT_INSN(),
459 },
460 .fixup = {3},
461 .errstr = "R0 invalid mem access",
462 .result = REJECT,
463 },
464};
465
466static int probe_filter_length(struct bpf_insn *fp)
467{
468 int len = 0;
469
470 for (len = MAX_INSNS - 1; len > 0; --len)
471 if (fp[len].code != 0 || fp[len].imm != 0)
472 break;
473
474 return len + 1;
475}
476
477static int create_map(void)
478{
479 long long key, value = 0;
480 int map_fd;
481
482 map_fd = bpf_create_map(BPF_MAP_TYPE_UNSPEC, sizeof(key), sizeof(value), 1024);
483 if (map_fd < 0) {
484 printf("failed to create map '%s'\n", strerror(errno));
485 }
486
487 return map_fd;
488}
489
490static int test(void)
491{
492 int prog_fd, i;
493
494 for (i = 0; i < ARRAY_SIZE(tests); i++) {
495 struct bpf_insn *prog = tests[i].insns;
496 int prog_len = probe_filter_length(prog);
497 int *fixup = tests[i].fixup;
498 int map_fd = -1;
499
500 if (*fixup) {
501 map_fd = create_map();
502
503 do {
504 prog[*fixup].imm = map_fd;
505 fixup++;
506 } while (*fixup);
507 }
508 printf("#%d %s ", i, tests[i].descr);
509
510 prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog,
511 prog_len * sizeof(struct bpf_insn),
512 "GPL");
513
514 if (tests[i].result == ACCEPT) {
515 if (prog_fd < 0) {
516 printf("FAIL\nfailed to load prog '%s'\n",
517 strerror(errno));
518 printf("%s", bpf_log_buf);
519 goto fail;
520 }
521 } else {
522 if (prog_fd >= 0) {
523 printf("FAIL\nunexpected success to load\n");
524 printf("%s", bpf_log_buf);
525 goto fail;
526 }
527 if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
528 printf("FAIL\nunexpected error message: %s",
529 bpf_log_buf);
530 goto fail;
531 }
532 }
533
534 printf("OK\n");
535fail:
536 if (map_fd >= 0)
537 close(map_fd);
538 close(prog_fd);
539
540 }
541
542 return 0;
543}
544
545int main(void)
546{
547 return test();
548}
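Editorial note (not part of the patch): new cases are added by appending to
the tests[] array; a hypothetical ACCEPT entry could look like the sketch
below. Entries whose programs call BPF_FUNC_unspec additionally list the
index of their BPF_LD_MAP_FD instruction in .fixup, so that test() can patch
a freshly created map fd into that instruction's imm field before loading.

    {
            "example: copy ctx pointer and return 0",
            .insns = {
                    BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),    /* save ctx */
                    BPF_MOV64_IMM(BPF_REG_0, 0),            /* r0 = 0 */
                    BPF_EXIT_INSN(),
            },
            .result = ACCEPT,
    },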