aboutsummaryrefslogtreecommitdiffstats
path: root/samples
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-08 21:40:54 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-08 21:40:54 -0400
commit35a9ad8af0bb0fa3525e6d0d20e32551d226f38e (patch)
tree15b4b33206818886d9cff371fd2163e073b70568 /samples
parentd5935b07da53f74726e2a65dd4281d0f2c70e5d4 (diff)
parent64b1f00a0830e1c53874067273a096b228d83d36 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: "Most notable changes in here: 1) By far the biggest accomplishment, thanks to a large range of contributors, is the addition of multi-send for transmit. This is the result of discussions back in Chicago, and the hard work of several individuals. Now, when the ->ndo_start_xmit() method of a driver sees skb->xmit_more as true, it can choose to defer the doorbell telling the driver to start processing the new TX queue entries. skb->xmit_more means that the generic networking is guaranteed to call the driver immediately with another SKB to send. There is logic added to the qdisc layer to dequeue multiple packets at a time, and the handling of mis-predicted offloads in software is now done with no locks held. Finally, pktgen is extended to have a "burst" parameter that can be used to test a multi-send implementation. Several drivers have xmit_more support: i40e, igb, ixgbe, mlx4, virtio_net Adding support is almost trivial, so expect more drivers to support this optimization soon. I want to thank, in no particular or implied order, Jesper Dangaard Brouer, Eric Dumazet, Alexander Duyck, Tom Herbert, Jamal Hadi Salim, John Fastabend, Florian Westphal, Daniel Borkmann, David Tat, Hannes Frederic Sowa, and Rusty Russell. 2) PTP and timestamping support in bnx2x, from Michal Kalderon. 3) Allow adjusting the rx_copybreak threshold for a driver via ethtool, and add rx_copybreak support to enic driver. From Govindarajulu Varadarajan. 4) Significant enhancements to the generic PHY layer and the bcm7xxx driver in particular (EEE support, auto power down, etc.) from Florian Fainelli. 5) Allow raw buffers to be used for flow dissection, allowing drivers to determine the optimal "linear pull" size for devices that DMA into pools of pages. The objective is to get exactly the necessary amount of headers into the linear SKB area pre-pulled, but no more. The new interface drivers use is eth_get_headlen(). 
From WANG Cong, with driver conversions (several had their own by-hand duplicated implementations) by Alexander Duyck and Eric Dumazet. 6) Support checksumming more smoothly and efficiently for encapsulations, and add "foo over UDP" facility. From Tom Herbert. 7) Add Broadcom SF2 switch driver to DSA layer, from Florian Fainelli. 8) eBPF now can load programs via a system call and has an extensive testsuite. Alexei Starovoitov and Daniel Borkmann. 9) Major overhaul of the packet scheduler to use RCU in several major areas such as the classifiers and rate estimators. From John Fastabend. 10) Add driver for Intel FM10000 Ethernet Switch, from Alexander Duyck. 11) Rearrange TCP_SKB_CB() to reduce cache line misses, from Eric Dumazet. 12) Add Datacenter TCP congestion control algorithm support, From Florian Westphal. 13) Reorganize sk_buff so that __copy_skb_header() is significantly faster. From Eric Dumazet" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1558 commits) netlabel: directly return netlbl_unlabel_genl_init() net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers net: description of dma_cookie cause make xmldocs warning cxgb4: clean up a type issue cxgb4: potential shift wrapping bug i40e: skb->xmit_more support net: fs_enet: Add NAPI TX net: fs_enet: Remove non NAPI RX r8169:add support for RTL8168EP net_sched: copy exts->type in tcf_exts_change() wimax: convert printk to pr_foo() af_unix: remove 0 assignment on static ipv6: Do not warn for informational ICMP messages, regardless of type. Update Intel Ethernet Driver maintainers list bridge: Save frag_max_size between PRE_ROUTING and POST_ROUTING tipc: fix bug in multicast congestion handling net: better IFF_XMIT_DST_RELEASE support net/mlx4_en: remove NETDEV_TX_BUSY 3c59x: fix bad split of cpu_to_le32(pci_map_single()) net: bcmgenet: fix Tx ring priority programming ...
Diffstat (limited to 'samples')
-rw-r--r--samples/bpf/Makefile12
-rw-r--r--samples/bpf/libbpf.c94
-rw-r--r--samples/bpf/libbpf.h172
-rw-r--r--samples/bpf/test_verifier.c678
4 files changed, 956 insertions, 0 deletions
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
new file mode 100644
index 000000000000..634391797856
--- /dev/null
+++ b/samples/bpf/Makefile
@@ -0,0 +1,12 @@
# kbuild trick to avoid linker error. Can be omitted if a module is built.
obj- := dummy.o

# List of programs to build
hostprogs-y := test_verifier

test_verifier-objs := test_verifier.o libbpf.o

# Tell kbuild to always build the programs
always := $(hostprogs-y)

# Pick up the exported uapi headers (e.g. linux/bpf.h) from the build tree
HOSTCFLAGS += -I$(objtree)/usr/include
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
new file mode 100644
index 000000000000..ff6504420738
--- /dev/null
+++ b/samples/bpf/libbpf.c
@@ -0,0 +1,94 @@
1/* eBPF mini library */
2#include <stdlib.h>
3#include <stdio.h>
4#include <linux/unistd.h>
5#include <unistd.h>
6#include <string.h>
7#include <linux/netlink.h>
8#include <linux/bpf.h>
9#include <errno.h>
10#include "libbpf.h"
11
/* Convert a userspace pointer to the __u64 representation the bpf
 * syscall ABI expects.  Going through unsigned long keeps the
 * conversion well defined on both 32-bit and 64-bit hosts.
 */
static __u64 ptr_to_u64(void *ptr)
{
	unsigned long addr = (unsigned long) ptr;

	return (__u64) addr;
}
16
/* Create a bpf map via the bpf() syscall.
 * @map_type:    kind of map to create
 * @key_size:    size of a lookup key, in bytes
 * @value_size:  size of a stored value, in bytes
 * @max_entries: maximum number of elements in the map
 *
 * Returns a new map file descriptor on success, -1 with errno set on error.
 *
 * The kernel rejects commands whose unused bpf_attr bytes are non-zero,
 * and a designated initializer on an automatic union only guarantees the
 * bytes of the initialized member, so zero the whole union explicitly.
 */
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
		   int max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
29
/* Store @value under @key in map @fd (BPF_MAP_UPDATE_ELEM).
 * Returns 0 on success, -1 with errno set on error.
 * The attr union is zeroed explicitly: the kernel requires unused
 * bpf_attr bytes to be zero, which a partial initializer of an
 * automatic union does not guarantee.
 */
int bpf_update_elem(int fd, void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
40
/* Copy the value stored under @key in map @fd into @value
 * (BPF_MAP_LOOKUP_ELEM).  Returns 0 on success, -1 with errno set
 * on error.  attr is zeroed explicitly; see bpf_update_elem().
 */
int bpf_lookup_elem(int fd, void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
51
/* Remove the element stored under @key from map @fd
 * (BPF_MAP_DELETE_ELEM).  Returns 0 on success, -1 with errno set
 * on error.  attr is zeroed explicitly; see bpf_update_elem().
 */
int bpf_delete_elem(int fd, void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
61
/* Write the key following @key in map @fd into @next_key
 * (BPF_MAP_GET_NEXT_KEY), enabling map iteration.  Returns 0 on
 * success, -1 with errno set on error (ENOENT when @key was the
 * last one, per the bpf(2) contract).  attr is zeroed explicitly;
 * see bpf_update_elem().
 */
int bpf_get_next_key(int fd, void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
72
73#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))
74
75char bpf_log_buf[LOG_BUF_SIZE];
76
77int bpf_prog_load(enum bpf_prog_type prog_type,
78 const struct bpf_insn *insns, int prog_len,
79 const char *license)
80{
81 union bpf_attr attr = {
82 .prog_type = prog_type,
83 .insns = ptr_to_u64((void *) insns),
84 .insn_cnt = prog_len / sizeof(struct bpf_insn),
85 .license = ptr_to_u64((void *) license),
86 .log_buf = ptr_to_u64(bpf_log_buf),
87 .log_size = LOG_BUF_SIZE,
88 .log_level = 1,
89 };
90
91 bpf_log_buf[0] = 0;
92
93 return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
94}
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
new file mode 100644
index 000000000000..8a31babeca5d
--- /dev/null
+++ b/samples/bpf/libbpf.h
@@ -0,0 +1,172 @@
/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

/* NOTE: this header uses enum bpf_map_type / enum bpf_prog_type and the
 * BPF_* opcode macros, so the includer must pull in <linux/bpf.h> first.
 */

struct bpf_insn;

/* Thin wrappers around the bpf(2) syscall; all return a non-negative
 * fd / 0 on success and -1 with errno set on error.
 */
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
		   int max_entries);
int bpf_update_elem(int fd, void *key, void *value);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_next_key(int fd, void *key, void *next_key);

/* insn_len is in bytes; the verifier log ends up in bpf_log_buf */
int bpf_prog_load(enum bpf_prog_type prog_type,
		  const struct bpf_insn *insns, int insn_len,
		  const char *license);

#define LOG_BUF_SIZE 8192
extern char bpf_log_buf[LOG_BUF_SIZE];

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

/* Expands to TWO struct bpf_insn initializers (separated by the comma):
 * low 32 bits of IMM in the first, high 32 bits in the second, whose
 * opcode is the reserved value zero.
 */
#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#define BPF_PSEUDO_MAP_FD	1

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)


/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

#endif
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
new file mode 100644
index 000000000000..f44ef11f65a7
--- /dev/null
+++ b/samples/bpf/test_verifier.c
@@ -0,0 +1,678 @@
1/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
10#include <stdio.h>
11#include <unistd.h>
12#include <linux/bpf.h>
13#include <errno.h>
14#include <linux/unistd.h>
15#include <string.h>
16#include <linux/filter.h>
17#include "libbpf.h"
18
#define MAX_INSNS	512		/* capacity of a test program */
#define ARRAY_SIZE(x)	(sizeof(x) / sizeof(*(x)))

/* One verifier test case. */
struct bpf_test {
	const char *descr;		/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS]; /* program, zero-padded at the end */
	int fixup[32];			/* zero-terminated list of insn indexes
					 * whose imm must be patched with a real
					 * map fd before loading (so index 0
					 * cannot be fixed up) */
	const char *errstr;		/* substring expected in the verifier log
					 * when result == REJECT */
	enum {
		ACCEPT,
		REJECT
	} result;			/* expected verifier verdict */
};
32
/* Table of verifier test cases, run in order by test(); see struct
 * bpf_test for the meaning of each field.
 */
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func 1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_EXIT_INSN(),
		},
		.fixup = {2},
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),

			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),

			/* should be able to access R0 = *(R2 + 8) */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),

			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),

			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),

			BPF_EXIT_INSN(),
		},
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "misaligned access",
		.result = REJECT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup = {3},
		.errstr = "R0 invalid mem access",
		.result = REJECT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec),
			BPF_EXIT_INSN(),
		},
		.fixup = {24},
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
};
595
596static int probe_filter_length(struct bpf_insn *fp)
597{
598 int len = 0;
599
600 for (len = MAX_INSNS - 1; len > 0; --len)
601 if (fp[len].code != 0 || fp[len].imm != 0)
602 break;
603
604 return len + 1;
605}
606
/* Create the dummy 1024-entry map (8-byte keys and values) used to
 * satisfy the fixup entries; prints a diagnostic on failure and
 * returns the (possibly negative) fd from bpf_create_map().
 */
static int create_map(void)
{
	long long key, value = 0;
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_UNSPEC, sizeof(key), sizeof(value), 1024);
	if (fd < 0)
		printf("failed to create map '%s'\n", strerror(errno));

	return fd;
}
619
620static int test(void)
621{
622 int prog_fd, i;
623
624 for (i = 0; i < ARRAY_SIZE(tests); i++) {
625 struct bpf_insn *prog = tests[i].insns;
626 int prog_len = probe_filter_length(prog);
627 int *fixup = tests[i].fixup;
628 int map_fd = -1;
629
630 if (*fixup) {
631 map_fd = create_map();
632
633 do {
634 prog[*fixup].imm = map_fd;
635 fixup++;
636 } while (*fixup);
637 }
638 printf("#%d %s ", i, tests[i].descr);
639
640 prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog,
641 prog_len * sizeof(struct bpf_insn),
642 "GPL");
643
644 if (tests[i].result == ACCEPT) {
645 if (prog_fd < 0) {
646 printf("FAIL\nfailed to load prog '%s'\n",
647 strerror(errno));
648 printf("%s", bpf_log_buf);
649 goto fail;
650 }
651 } else {
652 if (prog_fd >= 0) {
653 printf("FAIL\nunexpected success to load\n");
654 printf("%s", bpf_log_buf);
655 goto fail;
656 }
657 if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
658 printf("FAIL\nunexpected error message: %s",
659 bpf_log_buf);
660 goto fail;
661 }
662 }
663
664 printf("OK\n");
665fail:
666 if (map_fd >= 0)
667 close(map_fd);
668 close(prog_fd);
669
670 }
671
672 return 0;
673}
674
/* Entry point: run the eBPF verifier testsuite and propagate its result. */
int main(void)
{
	return test();
}