about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-05-12 00:25:51 -0400
committerDavid S. Miller <davem@davemloft.net>2014-05-12 00:25:51 -0400
commitbb399fbd223f478988ede5e6f2dfcc6750bf1f05 (patch)
treeaa3f671c1be28e93a6951e51fc669fb32520d89b /include
parent05ab2dae650e09add1c5295392b5516704c03a4b (diff)
parent9def624afdf2a8122eed5f2beec7448513c9a703 (diff)
Merge branch 'filter-next'
Alexei Starovoitov says: ==================== BPF testsuite and cleanup This patchset adds BPF testsuite and improves readability of classic to internal BPF converter. The testsuite helped to find 'negative offset bug' in x64 JIT that was fixed by commit fdfaf64e ("x86: bpf_jit: support negative offsets") It can be very useful for classic and internal JIT compiler developers. Also it serves as performance benchmark. x86_64/i386 pass all tests with and without JIT. arm32 JIT is failing negative offset tests which are unsupported. Internal BPF tests are much larger than classic tests to cover different combinations of registers. Negative tests check correctness of classic BPF verifier which must reject them. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--include/linux/filter.h51
1 file changed, 51 insertions, 0 deletions
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ed1efab10b8f..4457b383961c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -79,6 +79,57 @@ enum {
79	/* BPF program can access up to 512 bytes of stack space. */
80	#define MAX_BPF_STACK	512
81
/* ALU ops on registers: dst (op)= src; for mov: dst = src */
#define BPF_ALU64_REG(op, dst, src) \
	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, dst, src, 0, 0})
#define BPF_ALU32_REG(op, dst, src) \
	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, dst, src, 0, 0})

/* ALU ops with an immediate: dst (op)= imm; for mov: dst = imm */
#define BPF_ALU64_IMM(op, dst, imm) \
	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, dst, 0, 0, imm})
#define BPF_ALU32_IMM(op, dst, imm) \
	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, dst, 0, 0, imm})

/* Absolute packet load: R0 = *(uint *) (skb->data + off) */
#define BPF_LD_ABS(size, off) \
	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})

/* Indirect packet load: R0 = *(uint *) (skb->data + src + off) */
#define BPF_LD_IND(size, src, off) \
	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, src, 0, off})

/* Memory load: dst = *(uint *) (src + off) */
#define BPF_LDX_MEM(size, dst, src, off) \
	((struct sock_filter_int) {BPF_LDX|BPF_SIZE(size)|BPF_MEM, dst, src, off, 0})

/* Conditional jump on registers: if (dst 'op' src) goto pc + off */
#define BPF_JMP_REG(op, dst, src, off) \
	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, dst, src, off, 0})

/* Conditional jump on immediate: if (dst 'op' imm) goto pc + off */
#define BPF_JMP_IMM(op, dst, imm, off) \
	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, dst, 0, off, imm})

/* Program exit instruction */
#define BPF_EXIT_INSN() \
	((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})

117static inline int size_to_bpf(int size)
118{
119 switch (size) {
120 case 1:
121 return BPF_B;
122 case 2:
123 return BPF_H;
124 case 4:
125 return BPF_W;
126 case 8:
127 return BPF_DW;
128 default:
129 return -EINVAL;
130 }
131}
132
/* Macro to invoke filter function.
 * Arguments are parenthesized so that non-trivial expressions passed as
 * 'filter' or 'ctx' expand correctly (standard macro hygiene); expansion
 * for plain-identifier call sites is unchanged.
 */
#define SK_RUN_FILTER(filter, ctx) \
	(*(filter)->bpf_func)((ctx), (filter)->insnsi)
84 135