author	Daniel Borkmann <daniel@iogearbox.net>	2015-05-26 16:35:43 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-27 14:05:59 -0400
commit	bde28bc6ad0c575f8b4eebe8cd27e36d6c3b09c6 (patch)
tree	679251f45a7a1ab02886e816ff418fdc4f227cb8 /lib/test_bpf.c
parent	5474b13233eb46ab9b80f12f9c8003aabd383283 (diff)
test_bpf: add similarly conflicting jump test case only for classic
While 3b52960266a3 ("test_bpf: add more eBPF jump torture cases") added the int3 bug test case only for eBPF, which needs exactly 11 passes to converge, here's a version for classic BPF with 11 passes, and one that would need 70 passes on x86_64 to actually converge and be successfully JITed. Effectively, all jumps are optimized out, resulting in a JIT image of just 89 bytes (from originally max BPF insns) that only returns K.

Might be useful as a recipe for folks wanting to craft a test case when backporting the fix in commit 3f7352bf21f8 ("x86: bpf_jit: fix compilation of large bpf programs") while not having eBPF. The second one is delegated to the interpreter, as the last pass still results in shrinking; in other words, this one won't be JITed on x86_64.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
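The "recipe" remark above can be made concrete with a small userspace sketch. This is not part of the patch: build_ja_filter() is a hypothetical helper name, the attachment step is left out, and the code only assumes the uapi BPF_JUMP/BPF_STMT macros from <linux/filter.h> plus C99 compound literals. It mirrors the in-kernel __bpf_fill_ja() logic shown in the diff below.

#include <errno.h>
#include <stdlib.h>
#include <linux/filter.h>

/* Sketch only: emit the same "jump, gap, jump, ..., ret K" pattern that
 * the in-kernel fill helper generates, as a classic BPF socket filter. */
static int build_ja_filter(struct sock_fprog *prog, unsigned short len,
			   unsigned int plen)
{
	struct sock_filter *insn;
	unsigned int rlen, i, j;

	insn = calloc(len, sizeof(*insn));
	if (!insn)
		return -ENOMEM;

	/* Jumps left over for the trailing partial group. */
	rlen = (len % plen) - 1;

	/* Full groups of plen unconditional jumps; each insn hops forward
	 * so that the whole group funnels to the instruction after it. */
	for (i = 0; i + plen < len; i += plen)
		for (j = 0; j < plen; j++)
			insn[i + j] = (struct sock_filter)
				BPF_JUMP(BPF_JMP | BPF_JA, plen - 1 - j, 0, 0);

	/* Trailing partial group right before the final return. */
	for (j = 0; j < rlen; j++)
		insn[i + j] = (struct sock_filter)
			BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j, 0, 0);

	/* Everything ends up at a single BPF_RET | BPF_K. */
	insn[len - 1] = (struct sock_filter)
		BPF_STMT(BPF_RET | BPF_K, 0xababcbac);

	prog->filter = insn;
	prog->len = len;
	return 0;
}

Attaching the resulting prog with setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) on a socket, with /proc/sys/net/core/bpf_jit_enable set to 1, should let the classic BPF JIT chew on the same jump pattern; choosing len/plen as in the patch (12/9 or BPF_MAXINSNS/68) targets the 11-pass and 70-pass cases respectively.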
Diffstat (limited to 'lib/test_bpf.c')
-rw-r--r--	lib/test_bpf.c	57
1 file changed, 57 insertions, 0 deletions
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c07b8e7db330..7f58c735d745 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -314,6 +314,47 @@ static int bpf_fill_maxinsns10(struct bpf_test *self)
 	return 0;
 }
 
+static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
+			 unsigned int plen)
+{
+	struct sock_filter *insn;
+	unsigned int rlen;
+	int i, j;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	rlen = (len % plen) - 1;
+
+	for (i = 0; i + plen < len; i += plen)
+		for (j = 0; j < plen; j++)
+			insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
+						 plen - 1 - j, 0, 0);
+	for (j = 0; j < rlen; j++)
+		insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
+					 0, 0);
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+static int bpf_fill_maxinsns11(struct bpf_test *self)
+{
+	/* Hits 70 passes on x86_64, so cannot get JITed there. */
+	return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
+}
+
+static int bpf_fill_ja(struct bpf_test *self)
+{
+	/* Hits exactly 11 passes on x86_64 JIT. */
+	return __bpf_fill_ja(self, 12, 9);
+}
+
 static struct bpf_test tests[] = {
 	{
 		"TAX",
@@ -4252,6 +4293,14 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 1 } },
 	},
+	{
+		"JMP_JA: Jump, gap, jump, ...",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xababcbac } },
+		.fill_helper = bpf_fill_ja,
+	},
 	{	/* Mainly checking JIT here. */
 		"BPF_MAXINSNS: Maximum possible literals",
 		{ },
@@ -4335,6 +4384,14 @@ static struct bpf_test tests[] = {
 		{ { 0, 0xabababac } },
 		.fill_helper = bpf_fill_maxinsns10,
 	},
+	{
+		"BPF_MAXINSNS: Jump, gap, jump, ...",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xababcbac } },
+		.fill_helper = bpf_fill_maxinsns11,
+	},
 };
 
 static struct net_device dev;