author    Daniel Borkmann <daniel@iogearbox.net>    2018-02-23 19:08:01 -0500
committer Alexei Starovoitov <ast@kernel.org>      2018-02-24 01:50:00 -0500
commit    4c38e2f386b4fc5fd95d1203c74819948e2e903d (patch)
tree      26e9b52c57d79f91206e09624e5eaaa360275f1f /arch/x86/net
parent    d806a0cf2a1ddb97c91d902ef1c8219e1e2b2c4c (diff)
bpf, x64: save few bytes when mul is in alu32
Add a generic emit_mov_reg() helper and reuse it in the BPF multiplication path to load the src into rax; doing so saves a few bytes in the alu32 case.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
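For context on where the saving comes from: a reg-to-reg mov is opcode 0x89 plus a ModRM byte, and only the 64-bit form needs a REX.W prefix (extended registers additionally need a REX prefix, which is what add_2mod(0x40, ...) handles in the patch below). A minimal standalone sketch of the encoding sizes, assuming the low eight registers only — the emit_mov() helper here is hypothetical and simplified, not the kernel's emitter:

#include <stdint.h>
#include <stdio.h>

/* Sketch: encode "mov dst, src" between general-purpose registers.
 * 64-bit operands need a REX.W prefix (0x48); 32-bit moves between
 * the low eight registers need none, so they are one byte shorter.
 */
static int emit_mov(uint8_t *buf, int is64, int dst, int src)
{
	int n = 0;

	if (is64)
		buf[n++] = 0x48;                /* REX.W: 64-bit operand size */
	buf[n++] = 0x89;                        /* mov r/m, r */
	buf[n++] = 0xC0 | (src << 3) | dst;     /* ModRM: register-direct */
	return n;
}

int main(void)
{
	uint8_t buf[4];

	/* rax = 0, rcx = 1 */
	printf("mov rax, rcx: %d bytes\n", emit_mov(buf, 1, 0, 1)); /* 3: 48 89 c8 */
	printf("mov eax, ecx: %d bytes\n", emit_mov(buf, 0, 0, 1)); /* 2: 89 c8 */
	return 0;
}

Run, this prints 3 bytes for the 64-bit form and 2 for the 32-bit form: one byte saved per mov in alu32 whenever neither register requires a REX prefix.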
Diffstat (limited to 'arch/x86/net')
-rw-r--r--    arch/x86/net/bpf_jit_comp.c    43
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9895ca383023..5b8fc1326aa1 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -422,6 +422,24 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
 	*pprog = prog;
 }
 
+static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (is64) {
+		/* mov dst, src */
+		EMIT_mov(dst_reg, src_reg);
+	} else {
+		/* mov32 dst, src */
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT1(add_2mod(0x40, dst_reg, src_reg));
+		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
+	}
+
+	*pprog = prog;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
@@ -480,16 +498,11 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
 			break;
 
-			/* mov dst, src */
 		case BPF_ALU64 | BPF_MOV | BPF_X:
-			EMIT_mov(dst_reg, src_reg);
-			break;
-
-			/* mov32 dst, src */
 		case BPF_ALU | BPF_MOV | BPF_X:
-			if (is_ereg(dst_reg) || is_ereg(src_reg))
-				EMIT1(add_2mod(0x40, dst_reg, src_reg));
-			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
+			emit_mov_reg(&prog,
+				     BPF_CLASS(insn->code) == BPF_ALU64,
+				     dst_reg, src_reg);
 			break;
 
 			/* neg dst */
@@ -615,6 +628,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_ALU | BPF_MUL | BPF_X:
 		case BPF_ALU64 | BPF_MUL | BPF_K:
 		case BPF_ALU64 | BPF_MUL | BPF_X:
+		{
+			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+
 			if (dst_reg != BPF_REG_0)
 				EMIT1(0x50); /* push rax */
 			if (dst_reg != BPF_REG_3)
@@ -624,14 +640,11 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			EMIT_mov(AUX_REG, dst_reg);
 
 			if (BPF_SRC(insn->code) == BPF_X)
-				/* mov rax, src_reg */
-				EMIT_mov(BPF_REG_0, src_reg);
+				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
 			else
-				/* mov rax, imm32 */
-				emit_mov_imm32(&prog, true,
-					       BPF_REG_0, imm32);
+				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
 
-			if (BPF_CLASS(insn->code) == BPF_ALU64)
+			if (is64)
 				EMIT1(add_1mod(0x48, AUX_REG));
 			else if (is_ereg(AUX_REG))
 				EMIT1(add_1mod(0x40, AUX_REG));
@@ -646,7 +659,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			EMIT1(0x58); /* pop rax */
 			}
 			break;
-
+		}
 		/* shifts */
 		case BPF_ALU | BPF_LSH | BPF_K:
 		case BPF_ALU | BPF_RSH | BPF_K: