aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2017-04-30 20:57:20 -0400
committerDavid S. Miller <davem@davemloft.net>2017-05-02 15:04:50 -0400
commit85f68fe89832057584a9e66e1e7e53d53e50faff (patch)
treeaf912d9385e6e29acc556896e82199b422302190
parenta481649e1c2a4900450e967165524282bbdf91e0 (diff)
bpf, arm64: implement jiting of BPF_XADD
This work adds BPF_XADD for BPF_W/BPF_DW to the arm64 JIT and therefore
completes JITing of all BPF instructions, meaning we can thus also remove
the 'notyet' label and do not need to fall back to the interpreter when
BPF_XADD is used in a program! This now also brings arm64 JIT in line
with x86_64, s390x, ppc64, sparc64, where all current eBPF features are
supported.

BPF_W example from test_bpf:

  .u.insns_int = {
          BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
          BPF_ST_MEM(BPF_W, R10, -40, 0x10),
          BPF_STX_XADD(BPF_W, R10, R0, -40),
          BPF_LDX_MEM(BPF_W, R0, R10, -40),
          BPF_EXIT_INSN(),
  },

  [...]
  00000020:  52800247  mov     w7, #0x12                 // #18
  00000024:  928004eb  mov     x11, #0xffffffffffffffd8  // #-40
  00000028:  d280020a  mov     x10, #0x10                // #16
  0000002c:  b82b6b2a  str     w10, [x25,x11]
  // start of xadd mapping:
  00000030:  928004ea  mov     x10, #0xffffffffffffffd8  // #-40
  00000034:  8b19014a  add     x10, x10, x25
  00000038:  f9800151  prfm    pstl1strm, [x10]
  0000003c:  885f7d4b  ldxr    w11, [x10]
  00000040:  0b07016b  add     w11, w11, w7
  00000044:  880b7d4b  stxr    w11, w11, [x10]
  00000048:  35ffffab  cbnz    w11, 0x0000003c
  // end of xadd mapping:
  [...]

BPF_DW example from test_bpf:

  .u.insns_int = {
          BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
          BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
          BPF_STX_XADD(BPF_DW, R10, R0, -40),
          BPF_LDX_MEM(BPF_DW, R0, R10, -40),
          BPF_EXIT_INSN(),
  },

  [...]
  00000020:  52800247  mov     w7, #0x12                 // #18
  00000024:  928004eb  mov     x11, #0xffffffffffffffd8  // #-40
  00000028:  d280020a  mov     x10, #0x10                // #16
  0000002c:  f82b6b2a  str     x10, [x25,x11]
  // start of xadd mapping:
  00000030:  928004ea  mov     x10, #0xffffffffffffffd8  // #-40
  00000034:  8b19014a  add     x10, x10, x25
  00000038:  f9800151  prfm    pstl1strm, [x10]
  0000003c:  c85f7d4b  ldxr    x11, [x10]
  00000040:  8b07016b  add     x11, x11, x7
  00000044:  c80b7d4b  stxr    w11, x11, [x10]
  00000048:  35ffffab  cbnz    w11, 0x0000003c
  // end of xadd mapping:
  [...]

Tested on Cavium ThunderX ARMv8, test suite results after the patch:

  No JIT:   [ 3751.855362] test_bpf: Summary: 311 PASSED, 0 FAILED, [0/303 JIT'ed]
  With JIT: [ 3573.759527] test_bpf: Summary: 311 PASSED, 0 FAILED, [303/303 JIT'ed]

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/arm64/include/asm/insn.h30
-rw-r--r--arch/arm64/kernel/insn.c106
-rw-r--r--arch/arm64/net/bpf_jit.h19
-rw-r--r--arch/arm64/net/bpf_jit_comp.c16
-rw-r--r--lib/test_bpf.c105
5 files changed, 271 insertions, 5 deletions
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index aecc07e09a18..29cb2ca756f6 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -80,6 +80,7 @@ enum aarch64_insn_register_type {
80 AARCH64_INSN_REGTYPE_RM, 80 AARCH64_INSN_REGTYPE_RM,
81 AARCH64_INSN_REGTYPE_RD, 81 AARCH64_INSN_REGTYPE_RD,
82 AARCH64_INSN_REGTYPE_RA, 82 AARCH64_INSN_REGTYPE_RA,
83 AARCH64_INSN_REGTYPE_RS,
83}; 84};
84 85
85enum aarch64_insn_register { 86enum aarch64_insn_register {
@@ -188,6 +189,8 @@ enum aarch64_insn_ldst_type {
188 AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX, 189 AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
189 AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX, 190 AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
190 AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX, 191 AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
192 AARCH64_INSN_LDST_LOAD_EX,
193 AARCH64_INSN_LDST_STORE_EX,
191}; 194};
192 195
193enum aarch64_insn_adsb_type { 196enum aarch64_insn_adsb_type {
@@ -240,6 +243,23 @@ enum aarch64_insn_logic_type {
240 AARCH64_INSN_LOGIC_BIC_SETFLAGS 243 AARCH64_INSN_LOGIC_BIC_SETFLAGS
241}; 244};
242 245
/* PRFM prefetch operation type; encoded into prfop bits [4:3]. */
enum aarch64_insn_prfm_type {
	AARCH64_INSN_PRFM_TYPE_PLD,	/* prefetch for load */
	AARCH64_INSN_PRFM_TYPE_PLI,	/* preload instruction */
	AARCH64_INSN_PRFM_TYPE_PST,	/* prefetch for store */
};

/* PRFM target cache level; encoded into prfop bits [2:1]. */
enum aarch64_insn_prfm_target {
	AARCH64_INSN_PRFM_TARGET_L1,
	AARCH64_INSN_PRFM_TARGET_L2,
	AARCH64_INSN_PRFM_TARGET_L3,
};

/* PRFM retention policy; encoded into prfop bit [0]. */
enum aarch64_insn_prfm_policy {
	AARCH64_INSN_PRFM_POLICY_KEEP,	/* keep in cache (temporal) */
	AARCH64_INSN_PRFM_POLICY_STRM,	/* streaming (non-temporal) */
};
262
243#define __AARCH64_INSN_FUNCS(abbr, mask, val) \ 263#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
244static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ 264static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
245{ return (code & (mask)) == (val); } \ 265{ return (code & (mask)) == (val); } \
@@ -248,6 +268,7 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
248 268
249__AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000) 269__AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000)
250__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000) 270__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
271__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
251__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000) 272__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
252__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) 273__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
253__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) 274__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
@@ -357,6 +378,11 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
357 int offset, 378 int offset,
358 enum aarch64_insn_variant variant, 379 enum aarch64_insn_variant variant,
359 enum aarch64_insn_ldst_type type); 380 enum aarch64_insn_ldst_type type);
381u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
382 enum aarch64_insn_register base,
383 enum aarch64_insn_register state,
384 enum aarch64_insn_size_type size,
385 enum aarch64_insn_ldst_type type);
360u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, 386u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
361 enum aarch64_insn_register src, 387 enum aarch64_insn_register src,
362 int imm, enum aarch64_insn_variant variant, 388 int imm, enum aarch64_insn_variant variant,
@@ -397,6 +423,10 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
397 int shift, 423 int shift,
398 enum aarch64_insn_variant variant, 424 enum aarch64_insn_variant variant,
399 enum aarch64_insn_logic_type type); 425 enum aarch64_insn_logic_type type);
426u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
427 enum aarch64_insn_prfm_type type,
428 enum aarch64_insn_prfm_target target,
429 enum aarch64_insn_prfm_policy policy);
400s32 aarch64_get_branch_offset(u32 insn); 430s32 aarch64_get_branch_offset(u32 insn);
401u32 aarch64_set_branch_offset(u32 insn, s32 offset); 431u32 aarch64_set_branch_offset(u32 insn, s32 offset);
402 432
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 3a63954a8b14..b884a926a632 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -474,6 +474,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
474 shift = 10; 474 shift = 10;
475 break; 475 break;
476 case AARCH64_INSN_REGTYPE_RM: 476 case AARCH64_INSN_REGTYPE_RM:
477 case AARCH64_INSN_REGTYPE_RS:
477 shift = 16; 478 shift = 16;
478 break; 479 break;
479 default: 480 default:
@@ -757,6 +758,111 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
757 offset >> shift); 758 offset >> shift);
758} 759}
759 760
761u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
762 enum aarch64_insn_register base,
763 enum aarch64_insn_register state,
764 enum aarch64_insn_size_type size,
765 enum aarch64_insn_ldst_type type)
766{
767 u32 insn;
768
769 switch (type) {
770 case AARCH64_INSN_LDST_LOAD_EX:
771 insn = aarch64_insn_get_load_ex_value();
772 break;
773 case AARCH64_INSN_LDST_STORE_EX:
774 insn = aarch64_insn_get_store_ex_value();
775 break;
776 default:
777 pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
778 return AARCH64_BREAK_FAULT;
779 }
780
781 insn = aarch64_insn_encode_ldst_size(size, insn);
782
783 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
784 reg);
785
786 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
787 base);
788
789 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
790 AARCH64_INSN_REG_ZR);
791
792 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
793 state);
794}
795
796static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
797 enum aarch64_insn_prfm_target target,
798 enum aarch64_insn_prfm_policy policy,
799 u32 insn)
800{
801 u32 imm_type = 0, imm_target = 0, imm_policy = 0;
802
803 switch (type) {
804 case AARCH64_INSN_PRFM_TYPE_PLD:
805 break;
806 case AARCH64_INSN_PRFM_TYPE_PLI:
807 imm_type = BIT(0);
808 break;
809 case AARCH64_INSN_PRFM_TYPE_PST:
810 imm_type = BIT(1);
811 break;
812 default:
813 pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
814 return AARCH64_BREAK_FAULT;
815 }
816
817 switch (target) {
818 case AARCH64_INSN_PRFM_TARGET_L1:
819 break;
820 case AARCH64_INSN_PRFM_TARGET_L2:
821 imm_target = BIT(0);
822 break;
823 case AARCH64_INSN_PRFM_TARGET_L3:
824 imm_target = BIT(1);
825 break;
826 default:
827 pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
828 return AARCH64_BREAK_FAULT;
829 }
830
831 switch (policy) {
832 case AARCH64_INSN_PRFM_POLICY_KEEP:
833 break;
834 case AARCH64_INSN_PRFM_POLICY_STRM:
835 imm_policy = BIT(0);
836 break;
837 default:
838 pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
839 return AARCH64_BREAK_FAULT;
840 }
841
842 /* In this case, imm5 is encoded into Rt field. */
843 insn &= ~GENMASK(4, 0);
844 insn |= imm_policy | (imm_target << 1) | (imm_type << 3);
845
846 return insn;
847}
848
849u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
850 enum aarch64_insn_prfm_type type,
851 enum aarch64_insn_prfm_target target,
852 enum aarch64_insn_prfm_policy policy)
853{
854 u32 insn = aarch64_insn_get_prfm_value();
855
856 insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
857
858 insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
859
860 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
861 base);
862
863 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
864}
865
760u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, 866u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
761 enum aarch64_insn_register src, 867 enum aarch64_insn_register src,
762 int imm, enum aarch64_insn_variant variant, 868 int imm, enum aarch64_insn_variant variant,
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 7c16e547ccb2..b02a9268dfbf 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -83,6 +83,25 @@
83/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */ 83/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
84#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX) 84#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
85 85
86/* Load/store exclusive */
87#define A64_SIZE(sf) \
88 ((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
89#define A64_LSX(sf, Rt, Rn, Rs, type) \
90 aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
91 AARCH64_INSN_LDST_##type)
92/* Rt = [Rn]; (atomic) */
93#define A64_LDXR(sf, Rt, Rn) \
94 A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
95/* [Rn] = Rt; (atomic) Rs = [state] */
96#define A64_STXR(sf, Rt, Rn, Rs) \
97 A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
98
99/* Prefetch */
100#define A64_PRFM(Rn, type, target, policy) \
101 aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
102 AARCH64_INSN_PRFM_TARGET_##target, \
103 AARCH64_INSN_PRFM_POLICY_##policy)
104
86/* Add/subtract (immediate) */ 105/* Add/subtract (immediate) */
87#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \ 106#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
88 aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \ 107 aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 304736870dca..4f2b35130f3c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -321,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
321 const s32 imm = insn->imm; 321 const s32 imm = insn->imm;
322 const int i = insn - ctx->prog->insnsi; 322 const int i = insn - ctx->prog->insnsi;
323 const bool is64 = BPF_CLASS(code) == BPF_ALU64; 323 const bool is64 = BPF_CLASS(code) == BPF_ALU64;
324 const bool isdw = BPF_SIZE(code) == BPF_DW;
324 u8 jmp_cond; 325 u8 jmp_cond;
325 s32 jmp_offset; 326 s32 jmp_offset;
326 327
@@ -681,7 +682,16 @@ emit_cond_jmp:
681 case BPF_STX | BPF_XADD | BPF_W: 682 case BPF_STX | BPF_XADD | BPF_W:
682 /* STX XADD: lock *(u64 *)(dst + off) += src */ 683 /* STX XADD: lock *(u64 *)(dst + off) += src */
683 case BPF_STX | BPF_XADD | BPF_DW: 684 case BPF_STX | BPF_XADD | BPF_DW:
684 goto notyet; 685 emit_a64_mov_i(1, tmp, off, ctx);
686 emit(A64_ADD(1, tmp, tmp, dst), ctx);
687 emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
688 emit(A64_LDXR(isdw, tmp2, tmp), ctx);
689 emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
690 emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
691 jmp_offset = -3;
692 check_imm19(jmp_offset);
693 emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
694 break;
685 695
686 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ 696 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
687 case BPF_LD | BPF_ABS | BPF_W: 697 case BPF_LD | BPF_ABS | BPF_W:
@@ -748,10 +758,6 @@ emit_cond_jmp:
748 } 758 }
749 break; 759 break;
750 } 760 }
751notyet:
752 pr_info_once("*** NOT YET: opcode %02x ***\n", code);
753 return -EFAULT;
754
755 default: 761 default:
756 pr_err_once("unknown opcode %02x\n", code); 762 pr_err_once("unknown opcode %02x\n", code);
757 return -EINVAL; 763 return -EINVAL;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0362da0b66c3..3a7730ca81be 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -434,6 +434,41 @@ loop:
434 return 0; 434 return 0;
435} 435}
436 436
437static int __bpf_fill_stxdw(struct bpf_test *self, int size)
438{
439 unsigned int len = BPF_MAXINSNS;
440 struct bpf_insn *insn;
441 int i;
442
443 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
444 if (!insn)
445 return -ENOMEM;
446
447 insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1);
448 insn[1] = BPF_ST_MEM(size, R10, -40, 42);
449
450 for (i = 2; i < len - 2; i++)
451 insn[i] = BPF_STX_XADD(size, R10, R0, -40);
452
453 insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40);
454 insn[len - 1] = BPF_EXIT_INSN();
455
456 self->u.ptr.insns = insn;
457 self->u.ptr.len = len;
458
459 return 0;
460}
461
462static int bpf_fill_stxw(struct bpf_test *self)
463{
464 return __bpf_fill_stxdw(self, BPF_W);
465}
466
467static int bpf_fill_stxdw(struct bpf_test *self)
468{
469 return __bpf_fill_stxdw(self, BPF_DW);
470}
471
437static struct bpf_test tests[] = { 472static struct bpf_test tests[] = {
438 { 473 {
439 "TAX", 474 "TAX",
@@ -4303,6 +4338,41 @@ static struct bpf_test tests[] = {
4303 { { 0, 0x22 } }, 4338 { { 0, 0x22 } },
4304 }, 4339 },
4305 { 4340 {
4341 "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
4342 .u.insns_int = {
4343 BPF_ALU64_REG(BPF_MOV, R1, R10),
4344 BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
4345 BPF_ST_MEM(BPF_W, R10, -40, 0x10),
4346 BPF_STX_XADD(BPF_W, R10, R0, -40),
4347 BPF_ALU64_REG(BPF_MOV, R0, R10),
4348 BPF_ALU64_REG(BPF_SUB, R0, R1),
4349 BPF_EXIT_INSN(),
4350 },
4351 INTERNAL,
4352 { },
4353 { { 0, 0 } },
4354 },
4355 {
4356 "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
4357 .u.insns_int = {
4358 BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
4359 BPF_ST_MEM(BPF_W, R10, -40, 0x10),
4360 BPF_STX_XADD(BPF_W, R10, R0, -40),
4361 BPF_EXIT_INSN(),
4362 },
4363 INTERNAL,
4364 { },
4365 { { 0, 0x12 } },
4366 },
4367 {
4368 "STX_XADD_W: X + 1 + 1 + 1 + ...",
4369 { },
4370 INTERNAL,
4371 { },
4372 { { 0, 4134 } },
4373 .fill_helper = bpf_fill_stxw,
4374 },
4375 {
4306 "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22", 4376 "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
4307 .u.insns_int = { 4377 .u.insns_int = {
4308 BPF_ALU32_IMM(BPF_MOV, R0, 0x12), 4378 BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
@@ -4315,6 +4385,41 @@ static struct bpf_test tests[] = {
4315 { }, 4385 { },
4316 { { 0, 0x22 } }, 4386 { { 0, 0x22 } },
4317 }, 4387 },
4388 {
4389 "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
4390 .u.insns_int = {
4391 BPF_ALU64_REG(BPF_MOV, R1, R10),
4392 BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
4393 BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
4394 BPF_STX_XADD(BPF_DW, R10, R0, -40),
4395 BPF_ALU64_REG(BPF_MOV, R0, R10),
4396 BPF_ALU64_REG(BPF_SUB, R0, R1),
4397 BPF_EXIT_INSN(),
4398 },
4399 INTERNAL,
4400 { },
4401 { { 0, 0 } },
4402 },
4403 {
4404 "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
4405 .u.insns_int = {
4406 BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
4407 BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
4408 BPF_STX_XADD(BPF_DW, R10, R0, -40),
4409 BPF_EXIT_INSN(),
4410 },
4411 INTERNAL,
4412 { },
4413 { { 0, 0x12 } },
4414 },
4415 {
4416 "STX_XADD_DW: X + 1 + 1 + 1 + ...",
4417 { },
4418 INTERNAL,
4419 { },
4420 { { 0, 4134 } },
4421 .fill_helper = bpf_fill_stxdw,
4422 },
4318 /* BPF_JMP | BPF_EXIT */ 4423 /* BPF_JMP | BPF_EXIT */
4319 { 4424 {
4320 "JMP_EXIT", 4425 "JMP_EXIT",