aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/net
diff options
context:
space:
mode:
authorDaniel Borkmann <dborkman@redhat.com>2014-05-29 04:22:50 -0400
committerDavid S. Miller <davem@davemloft.net>2014-06-02 01:16:58 -0400
commit3480593131e0b781287dae0139bf7ccee7cba7ff (patch)
tree6e259a45b3767bd80b789814e4d484ee0ac069bf /arch/arm/net
parentd50bc1575096250aa37f17299c86ea548156efe8 (diff)
net: filter: get rid of BPF_S_* enum
This patch finally allows us to get rid of the BPF_S_* enum. Currently, the code performs unnecessary encode and decode workarounds in seccomp and filter migration itself when a filter is being attached in order to overcome BPF_S_* encoding which is not used anymore by the new interpreter resp. JIT compilers. Keeping it around would mean that also in future we would need to extend and maintain this enum and related encoders/decoders. We can get rid of all that and save us these operations during filter attaching. Naturally, also JIT compilers need to be updated by this. Before JIT conversion is being done, each compiler checks if A is being loaded at startup to obtain information if it needs to emit instructions to clear A first. Since BPF extensions are a subset of BPF_LD | BPF_{W,H,B} | BPF_ABS variants, case statements for extensions can be removed at that point. To ease and minimize code changes in the classic JITs, we have introduced bpf_anc_helper(). Tested with test_bpf on x86_64 (JIT, int), s390x (JIT, int), arm (JIT, int), i386 (int), ppc64 (JIT, int); for sparc we unfortunately didn't have access, but changes are analogous to the rest. Joint work with Alexei Starovoitov. Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: Alexei Starovoitov <ast@plumgrid.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Mircea Gherzan <mgherzan@gmail.com> Cc: Kees Cook <keescook@chromium.org> Acked-by: Chema Gonzalez <chemag@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/arm/net')
-rw-r--r--arch/arm/net/bpf_jit_32.c139
1 file changed, 67 insertions, 72 deletions
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6f879c319a9d..fb5503ce016f 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -136,7 +136,7 @@ static u16 saved_regs(struct jit_ctx *ctx)
136 u16 ret = 0; 136 u16 ret = 0;
137 137
138 if ((ctx->skf->len > 1) || 138 if ((ctx->skf->len > 1) ||
139 (ctx->skf->insns[0].code == BPF_S_RET_A)) 139 (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
140 ret |= 1 << r_A; 140 ret |= 1 << r_A;
141 141
142#ifdef CONFIG_FRAME_POINTER 142#ifdef CONFIG_FRAME_POINTER
@@ -164,18 +164,10 @@ static inline int mem_words_used(struct jit_ctx *ctx)
164static inline bool is_load_to_a(u16 inst) 164static inline bool is_load_to_a(u16 inst)
165{ 165{
166 switch (inst) { 166 switch (inst) {
167 case BPF_S_LD_W_LEN: 167 case BPF_LD | BPF_W | BPF_LEN:
168 case BPF_S_LD_W_ABS: 168 case BPF_LD | BPF_W | BPF_ABS:
169 case BPF_S_LD_H_ABS: 169 case BPF_LD | BPF_H | BPF_ABS:
170 case BPF_S_LD_B_ABS: 170 case BPF_LD | BPF_B | BPF_ABS:
171 case BPF_S_ANC_CPU:
172 case BPF_S_ANC_IFINDEX:
173 case BPF_S_ANC_MARK:
174 case BPF_S_ANC_PROTOCOL:
175 case BPF_S_ANC_RXHASH:
176 case BPF_S_ANC_VLAN_TAG:
177 case BPF_S_ANC_VLAN_TAG_PRESENT:
178 case BPF_S_ANC_QUEUE:
179 return true; 171 return true;
180 default: 172 default:
181 return false; 173 return false;
@@ -215,7 +207,7 @@ static void build_prologue(struct jit_ctx *ctx)
215 emit(ARM_MOV_I(r_X, 0), ctx); 207 emit(ARM_MOV_I(r_X, 0), ctx);
216 208
217 /* do not leak kernel data to userspace */ 209 /* do not leak kernel data to userspace */
218 if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) 210 if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
219 emit(ARM_MOV_I(r_A, 0), ctx); 211 emit(ARM_MOV_I(r_A, 0), ctx);
220 212
221 /* stack space for the BPF_MEM words */ 213 /* stack space for the BPF_MEM words */
@@ -480,36 +472,39 @@ static int build_body(struct jit_ctx *ctx)
480 u32 k; 472 u32 k;
481 473
482 for (i = 0; i < prog->len; i++) { 474 for (i = 0; i < prog->len; i++) {
475 u16 code;
476
483 inst = &(prog->insns[i]); 477 inst = &(prog->insns[i]);
484 /* K as an immediate value operand */ 478 /* K as an immediate value operand */
485 k = inst->k; 479 k = inst->k;
480 code = bpf_anc_helper(inst);
486 481
487 /* compute offsets only in the fake pass */ 482 /* compute offsets only in the fake pass */
488 if (ctx->target == NULL) 483 if (ctx->target == NULL)
489 ctx->offsets[i] = ctx->idx * 4; 484 ctx->offsets[i] = ctx->idx * 4;
490 485
491 switch (inst->code) { 486 switch (code) {
492 case BPF_S_LD_IMM: 487 case BPF_LD | BPF_IMM:
493 emit_mov_i(r_A, k, ctx); 488 emit_mov_i(r_A, k, ctx);
494 break; 489 break;
495 case BPF_S_LD_W_LEN: 490 case BPF_LD | BPF_W | BPF_LEN:
496 ctx->seen |= SEEN_SKB; 491 ctx->seen |= SEEN_SKB;
497 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 492 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
498 emit(ARM_LDR_I(r_A, r_skb, 493 emit(ARM_LDR_I(r_A, r_skb,
499 offsetof(struct sk_buff, len)), ctx); 494 offsetof(struct sk_buff, len)), ctx);
500 break; 495 break;
501 case BPF_S_LD_MEM: 496 case BPF_LD | BPF_MEM:
502 /* A = scratch[k] */ 497 /* A = scratch[k] */
503 ctx->seen |= SEEN_MEM_WORD(k); 498 ctx->seen |= SEEN_MEM_WORD(k);
504 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 499 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
505 break; 500 break;
506 case BPF_S_LD_W_ABS: 501 case BPF_LD | BPF_W | BPF_ABS:
507 load_order = 2; 502 load_order = 2;
508 goto load; 503 goto load;
509 case BPF_S_LD_H_ABS: 504 case BPF_LD | BPF_H | BPF_ABS:
510 load_order = 1; 505 load_order = 1;
511 goto load; 506 goto load;
512 case BPF_S_LD_B_ABS: 507 case BPF_LD | BPF_B | BPF_ABS:
513 load_order = 0; 508 load_order = 0;
514load: 509load:
515 /* the interpreter will deal with the negative K */ 510 /* the interpreter will deal with the negative K */
@@ -552,31 +547,31 @@ load_common:
552 emit_err_ret(ARM_COND_NE, ctx); 547 emit_err_ret(ARM_COND_NE, ctx);
553 emit(ARM_MOV_R(r_A, ARM_R0), ctx); 548 emit(ARM_MOV_R(r_A, ARM_R0), ctx);
554 break; 549 break;
555 case BPF_S_LD_W_IND: 550 case BPF_LD | BPF_W | BPF_IND:
556 load_order = 2; 551 load_order = 2;
557 goto load_ind; 552 goto load_ind;
558 case BPF_S_LD_H_IND: 553 case BPF_LD | BPF_H | BPF_IND:
559 load_order = 1; 554 load_order = 1;
560 goto load_ind; 555 goto load_ind;
561 case BPF_S_LD_B_IND: 556 case BPF_LD | BPF_B | BPF_IND:
562 load_order = 0; 557 load_order = 0;
563load_ind: 558load_ind:
564 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); 559 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
565 goto load_common; 560 goto load_common;
566 case BPF_S_LDX_IMM: 561 case BPF_LDX | BPF_IMM:
567 ctx->seen |= SEEN_X; 562 ctx->seen |= SEEN_X;
568 emit_mov_i(r_X, k, ctx); 563 emit_mov_i(r_X, k, ctx);
569 break; 564 break;
570 case BPF_S_LDX_W_LEN: 565 case BPF_LDX | BPF_W | BPF_LEN:
571 ctx->seen |= SEEN_X | SEEN_SKB; 566 ctx->seen |= SEEN_X | SEEN_SKB;
572 emit(ARM_LDR_I(r_X, r_skb, 567 emit(ARM_LDR_I(r_X, r_skb,
573 offsetof(struct sk_buff, len)), ctx); 568 offsetof(struct sk_buff, len)), ctx);
574 break; 569 break;
575 case BPF_S_LDX_MEM: 570 case BPF_LDX | BPF_MEM:
576 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); 571 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
577 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 572 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
578 break; 573 break;
579 case BPF_S_LDX_B_MSH: 574 case BPF_LDX | BPF_B | BPF_MSH:
580 /* x = ((*(frame + k)) & 0xf) << 2; */ 575 /* x = ((*(frame + k)) & 0xf) << 2; */
581 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; 576 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
582 /* the interpreter should deal with the negative K */ 577 /* the interpreter should deal with the negative K */
@@ -606,113 +601,113 @@ load_ind:
606 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); 601 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
607 emit(ARM_LSL_I(r_X, r_X, 2), ctx); 602 emit(ARM_LSL_I(r_X, r_X, 2), ctx);
608 break; 603 break;
609 case BPF_S_ST: 604 case BPF_ST:
610 ctx->seen |= SEEN_MEM_WORD(k); 605 ctx->seen |= SEEN_MEM_WORD(k);
611 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 606 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
612 break; 607 break;
613 case BPF_S_STX: 608 case BPF_STX:
614 update_on_xread(ctx); 609 update_on_xread(ctx);
615 ctx->seen |= SEEN_MEM_WORD(k); 610 ctx->seen |= SEEN_MEM_WORD(k);
616 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 611 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
617 break; 612 break;
618 case BPF_S_ALU_ADD_K: 613 case BPF_ALU | BPF_ADD | BPF_K:
619 /* A += K */ 614 /* A += K */
620 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); 615 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
621 break; 616 break;
622 case BPF_S_ALU_ADD_X: 617 case BPF_ALU | BPF_ADD | BPF_X:
623 update_on_xread(ctx); 618 update_on_xread(ctx);
624 emit(ARM_ADD_R(r_A, r_A, r_X), ctx); 619 emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
625 break; 620 break;
626 case BPF_S_ALU_SUB_K: 621 case BPF_ALU | BPF_SUB | BPF_K:
627 /* A -= K */ 622 /* A -= K */
628 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); 623 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
629 break; 624 break;
630 case BPF_S_ALU_SUB_X: 625 case BPF_ALU | BPF_SUB | BPF_X:
631 update_on_xread(ctx); 626 update_on_xread(ctx);
632 emit(ARM_SUB_R(r_A, r_A, r_X), ctx); 627 emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
633 break; 628 break;
634 case BPF_S_ALU_MUL_K: 629 case BPF_ALU | BPF_MUL | BPF_K:
635 /* A *= K */ 630 /* A *= K */
636 emit_mov_i(r_scratch, k, ctx); 631 emit_mov_i(r_scratch, k, ctx);
637 emit(ARM_MUL(r_A, r_A, r_scratch), ctx); 632 emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
638 break; 633 break;
639 case BPF_S_ALU_MUL_X: 634 case BPF_ALU | BPF_MUL | BPF_X:
640 update_on_xread(ctx); 635 update_on_xread(ctx);
641 emit(ARM_MUL(r_A, r_A, r_X), ctx); 636 emit(ARM_MUL(r_A, r_A, r_X), ctx);
642 break; 637 break;
643 case BPF_S_ALU_DIV_K: 638 case BPF_ALU | BPF_DIV | BPF_K:
644 if (k == 1) 639 if (k == 1)
645 break; 640 break;
646 emit_mov_i(r_scratch, k, ctx); 641 emit_mov_i(r_scratch, k, ctx);
647 emit_udiv(r_A, r_A, r_scratch, ctx); 642 emit_udiv(r_A, r_A, r_scratch, ctx);
648 break; 643 break;
649 case BPF_S_ALU_DIV_X: 644 case BPF_ALU | BPF_DIV | BPF_X:
650 update_on_xread(ctx); 645 update_on_xread(ctx);
651 emit(ARM_CMP_I(r_X, 0), ctx); 646 emit(ARM_CMP_I(r_X, 0), ctx);
652 emit_err_ret(ARM_COND_EQ, ctx); 647 emit_err_ret(ARM_COND_EQ, ctx);
653 emit_udiv(r_A, r_A, r_X, ctx); 648 emit_udiv(r_A, r_A, r_X, ctx);
654 break; 649 break;
655 case BPF_S_ALU_OR_K: 650 case BPF_ALU | BPF_OR | BPF_K:
656 /* A |= K */ 651 /* A |= K */
657 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); 652 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
658 break; 653 break;
659 case BPF_S_ALU_OR_X: 654 case BPF_ALU | BPF_OR | BPF_X:
660 update_on_xread(ctx); 655 update_on_xread(ctx);
661 emit(ARM_ORR_R(r_A, r_A, r_X), ctx); 656 emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
662 break; 657 break;
663 case BPF_S_ALU_XOR_K: 658 case BPF_ALU | BPF_XOR | BPF_K:
664 /* A ^= K; */ 659 /* A ^= K; */
665 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); 660 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
666 break; 661 break;
667 case BPF_S_ANC_ALU_XOR_X: 662 case BPF_ANC | SKF_AD_ALU_XOR_X:
668 case BPF_S_ALU_XOR_X: 663 case BPF_ALU | BPF_XOR | BPF_X:
669 /* A ^= X */ 664 /* A ^= X */
670 update_on_xread(ctx); 665 update_on_xread(ctx);
671 emit(ARM_EOR_R(r_A, r_A, r_X), ctx); 666 emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
672 break; 667 break;
673 case BPF_S_ALU_AND_K: 668 case BPF_ALU | BPF_AND | BPF_K:
674 /* A &= K */ 669 /* A &= K */
675 OP_IMM3(ARM_AND, r_A, r_A, k, ctx); 670 OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
676 break; 671 break;
677 case BPF_S_ALU_AND_X: 672 case BPF_ALU | BPF_AND | BPF_X:
678 update_on_xread(ctx); 673 update_on_xread(ctx);
679 emit(ARM_AND_R(r_A, r_A, r_X), ctx); 674 emit(ARM_AND_R(r_A, r_A, r_X), ctx);
680 break; 675 break;
681 case BPF_S_ALU_LSH_K: 676 case BPF_ALU | BPF_LSH | BPF_K:
682 if (unlikely(k > 31)) 677 if (unlikely(k > 31))
683 return -1; 678 return -1;
684 emit(ARM_LSL_I(r_A, r_A, k), ctx); 679 emit(ARM_LSL_I(r_A, r_A, k), ctx);
685 break; 680 break;
686 case BPF_S_ALU_LSH_X: 681 case BPF_ALU | BPF_LSH | BPF_X:
687 update_on_xread(ctx); 682 update_on_xread(ctx);
688 emit(ARM_LSL_R(r_A, r_A, r_X), ctx); 683 emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
689 break; 684 break;
690 case BPF_S_ALU_RSH_K: 685 case BPF_ALU | BPF_RSH | BPF_K:
691 if (unlikely(k > 31)) 686 if (unlikely(k > 31))
692 return -1; 687 return -1;
693 emit(ARM_LSR_I(r_A, r_A, k), ctx); 688 emit(ARM_LSR_I(r_A, r_A, k), ctx);
694 break; 689 break;
695 case BPF_S_ALU_RSH_X: 690 case BPF_ALU | BPF_RSH | BPF_X:
696 update_on_xread(ctx); 691 update_on_xread(ctx);
697 emit(ARM_LSR_R(r_A, r_A, r_X), ctx); 692 emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
698 break; 693 break;
699 case BPF_S_ALU_NEG: 694 case BPF_ALU | BPF_NEG:
700 /* A = -A */ 695 /* A = -A */
701 emit(ARM_RSB_I(r_A, r_A, 0), ctx); 696 emit(ARM_RSB_I(r_A, r_A, 0), ctx);
702 break; 697 break;
703 case BPF_S_JMP_JA: 698 case BPF_JMP | BPF_JA:
704 /* pc += K */ 699 /* pc += K */
705 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); 700 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
706 break; 701 break;
707 case BPF_S_JMP_JEQ_K: 702 case BPF_JMP | BPF_JEQ | BPF_K:
708 /* pc += (A == K) ? pc->jt : pc->jf */ 703 /* pc += (A == K) ? pc->jt : pc->jf */
709 condt = ARM_COND_EQ; 704 condt = ARM_COND_EQ;
710 goto cmp_imm; 705 goto cmp_imm;
711 case BPF_S_JMP_JGT_K: 706 case BPF_JMP | BPF_JGT | BPF_K:
712 /* pc += (A > K) ? pc->jt : pc->jf */ 707 /* pc += (A > K) ? pc->jt : pc->jf */
713 condt = ARM_COND_HI; 708 condt = ARM_COND_HI;
714 goto cmp_imm; 709 goto cmp_imm;
715 case BPF_S_JMP_JGE_K: 710 case BPF_JMP | BPF_JGE | BPF_K:
716 /* pc += (A >= K) ? pc->jt : pc->jf */ 711 /* pc += (A >= K) ? pc->jt : pc->jf */
717 condt = ARM_COND_HS; 712 condt = ARM_COND_HS;
718cmp_imm: 713cmp_imm:
@@ -731,22 +726,22 @@ cond_jump:
731 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, 726 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
732 ctx)), ctx); 727 ctx)), ctx);
733 break; 728 break;
734 case BPF_S_JMP_JEQ_X: 729 case BPF_JMP | BPF_JEQ | BPF_X:
735 /* pc += (A == X) ? pc->jt : pc->jf */ 730 /* pc += (A == X) ? pc->jt : pc->jf */
736 condt = ARM_COND_EQ; 731 condt = ARM_COND_EQ;
737 goto cmp_x; 732 goto cmp_x;
738 case BPF_S_JMP_JGT_X: 733 case BPF_JMP | BPF_JGT | BPF_X:
739 /* pc += (A > X) ? pc->jt : pc->jf */ 734 /* pc += (A > X) ? pc->jt : pc->jf */
740 condt = ARM_COND_HI; 735 condt = ARM_COND_HI;
741 goto cmp_x; 736 goto cmp_x;
742 case BPF_S_JMP_JGE_X: 737 case BPF_JMP | BPF_JGE | BPF_X:
743 /* pc += (A >= X) ? pc->jt : pc->jf */ 738 /* pc += (A >= X) ? pc->jt : pc->jf */
744 condt = ARM_COND_CS; 739 condt = ARM_COND_CS;
745cmp_x: 740cmp_x:
746 update_on_xread(ctx); 741 update_on_xread(ctx);
747 emit(ARM_CMP_R(r_A, r_X), ctx); 742 emit(ARM_CMP_R(r_A, r_X), ctx);
748 goto cond_jump; 743 goto cond_jump;
749 case BPF_S_JMP_JSET_K: 744 case BPF_JMP | BPF_JSET | BPF_K:
750 /* pc += (A & K) ? pc->jt : pc->jf */ 745 /* pc += (A & K) ? pc->jt : pc->jf */
751 condt = ARM_COND_NE; 746 condt = ARM_COND_NE;
752 /* not set iff all zeroes iff Z==1 iff EQ */ 747 /* not set iff all zeroes iff Z==1 iff EQ */
@@ -759,16 +754,16 @@ cmp_x:
759 emit(ARM_TST_I(r_A, imm12), ctx); 754 emit(ARM_TST_I(r_A, imm12), ctx);
760 } 755 }
761 goto cond_jump; 756 goto cond_jump;
762 case BPF_S_JMP_JSET_X: 757 case BPF_JMP | BPF_JSET | BPF_X:
763 /* pc += (A & X) ? pc->jt : pc->jf */ 758 /* pc += (A & X) ? pc->jt : pc->jf */
764 update_on_xread(ctx); 759 update_on_xread(ctx);
765 condt = ARM_COND_NE; 760 condt = ARM_COND_NE;
766 emit(ARM_TST_R(r_A, r_X), ctx); 761 emit(ARM_TST_R(r_A, r_X), ctx);
767 goto cond_jump; 762 goto cond_jump;
768 case BPF_S_RET_A: 763 case BPF_RET | BPF_A:
769 emit(ARM_MOV_R(ARM_R0, r_A), ctx); 764 emit(ARM_MOV_R(ARM_R0, r_A), ctx);
770 goto b_epilogue; 765 goto b_epilogue;
771 case BPF_S_RET_K: 766 case BPF_RET | BPF_K:
772 if ((k == 0) && (ctx->ret0_fp_idx < 0)) 767 if ((k == 0) && (ctx->ret0_fp_idx < 0))
773 ctx->ret0_fp_idx = i; 768 ctx->ret0_fp_idx = i;
774 emit_mov_i(ARM_R0, k, ctx); 769 emit_mov_i(ARM_R0, k, ctx);
@@ -776,17 +771,17 @@ b_epilogue:
776 if (i != ctx->skf->len - 1) 771 if (i != ctx->skf->len - 1)
777 emit(ARM_B(b_imm(prog->len, ctx)), ctx); 772 emit(ARM_B(b_imm(prog->len, ctx)), ctx);
778 break; 773 break;
779 case BPF_S_MISC_TAX: 774 case BPF_MISC | BPF_TAX:
780 /* X = A */ 775 /* X = A */
781 ctx->seen |= SEEN_X; 776 ctx->seen |= SEEN_X;
782 emit(ARM_MOV_R(r_X, r_A), ctx); 777 emit(ARM_MOV_R(r_X, r_A), ctx);
783 break; 778 break;
784 case BPF_S_MISC_TXA: 779 case BPF_MISC | BPF_TXA:
785 /* A = X */ 780 /* A = X */
786 update_on_xread(ctx); 781 update_on_xread(ctx);
787 emit(ARM_MOV_R(r_A, r_X), ctx); 782 emit(ARM_MOV_R(r_A, r_X), ctx);
788 break; 783 break;
789 case BPF_S_ANC_PROTOCOL: 784 case BPF_ANC | SKF_AD_PROTOCOL:
790 /* A = ntohs(skb->protocol) */ 785 /* A = ntohs(skb->protocol) */
791 ctx->seen |= SEEN_SKB; 786 ctx->seen |= SEEN_SKB;
792 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 787 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -795,7 +790,7 @@ b_epilogue:
795 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); 790 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
796 emit_swap16(r_A, r_scratch, ctx); 791 emit_swap16(r_A, r_scratch, ctx);
797 break; 792 break;
798 case BPF_S_ANC_CPU: 793 case BPF_ANC | SKF_AD_CPU:
799 /* r_scratch = current_thread_info() */ 794 /* r_scratch = current_thread_info() */
800 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); 795 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
801 /* A = current_thread_info()->cpu */ 796 /* A = current_thread_info()->cpu */
@@ -803,7 +798,7 @@ b_epilogue:
803 off = offsetof(struct thread_info, cpu); 798 off = offsetof(struct thread_info, cpu);
804 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 799 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
805 break; 800 break;
806 case BPF_S_ANC_IFINDEX: 801 case BPF_ANC | SKF_AD_IFINDEX:
807 /* A = skb->dev->ifindex */ 802 /* A = skb->dev->ifindex */
808 ctx->seen |= SEEN_SKB; 803 ctx->seen |= SEEN_SKB;
809 off = offsetof(struct sk_buff, dev); 804 off = offsetof(struct sk_buff, dev);
@@ -817,30 +812,30 @@ b_epilogue:
817 off = offsetof(struct net_device, ifindex); 812 off = offsetof(struct net_device, ifindex);
818 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 813 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
819 break; 814 break;
820 case BPF_S_ANC_MARK: 815 case BPF_ANC | SKF_AD_MARK:
821 ctx->seen |= SEEN_SKB; 816 ctx->seen |= SEEN_SKB;
822 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 817 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
823 off = offsetof(struct sk_buff, mark); 818 off = offsetof(struct sk_buff, mark);
824 emit(ARM_LDR_I(r_A, r_skb, off), ctx); 819 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
825 break; 820 break;
826 case BPF_S_ANC_RXHASH: 821 case BPF_ANC | SKF_AD_RXHASH:
827 ctx->seen |= SEEN_SKB; 822 ctx->seen |= SEEN_SKB;
828 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 823 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
829 off = offsetof(struct sk_buff, hash); 824 off = offsetof(struct sk_buff, hash);
830 emit(ARM_LDR_I(r_A, r_skb, off), ctx); 825 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
831 break; 826 break;
832 case BPF_S_ANC_VLAN_TAG: 827 case BPF_ANC | SKF_AD_VLAN_TAG:
833 case BPF_S_ANC_VLAN_TAG_PRESENT: 828 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
834 ctx->seen |= SEEN_SKB; 829 ctx->seen |= SEEN_SKB;
835 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 830 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
836 off = offsetof(struct sk_buff, vlan_tci); 831 off = offsetof(struct sk_buff, vlan_tci);
837 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 832 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
838 if (inst->code == BPF_S_ANC_VLAN_TAG) 833 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
839 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 834 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
840 else 835 else
841 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 836 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
842 break; 837 break;
843 case BPF_S_ANC_QUEUE: 838 case BPF_ANC | SKF_AD_QUEUE:
844 ctx->seen |= SEEN_SKB; 839 ctx->seen |= SEEN_SKB;
845 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 840 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
846 queue_mapping) != 2); 841 queue_mapping) != 2);