aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorDaniel Borkmann <dborkman@redhat.com>2014-05-29 04:22:50 -0400
committerDavid S. Miller <davem@davemloft.net>2014-06-02 01:16:58 -0400
commit3480593131e0b781287dae0139bf7ccee7cba7ff (patch)
tree6e259a45b3767bd80b789814e4d484ee0ac069bf /arch
parentd50bc1575096250aa37f17299c86ea548156efe8 (diff)
net: filter: get rid of BPF_S_* enum
This patch finally allows us to get rid of the BPF_S_* enum. Currently, the code performs unnecessary encode and decode workarounds in seccomp and filter migration itself when a filter is being attached in order to overcome BPF_S_* encoding which is not used anymore by the new interpreter resp. JIT compilers. Keeping it around would mean that also in future we would need to extend and maintain this enum and related encoders/decoders. We can get rid of all that and save us these operations during filter attaching. Naturally, also JIT compilers need to be updated by this. Before JIT conversion is being done, each compiler checks if A is being loaded at startup to obtain information if it needs to emit instructions to clear A first. Since BPF extensions are a subset of BPF_LD | BPF_{W,H,B} | BPF_ABS variants, case statements for extensions can be removed at that point. To ease and minimize code changes in the classic JITs, we have introduced bpf_anc_helper(). Tested with test_bpf on x86_64 (JIT, int), s390x (JIT, int), arm (JIT, int), i386 (int), ppc64 (JIT, int); for sparc we unfortunately didn't have access, but changes are analogous to the rest. Joint work with Alexei Starovoitov. Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: Alexei Starovoitov <ast@plumgrid.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Mircea Gherzan <mgherzan@gmail.com> Cc: Kees Cook <keescook@chromium.org> Acked-by: Chema Gonzalez <chemag@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--arch/arm/net/bpf_jit_32.c139
-rw-r--r--arch/powerpc/net/bpf_jit_64.S2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c157
-rw-r--r--arch/s390/net/bpf_jit_comp.c163
-rw-r--r--arch/sparc/net/bpf_jit_comp.c154
5 files changed, 294 insertions, 321 deletions
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6f879c319a9d..fb5503ce016f 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -136,7 +136,7 @@ static u16 saved_regs(struct jit_ctx *ctx)
136 u16 ret = 0; 136 u16 ret = 0;
137 137
138 if ((ctx->skf->len > 1) || 138 if ((ctx->skf->len > 1) ||
139 (ctx->skf->insns[0].code == BPF_S_RET_A)) 139 (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
140 ret |= 1 << r_A; 140 ret |= 1 << r_A;
141 141
142#ifdef CONFIG_FRAME_POINTER 142#ifdef CONFIG_FRAME_POINTER
@@ -164,18 +164,10 @@ static inline int mem_words_used(struct jit_ctx *ctx)
164static inline bool is_load_to_a(u16 inst) 164static inline bool is_load_to_a(u16 inst)
165{ 165{
166 switch (inst) { 166 switch (inst) {
167 case BPF_S_LD_W_LEN: 167 case BPF_LD | BPF_W | BPF_LEN:
168 case BPF_S_LD_W_ABS: 168 case BPF_LD | BPF_W | BPF_ABS:
169 case BPF_S_LD_H_ABS: 169 case BPF_LD | BPF_H | BPF_ABS:
170 case BPF_S_LD_B_ABS: 170 case BPF_LD | BPF_B | BPF_ABS:
171 case BPF_S_ANC_CPU:
172 case BPF_S_ANC_IFINDEX:
173 case BPF_S_ANC_MARK:
174 case BPF_S_ANC_PROTOCOL:
175 case BPF_S_ANC_RXHASH:
176 case BPF_S_ANC_VLAN_TAG:
177 case BPF_S_ANC_VLAN_TAG_PRESENT:
178 case BPF_S_ANC_QUEUE:
179 return true; 171 return true;
180 default: 172 default:
181 return false; 173 return false;
@@ -215,7 +207,7 @@ static void build_prologue(struct jit_ctx *ctx)
215 emit(ARM_MOV_I(r_X, 0), ctx); 207 emit(ARM_MOV_I(r_X, 0), ctx);
216 208
217 /* do not leak kernel data to userspace */ 209 /* do not leak kernel data to userspace */
218 if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) 210 if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
219 emit(ARM_MOV_I(r_A, 0), ctx); 211 emit(ARM_MOV_I(r_A, 0), ctx);
220 212
221 /* stack space for the BPF_MEM words */ 213 /* stack space for the BPF_MEM words */
@@ -480,36 +472,39 @@ static int build_body(struct jit_ctx *ctx)
480 u32 k; 472 u32 k;
481 473
482 for (i = 0; i < prog->len; i++) { 474 for (i = 0; i < prog->len; i++) {
475 u16 code;
476
483 inst = &(prog->insns[i]); 477 inst = &(prog->insns[i]);
484 /* K as an immediate value operand */ 478 /* K as an immediate value operand */
485 k = inst->k; 479 k = inst->k;
480 code = bpf_anc_helper(inst);
486 481
487 /* compute offsets only in the fake pass */ 482 /* compute offsets only in the fake pass */
488 if (ctx->target == NULL) 483 if (ctx->target == NULL)
489 ctx->offsets[i] = ctx->idx * 4; 484 ctx->offsets[i] = ctx->idx * 4;
490 485
491 switch (inst->code) { 486 switch (code) {
492 case BPF_S_LD_IMM: 487 case BPF_LD | BPF_IMM:
493 emit_mov_i(r_A, k, ctx); 488 emit_mov_i(r_A, k, ctx);
494 break; 489 break;
495 case BPF_S_LD_W_LEN: 490 case BPF_LD | BPF_W | BPF_LEN:
496 ctx->seen |= SEEN_SKB; 491 ctx->seen |= SEEN_SKB;
497 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 492 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
498 emit(ARM_LDR_I(r_A, r_skb, 493 emit(ARM_LDR_I(r_A, r_skb,
499 offsetof(struct sk_buff, len)), ctx); 494 offsetof(struct sk_buff, len)), ctx);
500 break; 495 break;
501 case BPF_S_LD_MEM: 496 case BPF_LD | BPF_MEM:
502 /* A = scratch[k] */ 497 /* A = scratch[k] */
503 ctx->seen |= SEEN_MEM_WORD(k); 498 ctx->seen |= SEEN_MEM_WORD(k);
504 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 499 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
505 break; 500 break;
506 case BPF_S_LD_W_ABS: 501 case BPF_LD | BPF_W | BPF_ABS:
507 load_order = 2; 502 load_order = 2;
508 goto load; 503 goto load;
509 case BPF_S_LD_H_ABS: 504 case BPF_LD | BPF_H | BPF_ABS:
510 load_order = 1; 505 load_order = 1;
511 goto load; 506 goto load;
512 case BPF_S_LD_B_ABS: 507 case BPF_LD | BPF_B | BPF_ABS:
513 load_order = 0; 508 load_order = 0;
514load: 509load:
515 /* the interpreter will deal with the negative K */ 510 /* the interpreter will deal with the negative K */
@@ -552,31 +547,31 @@ load_common:
552 emit_err_ret(ARM_COND_NE, ctx); 547 emit_err_ret(ARM_COND_NE, ctx);
553 emit(ARM_MOV_R(r_A, ARM_R0), ctx); 548 emit(ARM_MOV_R(r_A, ARM_R0), ctx);
554 break; 549 break;
555 case BPF_S_LD_W_IND: 550 case BPF_LD | BPF_W | BPF_IND:
556 load_order = 2; 551 load_order = 2;
557 goto load_ind; 552 goto load_ind;
558 case BPF_S_LD_H_IND: 553 case BPF_LD | BPF_H | BPF_IND:
559 load_order = 1; 554 load_order = 1;
560 goto load_ind; 555 goto load_ind;
561 case BPF_S_LD_B_IND: 556 case BPF_LD | BPF_B | BPF_IND:
562 load_order = 0; 557 load_order = 0;
563load_ind: 558load_ind:
564 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); 559 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
565 goto load_common; 560 goto load_common;
566 case BPF_S_LDX_IMM: 561 case BPF_LDX | BPF_IMM:
567 ctx->seen |= SEEN_X; 562 ctx->seen |= SEEN_X;
568 emit_mov_i(r_X, k, ctx); 563 emit_mov_i(r_X, k, ctx);
569 break; 564 break;
570 case BPF_S_LDX_W_LEN: 565 case BPF_LDX | BPF_W | BPF_LEN:
571 ctx->seen |= SEEN_X | SEEN_SKB; 566 ctx->seen |= SEEN_X | SEEN_SKB;
572 emit(ARM_LDR_I(r_X, r_skb, 567 emit(ARM_LDR_I(r_X, r_skb,
573 offsetof(struct sk_buff, len)), ctx); 568 offsetof(struct sk_buff, len)), ctx);
574 break; 569 break;
575 case BPF_S_LDX_MEM: 570 case BPF_LDX | BPF_MEM:
576 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); 571 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
577 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 572 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
578 break; 573 break;
579 case BPF_S_LDX_B_MSH: 574 case BPF_LDX | BPF_B | BPF_MSH:
580 /* x = ((*(frame + k)) & 0xf) << 2; */ 575 /* x = ((*(frame + k)) & 0xf) << 2; */
581 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; 576 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
582 /* the interpreter should deal with the negative K */ 577 /* the interpreter should deal with the negative K */
@@ -606,113 +601,113 @@ load_ind:
606 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); 601 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
607 emit(ARM_LSL_I(r_X, r_X, 2), ctx); 602 emit(ARM_LSL_I(r_X, r_X, 2), ctx);
608 break; 603 break;
609 case BPF_S_ST: 604 case BPF_ST:
610 ctx->seen |= SEEN_MEM_WORD(k); 605 ctx->seen |= SEEN_MEM_WORD(k);
611 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); 606 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
612 break; 607 break;
613 case BPF_S_STX: 608 case BPF_STX:
614 update_on_xread(ctx); 609 update_on_xread(ctx);
615 ctx->seen |= SEEN_MEM_WORD(k); 610 ctx->seen |= SEEN_MEM_WORD(k);
616 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); 611 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
617 break; 612 break;
618 case BPF_S_ALU_ADD_K: 613 case BPF_ALU | BPF_ADD | BPF_K:
619 /* A += K */ 614 /* A += K */
620 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); 615 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
621 break; 616 break;
622 case BPF_S_ALU_ADD_X: 617 case BPF_ALU | BPF_ADD | BPF_X:
623 update_on_xread(ctx); 618 update_on_xread(ctx);
624 emit(ARM_ADD_R(r_A, r_A, r_X), ctx); 619 emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
625 break; 620 break;
626 case BPF_S_ALU_SUB_K: 621 case BPF_ALU | BPF_SUB | BPF_K:
627 /* A -= K */ 622 /* A -= K */
628 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); 623 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
629 break; 624 break;
630 case BPF_S_ALU_SUB_X: 625 case BPF_ALU | BPF_SUB | BPF_X:
631 update_on_xread(ctx); 626 update_on_xread(ctx);
632 emit(ARM_SUB_R(r_A, r_A, r_X), ctx); 627 emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
633 break; 628 break;
634 case BPF_S_ALU_MUL_K: 629 case BPF_ALU | BPF_MUL | BPF_K:
635 /* A *= K */ 630 /* A *= K */
636 emit_mov_i(r_scratch, k, ctx); 631 emit_mov_i(r_scratch, k, ctx);
637 emit(ARM_MUL(r_A, r_A, r_scratch), ctx); 632 emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
638 break; 633 break;
639 case BPF_S_ALU_MUL_X: 634 case BPF_ALU | BPF_MUL | BPF_X:
640 update_on_xread(ctx); 635 update_on_xread(ctx);
641 emit(ARM_MUL(r_A, r_A, r_X), ctx); 636 emit(ARM_MUL(r_A, r_A, r_X), ctx);
642 break; 637 break;
643 case BPF_S_ALU_DIV_K: 638 case BPF_ALU | BPF_DIV | BPF_K:
644 if (k == 1) 639 if (k == 1)
645 break; 640 break;
646 emit_mov_i(r_scratch, k, ctx); 641 emit_mov_i(r_scratch, k, ctx);
647 emit_udiv(r_A, r_A, r_scratch, ctx); 642 emit_udiv(r_A, r_A, r_scratch, ctx);
648 break; 643 break;
649 case BPF_S_ALU_DIV_X: 644 case BPF_ALU | BPF_DIV | BPF_X:
650 update_on_xread(ctx); 645 update_on_xread(ctx);
651 emit(ARM_CMP_I(r_X, 0), ctx); 646 emit(ARM_CMP_I(r_X, 0), ctx);
652 emit_err_ret(ARM_COND_EQ, ctx); 647 emit_err_ret(ARM_COND_EQ, ctx);
653 emit_udiv(r_A, r_A, r_X, ctx); 648 emit_udiv(r_A, r_A, r_X, ctx);
654 break; 649 break;
655 case BPF_S_ALU_OR_K: 650 case BPF_ALU | BPF_OR | BPF_K:
656 /* A |= K */ 651 /* A |= K */
657 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); 652 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
658 break; 653 break;
659 case BPF_S_ALU_OR_X: 654 case BPF_ALU | BPF_OR | BPF_X:
660 update_on_xread(ctx); 655 update_on_xread(ctx);
661 emit(ARM_ORR_R(r_A, r_A, r_X), ctx); 656 emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
662 break; 657 break;
663 case BPF_S_ALU_XOR_K: 658 case BPF_ALU | BPF_XOR | BPF_K:
664 /* A ^= K; */ 659 /* A ^= K; */
665 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); 660 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
666 break; 661 break;
667 case BPF_S_ANC_ALU_XOR_X: 662 case BPF_ANC | SKF_AD_ALU_XOR_X:
668 case BPF_S_ALU_XOR_X: 663 case BPF_ALU | BPF_XOR | BPF_X:
669 /* A ^= X */ 664 /* A ^= X */
670 update_on_xread(ctx); 665 update_on_xread(ctx);
671 emit(ARM_EOR_R(r_A, r_A, r_X), ctx); 666 emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
672 break; 667 break;
673 case BPF_S_ALU_AND_K: 668 case BPF_ALU | BPF_AND | BPF_K:
674 /* A &= K */ 669 /* A &= K */
675 OP_IMM3(ARM_AND, r_A, r_A, k, ctx); 670 OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
676 break; 671 break;
677 case BPF_S_ALU_AND_X: 672 case BPF_ALU | BPF_AND | BPF_X:
678 update_on_xread(ctx); 673 update_on_xread(ctx);
679 emit(ARM_AND_R(r_A, r_A, r_X), ctx); 674 emit(ARM_AND_R(r_A, r_A, r_X), ctx);
680 break; 675 break;
681 case BPF_S_ALU_LSH_K: 676 case BPF_ALU | BPF_LSH | BPF_K:
682 if (unlikely(k > 31)) 677 if (unlikely(k > 31))
683 return -1; 678 return -1;
684 emit(ARM_LSL_I(r_A, r_A, k), ctx); 679 emit(ARM_LSL_I(r_A, r_A, k), ctx);
685 break; 680 break;
686 case BPF_S_ALU_LSH_X: 681 case BPF_ALU | BPF_LSH | BPF_X:
687 update_on_xread(ctx); 682 update_on_xread(ctx);
688 emit(ARM_LSL_R(r_A, r_A, r_X), ctx); 683 emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
689 break; 684 break;
690 case BPF_S_ALU_RSH_K: 685 case BPF_ALU | BPF_RSH | BPF_K:
691 if (unlikely(k > 31)) 686 if (unlikely(k > 31))
692 return -1; 687 return -1;
693 emit(ARM_LSR_I(r_A, r_A, k), ctx); 688 emit(ARM_LSR_I(r_A, r_A, k), ctx);
694 break; 689 break;
695 case BPF_S_ALU_RSH_X: 690 case BPF_ALU | BPF_RSH | BPF_X:
696 update_on_xread(ctx); 691 update_on_xread(ctx);
697 emit(ARM_LSR_R(r_A, r_A, r_X), ctx); 692 emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
698 break; 693 break;
699 case BPF_S_ALU_NEG: 694 case BPF_ALU | BPF_NEG:
700 /* A = -A */ 695 /* A = -A */
701 emit(ARM_RSB_I(r_A, r_A, 0), ctx); 696 emit(ARM_RSB_I(r_A, r_A, 0), ctx);
702 break; 697 break;
703 case BPF_S_JMP_JA: 698 case BPF_JMP | BPF_JA:
704 /* pc += K */ 699 /* pc += K */
705 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); 700 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
706 break; 701 break;
707 case BPF_S_JMP_JEQ_K: 702 case BPF_JMP | BPF_JEQ | BPF_K:
708 /* pc += (A == K) ? pc->jt : pc->jf */ 703 /* pc += (A == K) ? pc->jt : pc->jf */
709 condt = ARM_COND_EQ; 704 condt = ARM_COND_EQ;
710 goto cmp_imm; 705 goto cmp_imm;
711 case BPF_S_JMP_JGT_K: 706 case BPF_JMP | BPF_JGT | BPF_K:
712 /* pc += (A > K) ? pc->jt : pc->jf */ 707 /* pc += (A > K) ? pc->jt : pc->jf */
713 condt = ARM_COND_HI; 708 condt = ARM_COND_HI;
714 goto cmp_imm; 709 goto cmp_imm;
715 case BPF_S_JMP_JGE_K: 710 case BPF_JMP | BPF_JGE | BPF_K:
716 /* pc += (A >= K) ? pc->jt : pc->jf */ 711 /* pc += (A >= K) ? pc->jt : pc->jf */
717 condt = ARM_COND_HS; 712 condt = ARM_COND_HS;
718cmp_imm: 713cmp_imm:
@@ -731,22 +726,22 @@ cond_jump:
731 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, 726 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
732 ctx)), ctx); 727 ctx)), ctx);
733 break; 728 break;
734 case BPF_S_JMP_JEQ_X: 729 case BPF_JMP | BPF_JEQ | BPF_X:
735 /* pc += (A == X) ? pc->jt : pc->jf */ 730 /* pc += (A == X) ? pc->jt : pc->jf */
736 condt = ARM_COND_EQ; 731 condt = ARM_COND_EQ;
737 goto cmp_x; 732 goto cmp_x;
738 case BPF_S_JMP_JGT_X: 733 case BPF_JMP | BPF_JGT | BPF_X:
739 /* pc += (A > X) ? pc->jt : pc->jf */ 734 /* pc += (A > X) ? pc->jt : pc->jf */
740 condt = ARM_COND_HI; 735 condt = ARM_COND_HI;
741 goto cmp_x; 736 goto cmp_x;
742 case BPF_S_JMP_JGE_X: 737 case BPF_JMP | BPF_JGE | BPF_X:
743 /* pc += (A >= X) ? pc->jt : pc->jf */ 738 /* pc += (A >= X) ? pc->jt : pc->jf */
744 condt = ARM_COND_CS; 739 condt = ARM_COND_CS;
745cmp_x: 740cmp_x:
746 update_on_xread(ctx); 741 update_on_xread(ctx);
747 emit(ARM_CMP_R(r_A, r_X), ctx); 742 emit(ARM_CMP_R(r_A, r_X), ctx);
748 goto cond_jump; 743 goto cond_jump;
749 case BPF_S_JMP_JSET_K: 744 case BPF_JMP | BPF_JSET | BPF_K:
750 /* pc += (A & K) ? pc->jt : pc->jf */ 745 /* pc += (A & K) ? pc->jt : pc->jf */
751 condt = ARM_COND_NE; 746 condt = ARM_COND_NE;
752 /* not set iff all zeroes iff Z==1 iff EQ */ 747 /* not set iff all zeroes iff Z==1 iff EQ */
@@ -759,16 +754,16 @@ cmp_x:
759 emit(ARM_TST_I(r_A, imm12), ctx); 754 emit(ARM_TST_I(r_A, imm12), ctx);
760 } 755 }
761 goto cond_jump; 756 goto cond_jump;
762 case BPF_S_JMP_JSET_X: 757 case BPF_JMP | BPF_JSET | BPF_X:
763 /* pc += (A & X) ? pc->jt : pc->jf */ 758 /* pc += (A & X) ? pc->jt : pc->jf */
764 update_on_xread(ctx); 759 update_on_xread(ctx);
765 condt = ARM_COND_NE; 760 condt = ARM_COND_NE;
766 emit(ARM_TST_R(r_A, r_X), ctx); 761 emit(ARM_TST_R(r_A, r_X), ctx);
767 goto cond_jump; 762 goto cond_jump;
768 case BPF_S_RET_A: 763 case BPF_RET | BPF_A:
769 emit(ARM_MOV_R(ARM_R0, r_A), ctx); 764 emit(ARM_MOV_R(ARM_R0, r_A), ctx);
770 goto b_epilogue; 765 goto b_epilogue;
771 case BPF_S_RET_K: 766 case BPF_RET | BPF_K:
772 if ((k == 0) && (ctx->ret0_fp_idx < 0)) 767 if ((k == 0) && (ctx->ret0_fp_idx < 0))
773 ctx->ret0_fp_idx = i; 768 ctx->ret0_fp_idx = i;
774 emit_mov_i(ARM_R0, k, ctx); 769 emit_mov_i(ARM_R0, k, ctx);
@@ -776,17 +771,17 @@ b_epilogue:
776 if (i != ctx->skf->len - 1) 771 if (i != ctx->skf->len - 1)
777 emit(ARM_B(b_imm(prog->len, ctx)), ctx); 772 emit(ARM_B(b_imm(prog->len, ctx)), ctx);
778 break; 773 break;
779 case BPF_S_MISC_TAX: 774 case BPF_MISC | BPF_TAX:
780 /* X = A */ 775 /* X = A */
781 ctx->seen |= SEEN_X; 776 ctx->seen |= SEEN_X;
782 emit(ARM_MOV_R(r_X, r_A), ctx); 777 emit(ARM_MOV_R(r_X, r_A), ctx);
783 break; 778 break;
784 case BPF_S_MISC_TXA: 779 case BPF_MISC | BPF_TXA:
785 /* A = X */ 780 /* A = X */
786 update_on_xread(ctx); 781 update_on_xread(ctx);
787 emit(ARM_MOV_R(r_A, r_X), ctx); 782 emit(ARM_MOV_R(r_A, r_X), ctx);
788 break; 783 break;
789 case BPF_S_ANC_PROTOCOL: 784 case BPF_ANC | SKF_AD_PROTOCOL:
790 /* A = ntohs(skb->protocol) */ 785 /* A = ntohs(skb->protocol) */
791 ctx->seen |= SEEN_SKB; 786 ctx->seen |= SEEN_SKB;
792 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 787 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -795,7 +790,7 @@ b_epilogue:
795 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); 790 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
796 emit_swap16(r_A, r_scratch, ctx); 791 emit_swap16(r_A, r_scratch, ctx);
797 break; 792 break;
798 case BPF_S_ANC_CPU: 793 case BPF_ANC | SKF_AD_CPU:
799 /* r_scratch = current_thread_info() */ 794 /* r_scratch = current_thread_info() */
800 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); 795 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
801 /* A = current_thread_info()->cpu */ 796 /* A = current_thread_info()->cpu */
@@ -803,7 +798,7 @@ b_epilogue:
803 off = offsetof(struct thread_info, cpu); 798 off = offsetof(struct thread_info, cpu);
804 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 799 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
805 break; 800 break;
806 case BPF_S_ANC_IFINDEX: 801 case BPF_ANC | SKF_AD_IFINDEX:
807 /* A = skb->dev->ifindex */ 802 /* A = skb->dev->ifindex */
808 ctx->seen |= SEEN_SKB; 803 ctx->seen |= SEEN_SKB;
809 off = offsetof(struct sk_buff, dev); 804 off = offsetof(struct sk_buff, dev);
@@ -817,30 +812,30 @@ b_epilogue:
817 off = offsetof(struct net_device, ifindex); 812 off = offsetof(struct net_device, ifindex);
818 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); 813 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
819 break; 814 break;
820 case BPF_S_ANC_MARK: 815 case BPF_ANC | SKF_AD_MARK:
821 ctx->seen |= SEEN_SKB; 816 ctx->seen |= SEEN_SKB;
822 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 817 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
823 off = offsetof(struct sk_buff, mark); 818 off = offsetof(struct sk_buff, mark);
824 emit(ARM_LDR_I(r_A, r_skb, off), ctx); 819 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
825 break; 820 break;
826 case BPF_S_ANC_RXHASH: 821 case BPF_ANC | SKF_AD_RXHASH:
827 ctx->seen |= SEEN_SKB; 822 ctx->seen |= SEEN_SKB;
828 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 823 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
829 off = offsetof(struct sk_buff, hash); 824 off = offsetof(struct sk_buff, hash);
830 emit(ARM_LDR_I(r_A, r_skb, off), ctx); 825 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
831 break; 826 break;
832 case BPF_S_ANC_VLAN_TAG: 827 case BPF_ANC | SKF_AD_VLAN_TAG:
833 case BPF_S_ANC_VLAN_TAG_PRESENT: 828 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
834 ctx->seen |= SEEN_SKB; 829 ctx->seen |= SEEN_SKB;
835 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 830 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
836 off = offsetof(struct sk_buff, vlan_tci); 831 off = offsetof(struct sk_buff, vlan_tci);
837 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 832 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
838 if (inst->code == BPF_S_ANC_VLAN_TAG) 833 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
839 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 834 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
840 else 835 else
841 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 836 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
842 break; 837 break;
843 case BPF_S_ANC_QUEUE: 838 case BPF_ANC | SKF_AD_QUEUE:
844 ctx->seen |= SEEN_SKB; 839 ctx->seen |= SEEN_SKB;
845 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 840 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
846 queue_mapping) != 2); 841 queue_mapping) != 2);
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index e76eba74d9da..8f87d9217122 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -78,7 +78,7 @@ sk_load_byte_positive_offset:
78 blr 78 blr
79 79
80/* 80/*
81 * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) 81 * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
82 * r_addr is the offset value 82 * r_addr is the offset value
83 */ 83 */
84 .globl sk_load_byte_msh 84 .globl sk_load_byte_msh
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 808ce1cae21a..6dcdadefd8d0 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -79,19 +79,11 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
79 } 79 }
80 80
81 switch (filter[0].code) { 81 switch (filter[0].code) {
82 case BPF_S_RET_K: 82 case BPF_RET | BPF_K:
83 case BPF_S_LD_W_LEN: 83 case BPF_LD | BPF_W | BPF_LEN:
84 case BPF_S_ANC_PROTOCOL: 84 case BPF_LD | BPF_W | BPF_ABS:
85 case BPF_S_ANC_IFINDEX: 85 case BPF_LD | BPF_H | BPF_ABS:
86 case BPF_S_ANC_MARK: 86 case BPF_LD | BPF_B | BPF_ABS:
87 case BPF_S_ANC_RXHASH:
88 case BPF_S_ANC_VLAN_TAG:
89 case BPF_S_ANC_VLAN_TAG_PRESENT:
90 case BPF_S_ANC_CPU:
91 case BPF_S_ANC_QUEUE:
92 case BPF_S_LD_W_ABS:
93 case BPF_S_LD_H_ABS:
94 case BPF_S_LD_B_ABS:
95 /* first instruction sets A register (or is RET 'constant') */ 87 /* first instruction sets A register (or is RET 'constant') */
96 break; 88 break;
97 default: 89 default:
@@ -144,6 +136,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
144 136
145 for (i = 0; i < flen; i++) { 137 for (i = 0; i < flen; i++) {
146 unsigned int K = filter[i].k; 138 unsigned int K = filter[i].k;
139 u16 code = bpf_anc_helper(&filter[i]);
147 140
148 /* 141 /*
149 * addrs[] maps a BPF bytecode address into a real offset from 142 * addrs[] maps a BPF bytecode address into a real offset from
@@ -151,35 +144,35 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
151 */ 144 */
152 addrs[i] = ctx->idx * 4; 145 addrs[i] = ctx->idx * 4;
153 146
154 switch (filter[i].code) { 147 switch (code) {
155 /*** ALU ops ***/ 148 /*** ALU ops ***/
156 case BPF_S_ALU_ADD_X: /* A += X; */ 149 case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
157 ctx->seen |= SEEN_XREG; 150 ctx->seen |= SEEN_XREG;
158 PPC_ADD(r_A, r_A, r_X); 151 PPC_ADD(r_A, r_A, r_X);
159 break; 152 break;
160 case BPF_S_ALU_ADD_K: /* A += K; */ 153 case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
161 if (!K) 154 if (!K)
162 break; 155 break;
163 PPC_ADDI(r_A, r_A, IMM_L(K)); 156 PPC_ADDI(r_A, r_A, IMM_L(K));
164 if (K >= 32768) 157 if (K >= 32768)
165 PPC_ADDIS(r_A, r_A, IMM_HA(K)); 158 PPC_ADDIS(r_A, r_A, IMM_HA(K));
166 break; 159 break;
167 case BPF_S_ALU_SUB_X: /* A -= X; */ 160 case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
168 ctx->seen |= SEEN_XREG; 161 ctx->seen |= SEEN_XREG;
169 PPC_SUB(r_A, r_A, r_X); 162 PPC_SUB(r_A, r_A, r_X);
170 break; 163 break;
171 case BPF_S_ALU_SUB_K: /* A -= K */ 164 case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
172 if (!K) 165 if (!K)
173 break; 166 break;
174 PPC_ADDI(r_A, r_A, IMM_L(-K)); 167 PPC_ADDI(r_A, r_A, IMM_L(-K));
175 if (K >= 32768) 168 if (K >= 32768)
176 PPC_ADDIS(r_A, r_A, IMM_HA(-K)); 169 PPC_ADDIS(r_A, r_A, IMM_HA(-K));
177 break; 170 break;
178 case BPF_S_ALU_MUL_X: /* A *= X; */ 171 case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
179 ctx->seen |= SEEN_XREG; 172 ctx->seen |= SEEN_XREG;
180 PPC_MUL(r_A, r_A, r_X); 173 PPC_MUL(r_A, r_A, r_X);
181 break; 174 break;
182 case BPF_S_ALU_MUL_K: /* A *= K */ 175 case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
183 if (K < 32768) 176 if (K < 32768)
184 PPC_MULI(r_A, r_A, K); 177 PPC_MULI(r_A, r_A, K);
185 else { 178 else {
@@ -187,7 +180,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
187 PPC_MUL(r_A, r_A, r_scratch1); 180 PPC_MUL(r_A, r_A, r_scratch1);
188 } 181 }
189 break; 182 break;
190 case BPF_S_ALU_MOD_X: /* A %= X; */ 183 case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
191 ctx->seen |= SEEN_XREG; 184 ctx->seen |= SEEN_XREG;
192 PPC_CMPWI(r_X, 0); 185 PPC_CMPWI(r_X, 0);
193 if (ctx->pc_ret0 != -1) { 186 if (ctx->pc_ret0 != -1) {
@@ -201,13 +194,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
201 PPC_MUL(r_scratch1, r_X, r_scratch1); 194 PPC_MUL(r_scratch1, r_X, r_scratch1);
202 PPC_SUB(r_A, r_A, r_scratch1); 195 PPC_SUB(r_A, r_A, r_scratch1);
203 break; 196 break;
204 case BPF_S_ALU_MOD_K: /* A %= K; */ 197 case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
205 PPC_LI32(r_scratch2, K); 198 PPC_LI32(r_scratch2, K);
206 PPC_DIVWU(r_scratch1, r_A, r_scratch2); 199 PPC_DIVWU(r_scratch1, r_A, r_scratch2);
207 PPC_MUL(r_scratch1, r_scratch2, r_scratch1); 200 PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
208 PPC_SUB(r_A, r_A, r_scratch1); 201 PPC_SUB(r_A, r_A, r_scratch1);
209 break; 202 break;
210 case BPF_S_ALU_DIV_X: /* A /= X; */ 203 case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
211 ctx->seen |= SEEN_XREG; 204 ctx->seen |= SEEN_XREG;
212 PPC_CMPWI(r_X, 0); 205 PPC_CMPWI(r_X, 0);
213 if (ctx->pc_ret0 != -1) { 206 if (ctx->pc_ret0 != -1) {
@@ -223,17 +216,17 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
223 } 216 }
224 PPC_DIVWU(r_A, r_A, r_X); 217 PPC_DIVWU(r_A, r_A, r_X);
225 break; 218 break;
226 case BPF_S_ALU_DIV_K: /* A /= K */ 219 case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
227 if (K == 1) 220 if (K == 1)
228 break; 221 break;
229 PPC_LI32(r_scratch1, K); 222 PPC_LI32(r_scratch1, K);
230 PPC_DIVWU(r_A, r_A, r_scratch1); 223 PPC_DIVWU(r_A, r_A, r_scratch1);
231 break; 224 break;
232 case BPF_S_ALU_AND_X: 225 case BPF_ALU | BPF_AND | BPF_X:
233 ctx->seen |= SEEN_XREG; 226 ctx->seen |= SEEN_XREG;
234 PPC_AND(r_A, r_A, r_X); 227 PPC_AND(r_A, r_A, r_X);
235 break; 228 break;
236 case BPF_S_ALU_AND_K: 229 case BPF_ALU | BPF_AND | BPF_K:
237 if (!IMM_H(K)) 230 if (!IMM_H(K))
238 PPC_ANDI(r_A, r_A, K); 231 PPC_ANDI(r_A, r_A, K);
239 else { 232 else {
@@ -241,51 +234,51 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
241 PPC_AND(r_A, r_A, r_scratch1); 234 PPC_AND(r_A, r_A, r_scratch1);
242 } 235 }
243 break; 236 break;
244 case BPF_S_ALU_OR_X: 237 case BPF_ALU | BPF_OR | BPF_X:
245 ctx->seen |= SEEN_XREG; 238 ctx->seen |= SEEN_XREG;
246 PPC_OR(r_A, r_A, r_X); 239 PPC_OR(r_A, r_A, r_X);
247 break; 240 break;
248 case BPF_S_ALU_OR_K: 241 case BPF_ALU | BPF_OR | BPF_K:
249 if (IMM_L(K)) 242 if (IMM_L(K))
250 PPC_ORI(r_A, r_A, IMM_L(K)); 243 PPC_ORI(r_A, r_A, IMM_L(K));
251 if (K >= 65536) 244 if (K >= 65536)
252 PPC_ORIS(r_A, r_A, IMM_H(K)); 245 PPC_ORIS(r_A, r_A, IMM_H(K));
253 break; 246 break;
254 case BPF_S_ANC_ALU_XOR_X: 247 case BPF_ANC | SKF_AD_ALU_XOR_X:
255 case BPF_S_ALU_XOR_X: /* A ^= X */ 248 case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
256 ctx->seen |= SEEN_XREG; 249 ctx->seen |= SEEN_XREG;
257 PPC_XOR(r_A, r_A, r_X); 250 PPC_XOR(r_A, r_A, r_X);
258 break; 251 break;
259 case BPF_S_ALU_XOR_K: /* A ^= K */ 252 case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
260 if (IMM_L(K)) 253 if (IMM_L(K))
261 PPC_XORI(r_A, r_A, IMM_L(K)); 254 PPC_XORI(r_A, r_A, IMM_L(K));
262 if (K >= 65536) 255 if (K >= 65536)
263 PPC_XORIS(r_A, r_A, IMM_H(K)); 256 PPC_XORIS(r_A, r_A, IMM_H(K));
264 break; 257 break;
265 case BPF_S_ALU_LSH_X: /* A <<= X; */ 258 case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
266 ctx->seen |= SEEN_XREG; 259 ctx->seen |= SEEN_XREG;
267 PPC_SLW(r_A, r_A, r_X); 260 PPC_SLW(r_A, r_A, r_X);
268 break; 261 break;
269 case BPF_S_ALU_LSH_K: 262 case BPF_ALU | BPF_LSH | BPF_K:
270 if (K == 0) 263 if (K == 0)
271 break; 264 break;
272 else 265 else
273 PPC_SLWI(r_A, r_A, K); 266 PPC_SLWI(r_A, r_A, K);
274 break; 267 break;
275 case BPF_S_ALU_RSH_X: /* A >>= X; */ 268 case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
276 ctx->seen |= SEEN_XREG; 269 ctx->seen |= SEEN_XREG;
277 PPC_SRW(r_A, r_A, r_X); 270 PPC_SRW(r_A, r_A, r_X);
278 break; 271 break;
279 case BPF_S_ALU_RSH_K: /* A >>= K; */ 272 case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
280 if (K == 0) 273 if (K == 0)
281 break; 274 break;
282 else 275 else
283 PPC_SRWI(r_A, r_A, K); 276 PPC_SRWI(r_A, r_A, K);
284 break; 277 break;
285 case BPF_S_ALU_NEG: 278 case BPF_ALU | BPF_NEG:
286 PPC_NEG(r_A, r_A); 279 PPC_NEG(r_A, r_A);
287 break; 280 break;
288 case BPF_S_RET_K: 281 case BPF_RET | BPF_K:
289 PPC_LI32(r_ret, K); 282 PPC_LI32(r_ret, K);
290 if (!K) { 283 if (!K) {
291 if (ctx->pc_ret0 == -1) 284 if (ctx->pc_ret0 == -1)
@@ -312,7 +305,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
312 PPC_BLR(); 305 PPC_BLR();
313 } 306 }
314 break; 307 break;
315 case BPF_S_RET_A: 308 case BPF_RET | BPF_A:
316 PPC_MR(r_ret, r_A); 309 PPC_MR(r_ret, r_A);
317 if (i != flen - 1) { 310 if (i != flen - 1) {
318 if (ctx->seen) 311 if (ctx->seen)
@@ -321,53 +314,53 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
321 PPC_BLR(); 314 PPC_BLR();
322 } 315 }
323 break; 316 break;
324 case BPF_S_MISC_TAX: /* X = A */ 317 case BPF_MISC | BPF_TAX: /* X = A */
325 PPC_MR(r_X, r_A); 318 PPC_MR(r_X, r_A);
326 break; 319 break;
327 case BPF_S_MISC_TXA: /* A = X */ 320 case BPF_MISC | BPF_TXA: /* A = X */
328 ctx->seen |= SEEN_XREG; 321 ctx->seen |= SEEN_XREG;
329 PPC_MR(r_A, r_X); 322 PPC_MR(r_A, r_X);
330 break; 323 break;
331 324
332 /*** Constant loads/M[] access ***/ 325 /*** Constant loads/M[] access ***/
333 case BPF_S_LD_IMM: /* A = K */ 326 case BPF_LD | BPF_IMM: /* A = K */
334 PPC_LI32(r_A, K); 327 PPC_LI32(r_A, K);
335 break; 328 break;
336 case BPF_S_LDX_IMM: /* X = K */ 329 case BPF_LDX | BPF_IMM: /* X = K */
337 PPC_LI32(r_X, K); 330 PPC_LI32(r_X, K);
338 break; 331 break;
339 case BPF_S_LD_MEM: /* A = mem[K] */ 332 case BPF_LD | BPF_MEM: /* A = mem[K] */
340 PPC_MR(r_A, r_M + (K & 0xf)); 333 PPC_MR(r_A, r_M + (K & 0xf));
341 ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); 334 ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
342 break; 335 break;
343 case BPF_S_LDX_MEM: /* X = mem[K] */ 336 case BPF_LDX | BPF_MEM: /* X = mem[K] */
344 PPC_MR(r_X, r_M + (K & 0xf)); 337 PPC_MR(r_X, r_M + (K & 0xf));
345 ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); 338 ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
346 break; 339 break;
347 case BPF_S_ST: /* mem[K] = A */ 340 case BPF_ST: /* mem[K] = A */
348 PPC_MR(r_M + (K & 0xf), r_A); 341 PPC_MR(r_M + (K & 0xf), r_A);
349 ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); 342 ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
350 break; 343 break;
351 case BPF_S_STX: /* mem[K] = X */ 344 case BPF_STX: /* mem[K] = X */
352 PPC_MR(r_M + (K & 0xf), r_X); 345 PPC_MR(r_M + (K & 0xf), r_X);
353 ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); 346 ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
354 break; 347 break;
355 case BPF_S_LD_W_LEN: /* A = skb->len; */ 348 case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
356 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 349 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
357 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); 350 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
358 break; 351 break;
359 case BPF_S_LDX_W_LEN: /* X = skb->len; */ 352 case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
360 PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); 353 PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
361 break; 354 break;
362 355
363 /*** Ancillary info loads ***/ 356 /*** Ancillary info loads ***/
364 case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ 357 case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
365 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 358 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
366 protocol) != 2); 359 protocol) != 2);
367 PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, 360 PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
368 protocol)); 361 protocol));
369 break; 362 break;
370 case BPF_S_ANC_IFINDEX: 363 case BPF_ANC | SKF_AD_IFINDEX:
371 PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, 364 PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
372 dev)); 365 dev));
373 PPC_CMPDI(r_scratch1, 0); 366 PPC_CMPDI(r_scratch1, 0);
@@ -384,33 +377,33 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
384 PPC_LWZ_OFFS(r_A, r_scratch1, 377 PPC_LWZ_OFFS(r_A, r_scratch1,
385 offsetof(struct net_device, ifindex)); 378 offsetof(struct net_device, ifindex));
386 break; 379 break;
387 case BPF_S_ANC_MARK: 380 case BPF_ANC | SKF_AD_MARK:
388 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 381 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
389 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 382 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
390 mark)); 383 mark));
391 break; 384 break;
392 case BPF_S_ANC_RXHASH: 385 case BPF_ANC | SKF_AD_RXHASH:
393 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 386 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
394 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 387 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
395 hash)); 388 hash));
396 break; 389 break;
397 case BPF_S_ANC_VLAN_TAG: 390 case BPF_ANC | SKF_AD_VLAN_TAG:
398 case BPF_S_ANC_VLAN_TAG_PRESENT: 391 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
399 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 392 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
400 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 393 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
401 vlan_tci)); 394 vlan_tci));
402 if (filter[i].code == BPF_S_ANC_VLAN_TAG) 395 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
403 PPC_ANDI(r_A, r_A, VLAN_VID_MASK); 396 PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
404 else 397 else
405 PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); 398 PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
406 break; 399 break;
407 case BPF_S_ANC_QUEUE: 400 case BPF_ANC | SKF_AD_QUEUE:
408 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 401 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
409 queue_mapping) != 2); 402 queue_mapping) != 2);
410 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 403 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
411 queue_mapping)); 404 queue_mapping));
412 break; 405 break;
413 case BPF_S_ANC_CPU: 406 case BPF_ANC | SKF_AD_CPU:
414#ifdef CONFIG_SMP 407#ifdef CONFIG_SMP
415 /* 408 /*
416 * PACA ptr is r13: 409 * PACA ptr is r13:
@@ -426,13 +419,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
426 break; 419 break;
427 420
428 /*** Absolute loads from packet header/data ***/ 421 /*** Absolute loads from packet header/data ***/
429 case BPF_S_LD_W_ABS: 422 case BPF_LD | BPF_W | BPF_ABS:
430 func = CHOOSE_LOAD_FUNC(K, sk_load_word); 423 func = CHOOSE_LOAD_FUNC(K, sk_load_word);
431 goto common_load; 424 goto common_load;
432 case BPF_S_LD_H_ABS: 425 case BPF_LD | BPF_H | BPF_ABS:
433 func = CHOOSE_LOAD_FUNC(K, sk_load_half); 426 func = CHOOSE_LOAD_FUNC(K, sk_load_half);
434 goto common_load; 427 goto common_load;
435 case BPF_S_LD_B_ABS: 428 case BPF_LD | BPF_B | BPF_ABS:
436 func = CHOOSE_LOAD_FUNC(K, sk_load_byte); 429 func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
437 common_load: 430 common_load:
438 /* Load from [K]. */ 431 /* Load from [K]. */
@@ -449,13 +442,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
449 break; 442 break;
450 443
451 /*** Indirect loads from packet header/data ***/ 444 /*** Indirect loads from packet header/data ***/
452 case BPF_S_LD_W_IND: 445 case BPF_LD | BPF_W | BPF_IND:
453 func = sk_load_word; 446 func = sk_load_word;
454 goto common_load_ind; 447 goto common_load_ind;
455 case BPF_S_LD_H_IND: 448 case BPF_LD | BPF_H | BPF_IND:
456 func = sk_load_half; 449 func = sk_load_half;
457 goto common_load_ind; 450 goto common_load_ind;
458 case BPF_S_LD_B_IND: 451 case BPF_LD | BPF_B | BPF_IND:
459 func = sk_load_byte; 452 func = sk_load_byte;
460 common_load_ind: 453 common_load_ind:
461 /* 454 /*
@@ -473,31 +466,31 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
473 PPC_BCC(COND_LT, exit_addr); 466 PPC_BCC(COND_LT, exit_addr);
474 break; 467 break;
475 468
476 case BPF_S_LDX_B_MSH: 469 case BPF_LDX | BPF_B | BPF_MSH:
477 func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); 470 func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
478 goto common_load; 471 goto common_load;
479 break; 472 break;
480 473
481 /*** Jump and branches ***/ 474 /*** Jump and branches ***/
482 case BPF_S_JMP_JA: 475 case BPF_JMP | BPF_JA:
483 if (K != 0) 476 if (K != 0)
484 PPC_JMP(addrs[i + 1 + K]); 477 PPC_JMP(addrs[i + 1 + K]);
485 break; 478 break;
486 479
487 case BPF_S_JMP_JGT_K: 480 case BPF_JMP | BPF_JGT | BPF_K:
488 case BPF_S_JMP_JGT_X: 481 case BPF_JMP | BPF_JGT | BPF_X:
489 true_cond = COND_GT; 482 true_cond = COND_GT;
490 goto cond_branch; 483 goto cond_branch;
491 case BPF_S_JMP_JGE_K: 484 case BPF_JMP | BPF_JGE | BPF_K:
492 case BPF_S_JMP_JGE_X: 485 case BPF_JMP | BPF_JGE | BPF_X:
493 true_cond = COND_GE; 486 true_cond = COND_GE;
494 goto cond_branch; 487 goto cond_branch;
495 case BPF_S_JMP_JEQ_K: 488 case BPF_JMP | BPF_JEQ | BPF_K:
496 case BPF_S_JMP_JEQ_X: 489 case BPF_JMP | BPF_JEQ | BPF_X:
497 true_cond = COND_EQ; 490 true_cond = COND_EQ;
498 goto cond_branch; 491 goto cond_branch;
499 case BPF_S_JMP_JSET_K: 492 case BPF_JMP | BPF_JSET | BPF_K:
500 case BPF_S_JMP_JSET_X: 493 case BPF_JMP | BPF_JSET | BPF_X:
501 true_cond = COND_NE; 494 true_cond = COND_NE;
502 /* Fall through */ 495 /* Fall through */
503 cond_branch: 496 cond_branch:
@@ -508,20 +501,20 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
508 break; 501 break;
509 } 502 }
510 503
511 switch (filter[i].code) { 504 switch (code) {
512 case BPF_S_JMP_JGT_X: 505 case BPF_JMP | BPF_JGT | BPF_X:
513 case BPF_S_JMP_JGE_X: 506 case BPF_JMP | BPF_JGE | BPF_X:
514 case BPF_S_JMP_JEQ_X: 507 case BPF_JMP | BPF_JEQ | BPF_X:
515 ctx->seen |= SEEN_XREG; 508 ctx->seen |= SEEN_XREG;
516 PPC_CMPLW(r_A, r_X); 509 PPC_CMPLW(r_A, r_X);
517 break; 510 break;
518 case BPF_S_JMP_JSET_X: 511 case BPF_JMP | BPF_JSET | BPF_X:
519 ctx->seen |= SEEN_XREG; 512 ctx->seen |= SEEN_XREG;
520 PPC_AND_DOT(r_scratch1, r_A, r_X); 513 PPC_AND_DOT(r_scratch1, r_A, r_X);
521 break; 514 break;
522 case BPF_S_JMP_JEQ_K: 515 case BPF_JMP | BPF_JEQ | BPF_K:
523 case BPF_S_JMP_JGT_K: 516 case BPF_JMP | BPF_JGT | BPF_K:
524 case BPF_S_JMP_JGE_K: 517 case BPF_JMP | BPF_JGE | BPF_K:
525 if (K < 32768) 518 if (K < 32768)
526 PPC_CMPLWI(r_A, K); 519 PPC_CMPLWI(r_A, K);
527 else { 520 else {
@@ -529,7 +522,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
529 PPC_CMPLW(r_A, r_scratch1); 522 PPC_CMPLW(r_A, r_scratch1);
530 } 523 }
531 break; 524 break;
532 case BPF_S_JMP_JSET_K: 525 case BPF_JMP | BPF_JSET | BPF_K:
533 if (K < 32768) 526 if (K < 32768)
534 /* PPC_ANDI is /only/ dot-form */ 527 /* PPC_ANDI is /only/ dot-form */
535 PPC_ANDI(r_scratch1, r_A, K); 528 PPC_ANDI(r_scratch1, r_A, K);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e9f8fa9337fe..a2cbd875543a 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -269,27 +269,17 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
269 EMIT4(0xa7c80000); 269 EMIT4(0xa7c80000);
270 /* Clear A if the first register does not set it. */ 270 /* Clear A if the first register does not set it. */
271 switch (filter[0].code) { 271 switch (filter[0].code) {
272 case BPF_S_LD_W_ABS: 272 case BPF_LD | BPF_W | BPF_ABS:
273 case BPF_S_LD_H_ABS: 273 case BPF_LD | BPF_H | BPF_ABS:
274 case BPF_S_LD_B_ABS: 274 case BPF_LD | BPF_B | BPF_ABS:
275 case BPF_S_LD_W_LEN: 275 case BPF_LD | BPF_W | BPF_LEN:
276 case BPF_S_LD_W_IND: 276 case BPF_LD | BPF_W | BPF_IND:
277 case BPF_S_LD_H_IND: 277 case BPF_LD | BPF_H | BPF_IND:
278 case BPF_S_LD_B_IND: 278 case BPF_LD | BPF_B | BPF_IND:
279 case BPF_S_LD_IMM: 279 case BPF_LD | BPF_IMM:
280 case BPF_S_LD_MEM: 280 case BPF_LD | BPF_MEM:
281 case BPF_S_MISC_TXA: 281 case BPF_MISC | BPF_TXA:
282 case BPF_S_ANC_PROTOCOL: 282 case BPF_RET | BPF_K:
283 case BPF_S_ANC_PKTTYPE:
284 case BPF_S_ANC_IFINDEX:
285 case BPF_S_ANC_MARK:
286 case BPF_S_ANC_QUEUE:
287 case BPF_S_ANC_HATYPE:
288 case BPF_S_ANC_RXHASH:
289 case BPF_S_ANC_CPU:
290 case BPF_S_ANC_VLAN_TAG:
291 case BPF_S_ANC_VLAN_TAG_PRESENT:
292 case BPF_S_RET_K:
293 /* first instruction sets A register */ 283 /* first instruction sets A register */
294 break; 284 break;
295 default: /* A = 0 */ 285 default: /* A = 0 */
@@ -304,15 +294,18 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
304 unsigned int K; 294 unsigned int K;
305 int offset; 295 int offset;
306 unsigned int mask; 296 unsigned int mask;
297 u16 code;
307 298
308 K = filter->k; 299 K = filter->k;
309 switch (filter->code) { 300 code = bpf_anc_helper(filter);
310 case BPF_S_ALU_ADD_X: /* A += X */ 301
302 switch (code) {
303 case BPF_ALU | BPF_ADD | BPF_X: /* A += X */
311 jit->seen |= SEEN_XREG; 304 jit->seen |= SEEN_XREG;
312 /* ar %r5,%r12 */ 305 /* ar %r5,%r12 */
313 EMIT2(0x1a5c); 306 EMIT2(0x1a5c);
314 break; 307 break;
315 case BPF_S_ALU_ADD_K: /* A += K */ 308 case BPF_ALU | BPF_ADD | BPF_K: /* A += K */
316 if (!K) 309 if (!K)
317 break; 310 break;
318 if (K <= 16383) 311 if (K <= 16383)
@@ -325,12 +318,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
325 /* a %r5,<d(K)>(%r13) */ 318 /* a %r5,<d(K)>(%r13) */
326 EMIT4_DISP(0x5a50d000, EMIT_CONST(K)); 319 EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
327 break; 320 break;
328 case BPF_S_ALU_SUB_X: /* A -= X */ 321 case BPF_ALU | BPF_SUB | BPF_X: /* A -= X */
329 jit->seen |= SEEN_XREG; 322 jit->seen |= SEEN_XREG;
330 /* sr %r5,%r12 */ 323 /* sr %r5,%r12 */
331 EMIT2(0x1b5c); 324 EMIT2(0x1b5c);
332 break; 325 break;
333 case BPF_S_ALU_SUB_K: /* A -= K */ 326 case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
334 if (!K) 327 if (!K)
335 break; 328 break;
336 if (K <= 16384) 329 if (K <= 16384)
@@ -343,12 +336,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
343 /* s %r5,<d(K)>(%r13) */ 336 /* s %r5,<d(K)>(%r13) */
344 EMIT4_DISP(0x5b50d000, EMIT_CONST(K)); 337 EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
345 break; 338 break;
346 case BPF_S_ALU_MUL_X: /* A *= X */ 339 case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */
347 jit->seen |= SEEN_XREG; 340 jit->seen |= SEEN_XREG;
348 /* msr %r5,%r12 */ 341 /* msr %r5,%r12 */
349 EMIT4(0xb252005c); 342 EMIT4(0xb252005c);
350 break; 343 break;
351 case BPF_S_ALU_MUL_K: /* A *= K */ 344 case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
352 if (K <= 16383) 345 if (K <= 16383)
353 /* mhi %r5,K */ 346 /* mhi %r5,K */
354 EMIT4_IMM(0xa75c0000, K); 347 EMIT4_IMM(0xa75c0000, K);
@@ -359,7 +352,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
359 /* ms %r5,<d(K)>(%r13) */ 352 /* ms %r5,<d(K)>(%r13) */
360 EMIT4_DISP(0x7150d000, EMIT_CONST(K)); 353 EMIT4_DISP(0x7150d000, EMIT_CONST(K));
361 break; 354 break;
362 case BPF_S_ALU_DIV_X: /* A /= X */ 355 case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */
363 jit->seen |= SEEN_XREG | SEEN_RET0; 356 jit->seen |= SEEN_XREG | SEEN_RET0;
364 /* ltr %r12,%r12 */ 357 /* ltr %r12,%r12 */
365 EMIT2(0x12cc); 358 EMIT2(0x12cc);
@@ -370,7 +363,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
370 /* dlr %r4,%r12 */ 363 /* dlr %r4,%r12 */
371 EMIT4(0xb997004c); 364 EMIT4(0xb997004c);
372 break; 365 break;
373 case BPF_S_ALU_DIV_K: /* A /= K */ 366 case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
374 if (K == 1) 367 if (K == 1)
375 break; 368 break;
376 /* lhi %r4,0 */ 369 /* lhi %r4,0 */
@@ -378,7 +371,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
378 /* dl %r4,<d(K)>(%r13) */ 371 /* dl %r4,<d(K)>(%r13) */
379 EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); 372 EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
380 break; 373 break;
381 case BPF_S_ALU_MOD_X: /* A %= X */ 374 case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */
382 jit->seen |= SEEN_XREG | SEEN_RET0; 375 jit->seen |= SEEN_XREG | SEEN_RET0;
383 /* ltr %r12,%r12 */ 376 /* ltr %r12,%r12 */
384 EMIT2(0x12cc); 377 EMIT2(0x12cc);
@@ -391,7 +384,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
391 /* lr %r5,%r4 */ 384 /* lr %r5,%r4 */
392 EMIT2(0x1854); 385 EMIT2(0x1854);
393 break; 386 break;
394 case BPF_S_ALU_MOD_K: /* A %= K */ 387 case BPF_ALU | BPF_MOD | BPF_K: /* A %= K */
395 if (K == 1) { 388 if (K == 1) {
396 /* lhi %r5,0 */ 389 /* lhi %r5,0 */
397 EMIT4(0xa7580000); 390 EMIT4(0xa7580000);
@@ -404,12 +397,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
404 /* lr %r5,%r4 */ 397 /* lr %r5,%r4 */
405 EMIT2(0x1854); 398 EMIT2(0x1854);
406 break; 399 break;
407 case BPF_S_ALU_AND_X: /* A &= X */ 400 case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
408 jit->seen |= SEEN_XREG; 401 jit->seen |= SEEN_XREG;
409 /* nr %r5,%r12 */ 402 /* nr %r5,%r12 */
410 EMIT2(0x145c); 403 EMIT2(0x145c);
411 break; 404 break;
412 case BPF_S_ALU_AND_K: /* A &= K */ 405 case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
413 if (test_facility(21)) 406 if (test_facility(21))
414 /* nilf %r5,<K> */ 407 /* nilf %r5,<K> */
415 EMIT6_IMM(0xc05b0000, K); 408 EMIT6_IMM(0xc05b0000, K);
@@ -417,12 +410,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
417 /* n %r5,<d(K)>(%r13) */ 410 /* n %r5,<d(K)>(%r13) */
418 EMIT4_DISP(0x5450d000, EMIT_CONST(K)); 411 EMIT4_DISP(0x5450d000, EMIT_CONST(K));
419 break; 412 break;
420 case BPF_S_ALU_OR_X: /* A |= X */ 413 case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
421 jit->seen |= SEEN_XREG; 414 jit->seen |= SEEN_XREG;
422 /* or %r5,%r12 */ 415 /* or %r5,%r12 */
423 EMIT2(0x165c); 416 EMIT2(0x165c);
424 break; 417 break;
425 case BPF_S_ALU_OR_K: /* A |= K */ 418 case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
426 if (test_facility(21)) 419 if (test_facility(21))
427 /* oilf %r5,<K> */ 420 /* oilf %r5,<K> */
428 EMIT6_IMM(0xc05d0000, K); 421 EMIT6_IMM(0xc05d0000, K);
@@ -430,55 +423,55 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
430 /* o %r5,<d(K)>(%r13) */ 423 /* o %r5,<d(K)>(%r13) */
431 EMIT4_DISP(0x5650d000, EMIT_CONST(K)); 424 EMIT4_DISP(0x5650d000, EMIT_CONST(K));
432 break; 425 break;
433 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ 426 case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
434 case BPF_S_ALU_XOR_X: 427 case BPF_ALU | BPF_XOR | BPF_X:
435 jit->seen |= SEEN_XREG; 428 jit->seen |= SEEN_XREG;
436 /* xr %r5,%r12 */ 429 /* xr %r5,%r12 */
437 EMIT2(0x175c); 430 EMIT2(0x175c);
438 break; 431 break;
439 case BPF_S_ALU_XOR_K: /* A ^= K */ 432 case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
440 if (!K) 433 if (!K)
441 break; 434 break;
442 /* x %r5,<d(K)>(%r13) */ 435 /* x %r5,<d(K)>(%r13) */
443 EMIT4_DISP(0x5750d000, EMIT_CONST(K)); 436 EMIT4_DISP(0x5750d000, EMIT_CONST(K));
444 break; 437 break;
445 case BPF_S_ALU_LSH_X: /* A <<= X; */ 438 case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
446 jit->seen |= SEEN_XREG; 439 jit->seen |= SEEN_XREG;
447 /* sll %r5,0(%r12) */ 440 /* sll %r5,0(%r12) */
448 EMIT4(0x8950c000); 441 EMIT4(0x8950c000);
449 break; 442 break;
450 case BPF_S_ALU_LSH_K: /* A <<= K */ 443 case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
451 if (K == 0) 444 if (K == 0)
452 break; 445 break;
453 /* sll %r5,K */ 446 /* sll %r5,K */
454 EMIT4_DISP(0x89500000, K); 447 EMIT4_DISP(0x89500000, K);
455 break; 448 break;
456 case BPF_S_ALU_RSH_X: /* A >>= X; */ 449 case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
457 jit->seen |= SEEN_XREG; 450 jit->seen |= SEEN_XREG;
458 /* srl %r5,0(%r12) */ 451 /* srl %r5,0(%r12) */
459 EMIT4(0x8850c000); 452 EMIT4(0x8850c000);
460 break; 453 break;
461 case BPF_S_ALU_RSH_K: /* A >>= K; */ 454 case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
462 if (K == 0) 455 if (K == 0)
463 break; 456 break;
464 /* srl %r5,K */ 457 /* srl %r5,K */
465 EMIT4_DISP(0x88500000, K); 458 EMIT4_DISP(0x88500000, K);
466 break; 459 break;
467 case BPF_S_ALU_NEG: /* A = -A */ 460 case BPF_ALU | BPF_NEG: /* A = -A */
468 /* lnr %r5,%r5 */ 461 /* lnr %r5,%r5 */
469 EMIT2(0x1155); 462 EMIT2(0x1155);
470 break; 463 break;
471 case BPF_S_JMP_JA: /* ip += K */ 464 case BPF_JMP | BPF_JA: /* ip += K */
472 offset = addrs[i + K] + jit->start - jit->prg; 465 offset = addrs[i + K] + jit->start - jit->prg;
473 EMIT4_PCREL(0xa7f40000, offset); 466 EMIT4_PCREL(0xa7f40000, offset);
474 break; 467 break;
475 case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */ 468 case BPF_JMP | BPF_JGT | BPF_K: /* ip += (A > K) ? jt : jf */
476 mask = 0x200000; /* jh */ 469 mask = 0x200000; /* jh */
477 goto kbranch; 470 goto kbranch;
478 case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */ 471 case BPF_JMP | BPF_JGE | BPF_K: /* ip += (A >= K) ? jt : jf */
479 mask = 0xa00000; /* jhe */ 472 mask = 0xa00000; /* jhe */
480 goto kbranch; 473 goto kbranch;
481 case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */ 474 case BPF_JMP | BPF_JEQ | BPF_K: /* ip += (A == K) ? jt : jf */
482 mask = 0x800000; /* je */ 475 mask = 0x800000; /* je */
483kbranch: /* Emit compare if the branch targets are different */ 476kbranch: /* Emit compare if the branch targets are different */
484 if (filter->jt != filter->jf) { 477 if (filter->jt != filter->jf) {
@@ -511,7 +504,7 @@ branch: if (filter->jt == filter->jf) {
511 EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset); 504 EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
512 } 505 }
513 break; 506 break;
514 case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */ 507 case BPF_JMP | BPF_JSET | BPF_K: /* ip += (A & K) ? jt : jf */
515 mask = 0x700000; /* jnz */ 508 mask = 0x700000; /* jnz */
516 /* Emit test if the branch targets are different */ 509 /* Emit test if the branch targets are different */
517 if (filter->jt != filter->jf) { 510 if (filter->jt != filter->jf) {
@@ -525,13 +518,13 @@ branch: if (filter->jt == filter->jf) {
525 EMIT4_IMM(0xa7510000, K); 518 EMIT4_IMM(0xa7510000, K);
526 } 519 }
527 goto branch; 520 goto branch;
528 case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */ 521 case BPF_JMP | BPF_JGT | BPF_X: /* ip += (A > X) ? jt : jf */
529 mask = 0x200000; /* jh */ 522 mask = 0x200000; /* jh */
530 goto xbranch; 523 goto xbranch;
531 case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */ 524 case BPF_JMP | BPF_JGE | BPF_X: /* ip += (A >= X) ? jt : jf */
532 mask = 0xa00000; /* jhe */ 525 mask = 0xa00000; /* jhe */
533 goto xbranch; 526 goto xbranch;
534 case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */ 527 case BPF_JMP | BPF_JEQ | BPF_X: /* ip += (A == X) ? jt : jf */
535 mask = 0x800000; /* je */ 528 mask = 0x800000; /* je */
536xbranch: /* Emit compare if the branch targets are different */ 529xbranch: /* Emit compare if the branch targets are different */
537 if (filter->jt != filter->jf) { 530 if (filter->jt != filter->jf) {
@@ -540,7 +533,7 @@ xbranch: /* Emit compare if the branch targets are different */
540 EMIT2(0x195c); 533 EMIT2(0x195c);
541 } 534 }
542 goto branch; 535 goto branch;
543 case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */ 536 case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
544 mask = 0x700000; /* jnz */ 537 mask = 0x700000; /* jnz */
545 /* Emit test if the branch targets are different */ 538 /* Emit test if the branch targets are different */
546 if (filter->jt != filter->jf) { 539 if (filter->jt != filter->jf) {
@@ -551,15 +544,15 @@ xbranch: /* Emit compare if the branch targets are different */
551 EMIT2(0x144c); 544 EMIT2(0x144c);
552 } 545 }
553 goto branch; 546 goto branch;
554 case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */ 547 case BPF_LD | BPF_W | BPF_ABS: /* A = *(u32 *) (skb->data+K) */
555 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD; 548 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
556 offset = jit->off_load_word; 549 offset = jit->off_load_word;
557 goto load_abs; 550 goto load_abs;
558 case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */ 551 case BPF_LD | BPF_H | BPF_ABS: /* A = *(u16 *) (skb->data+K) */
559 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF; 552 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
560 offset = jit->off_load_half; 553 offset = jit->off_load_half;
561 goto load_abs; 554 goto load_abs;
562 case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */ 555 case BPF_LD | BPF_B | BPF_ABS: /* A = *(u8 *) (skb->data+K) */
563 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE; 556 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
564 offset = jit->off_load_byte; 557 offset = jit->off_load_byte;
565load_abs: if ((int) K < 0) 558load_abs: if ((int) K < 0)
@@ -573,19 +566,19 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
573 /* jnz <ret0> */ 566 /* jnz <ret0> */
574 EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg)); 567 EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
575 break; 568 break;
576 case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */ 569 case BPF_LD | BPF_W | BPF_IND: /* A = *(u32 *) (skb->data+K+X) */
577 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD; 570 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
578 offset = jit->off_load_iword; 571 offset = jit->off_load_iword;
579 goto call_fn; 572 goto call_fn;
580 case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */ 573 case BPF_LD | BPF_H | BPF_IND: /* A = *(u16 *) (skb->data+K+X) */
581 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF; 574 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
582 offset = jit->off_load_ihalf; 575 offset = jit->off_load_ihalf;
583 goto call_fn; 576 goto call_fn;
584 case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */ 577 case BPF_LD | BPF_B | BPF_IND: /* A = *(u8 *) (skb->data+K+X) */
585 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE; 578 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
586 offset = jit->off_load_ibyte; 579 offset = jit->off_load_ibyte;
587 goto call_fn; 580 goto call_fn;
588 case BPF_S_LDX_B_MSH: 581 case BPF_LDX | BPF_B | BPF_MSH:
589 /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */ 582 /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
590 jit->seen |= SEEN_RET0; 583 jit->seen |= SEEN_RET0;
591 if ((int) K < 0) { 584 if ((int) K < 0) {
@@ -596,17 +589,17 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
596 jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH; 589 jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
597 offset = jit->off_load_bmsh; 590 offset = jit->off_load_bmsh;
598 goto call_fn; 591 goto call_fn;
599 case BPF_S_LD_W_LEN: /* A = skb->len; */ 592 case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
600 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 593 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
601 /* l %r5,<d(len)>(%r2) */ 594 /* l %r5,<d(len)>(%r2) */
602 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len)); 595 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
603 break; 596 break;
604 case BPF_S_LDX_W_LEN: /* X = skb->len; */ 597 case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
605 jit->seen |= SEEN_XREG; 598 jit->seen |= SEEN_XREG;
606 /* l %r12,<d(len)>(%r2) */ 599 /* l %r12,<d(len)>(%r2) */
607 EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len)); 600 EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
608 break; 601 break;
609 case BPF_S_LD_IMM: /* A = K */ 602 case BPF_LD | BPF_IMM: /* A = K */
610 if (K <= 16383) 603 if (K <= 16383)
611 /* lhi %r5,K */ 604 /* lhi %r5,K */
612 EMIT4_IMM(0xa7580000, K); 605 EMIT4_IMM(0xa7580000, K);
@@ -617,7 +610,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
617 /* l %r5,<d(K)>(%r13) */ 610 /* l %r5,<d(K)>(%r13) */
618 EMIT4_DISP(0x5850d000, EMIT_CONST(K)); 611 EMIT4_DISP(0x5850d000, EMIT_CONST(K));
619 break; 612 break;
620 case BPF_S_LDX_IMM: /* X = K */ 613 case BPF_LDX | BPF_IMM: /* X = K */
621 jit->seen |= SEEN_XREG; 614 jit->seen |= SEEN_XREG;
622 if (K <= 16383) 615 if (K <= 16383)
623 /* lhi %r12,<K> */ 616 /* lhi %r12,<K> */
@@ -629,29 +622,29 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
629 /* l %r12,<d(K)>(%r13) */ 622 /* l %r12,<d(K)>(%r13) */
630 EMIT4_DISP(0x58c0d000, EMIT_CONST(K)); 623 EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
631 break; 624 break;
632 case BPF_S_LD_MEM: /* A = mem[K] */ 625 case BPF_LD | BPF_MEM: /* A = mem[K] */
633 jit->seen |= SEEN_MEM; 626 jit->seen |= SEEN_MEM;
634 /* l %r5,<K>(%r15) */ 627 /* l %r5,<K>(%r15) */
635 EMIT4_DISP(0x5850f000, 628 EMIT4_DISP(0x5850f000,
636 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); 629 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
637 break; 630 break;
638 case BPF_S_LDX_MEM: /* X = mem[K] */ 631 case BPF_LDX | BPF_MEM: /* X = mem[K] */
639 jit->seen |= SEEN_XREG | SEEN_MEM; 632 jit->seen |= SEEN_XREG | SEEN_MEM;
640 /* l %r12,<K>(%r15) */ 633 /* l %r12,<K>(%r15) */
641 EMIT4_DISP(0x58c0f000, 634 EMIT4_DISP(0x58c0f000,
642 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); 635 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
643 break; 636 break;
644 case BPF_S_MISC_TAX: /* X = A */ 637 case BPF_MISC | BPF_TAX: /* X = A */
645 jit->seen |= SEEN_XREG; 638 jit->seen |= SEEN_XREG;
646 /* lr %r12,%r5 */ 639 /* lr %r12,%r5 */
647 EMIT2(0x18c5); 640 EMIT2(0x18c5);
648 break; 641 break;
649 case BPF_S_MISC_TXA: /* A = X */ 642 case BPF_MISC | BPF_TXA: /* A = X */
650 jit->seen |= SEEN_XREG; 643 jit->seen |= SEEN_XREG;
651 /* lr %r5,%r12 */ 644 /* lr %r5,%r12 */
652 EMIT2(0x185c); 645 EMIT2(0x185c);
653 break; 646 break;
654 case BPF_S_RET_K: 647 case BPF_RET | BPF_K:
655 if (K == 0) { 648 if (K == 0) {
656 jit->seen |= SEEN_RET0; 649 jit->seen |= SEEN_RET0;
657 if (last) 650 if (last)
@@ -671,33 +664,33 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
671 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); 664 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
672 } 665 }
673 break; 666 break;
674 case BPF_S_RET_A: 667 case BPF_RET | BPF_A:
675 /* llgfr %r2,%r5 */ 668 /* llgfr %r2,%r5 */
676 EMIT4(0xb9160025); 669 EMIT4(0xb9160025);
677 /* j <exit> */ 670 /* j <exit> */
678 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); 671 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
679 break; 672 break;
680 case BPF_S_ST: /* mem[K] = A */ 673 case BPF_ST: /* mem[K] = A */
681 jit->seen |= SEEN_MEM; 674 jit->seen |= SEEN_MEM;
682 /* st %r5,<K>(%r15) */ 675 /* st %r5,<K>(%r15) */
683 EMIT4_DISP(0x5050f000, 676 EMIT4_DISP(0x5050f000,
684 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); 677 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
685 break; 678 break;
686 case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ 679 case BPF_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
687 jit->seen |= SEEN_XREG | SEEN_MEM; 680 jit->seen |= SEEN_XREG | SEEN_MEM;
688 /* st %r12,<K>(%r15) */ 681 /* st %r12,<K>(%r15) */
689 EMIT4_DISP(0x50c0f000, 682 EMIT4_DISP(0x50c0f000,
690 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); 683 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
691 break; 684 break;
692 case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ 685 case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
693 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); 686 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
694 /* lhi %r5,0 */ 687 /* lhi %r5,0 */
695 EMIT4(0xa7580000); 688 EMIT4(0xa7580000);
696 /* icm %r5,3,<d(protocol)>(%r2) */ 689 /* icm %r5,3,<d(protocol)>(%r2) */
697 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol)); 690 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
698 break; 691 break;
699 case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0; 692 case BPF_ANC | SKF_AD_IFINDEX: /* if (!skb->dev) return 0;
700 * A = skb->dev->ifindex */ 693 * A = skb->dev->ifindex */
701 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); 694 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
702 jit->seen |= SEEN_RET0; 695 jit->seen |= SEEN_RET0;
703 /* lg %r1,<d(dev)>(%r2) */ 696 /* lg %r1,<d(dev)>(%r2) */
@@ -709,20 +702,20 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
709 /* l %r5,<d(ifindex)>(%r1) */ 702 /* l %r5,<d(ifindex)>(%r1) */
710 EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex)); 703 EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
711 break; 704 break;
712 case BPF_S_ANC_MARK: /* A = skb->mark */ 705 case BPF_ANC | SKF_AD_MARK: /* A = skb->mark */
713 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 706 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
714 /* l %r5,<d(mark)>(%r2) */ 707 /* l %r5,<d(mark)>(%r2) */
715 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark)); 708 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
716 break; 709 break;
717 case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */ 710 case BPF_ANC | SKF_AD_QUEUE: /* A = skb->queue_mapping */
718 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); 711 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
719 /* lhi %r5,0 */ 712 /* lhi %r5,0 */
720 EMIT4(0xa7580000); 713 EMIT4(0xa7580000);
721 /* icm %r5,3,<d(queue_mapping)>(%r2) */ 714 /* icm %r5,3,<d(queue_mapping)>(%r2) */
722 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping)); 715 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
723 break; 716 break;
724 case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0; 717 case BPF_ANC | SKF_AD_HATYPE: /* if (!skb->dev) return 0;
725 * A = skb->dev->type */ 718 * A = skb->dev->type */
726 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); 719 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
727 jit->seen |= SEEN_RET0; 720 jit->seen |= SEEN_RET0;
728 /* lg %r1,<d(dev)>(%r2) */ 721 /* lg %r1,<d(dev)>(%r2) */
@@ -736,20 +729,20 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
736 /* icm %r5,3,<d(type)>(%r1) */ 729 /* icm %r5,3,<d(type)>(%r1) */
737 EMIT4_DISP(0xbf531000, offsetof(struct net_device, type)); 730 EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
738 break; 731 break;
739 case BPF_S_ANC_RXHASH: /* A = skb->hash */ 732 case BPF_ANC | SKF_AD_RXHASH: /* A = skb->hash */
740 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 733 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
741 /* l %r5,<d(hash)>(%r2) */ 734 /* l %r5,<d(hash)>(%r2) */
742 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash)); 735 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
743 break; 736 break;
744 case BPF_S_ANC_VLAN_TAG: 737 case BPF_ANC | SKF_AD_VLAN_TAG:
745 case BPF_S_ANC_VLAN_TAG_PRESENT: 738 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
746 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 739 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
747 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); 740 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
748 /* lhi %r5,0 */ 741 /* lhi %r5,0 */
749 EMIT4(0xa7580000); 742 EMIT4(0xa7580000);
750 /* icm %r5,3,<d(vlan_tci)>(%r2) */ 743 /* icm %r5,3,<d(vlan_tci)>(%r2) */
751 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci)); 744 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
752 if (filter->code == BPF_S_ANC_VLAN_TAG) { 745 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
753 /* nill %r5,0xefff */ 746 /* nill %r5,0xefff */
754 EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT); 747 EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
755 } else { 748 } else {
@@ -759,7 +752,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
759 EMIT4_DISP(0x88500000, 12); 752 EMIT4_DISP(0x88500000, 12);
760 } 753 }
761 break; 754 break;
762 case BPF_S_ANC_PKTTYPE: 755 case BPF_ANC | SKF_AD_PKTTYPE:
763 if (pkt_type_offset < 0) 756 if (pkt_type_offset < 0)
764 goto out; 757 goto out;
765 /* lhi %r5,0 */ 758 /* lhi %r5,0 */
@@ -769,7 +762,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
769 /* srl %r5,5 */ 762 /* srl %r5,5 */
770 EMIT4_DISP(0x88500000, 5); 763 EMIT4_DISP(0x88500000, 5);
771 break; 764 break;
772 case BPF_S_ANC_CPU: /* A = smp_processor_id() */ 765 case BPF_ANC | SKF_AD_CPU: /* A = smp_processor_id() */
773#ifdef CONFIG_SMP 766#ifdef CONFIG_SMP
774 /* l %r5,<d(cpu_nr)> */ 767 /* l %r5,<d(cpu_nr)> */
775 EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr)); 768 EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index a82c6b2a9780..c88cf147deed 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp)
415 emit_reg_move(O7, r_saved_O7); 415 emit_reg_move(O7, r_saved_O7);
416 416
417 switch (filter[0].code) { 417 switch (filter[0].code) {
418 case BPF_S_RET_K: 418 case BPF_RET | BPF_K:
419 case BPF_S_LD_W_LEN: 419 case BPF_LD | BPF_W | BPF_LEN:
420 case BPF_S_ANC_PROTOCOL: 420 case BPF_LD | BPF_W | BPF_ABS:
421 case BPF_S_ANC_PKTTYPE: 421 case BPF_LD | BPF_H | BPF_ABS:
422 case BPF_S_ANC_IFINDEX: 422 case BPF_LD | BPF_B | BPF_ABS:
423 case BPF_S_ANC_MARK:
424 case BPF_S_ANC_RXHASH:
425 case BPF_S_ANC_VLAN_TAG:
426 case BPF_S_ANC_VLAN_TAG_PRESENT:
427 case BPF_S_ANC_CPU:
428 case BPF_S_ANC_QUEUE:
429 case BPF_S_LD_W_ABS:
430 case BPF_S_LD_H_ABS:
431 case BPF_S_LD_B_ABS:
432 /* The first instruction sets the A register (or is 423 /* The first instruction sets the A register (or is
433 * a "RET 'constant'") 424 * a "RET 'constant'")
434 */ 425 */
@@ -445,59 +436,60 @@ void bpf_jit_compile(struct sk_filter *fp)
445 unsigned int t_offset; 436 unsigned int t_offset;
446 unsigned int f_offset; 437 unsigned int f_offset;
447 u32 t_op, f_op; 438 u32 t_op, f_op;
439 u16 code = bpf_anc_helper(&filter[i]);
448 int ilen; 440 int ilen;
449 441
450 switch (filter[i].code) { 442 switch (code) {
451 case BPF_S_ALU_ADD_X: /* A += X; */ 443 case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
452 emit_alu_X(ADD); 444 emit_alu_X(ADD);
453 break; 445 break;
454 case BPF_S_ALU_ADD_K: /* A += K; */ 446 case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
455 emit_alu_K(ADD, K); 447 emit_alu_K(ADD, K);
456 break; 448 break;
457 case BPF_S_ALU_SUB_X: /* A -= X; */ 449 case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
458 emit_alu_X(SUB); 450 emit_alu_X(SUB);
459 break; 451 break;
460 case BPF_S_ALU_SUB_K: /* A -= K */ 452 case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
461 emit_alu_K(SUB, K); 453 emit_alu_K(SUB, K);
462 break; 454 break;
463 case BPF_S_ALU_AND_X: /* A &= X */ 455 case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
464 emit_alu_X(AND); 456 emit_alu_X(AND);
465 break; 457 break;
466 case BPF_S_ALU_AND_K: /* A &= K */ 458 case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
467 emit_alu_K(AND, K); 459 emit_alu_K(AND, K);
468 break; 460 break;
469 case BPF_S_ALU_OR_X: /* A |= X */ 461 case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
470 emit_alu_X(OR); 462 emit_alu_X(OR);
471 break; 463 break;
472 case BPF_S_ALU_OR_K: /* A |= K */ 464 case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
473 emit_alu_K(OR, K); 465 emit_alu_K(OR, K);
474 break; 466 break;
475 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ 467 case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
476 case BPF_S_ALU_XOR_X: 468 case BPF_ALU | BPF_XOR | BPF_X:
477 emit_alu_X(XOR); 469 emit_alu_X(XOR);
478 break; 470 break;
479 case BPF_S_ALU_XOR_K: /* A ^= K */ 471 case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
480 emit_alu_K(XOR, K); 472 emit_alu_K(XOR, K);
481 break; 473 break;
482 case BPF_S_ALU_LSH_X: /* A <<= X */ 474 case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */
483 emit_alu_X(SLL); 475 emit_alu_X(SLL);
484 break; 476 break;
485 case BPF_S_ALU_LSH_K: /* A <<= K */ 477 case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
486 emit_alu_K(SLL, K); 478 emit_alu_K(SLL, K);
487 break; 479 break;
488 case BPF_S_ALU_RSH_X: /* A >>= X */ 480 case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */
489 emit_alu_X(SRL); 481 emit_alu_X(SRL);
490 break; 482 break;
491 case BPF_S_ALU_RSH_K: /* A >>= K */ 483 case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */
492 emit_alu_K(SRL, K); 484 emit_alu_K(SRL, K);
493 break; 485 break;
494 case BPF_S_ALU_MUL_X: /* A *= X; */ 486 case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
495 emit_alu_X(MUL); 487 emit_alu_X(MUL);
496 break; 488 break;
497 case BPF_S_ALU_MUL_K: /* A *= K */ 489 case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
498 emit_alu_K(MUL, K); 490 emit_alu_K(MUL, K);
499 break; 491 break;
500 case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/ 492 case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0*/
501 if (K == 1) 493 if (K == 1)
502 break; 494 break;
503 emit_write_y(G0); 495 emit_write_y(G0);
@@ -512,7 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp)
512#endif 504#endif
513 emit_alu_K(DIV, K); 505 emit_alu_K(DIV, K);
514 break; 506 break;
515 case BPF_S_ALU_DIV_X: /* A /= X; */ 507 case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
516 emit_cmpi(r_X, 0); 508 emit_cmpi(r_X, 0);
517 if (pc_ret0 > 0) { 509 if (pc_ret0 > 0) {
518 t_offset = addrs[pc_ret0 - 1]; 510 t_offset = addrs[pc_ret0 - 1];
@@ -544,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp)
544#endif 536#endif
545 emit_alu_X(DIV); 537 emit_alu_X(DIV);
546 break; 538 break;
547 case BPF_S_ALU_NEG: 539 case BPF_ALU | BPF_NEG:
548 emit_neg(); 540 emit_neg();
549 break; 541 break;
550 case BPF_S_RET_K: 542 case BPF_RET | BPF_K:
551 if (!K) { 543 if (!K) {
552 if (pc_ret0 == -1) 544 if (pc_ret0 == -1)
553 pc_ret0 = i; 545 pc_ret0 = i;
@@ -556,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp)
556 emit_loadimm(K, r_A); 548 emit_loadimm(K, r_A);
557 } 549 }
558 /* Fallthrough */ 550 /* Fallthrough */
559 case BPF_S_RET_A: 551 case BPF_RET | BPF_A:
560 if (seen_or_pass0) { 552 if (seen_or_pass0) {
561 if (i != flen - 1) { 553 if (i != flen - 1) {
562 emit_jump(cleanup_addr); 554 emit_jump(cleanup_addr);
@@ -573,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp)
573 emit_jmpl(r_saved_O7, 8, G0); 565 emit_jmpl(r_saved_O7, 8, G0);
574 emit_reg_move(r_A, O0); /* delay slot */ 566 emit_reg_move(r_A, O0); /* delay slot */
575 break; 567 break;
576 case BPF_S_MISC_TAX: 568 case BPF_MISC | BPF_TAX:
577 seen |= SEEN_XREG; 569 seen |= SEEN_XREG;
578 emit_reg_move(r_A, r_X); 570 emit_reg_move(r_A, r_X);
579 break; 571 break;
580 case BPF_S_MISC_TXA: 572 case BPF_MISC | BPF_TXA:
581 seen |= SEEN_XREG; 573 seen |= SEEN_XREG;
582 emit_reg_move(r_X, r_A); 574 emit_reg_move(r_X, r_A);
583 break; 575 break;
584 case BPF_S_ANC_CPU: 576 case BPF_ANC | SKF_AD_CPU:
585 emit_load_cpu(r_A); 577 emit_load_cpu(r_A);
586 break; 578 break;
587 case BPF_S_ANC_PROTOCOL: 579 case BPF_ANC | SKF_AD_PROTOCOL:
588 emit_skb_load16(protocol, r_A); 580 emit_skb_load16(protocol, r_A);
589 break; 581 break;
590#if 0 582#if 0
@@ -592,38 +584,38 @@ void bpf_jit_compile(struct sk_filter *fp)
592 * a bit field even though we very much 584 * a bit field even though we very much
593 * know what we are doing here. 585 * know what we are doing here.
594 */ 586 */
595 case BPF_S_ANC_PKTTYPE: 587 case BPF_ANC | SKF_AD_PKTTYPE:
596 __emit_skb_load8(pkt_type, r_A); 588 __emit_skb_load8(pkt_type, r_A);
597 emit_alu_K(SRL, 5); 589 emit_alu_K(SRL, 5);
598 break; 590 break;
599#endif 591#endif
600 case BPF_S_ANC_IFINDEX: 592 case BPF_ANC | SKF_AD_IFINDEX:
601 emit_skb_loadptr(dev, r_A); 593 emit_skb_loadptr(dev, r_A);
602 emit_cmpi(r_A, 0); 594 emit_cmpi(r_A, 0);
603 emit_branch(BNE_PTR, cleanup_addr + 4); 595 emit_branch(BNE_PTR, cleanup_addr + 4);
604 emit_nop(); 596 emit_nop();
605 emit_load32(r_A, struct net_device, ifindex, r_A); 597 emit_load32(r_A, struct net_device, ifindex, r_A);
606 break; 598 break;
607 case BPF_S_ANC_MARK: 599 case BPF_ANC | SKF_AD_MARK:
608 emit_skb_load32(mark, r_A); 600 emit_skb_load32(mark, r_A);
609 break; 601 break;
610 case BPF_S_ANC_QUEUE: 602 case BPF_ANC | SKF_AD_QUEUE:
611 emit_skb_load16(queue_mapping, r_A); 603 emit_skb_load16(queue_mapping, r_A);
612 break; 604 break;
613 case BPF_S_ANC_HATYPE: 605 case BPF_ANC | SKF_AD_HATYPE:
614 emit_skb_loadptr(dev, r_A); 606 emit_skb_loadptr(dev, r_A);
615 emit_cmpi(r_A, 0); 607 emit_cmpi(r_A, 0);
616 emit_branch(BNE_PTR, cleanup_addr + 4); 608 emit_branch(BNE_PTR, cleanup_addr + 4);
617 emit_nop(); 609 emit_nop();
618 emit_load16(r_A, struct net_device, type, r_A); 610 emit_load16(r_A, struct net_device, type, r_A);
619 break; 611 break;
620 case BPF_S_ANC_RXHASH: 612 case BPF_ANC | SKF_AD_RXHASH:
621 emit_skb_load32(hash, r_A); 613 emit_skb_load32(hash, r_A);
622 break; 614 break;
623 case BPF_S_ANC_VLAN_TAG: 615 case BPF_ANC | SKF_AD_VLAN_TAG:
624 case BPF_S_ANC_VLAN_TAG_PRESENT: 616 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
625 emit_skb_load16(vlan_tci, r_A); 617 emit_skb_load16(vlan_tci, r_A);
626 if (filter[i].code == BPF_S_ANC_VLAN_TAG) { 618 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
627 emit_andi(r_A, VLAN_VID_MASK, r_A); 619 emit_andi(r_A, VLAN_VID_MASK, r_A);
628 } else { 620 } else {
629 emit_loadimm(VLAN_TAG_PRESENT, r_TMP); 621 emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
@@ -631,44 +623,44 @@ void bpf_jit_compile(struct sk_filter *fp)
631 } 623 }
632 break; 624 break;
633 625
634 case BPF_S_LD_IMM: 626 case BPF_LD | BPF_IMM:
635 emit_loadimm(K, r_A); 627 emit_loadimm(K, r_A);
636 break; 628 break;
637 case BPF_S_LDX_IMM: 629 case BPF_LDX | BPF_IMM:
638 emit_loadimm(K, r_X); 630 emit_loadimm(K, r_X);
639 break; 631 break;
640 case BPF_S_LD_MEM: 632 case BPF_LD | BPF_MEM:
641 emit_ldmem(K * 4, r_A); 633 emit_ldmem(K * 4, r_A);
642 break; 634 break;
643 case BPF_S_LDX_MEM: 635 case BPF_LDX | BPF_MEM:
644 emit_ldmem(K * 4, r_X); 636 emit_ldmem(K * 4, r_X);
645 break; 637 break;
646 case BPF_S_ST: 638 case BPF_ST:
647 emit_stmem(K * 4, r_A); 639 emit_stmem(K * 4, r_A);
648 break; 640 break;
649 case BPF_S_STX: 641 case BPF_STX:
650 emit_stmem(K * 4, r_X); 642 emit_stmem(K * 4, r_X);
651 break; 643 break;
652 644
653#define CHOOSE_LOAD_FUNC(K, func) \ 645#define CHOOSE_LOAD_FUNC(K, func) \
654 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) 646 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
655 647
656 case BPF_S_LD_W_ABS: 648 case BPF_LD | BPF_W | BPF_ABS:
657 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word); 649 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
658common_load: seen |= SEEN_DATAREF; 650common_load: seen |= SEEN_DATAREF;
659 emit_loadimm(K, r_OFF); 651 emit_loadimm(K, r_OFF);
660 emit_call(func); 652 emit_call(func);
661 break; 653 break;
662 case BPF_S_LD_H_ABS: 654 case BPF_LD | BPF_H | BPF_ABS:
663 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half); 655 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
664 goto common_load; 656 goto common_load;
665 case BPF_S_LD_B_ABS: 657 case BPF_LD | BPF_B | BPF_ABS:
666 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte); 658 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
667 goto common_load; 659 goto common_load;
668 case BPF_S_LDX_B_MSH: 660 case BPF_LDX | BPF_B | BPF_MSH:
669 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh); 661 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
670 goto common_load; 662 goto common_load;
671 case BPF_S_LD_W_IND: 663 case BPF_LD | BPF_W | BPF_IND:
672 func = bpf_jit_load_word; 664 func = bpf_jit_load_word;
673common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; 665common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
674 if (K) { 666 if (K) {
@@ -683,13 +675,13 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
683 } 675 }
684 emit_call(func); 676 emit_call(func);
685 break; 677 break;
686 case BPF_S_LD_H_IND: 678 case BPF_LD | BPF_H | BPF_IND:
687 func = bpf_jit_load_half; 679 func = bpf_jit_load_half;
688 goto common_load_ind; 680 goto common_load_ind;
689 case BPF_S_LD_B_IND: 681 case BPF_LD | BPF_B | BPF_IND:
690 func = bpf_jit_load_byte; 682 func = bpf_jit_load_byte;
691 goto common_load_ind; 683 goto common_load_ind;
692 case BPF_S_JMP_JA: 684 case BPF_JMP | BPF_JA:
693 emit_jump(addrs[i + K]); 685 emit_jump(addrs[i + K]);
694 emit_nop(); 686 emit_nop();
695 break; 687 break;
@@ -700,14 +692,14 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
700 f_op = FOP; \ 692 f_op = FOP; \
701 goto cond_branch 693 goto cond_branch
702 694
703 COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU); 695 COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
704 COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU); 696 COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
705 COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE); 697 COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
706 COND_SEL(BPF_S_JMP_JSET_K, BNE, BE); 698 COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
707 COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU); 699 COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
708 COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU); 700 COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
709 COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE); 701 COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
710 COND_SEL(BPF_S_JMP_JSET_X, BNE, BE); 702 COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);
711 703
712cond_branch: f_offset = addrs[i + filter[i].jf]; 704cond_branch: f_offset = addrs[i + filter[i].jf];
713 t_offset = addrs[i + filter[i].jt]; 705 t_offset = addrs[i + filter[i].jt];
@@ -719,20 +711,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
719 break; 711 break;
720 } 712 }
721 713
722 switch (filter[i].code) { 714 switch (code) {
723 case BPF_S_JMP_JGT_X: 715 case BPF_JMP | BPF_JGT | BPF_X:
724 case BPF_S_JMP_JGE_X: 716 case BPF_JMP | BPF_JGE | BPF_X:
725 case BPF_S_JMP_JEQ_X: 717 case BPF_JMP | BPF_JEQ | BPF_X:
726 seen |= SEEN_XREG; 718 seen |= SEEN_XREG;
727 emit_cmp(r_A, r_X); 719 emit_cmp(r_A, r_X);
728 break; 720 break;
729 case BPF_S_JMP_JSET_X: 721 case BPF_JMP | BPF_JSET | BPF_X:
730 seen |= SEEN_XREG; 722 seen |= SEEN_XREG;
731 emit_btst(r_A, r_X); 723 emit_btst(r_A, r_X);
732 break; 724 break;
733 case BPF_S_JMP_JEQ_K: 725 case BPF_JMP | BPF_JEQ | BPF_K:
734 case BPF_S_JMP_JGT_K: 726 case BPF_JMP | BPF_JGT | BPF_K:
735 case BPF_S_JMP_JGE_K: 727 case BPF_JMP | BPF_JGE | BPF_K:
736 if (is_simm13(K)) { 728 if (is_simm13(K)) {
737 emit_cmpi(r_A, K); 729 emit_cmpi(r_A, K);
738 } else { 730 } else {
@@ -740,7 +732,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
740 emit_cmp(r_A, r_TMP); 732 emit_cmp(r_A, r_TMP);
741 } 733 }
742 break; 734 break;
743 case BPF_S_JMP_JSET_K: 735 case BPF_JMP | BPF_JSET | BPF_K:
744 if (is_simm13(K)) { 736 if (is_simm13(K)) {
745 emit_btsti(r_A, K); 737 emit_btsti(r_A, K);
746 } else { 738 } else {