author     Alexei Starovoitov <ast@plumgrid.com>    2014-05-08 17:10:51 -0400
committer  David S. Miller <davem@davemloft.net>    2014-05-12 00:23:55 -0400
commit     9739eef13c926645fbf88bcb77e66442fa75d688
tree       d6934c95e5eae0027f755caf5ea7c1ab2aede94e
parent     05ab2dae650e09add1c5295392b5516704c03a4b
net: filter: make BPF conversion more readable
Introduce BPF helper macros to define instructions
(similar to the old BPF_STMT/BPF_JUMP macros).
Use them while converting classic BPF to internal BPF,
and later in the BPF testsuite.
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/filter.h |  51
-rw-r--r--  net/core/filter.c      | 142
2 files changed, 101 insertions(+), 92 deletions(-)
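As a quick illustration (not part of this patch), here is a hypothetical sketch of how the new helpers might be used to hand-assemble a short internal-BPF sequence, much as the testsuite mentioned in the commit message would later do. The array name sample_prog is made up; the macros, the BPF_REG_* names and the struct sock_filter_int layout are taken from the patch below.

/* Hypothetical example only: a tiny internal-BPF program built with the
 * helper macros introduced by this patch (file-scope compound-literal
 * initializers rely on the GNU C extension the kernel already builds with).
 */
static const struct sock_filter_int sample_prog[] = {
	/* A = 42 */
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_A, 42),
	/* if (A != 42) skip the next instruction */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_A, 42, 1),
	/* A ^= X */
	BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X),
	/* exit */
	BPF_EXIT_INSN(),
};

Compare this with the open-coded insn->code / insn->a_reg / insn->x_reg assignments that the patch removes from net/core/filter.c below.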
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ed1efab10b8f..4457b383961c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -79,6 +79,57 @@ enum {
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
+/* bpf_add|sub|...: a += x, bpf_mov: a = x */
+#define BPF_ALU64_REG(op, a, x) \
+	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0})
+#define BPF_ALU32_REG(op, a, x) \
+	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0})
+
+/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */
+#define BPF_ALU64_IMM(op, a, imm) \
+	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+#define BPF_ALU32_IMM(op, a, imm) \
+	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+
+/* R0 = *(uint *) (skb->data + off) */
+#define BPF_LD_ABS(size, off) \
+	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})
+
+/* R0 = *(uint *) (skb->data + x + off) */
+#define BPF_LD_IND(size, x, off) \
+	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off})
+
+/* a = *(uint *) (x + off) */
+#define BPF_LDX_MEM(sz, a, x, off) \
+	((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0})
+
+/* if (a 'op' x) goto pc+off */
+#define BPF_JMP_REG(op, a, x, off) \
+	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0})
+
+/* if (a 'op' imm) goto pc+off */
+#define BPF_JMP_IMM(op, a, imm, off) \
+	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm})
+
+#define BPF_EXIT_INSN() \
+	((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})
+
+static inline int size_to_bpf(int size)
+{
+	switch (size) {
+	case 1:
+		return BPF_B;
+	case 2:
+		return BPF_H;
+	case 4:
+		return BPF_W;
+	case 8:
+		return BPF_DW;
+	default:
+		return -EINVAL;
+	}
+}
+
 /* Macro to invoke filter function. */
 #define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
 
diff --git a/net/core/filter.c b/net/core/filter.c
index eb020a7d6f55..9aaa05ad8fe3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -668,10 +668,9 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_PROTOCOL:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
-		insn->code = BPF_LDX | BPF_MEM | BPF_H;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, protocol);
+		/* A = *(u16 *) (ctx + offsetof(protocol)) */
+		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, protocol));
 		insn++;
 
 		/* A = ntohs(A) [emitting a nop or swap16] */
@@ -681,37 +680,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 		break;
 
 	case SKF_AD_OFF + SKF_AD_PKTTYPE:
-		insn->code = BPF_LDX | BPF_MEM | BPF_B;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = pkt_type_offset();
+		*insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
+				    pkt_type_offset());
 		if (insn->off < 0)
 			return false;
 		insn++;
 
-		insn->code = BPF_ALU | BPF_AND | BPF_K;
-		insn->a_reg = BPF_REG_A;
-		insn->imm = PKT_TYPE_MAX;
+		*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
 		break;
 
 	case SKF_AD_OFF + SKF_AD_IFINDEX:
 	case SKF_AD_OFF + SKF_AD_HATYPE:
-		if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
-			insn->code = BPF_LDX | BPF_MEM | BPF_DW;
-		else
-			insn->code = BPF_LDX | BPF_MEM | BPF_W;
-		insn->a_reg = BPF_REG_TMP;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, dev);
+		*insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)),
+				    BPF_REG_TMP, BPF_REG_CTX,
+				    offsetof(struct sk_buff, dev));
 		insn++;
 
-		insn->code = BPF_JMP | BPF_JNE | BPF_K;
-		insn->a_reg = BPF_REG_TMP;
-		insn->imm = 0;
-		insn->off = 1;
+		/* if (tmp != 0) goto pc+1 */
+		*insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
 		insn++;
 
-		insn->code = BPF_JMP | BPF_EXIT;
+		*insn = BPF_EXIT_INSN();
 		insn++;
 
 		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
@@ -732,55 +721,45 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_MARK:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 
-		insn->code = BPF_LDX | BPF_MEM | BPF_W;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, mark);
+		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, mark));
 		break;
 
 	case SKF_AD_OFF + SKF_AD_RXHASH:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 
-		insn->code = BPF_LDX | BPF_MEM | BPF_W;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, hash);
+		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, hash));
 		break;
 
 	case SKF_AD_OFF + SKF_AD_QUEUE:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
 
-		insn->code = BPF_LDX | BPF_MEM | BPF_H;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, queue_mapping);
+		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, queue_mapping));
 		break;
 
 	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
 	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 
-		insn->code = BPF_LDX | BPF_MEM | BPF_H;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_CTX;
-		insn->off = offsetof(struct sk_buff, vlan_tci);
+		/* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+				    offsetof(struct sk_buff, vlan_tci));
 		insn++;
 
 		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
 		if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
-			insn->code = BPF_ALU | BPF_AND | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = ~VLAN_TAG_PRESENT;
+			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
+					      ~VLAN_TAG_PRESENT);
 		} else {
-			insn->code = BPF_ALU | BPF_RSH | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = 12;
+			/* A >>= 12 */
+			*insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
 			insn++;
 
-			insn->code = BPF_ALU | BPF_AND | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = 1;
+			/* A &= 1 */
+			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
 		}
 		break;
 
@@ -790,21 +769,15 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_CPU:
 	case SKF_AD_OFF + SKF_AD_RANDOM:
 		/* arg1 = ctx */
-		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-		insn->a_reg = BPF_REG_ARG1;
-		insn->x_reg = BPF_REG_CTX;
+		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX);
 		insn++;
 
 		/* arg2 = A */
-		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-		insn->a_reg = BPF_REG_ARG2;
-		insn->x_reg = BPF_REG_A;
+		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A);
 		insn++;
 
 		/* arg3 = X */
-		insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-		insn->a_reg = BPF_REG_ARG3;
-		insn->x_reg = BPF_REG_X;
+		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X);
 		insn++;
 
 		/* Emit call(ctx, arg2=A, arg3=X) */
@@ -829,9 +802,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 		break;
 
 	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
-		insn->code = BPF_ALU | BPF_XOR | BPF_X;
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_X;
+		/* A ^= X */
+		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
 		break;
 
 	default:
@@ -897,9 +869,7 @@ do_pass:
 	fp = prog;
 
 	if (new_insn) {
-		new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-		new_insn->a_reg = BPF_REG_CTX;
-		new_insn->x_reg = BPF_REG_ARG1;
+		*new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1);
 	}
 	new_insn++;
 
@@ -1027,34 +997,28 @@ do_pass:
 
 		/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
 		case BPF_LDX | BPF_MSH | BPF_B:
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_TMP;
-			insn->x_reg = BPF_REG_A;
+			/* tmp = A */
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
 			insn++;
 
-			insn->code = BPF_LD | BPF_ABS | BPF_B;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = fp->k;
+			/* A = R0 = *(u8 *) (skb->data + K) */
+			*insn = BPF_LD_ABS(BPF_B, fp->k);
 			insn++;
 
-			insn->code = BPF_ALU | BPF_AND | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = 0xf;
+			/* A &= 0xf */
+			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
 			insn++;
 
-			insn->code = BPF_ALU | BPF_LSH | BPF_K;
-			insn->a_reg = BPF_REG_A;
-			insn->imm = 2;
+			/* A <<= 2 */
+			*insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
 			insn++;
 
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_X;
-			insn->x_reg = BPF_REG_A;
+			/* X = A */
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
 			insn++;
 
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_TMP;
+			/* A = tmp */
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP);
 			break;
 
 		/* RET_K, RET_A are remaped into 2 insns. */
@@ -1068,7 +1032,7 @@ do_pass:
 			insn->imm = fp->k;
 			insn++;
 
-			insn->code = BPF_JMP | BPF_EXIT;
+			*insn = BPF_EXIT_INSN();
 			break;
 
 		/* Store to stack. */
@@ -1102,16 +1066,12 @@ do_pass:
 
 		/* X = A */
 		case BPF_MISC | BPF_TAX:
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_X;
-			insn->x_reg = BPF_REG_A;
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
 			break;
 
 		/* A = X */
 		case BPF_MISC | BPF_TXA:
-			insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_X;
+			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X);
 			break;
 
 		/* A = skb->len or X = skb->len */
@@ -1126,10 +1086,8 @@ do_pass:
 
 		/* access seccomp_data fields */
 		case BPF_LDX | BPF_ABS | BPF_W:
-			insn->code = BPF_LDX | BPF_MEM | BPF_W;
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_CTX;
-			insn->off = fp->k;
+			/* A = *(u32 *) (ctx + K) */
+			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
 			break;
 
 		default: