-rw-r--r--	include/linux/filter.h	255
-rw-r--r--	net/core/filter.c	196
2 files changed, 277 insertions(+), 174 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 49ef7a298c92..f0c2ad43b4af 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -76,56 +76,211 @@ enum {
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
-/* bpf_add|sub|...: a += x, bpf_mov: a = x */
-#define BPF_ALU64_REG(op, a, x) \
-	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0})
-#define BPF_ALU32_REG(op, a, x) \
-	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0})
-
-/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */
-#define BPF_ALU64_IMM(op, a, imm) \
-	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm})
-#define BPF_ALU32_IMM(op, a, imm) \
-	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm})
-
-/* R0 = *(uint *) (skb->data + off) */
-#define BPF_LD_ABS(size, off) \
-	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})
-
-/* R0 = *(uint *) (skb->data + x + off) */
-#define BPF_LD_IND(size, x, off) \
-	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off})
-
-/* a = *(uint *) (x + off) */
-#define BPF_LDX_MEM(sz, a, x, off) \
-	((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0})
-
-/* if (a 'op' x) goto pc+off */
-#define BPF_JMP_REG(op, a, x, off) \
-	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0})
-
-/* if (a 'op' imm) goto pc+off */
-#define BPF_JMP_IMM(op, a, imm, off) \
-	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm})
-
-#define BPF_EXIT_INSN() \
-	((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})
-
-static inline int size_to_bpf(int size)
-{
-	switch (size) {
-	case 1:
-		return BPF_B;
-	case 2:
-		return BPF_H;
-	case 4:
-		return BPF_W;
-	case 8:
-		return BPF_DW;
-	default:
-		return -EINVAL;
-	}
-}
+/* Helper macros for filter block array initializers. */
+
+/* ALU ops on registers, bpf_add|sub|...: A += X */
+
+#define BPF_ALU64_REG(OP, A, X)					\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
+#define BPF_ALU32_REG(OP, A, X)					\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
+/* ALU ops on immediates, bpf_add|sub|...: A += IMM */
+
+#define BPF_ALU64_IMM(OP, A, IMM)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
+		.a_reg = A,					\
+		.x_reg = 0,					\
+		.off   = 0,					\
+		.imm   = IMM })
+
+#define BPF_ALU32_IMM(OP, A, IMM)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
+		.a_reg = A,					\
+		.x_reg = 0,					\
+		.off   = 0,					\
+		.imm   = IMM })
+
+/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
+
+#define BPF_ENDIAN(TYPE, A, LEN)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
+		.a_reg = A,					\
+		.x_reg = 0,					\
+		.off   = 0,					\
+		.imm   = LEN })
+
+/* Short form of mov, A = X */
+
+#define BPF_MOV64_REG(A, X)					\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
+#define BPF_MOV32_REG(A, X)					\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
+/* Short form of mov, A = IMM */
+
+#define BPF_MOV64_IMM(A, IMM)					\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
+		.a_reg = A,					\
+		.x_reg = 0,					\
+		.off   = 0,					\
+		.imm   = IMM })
+
+#define BPF_MOV32_IMM(A, IMM)					\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
+		.a_reg = A,					\
+		.x_reg = 0,					\
+		.off   = 0,					\
+		.imm   = IMM })
+
+/* Short form of mov based on type, BPF_X: A = X, BPF_K: A = IMM */
+
+#define BPF_MOV64_RAW(TYPE, A, X, IMM)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = 0,					\
+		.imm   = IMM })
+
+#define BPF_MOV32_RAW(TYPE, A, X, IMM)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = 0,					\
+		.imm   = IMM })
+
+/* Direct packet access, R0 = *(uint *) (skb->data + OFF) */
+
+#define BPF_LD_ABS(SIZE, OFF)					\
+	((struct sock_filter_int) {				\
+		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
+		.a_reg = 0,					\
+		.x_reg = 0,					\
+		.off   = 0,					\
+		.imm   = OFF })
+
+/* Indirect packet access, R0 = *(uint *) (skb->data + X + OFF) */
+
+#define BPF_LD_IND(SIZE, X, OFF)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
+		.a_reg = 0,					\
+		.x_reg = X,					\
+		.off   = 0,					\
+		.imm   = OFF })
+
+/* Memory store, A = *(uint *) (X + OFF), and vice versa */
+
+#define BPF_LDX_MEM(SIZE, A, X, OFF)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
+#define BPF_STX_MEM(SIZE, A, X, OFF)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
+/* Conditional jumps against registers, if (A 'op' X) goto pc + OFF */
+
+#define BPF_JMP_REG(OP, A, X, OFF)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
+/* Conditional jumps against immediates, if (A 'op' IMM) goto pc + OFF */
+
+#define BPF_JMP_IMM(OP, A, IMM, OFF)				\
+	((struct sock_filter_int) {				\
+		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
+		.a_reg = A,					\
+		.x_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = IMM })
+
+/* Function call */
+
+#define BPF_EMIT_CALL(FUNC)					\
+	((struct sock_filter_int) {				\
+		.code  = BPF_JMP | BPF_CALL,			\
+		.a_reg = 0,					\
+		.x_reg = 0,					\
+		.off   = 0,					\
+		.imm   = ((FUNC) - __bpf_call_base) })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, A, X, OFF, IMM)			\
+	((struct sock_filter_int) {				\
+		.code  = CODE,					\
+		.a_reg = A,					\
+		.x_reg = X,					\
+		.off   = OFF,					\
+		.imm   = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN()						\
+	((struct sock_filter_int) {				\
+		.code  = BPF_JMP | BPF_EXIT,			\
+		.a_reg = 0,					\
+		.x_reg = 0,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
+#define bytes_to_bpf_size(bytes)				\
+({								\
+	int bpf_size = -EINVAL;					\
+								\
+	if (bytes == sizeof(u8))				\
+		bpf_size = BPF_B;				\
+	else if (bytes == sizeof(u16))				\
+		bpf_size = BPF_H;				\
+	else if (bytes == sizeof(u32))				\
+		bpf_size = BPF_W;				\
+	else if (bytes == sizeof(u64))				\
+		bpf_size = BPF_DW;				\
+								\
+	bpf_size;						\
+})
 
 /* Macro to invoke filter function. */
 #define SK_RUN_FILTER(filter, ctx)	(*filter->bpf_func)(ctx, filter->insnsi)
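A usage note on the block above: because each helper now expands to a designated initializer, whole programs can be written as plain array initializers, which is the pattern the net/core/filter.c conversion below relies on. The following is a minimal standalone sketch of that pattern, not kernel code: the struct layout and opcode values are re-declared locally (mirroring the uapi encodings) so it compiles on its own, and only R0 plus a handful of the macros are carried over.

	#include <stdio.h>

	/* Local mirror of struct sock_filter_int, for illustration only. */
	struct sock_filter_int {
		unsigned char	code;		/* opcode */
		unsigned char	a_reg:4;	/* dest register */
		unsigned char	x_reg:4;	/* source register */
		short		off;		/* signed offset */
		int		imm;		/* signed immediate constant */
	};

	/* Illustrative opcode values, mirroring the uapi encodings. */
	#define BPF_ALU64	0x07
	#define BPF_JMP		0x05
	#define BPF_K		0x00
	#define BPF_ADD		0x00
	#define BPF_MOV		0xb0
	#define BPF_JNE		0x50
	#define BPF_EXIT	0x90
	#define BPF_OP(code)	((code) & 0xf0)

	#define BPF_ALU64_IMM(OP, A, IMM)				\
		((struct sock_filter_int) {				\
			.code = BPF_ALU64 | BPF_OP(OP) | BPF_K,		\
			.a_reg = A, .imm = IMM })

	#define BPF_MOV64_IMM(A, IMM)					\
		((struct sock_filter_int) {				\
			.code = BPF_ALU64 | BPF_MOV | BPF_K,		\
			.a_reg = A, .imm = IMM })

	#define BPF_JMP_IMM(OP, A, IMM, OFF)				\
		((struct sock_filter_int) {				\
			.code = BPF_JMP | BPF_OP(OP) | BPF_K,		\
			.a_reg = A, .off = OFF, .imm = IMM })

	#define BPF_EXIT_INSN()						\
		((struct sock_filter_int) { .code = BPF_JMP | BPF_EXIT })

	enum { BPF_REG_0 };

	int main(void)
	{
		/* R0 = 1; R0 += 2; if (R0 != 3) goto +1; exit; exit */
		const struct sock_filter_int prog[] = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 1),
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		};
		unsigned long i;

		for (i = 0; i < sizeof(prog) / sizeof(prog[0]); i++)
			printf("%lu: code=0x%02x a=%u x=%u off=%d imm=%d\n", i,
			       (unsigned)prog[i].code, (unsigned)prog[i].a_reg,
			       (unsigned)prog[i].x_reg, (int)prog[i].off,
			       prog[i].imm);
		return 0;
	}

Fields omitted from a designated initializer default to zero, so the sketch abbreviates where the header above spells out every .off = 0 and .imm = 0 explicitly for documentation value.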
diff --git a/net/core/filter.c b/net/core/filter.c
index 328aaf6ff4d1..842f8393121d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -672,14 +672,10 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
 		/* A = *(u16 *) (ctx + offsetof(protocol)) */
-		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
-				    offsetof(struct sk_buff, protocol));
-		insn++;
-
+		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+				      offsetof(struct sk_buff, protocol));
 		/* A = ntohs(A) [emitting a nop or swap16] */
-		insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
-		insn->a_reg = BPF_REG_A;
-		insn->imm = 16;
+		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
 		break;
 
 	case SKF_AD_OFF + SKF_AD_PKTTYPE:
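The BPF_ENDIAN() helper used here packs the conversion width into the imm field, so one opcode covers the 16/32/64-bit cases; BPF_FROM_BE means "value is big-endian, convert to host order", which is a byte swap on little-endian hosts and a nop on big-endian ones. A little-endian-only userspace sketch of the 16-bit semantics (the helper name is invented for the illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of BPF_ALU | BPF_END | BPF_FROM_BE with imm = 16 on a
	 * little-endian host: A = be16_to_cpu(A), i.e. a 16-bit byte
	 * swap. On a big-endian host the interpreter nops instead. */
	static uint64_t end_from_be16(uint64_t a)
	{
		uint16_t v = (uint16_t)a;

		return (uint16_t)((v >> 8) | (v << 8));
	}

	int main(void)
	{
		/* skb->protocol holds 0x0800 (ETH_P_IP) in network order,
		 * which a little-endian 16-bit load reads as 0x0008. */
		printf("0x%04x\n", (unsigned)end_from_be16(0x0008));
		return 0;
	}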
@@ -688,37 +684,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 		if (insn->off < 0)
 			return false;
 		insn++;
-
 		*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
 		break;
 
 	case SKF_AD_OFF + SKF_AD_IFINDEX:
 	case SKF_AD_OFF + SKF_AD_HATYPE:
-		*insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)),
-				    BPF_REG_TMP, BPF_REG_CTX,
-				    offsetof(struct sk_buff, dev));
-		insn++;
-
-		/* if (tmp != 0) goto pc+1 */
-		*insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
-		insn++;
-
-		*insn = BPF_EXIT_INSN();
-		insn++;
-
 		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
 		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
-
-		insn->a_reg = BPF_REG_A;
-		insn->x_reg = BPF_REG_TMP;
-
-		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
-			insn->code = BPF_LDX | BPF_MEM | BPF_W;
-			insn->off = offsetof(struct net_device, ifindex);
-		} else {
-			insn->code = BPF_LDX | BPF_MEM | BPF_H;
-			insn->off = offsetof(struct net_device, type);
-		}
+		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
+
+		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+				      BPF_REG_TMP, BPF_REG_CTX,
+				      offsetof(struct sk_buff, dev));
+		/* if (tmp != 0) goto pc + 1 */
+		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
+		*insn++ = BPF_EXIT_INSN();
+		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
+			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
+					    offsetof(struct net_device, ifindex));
+		else
+			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
+					    offsetof(struct net_device, type));
 		break;
 
 	case SKF_AD_OFF + SKF_AD_MARK:
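The SKF_AD_IFINDEX/HATYPE case can now pick the load width for the skb->dev pointer at compile time: bytes_to_bpf_size() replaces the old size_to_bpf() function with a GNU statement expression that constant-folds, which is what makes the added BUILD_BUG_ON possible. A standalone userspace sketch of the helper, with the size encodings re-declared locally for illustration:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative copies of the BPF size encodings. */
	#define BPF_W	0x00
	#define BPF_H	0x08
	#define BPF_B	0x10
	#define BPF_DW	0x18

	/* Local copy of the new helper (GNU statement expression, as in
	 * the kernel), with u8/u16/u32/u64 swapped for stdint types. */
	#define bytes_to_bpf_size(bytes)		\
	({						\
		int bpf_size = -EINVAL;			\
							\
		if (bytes == sizeof(uint8_t))		\
			bpf_size = BPF_B;		\
		else if (bytes == sizeof(uint16_t))	\
			bpf_size = BPF_H;		\
		else if (bytes == sizeof(uint32_t))	\
			bpf_size = BPF_W;		\
		else if (bytes == sizeof(uint64_t))	\
			bpf_size = BPF_DW;		\
							\
		bpf_size;				\
	})

	int main(void)
	{
		/* A pointer-sized skb->dev maps to BPF_W (32-bit) or
		 * BPF_DW (64-bit); an odd size falls through to -EINVAL,
		 * which the new BUILD_BUG_ON catches at compile time. */
		printf("sizeof(void *): %d\n", bytes_to_bpf_size(sizeof(void *)));
		printf("3 bytes:        %d\n", bytes_to_bpf_size(3));
		return 0;
	}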
@@ -745,22 +731,17 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
 	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-
-		/* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
-		*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
-				    offsetof(struct sk_buff, vlan_tci));
-		insn++;
-
 		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
+		/* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+				      offsetof(struct sk_buff, vlan_tci));
 		if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
 			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
 					      ~VLAN_TAG_PRESENT);
 		} else {
 			/* A >>= 12 */
-			*insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
-			insn++;
-
+			*insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
 			/* A &= 1 */
 			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
 		}
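Both VLAN cases now share a single vlan_tci load, and only the post-processing differs: SKF_AD_VLAN_TAG masks the present bit out, while SKF_AD_VLAN_TAG_PRESENT extracts it, which works because VLAN_TAG_PRESENT is exactly bit 12, the invariant the BUILD_BUG_ON pins down. A quick standalone check of the arithmetic:

	#include <stdio.h>

	#define VLAN_TAG_PRESENT 0x1000	/* bit 12 of vlan_tci */

	int main(void)
	{
		int vlan_tci = VLAN_TAG_PRESENT | 42;	/* present, VID 42 */

		/* SKF_AD_VLAN_TAG:         A &= ~VLAN_TAG_PRESENT */
		printf("tag     = %d\n", vlan_tci & ~VLAN_TAG_PRESENT);
		/* SKF_AD_VLAN_TAG_PRESENT: A >>= 12; A &= 1 */
		printf("present = %d\n", (vlan_tci >> 12) & 1);
		return 0;
	}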
@@ -772,34 +753,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_CPU:
 	case SKF_AD_OFF + SKF_AD_RANDOM:
 		/* arg1 = ctx */
-		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX);
-		insn++;
-
+		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
 		/* arg2 = A */
-		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A);
-		insn++;
-
+		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
 		/* arg3 = X */
-		*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X);
-		insn++;
-
+		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
 		/* Emit call(ctx, arg2=A, arg3=X) */
-		insn->code = BPF_JMP | BPF_CALL;
 		switch (fp->k) {
 		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
-			insn->imm = __skb_get_pay_offset - __bpf_call_base;
+			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
 			break;
 		case SKF_AD_OFF + SKF_AD_NLATTR:
-			insn->imm = __skb_get_nlattr - __bpf_call_base;
+			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
 			break;
 		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
-			insn->imm = __skb_get_nlattr_nest - __bpf_call_base;
+			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
 			break;
 		case SKF_AD_OFF + SKF_AD_CPU:
-			insn->imm = __get_raw_cpu_id - __bpf_call_base;
+			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
 			break;
 		case SKF_AD_OFF + SKF_AD_RANDOM:
-			insn->imm = __get_random_u32 - __bpf_call_base;
+			*insn = BPF_EMIT_CALL(__get_random_u32);
 			break;
 		}
 		break;
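BPF_EMIT_CALL() hides the calling convention that was previously open-coded here: the target is stored not as an absolute pointer but as a 32-bit delta against __bpf_call_base, which the interpreter adds back before the indirect call. A userspace sketch of that round trip (all names invented; it assumes, as the kernel does, that the target lies within a signed 32-bit offset of the base):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for __bpf_call_base: any fixed anchor function works
	 * as long as encode and decode agree on it. */
	static unsigned int call_base(void)
	{
		return 0;
	}

	static unsigned int helper(void)
	{
		return 0xbb;
	}

	int main(void)
	{
		/* Encode: imm = FUNC - __bpf_call_base, truncated to s32. */
		int32_t imm = (int32_t)((intptr_t)helper - (intptr_t)call_base);

		/* Decode: call *(base + imm), as the interpreter does. */
		unsigned int (*fn)(void) =
			(unsigned int (*)(void))((intptr_t)call_base + imm);

		printf("0x%x\n", fn());	/* prints 0xbb */
		return 0;
	}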
@@ -871,9 +845,8 @@ do_pass:
 	new_insn = new_prog;
 	fp = prog;
 
-	if (new_insn) {
-		*new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1);
-	}
+	if (new_insn)
+		*new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
 	new_insn++;
 
 	for (i = 0; i < len; fp++, i++) {
@@ -921,17 +894,16 @@ do_pass:
 			    convert_bpf_extensions(fp, &insn))
 				break;
 
-			insn->code = fp->code;
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_X;
-			insn->imm = fp->k;
+			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
 			break;
 
-		/* Jump opcodes map as-is, but offsets need adjustment. */
-		case BPF_JMP | BPF_JA:
-			target = i + fp->k + 1;
-			insn->code = fp->code;
-#define EMIT_JMP \
+		/* Jump transformation cannot use BPF block macros
+		 * everywhere as offset calculation and target updates
+		 * require a bit more work than the rest, i.e. jump
+		 * opcodes map as-is, but offsets need adjustment.
+		 */
+
+#define BPF_EMIT_JMP							\
 	do {								\
 		if (target >= len || target < 0)			\
 			goto err;					\
@@ -940,7 +912,10 @@ do_pass:
 		insn->off -= insn - tmp_insns;				\
 	} while (0)
 
-			EMIT_JMP;
+		case BPF_JMP | BPF_JA:
+			target = i + fp->k + 1;
+			insn->code = fp->code;
+			BPF_EMIT_JMP;
 			break;
 
 		case BPF_JMP | BPF_JEQ | BPF_K:
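For orientation, the line between these two hunks (unchanged by the patch) is where BPF_EMIT_JMP derives the offset: insn->off = addrs[target] - addrs[i] - 1, where addrs[] maps each classic instruction index to the index of its first emitted instruction, and the "insn->off -= insn - tmp_insns" line shown above then corrects for the jump not being the first instruction of its own expansion block. A worked example of the arithmetic, with invented values:

	#include <stdio.h>

	/* Worked example of the BPF_EMIT_JMP arithmetic, outside the
	 * kernel. addrs[j] is the index, in the translated program, of
	 * the first insn emitted for classic insn j. Suppose classic
	 * insn i = 3 is a conditional jump whose block expanded to three
	 * insns (imm load, Jxx, JA), classic insn 4 also expanded to
	 * three, and the jump targets classic insn 5. */
	int main(void)
	{
		int addrs[] = { 0, 1, 2, 3, 6, 9 };	/* classic -> new index */
		int i = 3, target = 5;
		int pos_in_block = 1;	/* insn - tmp_insns for the Jxx */
		int off;

		off = addrs[target] - addrs[i] - 1;	/* 9 - 3 - 1 = 5 */
		off -= pos_in_block;			/* 5 - 1 = 4 */

		/* The new pc after the jump is addrs[i] + pos_in_block + 1
		 * = 5, and 5 + 4 = 9 = addrs[target], as required. */
		printf("off = %d\n", off);
		return 0;
	}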
@@ -956,10 +931,7 @@ do_pass:
 				 * immediate into tmp register and use it
 				 * in compare insn.
 				 */
-				insn->code = BPF_ALU | BPF_MOV | BPF_K;
-				insn->a_reg = BPF_REG_TMP;
-				insn->imm = fp->k;
-				insn++;
+				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
 
 				insn->a_reg = BPF_REG_A;
 				insn->x_reg = BPF_REG_TMP;
@@ -975,7 +947,7 @@ do_pass:
 			if (fp->jf == 0) {
 				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
 				target = i + fp->jt + 1;
-				EMIT_JMP;
+				BPF_EMIT_JMP;
 				break;
 			}
 
@@ -983,116 +955,94 @@ do_pass:
 			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
 				insn->code = BPF_JMP | BPF_JNE | bpf_src;
 				target = i + fp->jf + 1;
-				EMIT_JMP;
+				BPF_EMIT_JMP;
 				break;
 			}
 
 			/* Other jumps are mapped into two insns: Jxx and JA. */
 			target = i + fp->jt + 1;
 			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
-			EMIT_JMP;
+			BPF_EMIT_JMP;
 			insn++;
 
 			insn->code = BPF_JMP | BPF_JA;
 			target = i + fp->jf + 1;
-			EMIT_JMP;
+			BPF_EMIT_JMP;
 			break;
 
 		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
 		case BPF_LDX | BPF_MSH | BPF_B:
 			/* tmp = A */
-			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
-			insn++;
-
+			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
 			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
-			*insn = BPF_LD_ABS(BPF_B, fp->k);
-			insn++;
-
+			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
 			/* A &= 0xf */
-			*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
-			insn++;
-
+			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
 			/* A <<= 2 */
-			*insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
-			insn++;
-
+			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
 			/* X = A */
-			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
-			insn++;
-
+			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
 			/* A = tmp */
-			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP);
+			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
 			break;
 
 		/* RET_K, RET_A are remapped into 2 insns. */
 		case BPF_RET | BPF_A:
 		case BPF_RET | BPF_K:
-			insn->code = BPF_ALU | BPF_MOV |
-				     (BPF_RVAL(fp->code) == BPF_K ?
-				      BPF_K : BPF_X);
-			insn->a_reg = 0;
-			insn->x_reg = BPF_REG_A;
-			insn->imm = fp->k;
-			insn++;
-
+			*insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
+						BPF_K : BPF_X, BPF_REG_0,
+						BPF_REG_A, fp->k);
 			*insn = BPF_EXIT_INSN();
 			break;
 
 		/* Store to stack. */
 		case BPF_ST:
 		case BPF_STX:
-			insn->code = BPF_STX | BPF_MEM | BPF_W;
-			insn->a_reg = BPF_REG_FP;
-			insn->x_reg = fp->code == BPF_ST ?
-				      BPF_REG_A : BPF_REG_X;
-			insn->off = -(BPF_MEMWORDS - fp->k) * 4;
+			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
+					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
 			break;
 
 		/* Load from stack. */
 		case BPF_LD | BPF_MEM:
 		case BPF_LDX | BPF_MEM:
-			insn->code = BPF_LDX | BPF_MEM | BPF_W;
-			insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-				      BPF_REG_A : BPF_REG_X;
-			insn->x_reg = BPF_REG_FP;
-			insn->off = -(BPF_MEMWORDS - fp->k) * 4;
+			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
+					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
+					    -(BPF_MEMWORDS - fp->k) * 4);
 			break;
 
 		/* A = K or X = K */
 		case BPF_LD | BPF_IMM:
 		case BPF_LDX | BPF_IMM:
-			insn->code = BPF_ALU | BPF_MOV | BPF_K;
-			insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-				      BPF_REG_A : BPF_REG_X;
-			insn->imm = fp->k;
+			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
 			break;
 
 		/* X = A */
 		case BPF_MISC | BPF_TAX:
-			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
+			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
 			break;
 
 		/* A = X */
 		case BPF_MISC | BPF_TXA:
-			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X);
+			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
 			break;
 
 		/* A = skb->len or X = skb->len */
 		case BPF_LD | BPF_W | BPF_LEN:
 		case BPF_LDX | BPF_W | BPF_LEN:
-			insn->code = BPF_LDX | BPF_MEM | BPF_W;
-			insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-				      BPF_REG_A : BPF_REG_X;
-			insn->x_reg = BPF_REG_CTX;
-			insn->off = offsetof(struct sk_buff, len);
+			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
 			break;
 
-		/* access seccomp_data fields */
+		/* Access seccomp_data fields. */
 		case BPF_LDX | BPF_ABS | BPF_W:
 			/* A = *(u32 *) (ctx + K) */
 			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
 			break;
 
+		/* Unknown instruction. */
 		default:
 			goto err;
 		}
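The stack store/load cases above map classic BPF's M[] scratch memory (BPF_MEMWORDS words of 4 bytes) onto the bottom of the new frame: slot k lives at FP - (BPF_MEMWORDS - k) * 4, so M[0] sits lowest and the last slot ends directly below the frame pointer. A quick standalone check of that arithmetic, taking BPF_MEMWORDS = 16 as in classic BPF:

	#include <stdio.h>

	#define BPF_MEMWORDS 16	/* classic BPF scratch words, 4 bytes each */

	int main(void)
	{
		int k;

		/* FP-relative byte offset of scratch slot k, as emitted
		 * for BPF_ST/BPF_STX and BPF_LD|BPF_MEM above. */
		for (k = 0; k < BPF_MEMWORDS; k++)
			printf("M[%2d] -> FP%d\n", k, -(BPF_MEMWORDS - k) * 4);
		/* M[0] -> FP-64 ... M[15] -> FP-4: 64 bytes in total,
		 * well inside MAX_BPF_STACK (512). */
		return 0;
	}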
@@ -1101,7 +1051,6 @@ do_pass:
 		if (new_prog)
 			memcpy(new_insn, tmp_insns,
 			       sizeof(*insn) * (insn - tmp_insns));
-
 		new_insn += insn - tmp_insns;
 	}
 
@@ -1116,7 +1065,6 @@ do_pass:
 	new_flen = new_insn - new_prog;
 	if (pass > 2)
 		goto err;
-
 	goto do_pass;
 }
 