author     David S. Miller <davem@davemloft.net>  2014-05-23 16:48:50 -0400
committer  David S. Miller <davem@davemloft.net>  2014-05-23 16:48:50 -0400
commit     be65de7174123e02477bd488db1a657caf0f9947 (patch)
tree       ef3101980fef5d2f580026d96c78b40fe007b43b /net
parent     76fcee2438b90e473b67ea52b9b9e0648aa501f8 (diff)
parent     2e8a83c52ffa41816a979ab0e3bcadf4b0d9e8a1 (diff)
Merge branch 'filter-next'
Daniel Borkmann says:

====================
BPF updates

These were still in my queue. Please see individual patches for details.

I have rebased these on top of current net-next with Andrew's gcc union
fixup [1] applied to avoid dealing with an unnecessary merge conflict.

  [1] http://patchwork.ozlabs.org/patch/351577/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/core/filter.c          | 195
-rw-r--r--  net/core/ptp_classifier.c  |   2
-rw-r--r--  net/netfilter/xt_bpf.c     |   5
-rw-r--r--  net/sched/cls_bpf.c        |   4
4 files changed, 106 insertions, 100 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 7067cb240d3e..2c2d35d9d101 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -160,95 +160,100 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins
         static const void *jumptable[256] = {
                 [0 ... 255] = &&default_label,
                 /* Now overwrite non-defaults ... */
-#define DL(A, B, C)     [BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
-                DL(ALU, ADD, X),
-                DL(ALU, ADD, K),
-                DL(ALU, SUB, X),
-                DL(ALU, SUB, K),
-                DL(ALU, AND, X),
-                DL(ALU, AND, K),
-                DL(ALU, OR, X),
-                DL(ALU, OR, K),
-                DL(ALU, LSH, X),
-                DL(ALU, LSH, K),
-                DL(ALU, RSH, X),
-                DL(ALU, RSH, K),
-                DL(ALU, XOR, X),
-                DL(ALU, XOR, K),
-                DL(ALU, MUL, X),
-                DL(ALU, MUL, K),
-                DL(ALU, MOV, X),
-                DL(ALU, MOV, K),
-                DL(ALU, DIV, X),
-                DL(ALU, DIV, K),
-                DL(ALU, MOD, X),
-                DL(ALU, MOD, K),
-                DL(ALU, NEG, 0),
-                DL(ALU, END, TO_BE),
-                DL(ALU, END, TO_LE),
-                DL(ALU64, ADD, X),
-                DL(ALU64, ADD, K),
-                DL(ALU64, SUB, X),
-                DL(ALU64, SUB, K),
-                DL(ALU64, AND, X),
-                DL(ALU64, AND, K),
-                DL(ALU64, OR, X),
-                DL(ALU64, OR, K),
-                DL(ALU64, LSH, X),
-                DL(ALU64, LSH, K),
-                DL(ALU64, RSH, X),
-                DL(ALU64, RSH, K),
-                DL(ALU64, XOR, X),
-                DL(ALU64, XOR, K),
-                DL(ALU64, MUL, X),
-                DL(ALU64, MUL, K),
-                DL(ALU64, MOV, X),
-                DL(ALU64, MOV, K),
-                DL(ALU64, ARSH, X),
-                DL(ALU64, ARSH, K),
-                DL(ALU64, DIV, X),
-                DL(ALU64, DIV, K),
-                DL(ALU64, MOD, X),
-                DL(ALU64, MOD, K),
-                DL(ALU64, NEG, 0),
-                DL(JMP, CALL, 0),
-                DL(JMP, JA, 0),
-                DL(JMP, JEQ, X),
-                DL(JMP, JEQ, K),
-                DL(JMP, JNE, X),
-                DL(JMP, JNE, K),
-                DL(JMP, JGT, X),
-                DL(JMP, JGT, K),
-                DL(JMP, JGE, X),
-                DL(JMP, JGE, K),
-                DL(JMP, JSGT, X),
-                DL(JMP, JSGT, K),
-                DL(JMP, JSGE, X),
-                DL(JMP, JSGE, K),
-                DL(JMP, JSET, X),
-                DL(JMP, JSET, K),
-                DL(JMP, EXIT, 0),
-                DL(STX, MEM, B),
-                DL(STX, MEM, H),
-                DL(STX, MEM, W),
-                DL(STX, MEM, DW),
-                DL(STX, XADD, W),
-                DL(STX, XADD, DW),
-                DL(ST, MEM, B),
-                DL(ST, MEM, H),
-                DL(ST, MEM, W),
-                DL(ST, MEM, DW),
-                DL(LDX, MEM, B),
-                DL(LDX, MEM, H),
-                DL(LDX, MEM, W),
-                DL(LDX, MEM, DW),
-                DL(LD, ABS, W),
-                DL(LD, ABS, H),
-                DL(LD, ABS, B),
-                DL(LD, IND, W),
-                DL(LD, IND, H),
-                DL(LD, IND, B),
-#undef DL
+                /* 32 bit ALU operations */
+                [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
+                [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
+                [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
+                [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
+                [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
+                [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
+                [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
+                [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
+                [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
+                [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
+                [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
+                [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
+                [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
+                [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
+                [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
+                [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
+                [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
+                [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
+                [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
+                [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
+                [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
+                [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
+                [BPF_ALU | BPF_NEG] = &&ALU_NEG,
+                [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
+                [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
+                /* 64 bit ALU operations */
+                [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
+                [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
+                [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
+                [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
+                [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
+                [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
+                [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
+                [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
+                [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
+                [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
+                [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
+                [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
+                [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
+                [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
+                [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
+                [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
+                [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
+                [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
+                [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
+                [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
+                [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
+                [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
+                [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
+                [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
+                [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
+                /* Call instruction */
+                [BPF_JMP | BPF_CALL] = &&JMP_CALL,
+                /* Jumps */
+                [BPF_JMP | BPF_JA] = &&JMP_JA,
+                [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
+                [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
+                [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
+                [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
+                [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
+                [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+                [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
+                [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+                [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
+                [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+                [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
+                [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+                [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
+                [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
+                /* Program return */
+                [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
+                /* Store instructions */
+                [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
+                [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
+                [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
+                [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
+                [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
+                [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
+                [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
+                [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
+                [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
+                [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
+                /* Load instructions */
+                [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
+                [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
+                [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
+                [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
+                [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
+                [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
+                [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
+                [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
+                [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
+                [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
         };
         void *ptr;
         int off;
@@ -290,10 +295,10 @@ select_insn:
         ALU(XOR, ^)
         ALU(MUL, *)
 #undef ALU
-        ALU_NEG_0:
+        ALU_NEG:
                 A = (u32) -A;
                 CONT;
-        ALU64_NEG_0:
+        ALU64_NEG:
                 A = -A;
                 CONT;
         ALU_MOV_X:
@@ -382,7 +387,7 @@ select_insn:
                 CONT;
 
         /* CALL */
-        JMP_CALL_0:
+        JMP_CALL:
                 /* Function call scratches BPF_R1-BPF_R5 registers,
                  * preserves BPF_R6-BPF_R9, and stores return value
                  * into BPF_R0.
@@ -392,7 +397,7 @@ select_insn:
                 CONT;
 
         /* JMP */
-        JMP_JA_0:
+        JMP_JA:
                 insn += insn->off;
                 CONT;
         JMP_JEQ_X:
@@ -479,7 +484,7 @@ select_insn:
                         CONT_JMP;
                 }
                 CONT;
-        JMP_EXIT_0:
+        JMP_EXIT:
                 return BPF_R0;
 
         /* STX and ST and LDX*/
@@ -1580,7 +1585,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
  *      a negative errno code is returned. On success the return is zero.
  */
 int sk_unattached_filter_create(struct sk_filter **pfp,
-                                struct sock_fprog *fprog)
+                                struct sock_fprog_kern *fprog)
 {
         unsigned int fsize = sk_filter_proglen(fprog);
         struct sk_filter *fp;
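The jumptable rewritten above drives a threaded interpreter: each BPF opcode indexes an array of GCC computed-goto label addresses, every unhandled opcode is mapped to a default label, and dispatch is a single indirect goto rather than a switch. The hunks in this file only drop the DL() macro and the "_0" label suffixes; the mechanism itself is unchanged. As a rough standalone sketch of that pattern (not kernel code: the opcodes, the struct insn layout, and the CONT macro here are simplified stand-ins invented for illustration):

#include <stdio.h>

enum { OP_ADD = 0x01, OP_MUL = 0x02, OP_EXIT = 0x03 };

struct insn {
        unsigned char code;
        int imm;
};

static int run(const struct insn *insn)
{
        /* Opcode -> handler label; everything else -> default_label. */
        static const void *jumptable[256] = {
                [0 ... 255] = &&default_label,
                [OP_ADD]    = &&ADD,
                [OP_MUL]    = &&MUL,
                [OP_EXIT]   = &&EXIT,
        };
        int acc = 0;

#define CONT ({ insn++; goto select_insn; })
select_insn:
        goto *jumptable[insn->code];

        ADD:    acc += insn->imm;
                CONT;
        MUL:    acc *= insn->imm;
                CONT;
        EXIT:   return acc;
        default_label:
                return -1;      /* unknown opcode */
#undef CONT
}

int main(void)
{
        const struct insn prog[] = {
                { OP_ADD, 2 }, { OP_MUL, 21 }, { OP_EXIT, 0 },
        };

        printf("%d\n", run(prog));      /* prints 42 */
        return 0;
}

The range designator [0 ... 255] and the &&label address-of-label operator are the same GNU C extensions __sk_run_filter() relies on, so this sketch needs gcc (or a compatible compiler) to build.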
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index 37d86157b76e..d3027a73fd4b 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -133,7 +133,7 @@ void __init ptp_classifier_init(void)
                { 0x16, 0, 0, 0x00000000 },
                { 0x06, 0, 0, 0x00000000 },
        };
-       struct sock_fprog ptp_prog = {
+       struct sock_fprog_kern ptp_prog = {
                .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
        };
 
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 12d4da8e6c77..bbffdbdaf603 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -23,10 +23,11 @@ MODULE_ALIAS("ip6t_bpf");
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 {
        struct xt_bpf_info *info = par->matchinfo;
-       struct sock_fprog program;
+       struct sock_fprog_kern program;
 
        program.len = info->bpf_program_num_elem;
-       program.filter = (struct sock_filter __user *) info->bpf_program;
+       program.filter = info->bpf_program;
+
        if (sk_unattached_filter_create(&info->filter, &program)) {
                pr_info("bpf: check failed: parse error\n");
                return -EINVAL;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 16186965af97..13f64df2c710 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -160,7 +160,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 {
        struct sock_filter *bpf_ops, *bpf_old;
        struct tcf_exts exts;
-       struct sock_fprog tmp;
+       struct sock_fprog_kern tmp;
        struct sk_filter *fp, *fp_old;
        u16 bpf_size, bpf_len;
        u32 classid;
@@ -191,7 +191,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
 
        tmp.len = bpf_len;
-       tmp.filter = (struct sock_filter __user *) bpf_ops;
+       tmp.filter = bpf_ops;
 
        ret = sk_unattached_filter_create(&fp, &tmp);
        if (ret)
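The ptp_classifier, xt_bpf, and cls_bpf hunks all follow from the same API change: sk_unattached_filter_create() now takes a struct sock_fprog_kern, whose .filter member is a plain kernel pointer, so the (struct sock_filter __user *) casts disappear. A hedged sketch of a minimal in-kernel caller after this series (the function name and the one-instruction program are hypothetical, chosen only to show the plumbing):

#include <linux/filter.h>
#include <linux/kernel.h>

static struct sk_filter *example_filter;

/* Illustrative helper: load a one-instruction classic BPF program
 * ("ret #0xffff", i.e. accept the packet) the same way the callers
 * above now do, with no __user cast.
 */
static int example_filter_setup(void)
{
        static struct sock_filter example_insns[] = {
                { 0x06, 0, 0, 0x0000ffff },     /* BPF_RET | BPF_K, k = 0xffff */
        };
        struct sock_fprog_kern prog = {
                .len    = ARRAY_SIZE(example_insns),
                .filter = example_insns,        /* plain kernel pointer */
        };

        /* Checks the program, converts it to the internal instruction
         * format and JITs it where supported.
         */
        return sk_unattached_filter_create(&example_filter, &prog);
}

The resulting filter would later be released with sk_unattached_filter_destroy(), as xt_bpf does in its destroy hook.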