Diffstat (limited to 'net')
-rw-r--r--	net/core/filter.c	198
1 file changed, 99 insertions, 99 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 6bd2e350e751..b3f21751b238 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -59,12 +59,12 @@
 #define BPF_R10	regs[BPF_REG_10]
 
 /* Named registers */
-#define A	regs[insn->a_reg]
-#define X	regs[insn->x_reg]
+#define DST	regs[insn->dst_reg]
+#define SRC	regs[insn->src_reg]
 #define FP	regs[BPF_REG_FP]
 #define ARG1	regs[BPF_REG_ARG1]
 #define CTX	regs[BPF_REG_CTX]
-#define K	insn->imm
+#define IMM	insn->imm
 
 /* No hurry in this branch
  *
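For readers following the rename: the macros now index the interpreter's register file through the renamed instruction fields. A minimal userspace sketch of that decode (the dst_reg/src_reg/imm field names come from this patch; the stand-in struct layout is assumed for illustration only):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct sock_filter_int, field names per this patch */
struct insn {
	uint8_t code;       /* opcode */
	uint8_t dst_reg:4;  /* destination register number */
	uint8_t src_reg:4;  /* source register number */
	int16_t off;        /* signed offset */
	int32_t imm;        /* signed immediate */
};

int main(void)
{
	uint64_t regs[11] = { 0 };  /* BPF_R0..BPF_R10 */
	struct insn i = { .dst_reg = 0, .src_reg = 1, .imm = 42 };

	regs[1] = 100;
	/* DST = SRC + IMM, i.e. regs[insn->dst_reg] = regs[insn->src_reg] + insn->imm */
	regs[i.dst_reg] = regs[i.src_reg] + i.imm;
	printf("%llu\n", (unsigned long long) regs[i.dst_reg]); /* 142 */
	return 0;
}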
@@ -264,7 +264,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
 	ARG1 = (u64) (unsigned long) ctx;
 
-	/* Register for user BPF programs need to be reset first. */
+	/* Registers used in classic BPF programs need to be reset first. */
 	regs[BPF_REG_A] = 0;
 	regs[BPF_REG_X] = 0;
 
@@ -274,16 +274,16 @@ select_insn:
 	/* ALU */
 #define ALU(OPCODE, OP)			\
 	ALU64_##OPCODE##_X:		\
-		A = A OP X;		\
+		DST = DST OP SRC;	\
 		CONT;			\
 	ALU_##OPCODE##_X:		\
-		A = (u32) A OP (u32) X;	\
+		DST = (u32) DST OP (u32) SRC;	\
 		CONT;			\
 	ALU64_##OPCODE##_K:		\
-		A = A OP K;		\
+		DST = DST OP IMM;	\
 		CONT;			\
 	ALU_##OPCODE##_K:		\
-		A = (u32) A OP (u32) K;	\
+		DST = (u32) DST OP (u32) IMM;	\
 		CONT;
 
 	ALU(ADD, +)
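Hand-expanding the rewritten macro for one operator shows the four dispatch labels it generates; a sketch of what ALU(ADD, +) produces after this patch:

	ALU64_ADD_X:
		DST = DST + SRC;		/* 64-bit add, register operand */
		CONT;
	ALU_ADD_X:
		DST = (u32) DST + (u32) SRC;	/* 32-bit add, upper bits cleared */
		CONT;
	ALU64_ADD_K:
		DST = DST + IMM;		/* 64-bit add, immediate operand */
		CONT;
	ALU_ADD_K:
		DST = (u32) DST + (u32) IMM;	/* 32-bit add, immediate operand */
		CONT;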
@@ -296,92 +296,92 @@ select_insn:
 	ALU(MUL, *)
 #undef ALU
 	ALU_NEG:
-		A = (u32) -A;
+		DST = (u32) -DST;
 		CONT;
 	ALU64_NEG:
-		A = -A;
+		DST = -DST;
 		CONT;
 	ALU_MOV_X:
-		A = (u32) X;
+		DST = (u32) SRC;
 		CONT;
 	ALU_MOV_K:
-		A = (u32) K;
+		DST = (u32) IMM;
 		CONT;
 	ALU64_MOV_X:
-		A = X;
+		DST = SRC;
 		CONT;
 	ALU64_MOV_K:
-		A = K;
+		DST = IMM;
 		CONT;
 	ALU64_ARSH_X:
-		(*(s64 *) &A) >>= X;
+		(*(s64 *) &DST) >>= SRC;
 		CONT;
 	ALU64_ARSH_K:
-		(*(s64 *) &A) >>= K;
+		(*(s64 *) &DST) >>= IMM;
 		CONT;
 	ALU64_MOD_X:
-		if (unlikely(X == 0))
+		if (unlikely(SRC == 0))
 			return 0;
-		tmp = A;
-		A = do_div(tmp, X);
+		tmp = DST;
+		DST = do_div(tmp, SRC);
 		CONT;
 	ALU_MOD_X:
-		if (unlikely(X == 0))
+		if (unlikely(SRC == 0))
 			return 0;
-		tmp = (u32) A;
-		A = do_div(tmp, (u32) X);
+		tmp = (u32) DST;
+		DST = do_div(tmp, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		tmp = A;
-		A = do_div(tmp, K);
+		tmp = DST;
+		DST = do_div(tmp, IMM);
 		CONT;
 	ALU_MOD_K:
-		tmp = (u32) A;
-		A = do_div(tmp, (u32) K);
+		tmp = (u32) DST;
+		DST = do_div(tmp, (u32) IMM);
 		CONT;
 	ALU64_DIV_X:
-		if (unlikely(X == 0))
+		if (unlikely(SRC == 0))
 			return 0;
-		do_div(A, X);
+		do_div(DST, SRC);
 		CONT;
 	ALU_DIV_X:
-		if (unlikely(X == 0))
+		if (unlikely(SRC == 0))
 			return 0;
-		tmp = (u32) A;
-		do_div(tmp, (u32) X);
-		A = (u32) tmp;
+		tmp = (u32) DST;
+		do_div(tmp, (u32) SRC);
+		DST = (u32) tmp;
 		CONT;
 	ALU64_DIV_K:
-		do_div(A, K);
+		do_div(DST, IMM);
 		CONT;
 	ALU_DIV_K:
-		tmp = (u32) A;
-		do_div(tmp, (u32) K);
-		A = (u32) tmp;
+		tmp = (u32) DST;
+		do_div(tmp, (u32) IMM);
+		DST = (u32) tmp;
 		CONT;
 	ALU_END_TO_BE:
-		switch (K) {
+		switch (IMM) {
 		case 16:
-			A = (__force u16) cpu_to_be16(A);
+			DST = (__force u16) cpu_to_be16(DST);
 			break;
 		case 32:
-			A = (__force u32) cpu_to_be32(A);
+			DST = (__force u32) cpu_to_be32(DST);
 			break;
 		case 64:
-			A = (__force u64) cpu_to_be64(A);
+			DST = (__force u64) cpu_to_be64(DST);
 			break;
 		}
 		CONT;
 	ALU_END_TO_LE:
-		switch (K) {
+		switch (IMM) {
 		case 16:
-			A = (__force u16) cpu_to_le16(A);
+			DST = (__force u16) cpu_to_le16(DST);
 			break;
 		case 32:
-			A = (__force u32) cpu_to_le32(A);
+			DST = (__force u32) cpu_to_le32(DST);
 			break;
 		case 64:
-			A = (__force u64) cpu_to_le64(A);
+			DST = (__force u64) cpu_to_le64(DST);
 			break;
 		}
 		CONT;
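The asymmetry between the MOD and DIV handlers follows from the do_div() contract: it divides its first argument in place and returns the remainder, so MOD keeps the return value while DIV keeps the mutated operand. A userspace sketch with a stand-in macro (assumed behaviorally equivalent to the kernel's asm-optimized do_div):

#include <stdint.h>
#include <stdio.h>

#define do_div(n, base) ({			\
	uint32_t __rem = (n) % (base);		\
	(n) /= (base);				\
	__rem;					\
})

int main(void)
{
	uint64_t dst = 10, tmp;

	tmp = dst;
	printf("mod: %u\n", (unsigned) do_div(tmp, 3));	 /* remainder: 1 */

	do_div(dst, 3);					 /* quotient kept in place */
	printf("div: %llu\n", (unsigned long long) dst); /* 3 */
	return 0;
}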
@@ -401,85 +401,85 @@ select_insn:
 		insn += insn->off;
 		CONT;
 	JMP_JEQ_X:
-		if (A == X) {
+		if (DST == SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JEQ_K:
-		if (A == K) {
+		if (DST == IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JNE_X:
-		if (A != X) {
+		if (DST != SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JNE_K:
-		if (A != K) {
+		if (DST != IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JGT_X:
-		if (A > X) {
+		if (DST > SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JGT_K:
-		if (A > K) {
+		if (DST > IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JGE_X:
-		if (A >= X) {
+		if (DST >= SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JGE_K:
-		if (A >= K) {
+		if (DST >= IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSGT_X:
-		if (((s64) A) > ((s64) X)) {
+		if (((s64) DST) > ((s64) SRC)) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSGT_K:
-		if (((s64) A) > ((s64) K)) {
+		if (((s64) DST) > ((s64) IMM)) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSGE_X:
-		if (((s64) A) >= ((s64) X)) {
+		if (((s64) DST) >= ((s64) SRC)) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSGE_K:
-		if (((s64) A) >= ((s64) K)) {
+		if (((s64) DST) >= ((s64) IMM)) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSET_X:
-		if (A & X) {
+		if (DST & SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSET_K:
-		if (A & K) {
+		if (DST & IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
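The (s64) casts are what separate JSGT/JSGE from JGT/JGE: the same register bits order differently under signed and unsigned comparison. A small demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst = (uint64_t) -1;	/* 0xffffffffffffffff */
	uint64_t src = 1;

	printf("JGT  taken: %d\n", dst > src);			   /* 1: unsigned max */
	printf("JSGT taken: %d\n", (int64_t) dst > (int64_t) src); /* 0: -1 < 1 */
	return 0;
}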
@@ -488,15 +488,15 @@ select_insn:
 		return BPF_R0;
 
 	/* STX and ST and LDX*/
 #define LDST(SIZEOP, SIZE)						\
 	STX_MEM_##SIZEOP:						\
-		*(SIZE *)(unsigned long) (A + insn->off) = X;		\
+		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
 		CONT;							\
 	ST_MEM_##SIZEOP:						\
-		*(SIZE *)(unsigned long) (A + insn->off) = K;		\
+		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
 		CONT;							\
 	LDX_MEM_##SIZEOP:						\
-		A = *(SIZE *)(unsigned long) (X + insn->off);		\
+		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
 		CONT;
 
 	LDST(B, u8)
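As with the ALU macro, the renamed LDST macro expands to one store/store-immediate/load triple per access width; a hand expansion of LDST(W, u32) after this patch:

	STX_MEM_W:
		*(u32 *)(unsigned long) (DST + insn->off) = SRC;
		CONT;
	ST_MEM_W:
		*(u32 *)(unsigned long) (DST + insn->off) = IMM;
		CONT;
	LDX_MEM_W:
		DST = *(u32 *)(unsigned long) (SRC + insn->off);
		CONT;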
@@ -504,16 +504,16 @@ select_insn:
 	LDST(W, u32)
 	LDST(DW, u64)
 #undef LDST
-	STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
-		atomic_add((u32) X, (atomic_t *)(unsigned long)
-			   (A + insn->off));
+	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
+		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
+			   (DST + insn->off));
 		CONT;
-	STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
-		atomic64_add((u64) X, (atomic64_t *)(unsigned long)
-			     (A + insn->off));
+	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
+		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
+			     (DST + insn->off));
 		CONT;
-	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */
-		off = K;
+	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
+		off = IMM;
 load_word:
 	/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
 	 * only appearing in the programs where ctx ==
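The updated xadd comments describe an atomic read-modify-write of memory at dst_reg + off16, with nothing written back to a register. A userspace sketch of the same semantics, using a compiler builtin as a stand-in for the kernel's atomic_add():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mem = 5;	/* word at dst_reg + off16 */
	uint64_t src = 3;	/* value in src_reg */

	__atomic_fetch_add(&mem, (uint32_t) src, __ATOMIC_RELAXED);
	printf("%u\n", mem);	/* 8 */
	return 0;
}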
@@ -527,51 +527,51 @@ load_word:
 	 * BPF_R6-BPF_R9, and store return value into BPF_R0.
 	 *
 	 * Implicit input:
-	 *   ctx
+	 *   ctx == skb == BPF_R6 == CTX
 	 *
 	 * Explicit input:
-	 *   X == any register
-	 *   K == 32-bit immediate
+	 *   SRC == any register
+	 *   IMM == 32-bit immediate
 	 *
 	 * Output:
 	 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
 	 */
 
-		ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
+		ptr = load_pointer((struct sk_buff *) CTX, off, 4, &tmp);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = get_unaligned_be32(ptr);
 			CONT;
 		}
 
 		return 0;
-	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
-		off = K;
+	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
+		off = IMM;
 load_half:
-		ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
+		ptr = load_pointer((struct sk_buff *) CTX, off, 2, &tmp);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = get_unaligned_be16(ptr);
 			CONT;
 		}
 
 		return 0;
-	LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
-		off = K;
+	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
+		off = IMM;
 load_byte:
-		ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
+		ptr = load_pointer((struct sk_buff *) CTX, off, 1, &tmp);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = *(u8 *)ptr;
 			CONT;
 		}
 
 		return 0;
-	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
-		off = K + X;
+	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
+		off = IMM + SRC;
 		goto load_word;
-	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */
-		off = K + X;
+	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
+		off = IMM + SRC;
 		goto load_half;
-	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */
-		off = K + X;
+	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
+		off = IMM + SRC;
 		goto load_byte;
 
 	default_label:
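Per the rewritten comments, LD_ABS_W leaves the four packet bytes at skb->data + imm32 in BPF_R0 converted from network to host order, which is what get_unaligned_be32() does. A sketch of that conversion with a hand-rolled stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

static uint32_t load_be32(const uint8_t *p)
{
	return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16) |
	       ((uint32_t) p[2] << 8)  |  (uint32_t) p[3];
}

int main(void)
{
	/* hypothetical first 4 bytes at skb->data + imm32 */
	const uint8_t data[] = { 0xc0, 0xa8, 0x00, 0x01 };

	printf("BPF_R0 = 0x%08x\n", load_be32(data));	/* 0xc0a80001 */
	return 0;
}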
@@ -675,7 +675,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_PROTOCOL:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
-		/* A = *(u16 *) (ctx + offsetof(protocol)) */
+		/* A = *(u16 *) (CTX + offsetof(protocol)) */
 		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
 				      offsetof(struct sk_buff, protocol));
 		/* A = ntohs(A) [emitting a nop or swap16] */
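The emitted BPF_LDX_MEM above is the internal-BPF spelling of a half-word field load from the context. What it computes, as a userspace sketch (fake_skb and its offsets are illustrative, not the real sk_buff layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_skb {
	uint32_t len;
	uint16_t protocol;
};

int main(void)
{
	struct fake_skb skb = { .len = 64, .protocol = 0x0008 };
	const char *ctx = (const char *) &skb;

	/* A = *(u16 *) (CTX + offsetof(protocol)) */
	uint16_t a = *(const uint16_t *) (ctx + offsetof(struct fake_skb, protocol));
	printf("A = 0x%04x\n", a);
	return 0;
}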
@@ -741,7 +741,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
-		/* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+		/* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
 		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
 				      offsetof(struct sk_buff, vlan_tci));
 		if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
@@ -760,13 +760,13 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
 	case SKF_AD_OFF + SKF_AD_CPU:
 	case SKF_AD_OFF + SKF_AD_RANDOM:
-		/* arg1 = ctx */
+		/* arg1 = CTX */
 		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
 		/* arg2 = A */
 		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
 		/* arg3 = X */
 		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
-		/* Emit call(ctx, arg2=A, arg3=X) */
+		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
 		switch (fp->k) {
 		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
 			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
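Laid end to end, the converter emits a three-move-plus-call sequence for these helper-backed extensions. A sketch in terms of the macros this file already uses (the buf[] buffer is illustrative; __skb_get_pay_offset is the helper the diff names for SKF_AD_PAY_OFFSET):

	struct sock_filter_int buf[4], *insn = buf;

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); /* arg1 = CTX */
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);   /* arg2 = A   */
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);   /* arg3 = X   */
	*insn   = BPF_EMIT_CALL(__skb_get_pay_offset);      /* BPF_R0 = helper(CTX, A, X) */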
@@ -941,12 +941,12 @@ do_pass:
 			 */
 			*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
 
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_TMP;
+			insn->dst_reg = BPF_REG_A;
+			insn->src_reg = BPF_REG_TMP;
 			bpf_src = BPF_X;
 		} else {
-			insn->a_reg = BPF_REG_A;
-			insn->x_reg = BPF_REG_X;
+			insn->dst_reg = BPF_REG_A;
+			insn->src_reg = BPF_REG_X;
 			insn->imm = fp->k;
 			bpf_src = BPF_SRC(fp->code);
 		}
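The BPF_REG_TMP path above exists because insn->imm is a signed 32-bit field: a classic-BPF constant with the top bit set would be sign-extended when compared as an immediate, so it is first materialized in a scratch register and the jump rewritten to the BPF_X form. A small demonstration of the hazard (sketch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t k = 0x80000000u;	/* classic BPF constant */
	int32_t imm = (int32_t) k;	/* what insn->imm would hold */
	uint64_t dst = 0x80000000u;	/* value in register A */

	printf("via register:  %d\n", dst == (uint64_t) k);		  /* 1 */
	printf("as immediate:  %d\n", dst == (uint64_t)(int64_t) imm);	  /* 0 */
	return 0;
}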