-rw-r--r--   Documentation/networking/filter.txt |   2
-rw-r--r--   arch/x86/net/bpf_jit_comp.c         | 260
-rw-r--r--   include/linux/filter.h              | 156
-rw-r--r--   net/core/filter.c                   | 198
4 files changed, 314 insertions(+), 302 deletions(-)
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 58c443926647..9f49b8690500 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -805,7 +805,7 @@ to seccomp_data, for converted BPF filters R1 points to a skb.
 
 A program, that is translated internally consists of the following elements:
 
-  op:16, jt:8, jf:8, k:32    ==>    op:8, a_reg:4, x_reg:4, off:16, imm:32
+  op:16, jt:8, jf:8, k:32    ==>    op:8, dst_reg:4, src_reg:4, off:16, imm:32
 
 So far 87 internal BPF instructions were implemented. 8-bit 'op' opcode field
 has room for new instructions. Some of them may use 16/24/32 byte encoding. New
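The renamed fields map one-to-one onto struct sock_filter_int as updated below in include/linux/filter.h. A minimal sketch of what one instruction looks like under the new names (register choice illustrative, not taken from the patch):

	/* internal BPF 'R1 += R2', i.e. BPF_ALU64 | BPF_ADD | BPF_X */
	struct sock_filter_int insn = {
		.code    = BPF_ALU64 | BPF_ADD | BPF_X,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_REG_2,
		.off     = 0,
		.imm     = 0,
	};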
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 080f3f071bb0..99bef86ed6df 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -64,10 +64,10 @@ static inline bool is_simm32(s64 value)
 	return value == (s64) (s32) value;
 }
 
-/* mov A, X */
-#define EMIT_mov(A, X) \
-	do {if (A != X) \
-		EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \
+/* mov dst, src */
+#define EMIT_mov(DST, SRC) \
+	do {if (DST != SRC) \
+		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
 	} while (0)
 
 static int bpf_size_to_x86_bytes(int bpf_size)
@@ -194,16 +194,16 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
 	return byte;
 }
 
-/* encode dest register 'a_reg' into x64 opcode 'byte' */
-static inline u8 add_1reg(u8 byte, u32 a_reg)
+/* encode 'dst_reg' register into x64 opcode 'byte' */
+static inline u8 add_1reg(u8 byte, u32 dst_reg)
 {
-	return byte + reg2hex[a_reg];
+	return byte + reg2hex[dst_reg];
 }
 
-/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */
-static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg)
+/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
+static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 {
-	return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3);
+	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 }
 
 struct jit_context {
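For orientation, add_2reg() packs the two renamed register numbers into an x86 ModRM byte: the 0xC0 base selects register-direct mode, dst_reg lands in the r/m field, src_reg in the reg field. A worked example, assuming this JIT's mapping of BPF_REG_0 to rax (reg2hex value 0) and BPF_REG_1 to rdi (reg2hex value 7):

	/* 0xC0 + 0 + (7 << 3) == 0xF8; with opcode 0x89 ('mov r/m32, r32')
	 * the pair 89 F8 encodes 'mov eax, edi', i.e. dst_reg = src_reg.
	 */
	u8 modrm = add_2reg(0xC0, BPF_REG_0, BPF_REG_1);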
@@ -286,9 +286,9 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	}
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		const s32 K = insn->imm;
-		u32 a_reg = insn->a_reg;
-		u32 x_reg = insn->x_reg;
+		const s32 imm32 = insn->imm;
+		u32 dst_reg = insn->dst_reg;
+		u32 src_reg = insn->src_reg;
 		u8 b1 = 0, b2 = 0, b3 = 0;
 		s64 jmp_offset;
 		u8 jmp_cond;
@@ -315,32 +315,32 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		case BPF_XOR: b2 = 0x31; break;
 		}
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
-			EMIT1(add_2mod(0x48, a_reg, x_reg));
-		else if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT1(add_2mod(0x40, a_reg, x_reg));
-		EMIT2(b2, add_2reg(0xC0, a_reg, x_reg));
+			EMIT1(add_2mod(0x48, dst_reg, src_reg));
+		else if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT1(add_2mod(0x40, dst_reg, src_reg));
+		EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
 		break;
 
-		/* mov A, X */
+		/* mov dst, src */
 	case BPF_ALU64 | BPF_MOV | BPF_X:
-		EMIT_mov(a_reg, x_reg);
+		EMIT_mov(dst_reg, src_reg);
 		break;
 
-		/* mov32 A, X */
+		/* mov32 dst, src */
 	case BPF_ALU | BPF_MOV | BPF_X:
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT1(add_2mod(0x40, a_reg, x_reg));
-		EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg));
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT1(add_2mod(0x40, dst_reg, src_reg));
+		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
 		break;
 
-		/* neg A */
+		/* neg dst */
 	case BPF_ALU | BPF_NEG:
 	case BPF_ALU64 | BPF_NEG:
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
-			EMIT1(add_1mod(0x48, a_reg));
-		else if (is_ereg(a_reg))
-			EMIT1(add_1mod(0x40, a_reg));
-		EMIT2(0xF7, add_1reg(0xD8, a_reg));
+			EMIT1(add_1mod(0x48, dst_reg));
+		else if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
+		EMIT2(0xF7, add_1reg(0xD8, dst_reg));
 		break;
 
 	case BPF_ALU | BPF_ADD | BPF_K:
@@ -354,9 +354,9 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	case BPF_ALU64 | BPF_OR | BPF_K:
 	case BPF_ALU64 | BPF_XOR | BPF_K:
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
-			EMIT1(add_1mod(0x48, a_reg));
-		else if (is_ereg(a_reg))
-			EMIT1(add_1mod(0x40, a_reg));
+			EMIT1(add_1mod(0x48, dst_reg));
+		else if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
 
 		switch (BPF_OP(insn->code)) {
 		case BPF_ADD: b3 = 0xC0; break;
@@ -366,10 +366,10 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		case BPF_XOR: b3 = 0xF0; break;
 		}
 
-		if (is_imm8(K))
-			EMIT3(0x83, add_1reg(b3, a_reg), K);
+		if (is_imm8(imm32))
+			EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
 		else
-			EMIT2_off32(0x81, add_1reg(b3, a_reg), K);
+			EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
 		break;
 
 	case BPF_ALU64 | BPF_MOV | BPF_K:
@@ -377,23 +377,23 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		 * use 'mov eax, imm32' (which zero-extends imm32)
 		 * to save 2 bytes
 		 */
-		if (K < 0) {
+		if (imm32 < 0) {
 			/* 'mov rax, imm32' sign extends imm32 */
-			b1 = add_1mod(0x48, a_reg);
+			b1 = add_1mod(0x48, dst_reg);
 			b2 = 0xC7;
 			b3 = 0xC0;
-			EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K);
+			EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
 			break;
 		}
 
 	case BPF_ALU | BPF_MOV | BPF_K:
 		/* mov %eax, imm32 */
-		if (is_ereg(a_reg))
-			EMIT1(add_1mod(0x40, a_reg));
-		EMIT1_off32(add_1reg(0xB8, a_reg), K);
+		if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
+		EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
 		break;
 
-		/* A %= X, A /= X, A %= K, A /= K */
+		/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU | BPF_DIV | BPF_X:
 	case BPF_ALU | BPF_MOD | BPF_K:
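The imm32 < 0 test above picks between two mov encodings; illustrative byte sequences (standard x86 encodings, not emitted verbatim by this hunk):

	/* mov rax, -1  ->  48 C7 C0 FF FF FF FF  (REX.W form, imm32 sign-extended)
	 * mov eax, 5   ->  B8 05 00 00 00        (zero-extends into rax; 2 bytes shorter)
	 */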
@@ -406,14 +406,14 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT1(0x52); /* push rdx */
 
 		if (BPF_SRC(insn->code) == BPF_X)
-			/* mov r11, X */
-			EMIT_mov(AUX_REG, x_reg);
+			/* mov r11, src_reg */
+			EMIT_mov(AUX_REG, src_reg);
 		else
-			/* mov r11, K */
-			EMIT3_off32(0x49, 0xC7, 0xC3, K);
+			/* mov r11, imm32 */
+			EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
 
-		/* mov rax, A */
-		EMIT_mov(BPF_REG_0, a_reg);
+		/* mov rax, dst_reg */
+		EMIT_mov(BPF_REG_0, dst_reg);
 
 		/* xor edx, edx
 		 * equivalent to 'xor rdx, rdx', but one byte less
@@ -421,7 +421,7 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT2(0x31, 0xd2);
 
 		if (BPF_SRC(insn->code) == BPF_X) {
-			/* if (X == 0) return 0 */
+			/* if (src_reg == 0) return 0 */
 
 			/* cmp r11, 0 */
 			EMIT4(0x49, 0x83, 0xFB, 0x00);
@@ -457,8 +457,8 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT1(0x5A); /* pop rdx */
 		EMIT1(0x58); /* pop rax */
 
-		/* mov A, r11 */
-		EMIT_mov(a_reg, AUX_REG);
+		/* mov dst_reg, r11 */
+		EMIT_mov(dst_reg, AUX_REG);
 		break;
 
 	case BPF_ALU | BPF_MUL | BPF_K:
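Taken together, the MOD/DIV cases above stage operands through fixed registers because x86 div implicitly uses rax:rdx; the emitted sequence is roughly the following (reconstruction for readability, not part of the patch):

	/* push rax; push rdx            -- div clobbers both
	 * mov  r11, src_reg (or imm32)  -- divisor into AUX_REG
	 * mov  rax, dst_reg             -- dividend
	 * xor  edx, edx                 -- clear the high half
	 * div  r11                      -- rax = quotient, rdx = remainder
	 * mov  r11, rax (rdx for MOD)   -- select the wanted result
	 * pop  rdx; pop rax
	 * mov  dst_reg, r11             -- write back
	 */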
@@ -468,15 +468,15 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT1(0x50); /* push rax */
 		EMIT1(0x52); /* push rdx */
 
-		/* mov r11, A */
-		EMIT_mov(AUX_REG, a_reg);
+		/* mov r11, dst_reg */
+		EMIT_mov(AUX_REG, dst_reg);
 
 		if (BPF_SRC(insn->code) == BPF_X)
-			/* mov rax, X */
-			EMIT_mov(BPF_REG_0, x_reg);
+			/* mov rax, src_reg */
+			EMIT_mov(BPF_REG_0, src_reg);
 		else
-			/* mov rax, K */
-			EMIT3_off32(0x48, 0xC7, 0xC0, K);
+			/* mov rax, imm32 */
+			EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
 
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
 			EMIT1(add_1mod(0x48, AUX_REG));
@@ -491,8 +491,8 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT1(0x5A); /* pop rdx */
 		EMIT1(0x58); /* pop rax */
 
-		/* mov A, r11 */
-		EMIT_mov(a_reg, AUX_REG);
+		/* mov dst_reg, r11 */
+		EMIT_mov(dst_reg, AUX_REG);
 		break;
 
 		/* shifts */
@@ -503,39 +503,39 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	case BPF_ALU64 | BPF_RSH | BPF_K:
 	case BPF_ALU64 | BPF_ARSH | BPF_K:
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
-			EMIT1(add_1mod(0x48, a_reg));
-		else if (is_ereg(a_reg))
-			EMIT1(add_1mod(0x40, a_reg));
+			EMIT1(add_1mod(0x48, dst_reg));
+		else if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
 
 		switch (BPF_OP(insn->code)) {
 		case BPF_LSH: b3 = 0xE0; break;
 		case BPF_RSH: b3 = 0xE8; break;
 		case BPF_ARSH: b3 = 0xF8; break;
 		}
-		EMIT3(0xC1, add_1reg(b3, a_reg), K);
+		EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
 		break;
 
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
-		switch (K) {
+		switch (imm32) {
 		case 16:
 			/* emit 'ror %ax, 8' to swap lower 2 bytes */
 			EMIT1(0x66);
-			if (is_ereg(a_reg))
+			if (is_ereg(dst_reg))
 				EMIT1(0x41);
-			EMIT3(0xC1, add_1reg(0xC8, a_reg), 8);
+			EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
 			break;
 		case 32:
 			/* emit 'bswap eax' to swap lower 4 bytes */
-			if (is_ereg(a_reg))
+			if (is_ereg(dst_reg))
 				EMIT2(0x41, 0x0F);
 			else
 				EMIT1(0x0F);
-			EMIT1(add_1reg(0xC8, a_reg));
+			EMIT1(add_1reg(0xC8, dst_reg));
 			break;
 		case 64:
 			/* emit 'bswap rax' to swap 8 bytes */
-			EMIT3(add_1mod(0x48, a_reg), 0x0F,
-			      add_1reg(0xC8, a_reg));
+			EMIT3(add_1mod(0x48, dst_reg), 0x0F,
+			      add_1reg(0xC8, dst_reg));
 			break;
 		}
 		break;
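The BPF_END encodings above, spelled out for reference (standard x86 opcodes, shown only for orientation):

	/* ror ax, 8   -> 66 C1 C8 08  (case 16: operand-size prefix + rotate)
	 * bswap eax   -> 0F C8        (case 32, low registers)
	 * bswap r8d   -> 41 0F C8     (case 32, extended registers need REX.B)
	 * bswap rax   -> 48 0F C8     (case 64, REX.W)
	 */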
@@ -543,117 +543,117 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	case BPF_ALU | BPF_END | BPF_FROM_LE:
 		break;
 
-		/* ST: *(u8*)(a_reg + off) = imm */
+		/* ST: *(u8*)(dst_reg + off) = imm */
 	case BPF_ST | BPF_MEM | BPF_B:
-		if (is_ereg(a_reg))
+		if (is_ereg(dst_reg))
 			EMIT2(0x41, 0xC6);
 		else
 			EMIT1(0xC6);
 		goto st;
 	case BPF_ST | BPF_MEM | BPF_H:
-		if (is_ereg(a_reg))
+		if (is_ereg(dst_reg))
 			EMIT3(0x66, 0x41, 0xC7);
 		else
 			EMIT2(0x66, 0xC7);
 		goto st;
 	case BPF_ST | BPF_MEM | BPF_W:
-		if (is_ereg(a_reg))
+		if (is_ereg(dst_reg))
 			EMIT2(0x41, 0xC7);
 		else
 			EMIT1(0xC7);
 		goto st;
 	case BPF_ST | BPF_MEM | BPF_DW:
-		EMIT2(add_1mod(0x48, a_reg), 0xC7);
+		EMIT2(add_1mod(0x48, dst_reg), 0xC7);
 
 st:		if (is_imm8(insn->off))
-			EMIT2(add_1reg(0x40, a_reg), insn->off);
+			EMIT2(add_1reg(0x40, dst_reg), insn->off);
 		else
-			EMIT1_off32(add_1reg(0x80, a_reg), insn->off);
+			EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
 
-		EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
+		EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
 		break;
 
-		/* STX: *(u8*)(a_reg + off) = x_reg */
+		/* STX: *(u8*)(dst_reg + off) = src_reg */
 	case BPF_STX | BPF_MEM | BPF_B:
 		/* emit 'mov byte ptr [rax + off], al' */
-		if (is_ereg(a_reg) || is_ereg(x_reg) ||
+		if (is_ereg(dst_reg) || is_ereg(src_reg) ||
 		    /* have to add extra byte for x86 SIL, DIL regs */
-		    x_reg == BPF_REG_1 || x_reg == BPF_REG_2)
-			EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88);
+		    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
 		else
 			EMIT1(0x88);
 		goto stx;
 	case BPF_STX | BPF_MEM | BPF_H:
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89);
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
 		else
 			EMIT2(0x66, 0x89);
 		goto stx;
 	case BPF_STX | BPF_MEM | BPF_W:
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89);
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
 		else
 			EMIT1(0x89);
 		goto stx;
 	case BPF_STX | BPF_MEM | BPF_DW:
-		EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89);
+		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
 stx:		if (is_imm8(insn->off))
-			EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+			EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
 		else
-			EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+			EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
 				    insn->off);
 		break;
 
-		/* LDX: a_reg = *(u8*)(x_reg + off) */
+		/* LDX: dst_reg = *(u8*)(src_reg + off) */
 	case BPF_LDX | BPF_MEM | BPF_B:
 		/* emit 'movzx rax, byte ptr [rax + off]' */
-		EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6);
+		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
 		goto ldx;
 	case BPF_LDX | BPF_MEM | BPF_H:
 		/* emit 'movzx rax, word ptr [rax + off]' */
-		EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7);
+		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
 		goto ldx;
 	case BPF_LDX | BPF_MEM | BPF_W:
 		/* emit 'mov eax, dword ptr [rax+0x14]' */
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B);
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
 		else
 			EMIT1(0x8B);
 		goto ldx;
 	case BPF_LDX | BPF_MEM | BPF_DW:
 		/* emit 'mov rax, qword ptr [rax+0x14]' */
-		EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B);
+		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
 ldx:		/* if insn->off == 0 we can save one extra byte, but
 		 * special case of x86 r13 which always needs an offset
 		 * is not worth the hassle
 		 */
 		if (is_imm8(insn->off))
-			EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off);
+			EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
 		else
-			EMIT1_off32(add_2reg(0x80, x_reg, a_reg),
+			EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
 				    insn->off);
 		break;
 
-		/* STX XADD: lock *(u32*)(a_reg + off) += x_reg */
+		/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
 	case BPF_STX | BPF_XADD | BPF_W:
 		/* emit 'lock add dword ptr [rax + off], eax' */
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01);
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
 		else
 			EMIT2(0xF0, 0x01);
 		goto xadd;
 	case BPF_STX | BPF_XADD | BPF_DW:
-		EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01);
+		EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
 xadd:		if (is_imm8(insn->off))
-			EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+			EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
 		else
-			EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+			EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
 				    insn->off);
 		break;
 
 		/* call */
 	case BPF_JMP | BPF_CALL:
-		func = (u8 *) __bpf_call_base + K;
+		func = (u8 *) __bpf_call_base + imm32;
 		jmp_offset = func - (image + addrs[i]);
 		if (ctx->seen_ld_abs) {
 			EMIT2(0x41, 0x52); /* push %r10 */
@@ -663,9 +663,9 @@ xadd:		if (is_imm8(insn->off))
 			 */
 			jmp_offset += 4;
 		}
-		if (!K || !is_simm32(jmp_offset)) {
+		if (!imm32 || !is_simm32(jmp_offset)) {
 			pr_err("unsupported bpf func %d addr %p image %p\n",
-			       K, func, image);
+			       imm32, func, image);
 			return -EINVAL;
 		}
 		EMIT1_off32(0xE8, jmp_offset);
@@ -682,21 +682,21 @@ xadd:		if (is_imm8(insn->off))
 	case BPF_JMP | BPF_JGE | BPF_X:
 	case BPF_JMP | BPF_JSGT | BPF_X:
 	case BPF_JMP | BPF_JSGE | BPF_X:
-		/* cmp a_reg, x_reg */
-		EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39,
-		      add_2reg(0xC0, a_reg, x_reg));
+		/* cmp dst_reg, src_reg */
+		EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
+		      add_2reg(0xC0, dst_reg, src_reg));
 		goto emit_cond_jmp;
 
 	case BPF_JMP | BPF_JSET | BPF_X:
-		/* test a_reg, x_reg */
-		EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85,
-		      add_2reg(0xC0, a_reg, x_reg));
+		/* test dst_reg, src_reg */
+		EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
+		      add_2reg(0xC0, dst_reg, src_reg));
 		goto emit_cond_jmp;
 
 	case BPF_JMP | BPF_JSET | BPF_K:
-		/* test a_reg, imm32 */
-		EMIT1(add_1mod(0x48, a_reg));
-		EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K);
+		/* test dst_reg, imm32 */
+		EMIT1(add_1mod(0x48, dst_reg));
+		EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
 		goto emit_cond_jmp;
 
 	case BPF_JMP | BPF_JEQ | BPF_K:
@@ -705,13 +705,13 @@ xadd:		if (is_imm8(insn->off))
 	case BPF_JMP | BPF_JGE | BPF_K:
 	case BPF_JMP | BPF_JSGT | BPF_K:
 	case BPF_JMP | BPF_JSGE | BPF_K:
-		/* cmp a_reg, imm8/32 */
-		EMIT1(add_1mod(0x48, a_reg));
+		/* cmp dst_reg, imm8/32 */
+		EMIT1(add_1mod(0x48, dst_reg));
 
-		if (is_imm8(K))
-			EMIT3(0x83, add_1reg(0xF8, a_reg), K);
+		if (is_imm8(imm32))
+			EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
 		else
-			EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K);
+			EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
 
 emit_cond_jmp:	/* convert BPF opcode to x86 */
 		switch (BPF_OP(insn->code)) {
@@ -773,27 +773,27 @@ emit_jmp:
 		func = sk_load_word;
 		goto common_load;
 	case BPF_LD | BPF_ABS | BPF_W:
-		func = CHOOSE_LOAD_FUNC(K, sk_load_word);
+		func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
 common_load:	ctx->seen_ld_abs = true;
 		jmp_offset = func - (image + addrs[i]);
 		if (!func || !is_simm32(jmp_offset)) {
 			pr_err("unsupported bpf func %d addr %p image %p\n",
-			       K, func, image);
+			       imm32, func, image);
 			return -EINVAL;
 		}
 		if (BPF_MODE(insn->code) == BPF_ABS) {
 			/* mov %esi, imm32 */
-			EMIT1_off32(0xBE, K);
+			EMIT1_off32(0xBE, imm32);
 		} else {
-			/* mov %rsi, x_reg */
-			EMIT_mov(BPF_REG_2, x_reg);
-			if (K) {
-				if (is_imm8(K))
+			/* mov %rsi, src_reg */
+			EMIT_mov(BPF_REG_2, src_reg);
+			if (imm32) {
+				if (is_imm8(imm32))
 					/* add %esi, imm8 */
-					EMIT3(0x83, 0xC6, K);
+					EMIT3(0x83, 0xC6, imm32);
 				else
 					/* add %esi, imm32 */
-					EMIT2_off32(0x81, 0xC6, K);
+					EMIT2_off32(0x81, 0xC6, imm32);
 			}
 		}
 		/* skb pointer is in R6 (%rbx), it will be copied into
@@ -808,13 +808,13 @@ common_load:	ctx->seen_ld_abs = true;
 		func = sk_load_half;
 		goto common_load;
 	case BPF_LD | BPF_ABS | BPF_H:
-		func = CHOOSE_LOAD_FUNC(K, sk_load_half);
+		func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
 		goto common_load;
 	case BPF_LD | BPF_IND | BPF_B:
 		func = sk_load_byte;
 		goto common_load;
 	case BPF_LD | BPF_ABS | BPF_B:
-		func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
+		func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
 		goto common_load;
 
 	case BPF_JMP | BPF_EXIT:
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f0c2ad43b4af..a7e3c48d73a7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -78,161 +78,173 @@ enum {
 
 /* Helper macros for filter block array initializers. */
 
-/* ALU ops on registers, bpf_add|sub|...: A += X */
+/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
 
-#define BPF_ALU64_REG(OP, A, X) \
+#define BPF_ALU64_REG(OP, DST, SRC) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = 0, \
 		.imm = 0 })
 
-#define BPF_ALU32_REG(OP, A, X) \
+#define BPF_ALU32_REG(OP, DST, SRC) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = 0, \
 		.imm = 0 })
 
-/* ALU ops on immediates, bpf_add|sub|...: A += IMM */
+/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
 
-#define BPF_ALU64_IMM(OP, A, IMM) \
+#define BPF_ALU64_IMM(OP, DST, IMM) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
-		.a_reg = A, \
-		.x_reg = 0, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
 		.off = 0, \
 		.imm = IMM })
 
-#define BPF_ALU32_IMM(OP, A, IMM) \
+#define BPF_ALU32_IMM(OP, DST, IMM) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
-		.a_reg = A, \
-		.x_reg = 0, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
 		.off = 0, \
 		.imm = IMM })
 
 /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
 
-#define BPF_ENDIAN(TYPE, A, LEN) \
+#define BPF_ENDIAN(TYPE, DST, LEN) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
-		.a_reg = A, \
-		.x_reg = 0, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
 		.off = 0, \
 		.imm = LEN })
 
-/* Short form of mov, A = X */
+/* Short form of mov, dst_reg = src_reg */
 
-#define BPF_MOV64_REG(A, X) \
+#define BPF_MOV64_REG(DST, SRC) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU64 | BPF_MOV | BPF_X, \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = 0, \
 		.imm = 0 })
 
-#define BPF_MOV32_REG(A, X) \
+#define BPF_MOV32_REG(DST, SRC) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU | BPF_MOV | BPF_X, \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = 0, \
 		.imm = 0 })
 
-/* Short form of mov, A = IMM */
+/* Short form of mov, dst_reg = imm32 */
 
-#define BPF_MOV64_IMM(A, IMM) \
+#define BPF_MOV64_IMM(DST, IMM) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU64 | BPF_MOV | BPF_K, \
-		.a_reg = A, \
-		.x_reg = 0, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
 		.off = 0, \
 		.imm = IMM })
 
-#define BPF_MOV32_IMM(A, IMM) \
+#define BPF_MOV32_IMM(DST, IMM) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU | BPF_MOV | BPF_K, \
-		.a_reg = A, \
-		.x_reg = 0, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
 		.off = 0, \
 		.imm = IMM })
 
-/* Short form of mov based on type, BPF_X: A = X, BPF_K: A = IMM */
+/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
 
-#define BPF_MOV64_RAW(TYPE, A, X, IMM) \
+#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = 0, \
 		.imm = IMM })
 
-#define BPF_MOV32_RAW(TYPE, A, X, IMM) \
+#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
 	((struct sock_filter_int) { \
 		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = 0, \
 		.imm = IMM })
 
-/* Direct packet access, R0 = *(uint *) (skb->data + OFF) */
+/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
 
-#define BPF_LD_ABS(SIZE, OFF) \
+#define BPF_LD_ABS(SIZE, IMM) \
 	((struct sock_filter_int) { \
 		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
-		.a_reg = 0, \
-		.x_reg = 0, \
+		.dst_reg = 0, \
+		.src_reg = 0, \
 		.off = 0, \
-		.imm = OFF })
+		.imm = IMM })
 
-/* Indirect packet access, R0 = *(uint *) (skb->data + X + OFF) */
+/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
 
-#define BPF_LD_IND(SIZE, X, OFF) \
+#define BPF_LD_IND(SIZE, SRC, IMM) \
 	((struct sock_filter_int) { \
 		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
-		.a_reg = 0, \
-		.x_reg = X, \
+		.dst_reg = 0, \
+		.src_reg = SRC, \
 		.off = 0, \
-		.imm = OFF })
+		.imm = IMM })
 
-/* Memory store, A = *(uint *) (X + OFF), and vice versa */
+/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
 
-#define BPF_LDX_MEM(SIZE, A, X, OFF) \
+#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
 	((struct sock_filter_int) { \
 		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = OFF, \
 		.imm = 0 })
 
-#define BPF_STX_MEM(SIZE, A, X, OFF) \
+/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
+
+#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
 	((struct sock_filter_int) { \
 		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = OFF, \
 		.imm = 0 })
 
-/* Conditional jumps against registers, if (A 'op' X) goto pc + OFF */
+/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
+
+#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
+	((struct sock_filter_int) { \
+		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
+		.off = OFF, \
+		.imm = IMM })
+
+/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
 
-#define BPF_JMP_REG(OP, A, X, OFF) \
+#define BPF_JMP_REG(OP, DST, SRC, OFF) \
 	((struct sock_filter_int) { \
 		.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = OFF, \
 		.imm = 0 })
 
-/* Conditional jumps against immediates, if (A 'op' IMM) goto pc + OFF */
+/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
 
-#define BPF_JMP_IMM(OP, A, IMM, OFF) \
+#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
 	((struct sock_filter_int) { \
 		.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
-		.a_reg = A, \
-		.x_reg = 0, \
+		.dst_reg = DST, \
+		.src_reg = 0, \
 		.off = OFF, \
 		.imm = IMM })
 
@@ -241,18 +253,18 @@ enum {
 #define BPF_EMIT_CALL(FUNC) \
 	((struct sock_filter_int) { \
 		.code = BPF_JMP | BPF_CALL, \
-		.a_reg = 0, \
-		.x_reg = 0, \
+		.dst_reg = 0, \
+		.src_reg = 0, \
 		.off = 0, \
 		.imm = ((FUNC) - __bpf_call_base) })
 
 /* Raw code statement block */
 
-#define BPF_RAW_INSN(CODE, A, X, OFF, IMM) \
+#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
 	((struct sock_filter_int) { \
 		.code = CODE, \
-		.a_reg = A, \
-		.x_reg = X, \
+		.dst_reg = DST, \
+		.src_reg = SRC, \
 		.off = OFF, \
 		.imm = IMM })
 
@@ -261,8 +273,8 @@ enum {
 #define BPF_EXIT_INSN() \
 	((struct sock_filter_int) { \
 		.code = BPF_JMP | BPF_EXIT, \
-		.a_reg = 0, \
-		.x_reg = 0, \
+		.dst_reg = 0, \
+		.src_reg = 0, \
 		.off = 0, \
 		.imm = 0 })
 
@@ -287,8 +299,8 @@ enum {
 
 struct sock_filter_int {
 	__u8	code;		/* opcode */
-	__u8	a_reg:4;	/* dest register */
-	__u8	x_reg:4;	/* source register */
+	__u8	dst_reg:4;	/* dest register */
+	__u8	src_reg:4;	/* source register */
 	__s16	off;		/* signed offset */
 	__s32	imm;		/* signed immediate constant */
 };
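With the struct fields renamed, a complete two-instruction program built from these helpers reads as follows (illustrative, not from the patch):

	/* return the lower 32 bits of the first argument (R1) */
	static const struct sock_filter_int prog[] = {
		BPF_MOV32_REG(BPF_REG_0, BPF_REG_1),	/* R0 = (u32) R1 */
		BPF_EXIT_INSN(),			/* return R0 */
	};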
diff --git a/net/core/filter.c b/net/core/filter.c
index 6bd2e350e751..b3f21751b238 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -59,12 +59,12 @@
 #define BPF_R10	regs[BPF_REG_10]
 
 /* Named registers */
-#define A	regs[insn->a_reg]
-#define X	regs[insn->x_reg]
+#define DST	regs[insn->dst_reg]
+#define SRC	regs[insn->src_reg]
 #define FP	regs[BPF_REG_FP]
 #define ARG1	regs[BPF_REG_ARG1]
 #define CTX	regs[BPF_REG_CTX]
-#define K	insn->imm
+#define IMM	insn->imm
 
 /* No hurry in this branch
  *
@@ -264,7 +264,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
 	ARG1 = (u64) (unsigned long) ctx;
 
-	/* Register for user BPF programs need to be reset first. */
+	/* Registers used in classic BPF programs need to be reset first. */
 	regs[BPF_REG_A] = 0;
 	regs[BPF_REG_X] = 0;
 
@@ -274,16 +274,16 @@ select_insn:
 	/* ALU */
 #define ALU(OPCODE, OP) \
 	ALU64_##OPCODE##_X: \
-		A = A OP X; \
+		DST = DST OP SRC; \
 		CONT; \
 	ALU_##OPCODE##_X: \
-		A = (u32) A OP (u32) X; \
+		DST = (u32) DST OP (u32) SRC; \
 		CONT; \
 	ALU64_##OPCODE##_K: \
-		A = A OP K; \
+		DST = DST OP IMM; \
 		CONT; \
 	ALU_##OPCODE##_K: \
-		A = (u32) A OP (u32) K; \
+		DST = (u32) DST OP (u32) IMM; \
 		CONT;
 
 	ALU(ADD, +)
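One instantiation of the macro above, expanded by hand to show what the rename buys in readability (whitespace mine):

	ALU64_ADD_X:
		DST = DST + SRC;		/* regs[insn->dst_reg] += regs[insn->src_reg] */
		CONT;
	ALU_ADD_X:
		DST = (u32) DST + (u32) SRC;	/* 32-bit variant truncates */
		CONT;
	ALU64_ADD_K:
		DST = DST + IMM;
		CONT;
	ALU_ADD_K:
		DST = (u32) DST + (u32) IMM;
		CONT;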
@@ -296,92 +296,92 @@ select_insn:
 	ALU(MUL, *)
 #undef ALU
 	ALU_NEG:
-		A = (u32) -A;
+		DST = (u32) -DST;
 		CONT;
 	ALU64_NEG:
-		A = -A;
+		DST = -DST;
 		CONT;
 	ALU_MOV_X:
-		A = (u32) X;
+		DST = (u32) SRC;
 		CONT;
 	ALU_MOV_K:
-		A = (u32) K;
+		DST = (u32) IMM;
 		CONT;
 	ALU64_MOV_X:
-		A = X;
+		DST = SRC;
 		CONT;
 	ALU64_MOV_K:
-		A = K;
+		DST = IMM;
 		CONT;
 	ALU64_ARSH_X:
-		(*(s64 *) &A) >>= X;
+		(*(s64 *) &DST) >>= SRC;
 		CONT;
 	ALU64_ARSH_K:
-		(*(s64 *) &A) >>= K;
+		(*(s64 *) &DST) >>= IMM;
 		CONT;
 	ALU64_MOD_X:
-		if (unlikely(X == 0))
+		if (unlikely(SRC == 0))
 			return 0;
-		tmp = A;
-		A = do_div(tmp, X);
+		tmp = DST;
+		DST = do_div(tmp, SRC);
 		CONT;
 	ALU_MOD_X:
-		if (unlikely(X == 0))
+		if (unlikely(SRC == 0))
 			return 0;
-		tmp = (u32) A;
-		A = do_div(tmp, (u32) X);
+		tmp = (u32) DST;
+		DST = do_div(tmp, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		tmp = A;
-		A = do_div(tmp, K);
+		tmp = DST;
+		DST = do_div(tmp, IMM);
 		CONT;
 	ALU_MOD_K:
-		tmp = (u32) A;
-		A = do_div(tmp, (u32) K);
+		tmp = (u32) DST;
+		DST = do_div(tmp, (u32) IMM);
 		CONT;
 	ALU64_DIV_X:
-		if (unlikely(X == 0))
+		if (unlikely(SRC == 0))
 			return 0;
-		do_div(A, X);
+		do_div(DST, SRC);
 		CONT;
 	ALU_DIV_X:
-		if (unlikely(X == 0))
+		if (unlikely(SRC == 0))
 			return 0;
-		tmp = (u32) A;
-		do_div(tmp, (u32) X);
-		A = (u32) tmp;
+		tmp = (u32) DST;
+		do_div(tmp, (u32) SRC);
+		DST = (u32) tmp;
 		CONT;
 	ALU64_DIV_K:
-		do_div(A, K);
+		do_div(DST, IMM);
 		CONT;
 	ALU_DIV_K:
-		tmp = (u32) A;
-		do_div(tmp, (u32) K);
-		A = (u32) tmp;
+		tmp = (u32) DST;
+		do_div(tmp, (u32) IMM);
+		DST = (u32) tmp;
 		CONT;
 	ALU_END_TO_BE:
-		switch (K) {
+		switch (IMM) {
 		case 16:
-			A = (__force u16) cpu_to_be16(A);
+			DST = (__force u16) cpu_to_be16(DST);
 			break;
 		case 32:
-			A = (__force u32) cpu_to_be32(A);
+			DST = (__force u32) cpu_to_be32(DST);
 			break;
 		case 64:
-			A = (__force u64) cpu_to_be64(A);
+			DST = (__force u64) cpu_to_be64(DST);
 			break;
 		}
 		CONT;
 	ALU_END_TO_LE:
-		switch (K) {
+		switch (IMM) {
 		case 16:
-			A = (__force u16) cpu_to_le16(A);
+			DST = (__force u16) cpu_to_le16(DST);
 			break;
 		case 32:
-			A = (__force u32) cpu_to_le32(A);
+			DST = (__force u32) cpu_to_le32(DST);
 			break;
 		case 64:
-			A = (__force u64) cpu_to_le64(A);
+			DST = (__force u64) cpu_to_le64(DST);
 			break;
 		}
 		CONT;
@@ -401,85 +401,85 @@ select_insn:
 		insn += insn->off;
 		CONT;
 	JMP_JEQ_X:
-		if (A == X) {
+		if (DST == SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JEQ_K:
-		if (A == K) {
+		if (DST == IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JNE_X:
-		if (A != X) {
+		if (DST != SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JNE_K:
-		if (A != K) {
+		if (DST != IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JGT_X:
-		if (A > X) {
+		if (DST > SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JGT_K:
-		if (A > K) {
+		if (DST > IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JGE_X:
-		if (A >= X) {
+		if (DST >= SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JGE_K:
-		if (A >= K) {
+		if (DST >= IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSGT_X:
-		if (((s64) A) > ((s64) X)) {
+		if (((s64) DST) > ((s64) SRC)) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSGT_K:
-		if (((s64) A) > ((s64) K)) {
+		if (((s64) DST) > ((s64) IMM)) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSGE_X:
-		if (((s64) A) >= ((s64) X)) {
+		if (((s64) DST) >= ((s64) SRC)) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSGE_K:
-		if (((s64) A) >= ((s64) K)) {
+		if (((s64) DST) >= ((s64) IMM)) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSET_X:
-		if (A & X) {
+		if (DST & SRC) {
 			insn += insn->off;
 			CONT_JMP;
 		}
 		CONT;
 	JMP_JSET_K:
-		if (A & K) {
+		if (DST & IMM) {
 			insn += insn->off;
 			CONT_JMP;
 		}
@@ -488,15 +488,15 @@ select_insn:
 		return BPF_R0;
 
 	/* STX and ST and LDX*/
 #define LDST(SIZEOP, SIZE) \
 	STX_MEM_##SIZEOP: \
-		*(SIZE *)(unsigned long) (A + insn->off) = X; \
+		*(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
 		CONT; \
 	ST_MEM_##SIZEOP: \
-		*(SIZE *)(unsigned long) (A + insn->off) = K; \
+		*(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
 		CONT; \
 	LDX_MEM_##SIZEOP: \
-		A = *(SIZE *)(unsigned long) (X + insn->off); \
+		DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
 		CONT;
 
 	LDST(B, u8)
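For reference, LDST(W, u32) instantiates three handlers which, after the rename, read (expansion mine):

	STX_MEM_W:  *(u32 *)(unsigned long) (DST + insn->off) = SRC;  CONT;
	ST_MEM_W:   *(u32 *)(unsigned long) (DST + insn->off) = IMM;  CONT;
	LDX_MEM_W:  DST = *(u32 *)(unsigned long) (SRC + insn->off);  CONT;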
@@ -504,16 +504,16 @@ select_insn:
 	LDST(W, u32)
 	LDST(DW, u64)
 #undef LDST
-	STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
-		atomic_add((u32) X, (atomic_t *)(unsigned long)
-			   (A + insn->off));
+	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
+		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
+			   (DST + insn->off));
 		CONT;
-	STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
-		atomic64_add((u64) X, (atomic64_t *)(unsigned long)
-			     (A + insn->off));
+	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
+		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
+			     (DST + insn->off));
 		CONT;
-	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */
-		off = K;
+	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
+		off = IMM;
 load_word:
 	/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
 	 * only appearing in the programs where ctx ==
@@ -527,51 +527,51 @@ load_word:
 	 * BPF_R6-BPF_R9, and store return value into BPF_R0.
 	 *
 	 * Implicit input:
-	 *   ctx
+	 *   ctx == skb == BPF_R6 == CTX
 	 *
 	 * Explicit input:
-	 *   X == any register
-	 *   K == 32-bit immediate
+	 *   SRC == any register
+	 *   IMM == 32-bit immediate
 	 *
 	 * Output:
 	 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
 	 */
 
-	ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
+	ptr = load_pointer((struct sk_buff *) CTX, off, 4, &tmp);
 	if (likely(ptr != NULL)) {
 		BPF_R0 = get_unaligned_be32(ptr);
 		CONT;
 	}
 
 	return 0;
-	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
-		off = K;
+	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
+		off = IMM;
 load_half:
-	ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
+	ptr = load_pointer((struct sk_buff *) CTX, off, 2, &tmp);
 	if (likely(ptr != NULL)) {
 		BPF_R0 = get_unaligned_be16(ptr);
 		CONT;
 	}
 
 	return 0;
-	LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
-		off = K;
+	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
+		off = IMM;
 load_byte:
-	ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
+	ptr = load_pointer((struct sk_buff *) CTX, off, 1, &tmp);
 	if (likely(ptr != NULL)) {
 		BPF_R0 = *(u8 *)ptr;
 		CONT;
 	}
 
 	return 0;
-	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
-		off = K + X;
+	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
+		off = IMM + SRC;
 		goto load_word;
-	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */
-		off = K + X;
+	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
+		off = IMM + SRC;
 		goto load_half;
-	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */
-		off = K + X;
+	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
+		off = IMM + SRC;
 		goto load_byte;
 
 default_label:
@@ -675,7 +675,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_PROTOCOL:
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
-		/* A = *(u16 *) (ctx + offsetof(protocol)) */
+		/* A = *(u16 *) (CTX + offsetof(protocol)) */
 		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
 				      offsetof(struct sk_buff, protocol));
 		/* A = ntohs(A) [emitting a nop or swap16] */
@@ -741,7 +741,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
-		/* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+		/* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
 		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
 				      offsetof(struct sk_buff, vlan_tci));
 		if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
@@ -760,13 +760,13 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
 	case SKF_AD_OFF + SKF_AD_CPU:
 	case SKF_AD_OFF + SKF_AD_RANDOM:
-		/* arg1 = ctx */
+		/* arg1 = CTX */
 		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
 		/* arg2 = A */
 		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
 		/* arg3 = X */
 		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
-		/* Emit call(ctx, arg2=A, arg3=X) */
+		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
 		switch (fp->k) {
 		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
 			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
@@ -941,12 +941,12 @@ do_pass:
 				 */
 				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
 
-				insn->a_reg = BPF_REG_A;
-				insn->x_reg = BPF_REG_TMP;
+				insn->dst_reg = BPF_REG_A;
+				insn->src_reg = BPF_REG_TMP;
 				bpf_src = BPF_X;
 			} else {
-				insn->a_reg = BPF_REG_A;
-				insn->x_reg = BPF_REG_X;
+				insn->dst_reg = BPF_REG_A;
+				insn->src_reg = BPF_REG_X;
 				insn->imm = fp->k;
 				bpf_src = BPF_SRC(fp->code);
 			}
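For context, this branch converts a classic conditional jump: a comparison against X is staged through BPF_REG_TMP, while a comparison against the literal keeps fp->k. A classic 'jeq #0x800' (ETH_P_IP, value illustrative) takes the else path and ends up as:

	/* insn->dst_reg = BPF_REG_A;          A is always the left-hand side
	 * insn->src_reg = BPF_REG_X;          unused once bpf_src is BPF_K
	 * insn->imm     = fp->k;              here 0x0800
	 * bpf_src       = BPF_SRC(fp->code);  BPF_K for immediate compares
	 */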