author    Alexei Starovoitov <ast@plumgrid.com>  2014-06-06 17:46:06 -0400
committer David S. Miller <davem@davemloft.net>  2014-06-11 03:13:16 -0400
commit    e430f34ee5192c84bcabd3c79ab7e2388b5eec74 (patch)
tree      4b4086b0ecf0c4d67c4ae28d493b5987430da143 /arch/x86/net
parent    7b0dcbd879101e829755d1288c1b440ba1f59460 (diff)
net: filter: cleanup A/X name usage
The macro 'A' used in the internal BPF interpreter:

  #define A regs[insn->a_reg]

was easily confused with the name of classic BPF register 'A', since 'A'
would mean two different things depending on context.

This patch is trying to clean up the naming and clarify its usage in the
following way:

- A and X are names of two classic BPF registers

- BPF_REG_A denotes internal BPF register R0 used to map classic register A
  in internal BPF programs generated from classic

- BPF_REG_X denotes internal BPF register R7 used to map classic register X
  in internal BPF programs generated from classic

- internal BPF instruction format:

  struct sock_filter_int {
          __u8    code;           /* opcode */
          __u8    dst_reg:4;      /* dest register */
          __u8    src_reg:4;      /* source register */
          __s16   off;            /* signed offset */
          __s32   imm;            /* signed immediate constant */
  };

- BPF_X/BPF_K is 1 bit used to encode source operand of instruction

  In classic:
    BPF_X - means use register X as source operand
    BPF_K - means use 32-bit immediate as source operand

  In internal:
    BPF_X - means use 'src_reg' register as source operand
    BPF_K - means use 32-bit immediate as source operand

Suggested-by: Chema Gonzalez <chema@google.com>
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Acked-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Chema Gonzalez <chema@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
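To make the renamed fields concrete, here is a minimal sketch (not part of
the commit; it assumes the post-patch sock_filter_int, BPF_REG_A and
BPF_REG_X definitions from <linux/filter.h>) of roughly what the
classic-to-internal converter would produce for the classic instruction
'A += X':

  struct sock_filter_int insn = {
          .code    = BPF_ALU | BPF_ADD | BPF_X, /* BPF_X: source is src_reg */
          .dst_reg = BPF_REG_A,                 /* R0, mapping classic A */
          .src_reg = BPF_REG_X,                 /* R7, mapping classic X */
          .off     = 0,
          .imm     = 0,                         /* ignored when source is BPF_X */
  };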
Diffstat (limited to 'arch/x86/net')
-rw-r--r--  arch/x86/net/bpf_jit_comp.c  260
1 file changed, 130 insertions(+), 130 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 080f3f071bb0..99bef86ed6df 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -64,10 +64,10 @@ static inline bool is_simm32(s64 value)
 	return value == (s64) (s32) value;
 }
 
-/* mov A, X */
-#define EMIT_mov(A, X) \
-	do {if (A != X) \
-		EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \
+/* mov dst, src */
+#define EMIT_mov(DST, SRC) \
+	do {if (DST != SRC) \
+		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
 	} while (0)
 
 static int bpf_size_to_x86_bytes(int bpf_size)
@@ -194,16 +194,16 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
 	return byte;
 }
 
-/* encode dest register 'a_reg' into x64 opcode 'byte' */
-static inline u8 add_1reg(u8 byte, u32 a_reg)
+/* encode 'dst_reg' register into x64 opcode 'byte' */
+static inline u8 add_1reg(u8 byte, u32 dst_reg)
 {
-	return byte + reg2hex[a_reg];
+	return byte + reg2hex[dst_reg];
 }
 
-/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */
-static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg)
+/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
+static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 {
-	return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3);
+	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 }
 
 struct jit_context {
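As an aside on the encoding helpers above, a standalone sketch (not part of
the patch; the reg2hex values here are assumed copies of the table defined
earlier in bpf_jit_comp.c, where BPF_REG_1 lives in rdi, x86 register
number 7, and BPF_REG_2 in rsi, number 6) of the ModRM byte add_2reg()
computes for EMIT_mov(BPF_REG_1, BPF_REG_2):

  #include <stdio.h>

  int main(void)
  {
          unsigned int rdi = 7, rsi = 6;  /* reg2hex[BPF_REG_1], reg2hex[BPF_REG_2] */
          unsigned int modrm = 0xC0 + rdi + (rsi << 3);

          /* prints "48 89 f7", the encoding of 'mov rdi, rsi' */
          printf("48 89 %02x\n", modrm);
          return 0;
  }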
@@ -286,9 +286,9 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	}
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		const s32 K = insn->imm;
-		u32 a_reg = insn->a_reg;
-		u32 x_reg = insn->x_reg;
+		const s32 imm32 = insn->imm;
+		u32 dst_reg = insn->dst_reg;
+		u32 src_reg = insn->src_reg;
 		u8 b1 = 0, b2 = 0, b3 = 0;
 		s64 jmp_offset;
 		u8 jmp_cond;
@@ -315,32 +315,32 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		case BPF_XOR: b2 = 0x31; break;
 		}
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
-			EMIT1(add_2mod(0x48, a_reg, x_reg));
-		else if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT1(add_2mod(0x40, a_reg, x_reg));
-		EMIT2(b2, add_2reg(0xC0, a_reg, x_reg));
+			EMIT1(add_2mod(0x48, dst_reg, src_reg));
+		else if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT1(add_2mod(0x40, dst_reg, src_reg));
+		EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
 		break;
 
-		/* mov A, X */
+		/* mov dst, src */
 	case BPF_ALU64 | BPF_MOV | BPF_X:
-		EMIT_mov(a_reg, x_reg);
+		EMIT_mov(dst_reg, src_reg);
 		break;
 
-		/* mov32 A, X */
+		/* mov32 dst, src */
 	case BPF_ALU | BPF_MOV | BPF_X:
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT1(add_2mod(0x40, a_reg, x_reg));
-		EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg));
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT1(add_2mod(0x40, dst_reg, src_reg));
+		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
 		break;
 
-		/* neg A */
+		/* neg dst */
 	case BPF_ALU | BPF_NEG:
 	case BPF_ALU64 | BPF_NEG:
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
-			EMIT1(add_1mod(0x48, a_reg));
-		else if (is_ereg(a_reg))
-			EMIT1(add_1mod(0x40, a_reg));
-		EMIT2(0xF7, add_1reg(0xD8, a_reg));
+			EMIT1(add_1mod(0x48, dst_reg));
+		else if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
+		EMIT2(0xF7, add_1reg(0xD8, dst_reg));
 		break;
 
 	case BPF_ALU | BPF_ADD | BPF_K:
@@ -354,9 +354,9 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	case BPF_ALU64 | BPF_OR | BPF_K:
 	case BPF_ALU64 | BPF_XOR | BPF_K:
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
-			EMIT1(add_1mod(0x48, a_reg));
-		else if (is_ereg(a_reg))
-			EMIT1(add_1mod(0x40, a_reg));
+			EMIT1(add_1mod(0x48, dst_reg));
+		else if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
 
 		switch (BPF_OP(insn->code)) {
 		case BPF_ADD: b3 = 0xC0; break;
@@ -366,10 +366,10 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		case BPF_XOR: b3 = 0xF0; break;
 		}
 
-		if (is_imm8(K))
-			EMIT3(0x83, add_1reg(b3, a_reg), K);
+		if (is_imm8(imm32))
+			EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
 		else
-			EMIT2_off32(0x81, add_1reg(b3, a_reg), K);
+			EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
 		break;
 
 	case BPF_ALU64 | BPF_MOV | BPF_K:
@@ -377,23 +377,23 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		 * use 'mov eax, imm32' (which zero-extends imm32)
 		 * to save 2 bytes
 		 */
-		if (K < 0) {
+		if (imm32 < 0) {
 			/* 'mov rax, imm32' sign extends imm32 */
-			b1 = add_1mod(0x48, a_reg);
+			b1 = add_1mod(0x48, dst_reg);
 			b2 = 0xC7;
 			b3 = 0xC0;
-			EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K);
+			EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
 			break;
 		}
 
 	case BPF_ALU | BPF_MOV | BPF_K:
 		/* mov %eax, imm32 */
-		if (is_ereg(a_reg))
-			EMIT1(add_1mod(0x40, a_reg));
-		EMIT1_off32(add_1reg(0xB8, a_reg), K);
+		if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
+		EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
 		break;
 
-		/* A %= X, A /= X, A %= K, A /= K */
+		/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU | BPF_DIV | BPF_X:
 	case BPF_ALU | BPF_MOD | BPF_K:
@@ -406,14 +406,14 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT1(0x52); /* push rdx */
 
 		if (BPF_SRC(insn->code) == BPF_X)
-			/* mov r11, X */
-			EMIT_mov(AUX_REG, x_reg);
+			/* mov r11, src_reg */
+			EMIT_mov(AUX_REG, src_reg);
 		else
-			/* mov r11, K */
-			EMIT3_off32(0x49, 0xC7, 0xC3, K);
+			/* mov r11, imm32 */
+			EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
 
-		/* mov rax, A */
-		EMIT_mov(BPF_REG_0, a_reg);
+		/* mov rax, dst_reg */
+		EMIT_mov(BPF_REG_0, dst_reg);
 
 		/* xor edx, edx
 		 * equivalent to 'xor rdx, rdx', but one byte less
@@ -421,7 +421,7 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT2(0x31, 0xd2);
 
 		if (BPF_SRC(insn->code) == BPF_X) {
-			/* if (X == 0) return 0 */
+			/* if (src_reg == 0) return 0 */
 
 			/* cmp r11, 0 */
 			EMIT4(0x49, 0x83, 0xFB, 0x00);
@@ -457,8 +457,8 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT1(0x5A); /* pop rdx */
 		EMIT1(0x58); /* pop rax */
 
-		/* mov A, r11 */
-		EMIT_mov(a_reg, AUX_REG);
+		/* mov dst_reg, r11 */
+		EMIT_mov(dst_reg, AUX_REG);
 		break;
 
 	case BPF_ALU | BPF_MUL | BPF_K:
@@ -468,15 +468,15 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT1(0x50); /* push rax */
 		EMIT1(0x52); /* push rdx */
 
-		/* mov r11, A */
-		EMIT_mov(AUX_REG, a_reg);
+		/* mov r11, dst_reg */
+		EMIT_mov(AUX_REG, dst_reg);
 
 		if (BPF_SRC(insn->code) == BPF_X)
-			/* mov rax, X */
-			EMIT_mov(BPF_REG_0, x_reg);
+			/* mov rax, src_reg */
+			EMIT_mov(BPF_REG_0, src_reg);
 		else
-			/* mov rax, K */
-			EMIT3_off32(0x48, 0xC7, 0xC0, K);
+			/* mov rax, imm32 */
+			EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
 
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
 			EMIT1(add_1mod(0x48, AUX_REG));
@@ -491,8 +491,8 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		EMIT1(0x5A); /* pop rdx */
 		EMIT1(0x58); /* pop rax */
 
-		/* mov A, r11 */
-		EMIT_mov(a_reg, AUX_REG);
+		/* mov dst_reg, r11 */
+		EMIT_mov(dst_reg, AUX_REG);
 		break;
 
 		/* shifts */
@@ -503,39 +503,39 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	case BPF_ALU64 | BPF_RSH | BPF_K:
 	case BPF_ALU64 | BPF_ARSH | BPF_K:
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
-			EMIT1(add_1mod(0x48, a_reg));
-		else if (is_ereg(a_reg))
-			EMIT1(add_1mod(0x40, a_reg));
+			EMIT1(add_1mod(0x48, dst_reg));
+		else if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
 
 		switch (BPF_OP(insn->code)) {
 		case BPF_LSH: b3 = 0xE0; break;
 		case BPF_RSH: b3 = 0xE8; break;
 		case BPF_ARSH: b3 = 0xF8; break;
 		}
-		EMIT3(0xC1, add_1reg(b3, a_reg), K);
+		EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
 		break;
 
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
-		switch (K) {
+		switch (imm32) {
 		case 16:
 			/* emit 'ror %ax, 8' to swap lower 2 bytes */
 			EMIT1(0x66);
-			if (is_ereg(a_reg))
+			if (is_ereg(dst_reg))
 				EMIT1(0x41);
-			EMIT3(0xC1, add_1reg(0xC8, a_reg), 8);
+			EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
 			break;
 		case 32:
 			/* emit 'bswap eax' to swap lower 4 bytes */
-			if (is_ereg(a_reg))
+			if (is_ereg(dst_reg))
 				EMIT2(0x41, 0x0F);
 			else
 				EMIT1(0x0F);
-			EMIT1(add_1reg(0xC8, a_reg));
+			EMIT1(add_1reg(0xC8, dst_reg));
 			break;
 		case 64:
 			/* emit 'bswap rax' to swap 8 bytes */
-			EMIT3(add_1mod(0x48, a_reg), 0x0F,
-			      add_1reg(0xC8, a_reg));
+			EMIT3(add_1mod(0x48, dst_reg), 0x0F,
+			      add_1reg(0xC8, dst_reg));
 			break;
 		}
 		break;
@@ -543,117 +543,117 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	case BPF_ALU | BPF_END | BPF_FROM_LE:
 		break;
 
-		/* ST: *(u8*)(a_reg + off) = imm */
+		/* ST: *(u8*)(dst_reg + off) = imm */
 	case BPF_ST | BPF_MEM | BPF_B:
-		if (is_ereg(a_reg))
+		if (is_ereg(dst_reg))
 			EMIT2(0x41, 0xC6);
 		else
 			EMIT1(0xC6);
 		goto st;
 	case BPF_ST | BPF_MEM | BPF_H:
-		if (is_ereg(a_reg))
+		if (is_ereg(dst_reg))
 			EMIT3(0x66, 0x41, 0xC7);
 		else
 			EMIT2(0x66, 0xC7);
 		goto st;
 	case BPF_ST | BPF_MEM | BPF_W:
-		if (is_ereg(a_reg))
+		if (is_ereg(dst_reg))
 			EMIT2(0x41, 0xC7);
 		else
 			EMIT1(0xC7);
 		goto st;
 	case BPF_ST | BPF_MEM | BPF_DW:
-		EMIT2(add_1mod(0x48, a_reg), 0xC7);
+		EMIT2(add_1mod(0x48, dst_reg), 0xC7);
 
 st:		if (is_imm8(insn->off))
-			EMIT2(add_1reg(0x40, a_reg), insn->off);
+			EMIT2(add_1reg(0x40, dst_reg), insn->off);
 		else
-			EMIT1_off32(add_1reg(0x80, a_reg), insn->off);
+			EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
 
-		EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
+		EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
 		break;
 
-		/* STX: *(u8*)(a_reg + off) = x_reg */
+		/* STX: *(u8*)(dst_reg + off) = src_reg */
 	case BPF_STX | BPF_MEM | BPF_B:
 		/* emit 'mov byte ptr [rax + off], al' */
-		if (is_ereg(a_reg) || is_ereg(x_reg) ||
+		if (is_ereg(dst_reg) || is_ereg(src_reg) ||
 		    /* have to add extra byte for x86 SIL, DIL regs */
-		    x_reg == BPF_REG_1 || x_reg == BPF_REG_2)
-			EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88);
+		    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
 		else
 			EMIT1(0x88);
 		goto stx;
 	case BPF_STX | BPF_MEM | BPF_H:
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89);
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
 		else
 			EMIT2(0x66, 0x89);
 		goto stx;
 	case BPF_STX | BPF_MEM | BPF_W:
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89);
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
 		else
 			EMIT1(0x89);
 		goto stx;
 	case BPF_STX | BPF_MEM | BPF_DW:
-		EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89);
+		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
 stx:		if (is_imm8(insn->off))
-			EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+			EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
 		else
-			EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+			EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
 				    insn->off);
 		break;
 
-		/* LDX: a_reg = *(u8*)(x_reg + off) */
+		/* LDX: dst_reg = *(u8*)(src_reg + off) */
 	case BPF_LDX | BPF_MEM | BPF_B:
 		/* emit 'movzx rax, byte ptr [rax + off]' */
-		EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6);
+		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
 		goto ldx;
 	case BPF_LDX | BPF_MEM | BPF_H:
 		/* emit 'movzx rax, word ptr [rax + off]' */
-		EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7);
+		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
 		goto ldx;
 	case BPF_LDX | BPF_MEM | BPF_W:
 		/* emit 'mov eax, dword ptr [rax+0x14]' */
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B);
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
 		else
 			EMIT1(0x8B);
 		goto ldx;
 	case BPF_LDX | BPF_MEM | BPF_DW:
 		/* emit 'mov rax, qword ptr [rax+0x14]' */
-		EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B);
+		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:		/* if insn->off == 0 we can save one extra byte, but
 		 * special case of x86 r13 which always needs an offset
 		 * is not worth the hassle
 		 */
 		if (is_imm8(insn->off))
-			EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off);
+			EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
 		else
-			EMIT1_off32(add_2reg(0x80, x_reg, a_reg),
+			EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
 				    insn->off);
 		break;
 
-		/* STX XADD: lock *(u32*)(a_reg + off) += x_reg */
+		/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
 	case BPF_STX | BPF_XADD | BPF_W:
 		/* emit 'lock add dword ptr [rax + off], eax' */
-		if (is_ereg(a_reg) || is_ereg(x_reg))
-			EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01);
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
 		else
 			EMIT2(0xF0, 0x01);
 		goto xadd;
 	case BPF_STX | BPF_XADD | BPF_DW:
-		EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01);
+		EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:		if (is_imm8(insn->off))
-			EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+			EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
 		else
-			EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+			EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
 				    insn->off);
 		break;
 
 		/* call */
 	case BPF_JMP | BPF_CALL:
-		func = (u8 *) __bpf_call_base + K;
+		func = (u8 *) __bpf_call_base + imm32;
 		jmp_offset = func - (image + addrs[i]);
 		if (ctx->seen_ld_abs) {
 			EMIT2(0x41, 0x52); /* push %r10 */
@@ -663,9 +663,9 @@ xadd:		if (is_imm8(insn->off))
 			 */
 			jmp_offset += 4;
 		}
-		if (!K || !is_simm32(jmp_offset)) {
+		if (!imm32 || !is_simm32(jmp_offset)) {
 			pr_err("unsupported bpf func %d addr %p image %p\n",
-			       K, func, image);
+			       imm32, func, image);
 			return -EINVAL;
 		}
 		EMIT1_off32(0xE8, jmp_offset);
@@ -682,21 +682,21 @@ xadd:		if (is_imm8(insn->off))
 	case BPF_JMP | BPF_JGE | BPF_X:
 	case BPF_JMP | BPF_JSGT | BPF_X:
 	case BPF_JMP | BPF_JSGE | BPF_X:
-		/* cmp a_reg, x_reg */
-		EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39,
-		      add_2reg(0xC0, a_reg, x_reg));
+		/* cmp dst_reg, src_reg */
+		EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
+		      add_2reg(0xC0, dst_reg, src_reg));
 		goto emit_cond_jmp;
 
 	case BPF_JMP | BPF_JSET | BPF_X:
-		/* test a_reg, x_reg */
-		EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85,
-		      add_2reg(0xC0, a_reg, x_reg));
+		/* test dst_reg, src_reg */
+		EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
+		      add_2reg(0xC0, dst_reg, src_reg));
 		goto emit_cond_jmp;
 
 	case BPF_JMP | BPF_JSET | BPF_K:
-		/* test a_reg, imm32 */
-		EMIT1(add_1mod(0x48, a_reg));
-		EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K);
+		/* test dst_reg, imm32 */
+		EMIT1(add_1mod(0x48, dst_reg));
+		EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
 		goto emit_cond_jmp;
 
 	case BPF_JMP | BPF_JEQ | BPF_K:
@@ -705,13 +705,13 @@ xadd:		if (is_imm8(insn->off))
 	case BPF_JMP | BPF_JGE | BPF_K:
 	case BPF_JMP | BPF_JSGT | BPF_K:
 	case BPF_JMP | BPF_JSGE | BPF_K:
-		/* cmp a_reg, imm8/32 */
-		EMIT1(add_1mod(0x48, a_reg));
+		/* cmp dst_reg, imm8/32 */
+		EMIT1(add_1mod(0x48, dst_reg));
 
-		if (is_imm8(K))
-			EMIT3(0x83, add_1reg(0xF8, a_reg), K);
+		if (is_imm8(imm32))
+			EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
 		else
-			EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K);
+			EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
 
emit_cond_jmp:	/* convert BPF opcode to x86 */
 	switch (BPF_OP(insn->code)) {
@@ -773,27 +773,27 @@ emit_jmp:
 		func = sk_load_word;
 		goto common_load;
 	case BPF_LD | BPF_ABS | BPF_W:
-		func = CHOOSE_LOAD_FUNC(K, sk_load_word);
+		func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:	ctx->seen_ld_abs = true;
 		jmp_offset = func - (image + addrs[i]);
 		if (!func || !is_simm32(jmp_offset)) {
 			pr_err("unsupported bpf func %d addr %p image %p\n",
-			       K, func, image);
+			       imm32, func, image);
 			return -EINVAL;
 		}
 		if (BPF_MODE(insn->code) == BPF_ABS) {
 			/* mov %esi, imm32 */
-			EMIT1_off32(0xBE, K);
+			EMIT1_off32(0xBE, imm32);
 		} else {
-			/* mov %rsi, x_reg */
-			EMIT_mov(BPF_REG_2, x_reg);
-			if (K) {
-				if (is_imm8(K))
+			/* mov %rsi, src_reg */
+			EMIT_mov(BPF_REG_2, src_reg);
+			if (imm32) {
+				if (is_imm8(imm32))
 					/* add %esi, imm8 */
-					EMIT3(0x83, 0xC6, K);
+					EMIT3(0x83, 0xC6, imm32);
 				else
 					/* add %esi, imm32 */
-					EMIT2_off32(0x81, 0xC6, K);
+					EMIT2_off32(0x81, 0xC6, imm32);
 			}
 		}
 		/* skb pointer is in R6 (%rbx), it will be copied into
@@ -808,13 +808,13 @@ common_load:	ctx->seen_ld_abs = true;
 		func = sk_load_half;
 		goto common_load;
 	case BPF_LD | BPF_ABS | BPF_H:
-		func = CHOOSE_LOAD_FUNC(K, sk_load_half);
+		func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
 		goto common_load;
 	case BPF_LD | BPF_IND | BPF_B:
 		func = sk_load_byte;
 		goto common_load;
 	case BPF_LD | BPF_ABS | BPF_B:
-		func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
+		func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
 		goto common_load;
 
 	case BPF_JMP | BPF_EXIT: