diff options
author | Josef Bacik <jbacik@fb.com> | 2016-09-28 10:54:32 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-09-29 01:35:35 -0400 |
commit | 484611357c19f9e19ef742ebef4505a07d243cc9 (patch) | |
tree | 34f14c2b2ac71d0bf0a53cab096960e7c91ae87f | |
parent | 7836667cec5e02ed2ae3eb09b88047b5b5f2343a (diff) |
bpf: allow access into map value arrays
Suppose you have a map array value that is something like this
struct foo {
unsigned iter;
int array[SOME_CONSTANT];
};
You can easily insert this into an array, but you cannot modify the contents of
foo->array[] after the fact. This is because we have no way to verify we won't
go off the end of the array at verification time. This patch provides a start
for this work. We accomplish this by keeping track of a minimum and maximum
value a register could be while we're checking the code. Then at the time we
try to do an access into a MAP_VALUE we verify that the maximum offset into that
region is a valid access into that memory region. So in practice, code such as
this
unsigned index = 0;
if (foo->iter >= SOME_CONSTANT)
foo->iter = index;
else
index = foo->iter++;
foo->array[index] = bar;
would be allowed, as we can verify that index will always be between 0 and
SOME_CONSTANT-1. If you wish to use signed values you'll need an extra
check to make sure the index isn't less than 0, or do something like index %=
SOME_CONSTANT.
Signed-off-by: Josef Bacik <jbacik@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/linux/bpf.h | 7 | ||||
-rw-r--r-- | include/linux/bpf_verifier.h | 12 | ||||
-rw-r--r-- | kernel/bpf/verifier.c | 329 | ||||
-rw-r--r-- | samples/bpf/libbpf.h | 8 | ||||
-rw-r--r-- | samples/bpf/test_verifier.c | 243 |
5 files changed, 577 insertions, 22 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5691fdc83819..c201017b5730 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -139,6 +139,13 @@ enum bpf_reg_type { | |||
139 | */ | 139 | */ |
140 | PTR_TO_PACKET, | 140 | PTR_TO_PACKET, |
141 | PTR_TO_PACKET_END, /* skb->data + headlen */ | 141 | PTR_TO_PACKET_END, /* skb->data + headlen */ |
142 | |||
143 | /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map | ||
144 | * elem value. We only allow this if we can statically verify that | ||
145 | * access from this register are going to fall within the size of the | ||
146 | * map element. | ||
147 | */ | ||
148 | PTR_TO_MAP_VALUE_ADJ, | ||
142 | }; | 149 | }; |
143 | 150 | ||
144 | struct bpf_prog; | 151 | struct bpf_prog; |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c5cb661712c9..7035b997aaa5 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
@@ -10,8 +10,19 @@ | |||
10 | #include <linux/bpf.h> /* for enum bpf_reg_type */ | 10 | #include <linux/bpf.h> /* for enum bpf_reg_type */ |
11 | #include <linux/filter.h> /* for MAX_BPF_STACK */ | 11 | #include <linux/filter.h> /* for MAX_BPF_STACK */ |
12 | 12 | ||
13 | /* Just some arbitrary values so we can safely do math without overflowing and | ||
14 | * are obviously wrong for any sort of memory access. | ||
15 | */ | ||
16 | #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) | ||
17 | #define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) | ||
18 | |||
13 | struct bpf_reg_state { | 19 | struct bpf_reg_state { |
14 | enum bpf_reg_type type; | 20 | enum bpf_reg_type type; |
21 | /* | ||
22 | * Used to determine if any memory access using this register will | ||
23 | * result in a bad access. | ||
24 | */ | ||
25 | u64 min_value, max_value; | ||
15 | union { | 26 | union { |
16 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ | 27 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ |
17 | s64 imm; | 28 | s64 imm; |
@@ -81,6 +92,7 @@ struct bpf_verifier_env { | |||
81 | u32 id_gen; /* used to generate unique reg IDs */ | 92 | u32 id_gen; /* used to generate unique reg IDs */ |
82 | bool allow_ptr_leaks; | 93 | bool allow_ptr_leaks; |
83 | bool seen_direct_write; | 94 | bool seen_direct_write; |
95 | bool varlen_map_value_access; | ||
84 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ | 96 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ |
85 | }; | 97 | }; |
86 | 98 | ||
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7ada3152a556..99a7e5b388f2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -182,6 +182,7 @@ static const char * const reg_type_str[] = { | |||
182 | [CONST_PTR_TO_MAP] = "map_ptr", | 182 | [CONST_PTR_TO_MAP] = "map_ptr", |
183 | [PTR_TO_MAP_VALUE] = "map_value", | 183 | [PTR_TO_MAP_VALUE] = "map_value", |
184 | [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", | 184 | [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", |
185 | [PTR_TO_MAP_VALUE_ADJ] = "map_value_adj", | ||
185 | [FRAME_PTR] = "fp", | 186 | [FRAME_PTR] = "fp", |
186 | [PTR_TO_STACK] = "fp", | 187 | [PTR_TO_STACK] = "fp", |
187 | [CONST_IMM] = "imm", | 188 | [CONST_IMM] = "imm", |
@@ -209,10 +210,17 @@ static void print_verifier_state(struct bpf_verifier_state *state) | |||
209 | else if (t == UNKNOWN_VALUE && reg->imm) | 210 | else if (t == UNKNOWN_VALUE && reg->imm) |
210 | verbose("%lld", reg->imm); | 211 | verbose("%lld", reg->imm); |
211 | else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || | 212 | else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || |
212 | t == PTR_TO_MAP_VALUE_OR_NULL) | 213 | t == PTR_TO_MAP_VALUE_OR_NULL || |
214 | t == PTR_TO_MAP_VALUE_ADJ) | ||
213 | verbose("(ks=%d,vs=%d)", | 215 | verbose("(ks=%d,vs=%d)", |
214 | reg->map_ptr->key_size, | 216 | reg->map_ptr->key_size, |
215 | reg->map_ptr->value_size); | 217 | reg->map_ptr->value_size); |
218 | if (reg->min_value != BPF_REGISTER_MIN_RANGE) | ||
219 | verbose(",min_value=%llu", | ||
220 | (unsigned long long)reg->min_value); | ||
221 | if (reg->max_value != BPF_REGISTER_MAX_RANGE) | ||
222 | verbose(",max_value=%llu", | ||
223 | (unsigned long long)reg->max_value); | ||
216 | } | 224 | } |
217 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { | 225 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { |
218 | if (state->stack_slot_type[i] == STACK_SPILL) | 226 | if (state->stack_slot_type[i] == STACK_SPILL) |
@@ -424,6 +432,8 @@ static void init_reg_state(struct bpf_reg_state *regs) | |||
424 | for (i = 0; i < MAX_BPF_REG; i++) { | 432 | for (i = 0; i < MAX_BPF_REG; i++) { |
425 | regs[i].type = NOT_INIT; | 433 | regs[i].type = NOT_INIT; |
426 | regs[i].imm = 0; | 434 | regs[i].imm = 0; |
435 | regs[i].min_value = BPF_REGISTER_MIN_RANGE; | ||
436 | regs[i].max_value = BPF_REGISTER_MAX_RANGE; | ||
427 | } | 437 | } |
428 | 438 | ||
429 | /* frame pointer */ | 439 | /* frame pointer */ |
@@ -440,6 +450,12 @@ static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) | |||
440 | regs[regno].imm = 0; | 450 | regs[regno].imm = 0; |
441 | } | 451 | } |
442 | 452 | ||
453 | static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) | ||
454 | { | ||
455 | regs[regno].min_value = BPF_REGISTER_MIN_RANGE; | ||
456 | regs[regno].max_value = BPF_REGISTER_MAX_RANGE; | ||
457 | } | ||
458 | |||
443 | enum reg_arg_type { | 459 | enum reg_arg_type { |
444 | SRC_OP, /* register is used as source operand */ | 460 | SRC_OP, /* register is used as source operand */ |
445 | DST_OP, /* register is used as destination operand */ | 461 | DST_OP, /* register is used as destination operand */ |
@@ -665,7 +681,7 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno) | |||
665 | static int check_ptr_alignment(struct bpf_verifier_env *env, | 681 | static int check_ptr_alignment(struct bpf_verifier_env *env, |
666 | struct bpf_reg_state *reg, int off, int size) | 682 | struct bpf_reg_state *reg, int off, int size) |
667 | { | 683 | { |
668 | if (reg->type != PTR_TO_PACKET) { | 684 | if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) { |
669 | if (off % size != 0) { | 685 | if (off % size != 0) { |
670 | verbose("misaligned access off %d size %d\n", | 686 | verbose("misaligned access off %d size %d\n", |
671 | off, size); | 687 | off, size); |
@@ -675,16 +691,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, | |||
675 | } | 691 | } |
676 | } | 692 | } |
677 | 693 | ||
678 | switch (env->prog->type) { | ||
679 | case BPF_PROG_TYPE_SCHED_CLS: | ||
680 | case BPF_PROG_TYPE_SCHED_ACT: | ||
681 | case BPF_PROG_TYPE_XDP: | ||
682 | break; | ||
683 | default: | ||
684 | verbose("verifier is misconfigured\n"); | ||
685 | return -EACCES; | ||
686 | } | ||
687 | |||
688 | if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) | 694 | if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) |
689 | /* misaligned access to packet is ok on x86,arm,arm64 */ | 695 | /* misaligned access to packet is ok on x86,arm,arm64 */ |
690 | return 0; | 696 | return 0; |
@@ -695,7 +701,8 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, | |||
695 | } | 701 | } |
696 | 702 | ||
697 | /* skb->data is NET_IP_ALIGN-ed */ | 703 | /* skb->data is NET_IP_ALIGN-ed */ |
698 | if ((NET_IP_ALIGN + reg->off + off) % size != 0) { | 704 | if (reg->type == PTR_TO_PACKET && |
705 | (NET_IP_ALIGN + reg->off + off) % size != 0) { | ||
699 | verbose("misaligned packet access off %d+%d+%d size %d\n", | 706 | verbose("misaligned packet access off %d+%d+%d size %d\n", |
700 | NET_IP_ALIGN, reg->off, off, size); | 707 | NET_IP_ALIGN, reg->off, off, size); |
701 | return -EACCES; | 708 | return -EACCES; |
@@ -728,12 +735,52 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, | |||
728 | if (err) | 735 | if (err) |
729 | return err; | 736 | return err; |
730 | 737 | ||
731 | if (reg->type == PTR_TO_MAP_VALUE) { | 738 | if (reg->type == PTR_TO_MAP_VALUE || |
739 | reg->type == PTR_TO_MAP_VALUE_ADJ) { | ||
732 | if (t == BPF_WRITE && value_regno >= 0 && | 740 | if (t == BPF_WRITE && value_regno >= 0 && |
733 | is_pointer_value(env, value_regno)) { | 741 | is_pointer_value(env, value_regno)) { |
734 | verbose("R%d leaks addr into map\n", value_regno); | 742 | verbose("R%d leaks addr into map\n", value_regno); |
735 | return -EACCES; | 743 | return -EACCES; |
736 | } | 744 | } |
745 | |||
746 | /* If we adjusted the register to this map value at all then we | ||
747 | * need to change off and size to min_value and max_value | ||
748 | * respectively to make sure our theoretical access will be | ||
749 | * safe. | ||
750 | */ | ||
751 | if (reg->type == PTR_TO_MAP_VALUE_ADJ) { | ||
752 | if (log_level) | ||
753 | print_verifier_state(state); | ||
754 | env->varlen_map_value_access = true; | ||
755 | /* The minimum value is only important with signed | ||
756 | * comparisons where we can't assume the floor of a | ||
757 | * value is 0. If we are using signed variables for our | ||
758 | * indexes we need to make sure that whatever we use | ||
759 | * will have a set floor within our range. | ||
760 | */ | ||
761 | if ((s64)reg->min_value < 0) { | ||
762 | verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", | ||
763 | regno); | ||
764 | return -EACCES; | ||
765 | } | ||
766 | err = check_map_access(env, regno, reg->min_value + off, | ||
767 | size); | ||
768 | if (err) { | ||
769 | verbose("R%d min value is outside of the array range\n", | ||
770 | regno); | ||
771 | return err; | ||
772 | } | ||
773 | |||
774 | /* If we haven't set a max value then we need to bail | ||
775 | * since we can't be sure we won't do bad things. | ||
776 | */ | ||
777 | if (reg->max_value == BPF_REGISTER_MAX_RANGE) { | ||
778 | verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n", | ||
779 | regno); | ||
780 | return -EACCES; | ||
781 | } | ||
782 | off += reg->max_value; | ||
783 | } | ||
737 | err = check_map_access(env, regno, off, size); | 784 | err = check_map_access(env, regno, off, size); |
738 | if (!err && t == BPF_READ && value_regno >= 0) | 785 | if (!err && t == BPF_READ && value_regno >= 0) |
739 | mark_reg_unknown_value(state->regs, value_regno); | 786 | mark_reg_unknown_value(state->regs, value_regno); |
@@ -1195,6 +1242,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id) | |||
1195 | regs[BPF_REG_0].type = NOT_INIT; | 1242 | regs[BPF_REG_0].type = NOT_INIT; |
1196 | } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { | 1243 | } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { |
1197 | regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; | 1244 | regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; |
1245 | regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0; | ||
1198 | /* remember map_ptr, so that check_map_access() | 1246 | /* remember map_ptr, so that check_map_access() |
1199 | * can check 'value_size' boundary of memory access | 1247 | * can check 'value_size' boundary of memory access |
1200 | * to map element returned from bpf_map_lookup_elem() | 1248 | * to map element returned from bpf_map_lookup_elem() |
@@ -1416,6 +1464,106 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, | |||
1416 | return 0; | 1464 | return 0; |
1417 | } | 1465 | } |
1418 | 1466 | ||
1467 | static void check_reg_overflow(struct bpf_reg_state *reg) | ||
1468 | { | ||
1469 | if (reg->max_value > BPF_REGISTER_MAX_RANGE) | ||
1470 | reg->max_value = BPF_REGISTER_MAX_RANGE; | ||
1471 | if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE) | ||
1472 | reg->min_value = BPF_REGISTER_MIN_RANGE; | ||
1473 | } | ||
1474 | |||
1475 | static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | ||
1476 | struct bpf_insn *insn) | ||
1477 | { | ||
1478 | struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; | ||
1479 | u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE; | ||
1480 | bool min_set = false, max_set = false; | ||
1481 | u8 opcode = BPF_OP(insn->code); | ||
1482 | |||
1483 | dst_reg = ®s[insn->dst_reg]; | ||
1484 | if (BPF_SRC(insn->code) == BPF_X) { | ||
1485 | check_reg_overflow(®s[insn->src_reg]); | ||
1486 | min_val = regs[insn->src_reg].min_value; | ||
1487 | max_val = regs[insn->src_reg].max_value; | ||
1488 | |||
1489 | /* If the source register is a random pointer then the | ||
1490 | * min_value/max_value values represent the range of the known | ||
1491 | * accesses into that value, not the actual min/max value of the | ||
1492 | * register itself. In this case we have to reset the reg range | ||
1493 | * values so we know it is not safe to look at. | ||
1494 | */ | ||
1495 | if (regs[insn->src_reg].type != CONST_IMM && | ||
1496 | regs[insn->src_reg].type != UNKNOWN_VALUE) { | ||
1497 | min_val = BPF_REGISTER_MIN_RANGE; | ||
1498 | max_val = BPF_REGISTER_MAX_RANGE; | ||
1499 | } | ||
1500 | } else if (insn->imm < BPF_REGISTER_MAX_RANGE && | ||
1501 | (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { | ||
1502 | min_val = max_val = insn->imm; | ||
1503 | min_set = max_set = true; | ||
1504 | } | ||
1505 | |||
1506 | /* We don't know anything about what was done to this register, mark it | ||
1507 | * as unknown. | ||
1508 | */ | ||
1509 | if (min_val == BPF_REGISTER_MIN_RANGE && | ||
1510 | max_val == BPF_REGISTER_MAX_RANGE) { | ||
1511 | reset_reg_range_values(regs, insn->dst_reg); | ||
1512 | return; | ||
1513 | } | ||
1514 | |||
1515 | switch (opcode) { | ||
1516 | case BPF_ADD: | ||
1517 | dst_reg->min_value += min_val; | ||
1518 | dst_reg->max_value += max_val; | ||
1519 | break; | ||
1520 | case BPF_SUB: | ||
1521 | dst_reg->min_value -= min_val; | ||
1522 | dst_reg->max_value -= max_val; | ||
1523 | break; | ||
1524 | case BPF_MUL: | ||
1525 | dst_reg->min_value *= min_val; | ||
1526 | dst_reg->max_value *= max_val; | ||
1527 | break; | ||
1528 | case BPF_AND: | ||
1529 | /* & is special since it could end up with 0 bits set. */ | ||
1530 | dst_reg->min_value &= min_val; | ||
1531 | dst_reg->max_value = max_val; | ||
1532 | break; | ||
1533 | case BPF_LSH: | ||
1534 | /* Gotta have special overflow logic here, if we're shifting | ||
1535 | * more than MAX_RANGE then just assume we have an invalid | ||
1536 | * range. | ||
1537 | */ | ||
1538 | if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) | ||
1539 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; | ||
1540 | else | ||
1541 | dst_reg->min_value <<= min_val; | ||
1542 | |||
1543 | if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) | ||
1544 | dst_reg->max_value = BPF_REGISTER_MAX_RANGE; | ||
1545 | else | ||
1546 | dst_reg->max_value <<= max_val; | ||
1547 | break; | ||
1548 | case BPF_RSH: | ||
1549 | dst_reg->min_value >>= min_val; | ||
1550 | dst_reg->max_value >>= max_val; | ||
1551 | break; | ||
1552 | case BPF_MOD: | ||
1553 | /* % is special since it is an unsigned modulus, so the floor | ||
1554 | * will always be 0. | ||
1555 | */ | ||
1556 | dst_reg->min_value = 0; | ||
1557 | dst_reg->max_value = max_val - 1; | ||
1558 | break; | ||
1559 | default: | ||
1560 | reset_reg_range_values(regs, insn->dst_reg); | ||
1561 | break; | ||
1562 | } | ||
1563 | |||
1564 | check_reg_overflow(dst_reg); | ||
1565 | } | ||
1566 | |||
1419 | /* check validity of 32-bit and 64-bit arithmetic operations */ | 1567 | /* check validity of 32-bit and 64-bit arithmetic operations */ |
1420 | static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | 1568 | static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) |
1421 | { | 1569 | { |
@@ -1479,6 +1627,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
1479 | if (err) | 1627 | if (err) |
1480 | return err; | 1628 | return err; |
1481 | 1629 | ||
1630 | /* we are setting our register to something new, we need to | ||
1631 | * reset its range values. | ||
1632 | */ | ||
1633 | reset_reg_range_values(regs, insn->dst_reg); | ||
1634 | |||
1482 | if (BPF_SRC(insn->code) == BPF_X) { | 1635 | if (BPF_SRC(insn->code) == BPF_X) { |
1483 | if (BPF_CLASS(insn->code) == BPF_ALU64) { | 1636 | if (BPF_CLASS(insn->code) == BPF_ALU64) { |
1484 | /* case: R1 = R2 | 1637 | /* case: R1 = R2 |
@@ -1500,6 +1653,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
1500 | */ | 1653 | */ |
1501 | regs[insn->dst_reg].type = CONST_IMM; | 1654 | regs[insn->dst_reg].type = CONST_IMM; |
1502 | regs[insn->dst_reg].imm = insn->imm; | 1655 | regs[insn->dst_reg].imm = insn->imm; |
1656 | regs[insn->dst_reg].max_value = insn->imm; | ||
1657 | regs[insn->dst_reg].min_value = insn->imm; | ||
1503 | } | 1658 | } |
1504 | 1659 | ||
1505 | } else if (opcode > BPF_END) { | 1660 | } else if (opcode > BPF_END) { |
@@ -1552,6 +1707,9 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
1552 | 1707 | ||
1553 | dst_reg = ®s[insn->dst_reg]; | 1708 | dst_reg = ®s[insn->dst_reg]; |
1554 | 1709 | ||
1710 | /* first we want to adjust our ranges. */ | ||
1711 | adjust_reg_min_max_vals(env, insn); | ||
1712 | |||
1555 | /* pattern match 'bpf_add Rx, imm' instruction */ | 1713 | /* pattern match 'bpf_add Rx, imm' instruction */ |
1556 | if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && | 1714 | if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && |
1557 | dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) { | 1715 | dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) { |
@@ -1586,8 +1744,17 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
1586 | return -EACCES; | 1744 | return -EACCES; |
1587 | } | 1745 | } |
1588 | 1746 | ||
1589 | /* mark dest operand */ | 1747 | /* If we did pointer math on a map value then just set it to our |
1590 | mark_reg_unknown_value(regs, insn->dst_reg); | 1748 | * PTR_TO_MAP_VALUE_ADJ type so we can deal with any stores or |
1749 | * loads to this register appropriately, otherwise just mark the | ||
1750 | * register as unknown. | ||
1751 | */ | ||
1752 | if (env->allow_ptr_leaks && | ||
1753 | (dst_reg->type == PTR_TO_MAP_VALUE || | ||
1754 | dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) | ||
1755 | dst_reg->type = PTR_TO_MAP_VALUE_ADJ; | ||
1756 | else | ||
1757 | mark_reg_unknown_value(regs, insn->dst_reg); | ||
1591 | } | 1758 | } |
1592 | 1759 | ||
1593 | return 0; | 1760 | return 0; |
@@ -1642,6 +1809,104 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, | |||
1642 | } | 1809 | } |
1643 | } | 1810 | } |
1644 | 1811 | ||
1812 | /* Adjusts the register min/max values in the case that the dst_reg is the | ||
1813 | * variable register that we are working on, and src_reg is a constant or we're | ||
1814 | * simply doing a BPF_K check. | ||
1815 | */ | ||
1816 | static void reg_set_min_max(struct bpf_reg_state *true_reg, | ||
1817 | struct bpf_reg_state *false_reg, u64 val, | ||
1818 | u8 opcode) | ||
1819 | { | ||
1820 | switch (opcode) { | ||
1821 | case BPF_JEQ: | ||
1822 | /* If this is false then we know nothing Jon Snow, but if it is | ||
1823 | * true then we know for sure. | ||
1824 | */ | ||
1825 | true_reg->max_value = true_reg->min_value = val; | ||
1826 | break; | ||
1827 | case BPF_JNE: | ||
1828 | /* If this is true we know nothing Jon Snow, but if it is false | ||
1829 | * we know the value for sure; | ||
1830 | */ | ||
1831 | false_reg->max_value = false_reg->min_value = val; | ||
1832 | break; | ||
1833 | case BPF_JGT: | ||
1834 | /* Unsigned comparison, the minimum value is 0. */ | ||
1835 | false_reg->min_value = 0; | ||
1836 | case BPF_JSGT: | ||
1837 | /* If this is false then we know the maximum val is val, | ||
1838 | * otherwise we know the min val is val+1. | ||
1839 | */ | ||
1840 | false_reg->max_value = val; | ||
1841 | true_reg->min_value = val + 1; | ||
1842 | break; | ||
1843 | case BPF_JGE: | ||
1844 | /* Unsigned comparison, the minimum value is 0. */ | ||
1845 | false_reg->min_value = 0; | ||
1846 | case BPF_JSGE: | ||
1847 | /* If this is false then we know the maximum value is val - 1, | ||
1848 | * otherwise we know the minimum value is val. | ||
1849 | */ | ||
1850 | false_reg->max_value = val - 1; | ||
1851 | true_reg->min_value = val; | ||
1852 | break; | ||
1853 | default: | ||
1854 | break; | ||
1855 | } | ||
1856 | |||
1857 | check_reg_overflow(false_reg); | ||
1858 | check_reg_overflow(true_reg); | ||
1859 | } | ||
1860 | |||
1861 | /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg | ||
1862 | * is the variable reg. | ||
1863 | */ | ||
1864 | static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, | ||
1865 | struct bpf_reg_state *false_reg, u64 val, | ||
1866 | u8 opcode) | ||
1867 | { | ||
1868 | switch (opcode) { | ||
1869 | case BPF_JEQ: | ||
1870 | /* If this is false then we know nothing Jon Snow, but if it is | ||
1871 | * true then we know for sure. | ||
1872 | */ | ||
1873 | true_reg->max_value = true_reg->min_value = val; | ||
1874 | break; | ||
1875 | case BPF_JNE: | ||
1876 | /* If this is true we know nothing Jon Snow, but if it is false | ||
1877 | * we know the value for sure; | ||
1878 | */ | ||
1879 | false_reg->max_value = false_reg->min_value = val; | ||
1880 | break; | ||
1881 | case BPF_JGT: | ||
1882 | /* Unsigned comparison, the minimum value is 0. */ | ||
1883 | true_reg->min_value = 0; | ||
1884 | case BPF_JSGT: | ||
1885 | /* | ||
1886 | * If this is false, then the val is <= the register, if it is | ||
1887 | * true the register <= to the val. | ||
1888 | */ | ||
1889 | false_reg->min_value = val; | ||
1890 | true_reg->max_value = val - 1; | ||
1891 | break; | ||
1892 | case BPF_JGE: | ||
1893 | /* Unsigned comparison, the minimum value is 0. */ | ||
1894 | true_reg->min_value = 0; | ||
1895 | case BPF_JSGE: | ||
1896 | /* If this is false then constant < register, if it is true then | ||
1897 | * the register < constant. | ||
1898 | */ | ||
1899 | false_reg->min_value = val + 1; | ||
1900 | true_reg->max_value = val; | ||
1901 | break; | ||
1902 | default: | ||
1903 | break; | ||
1904 | } | ||
1905 | |||
1906 | check_reg_overflow(false_reg); | ||
1907 | check_reg_overflow(true_reg); | ||
1908 | } | ||
1909 | |||
1645 | static int check_cond_jmp_op(struct bpf_verifier_env *env, | 1910 | static int check_cond_jmp_op(struct bpf_verifier_env *env, |
1646 | struct bpf_insn *insn, int *insn_idx) | 1911 | struct bpf_insn *insn, int *insn_idx) |
1647 | { | 1912 | { |
@@ -1708,6 +1973,23 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, | |||
1708 | if (!other_branch) | 1973 | if (!other_branch) |
1709 | return -EFAULT; | 1974 | return -EFAULT; |
1710 | 1975 | ||
1976 | /* detect if we are comparing against a constant value so we can adjust | ||
1977 | * our min/max values for our dst register. | ||
1978 | */ | ||
1979 | if (BPF_SRC(insn->code) == BPF_X) { | ||
1980 | if (regs[insn->src_reg].type == CONST_IMM) | ||
1981 | reg_set_min_max(&other_branch->regs[insn->dst_reg], | ||
1982 | dst_reg, regs[insn->src_reg].imm, | ||
1983 | opcode); | ||
1984 | else if (dst_reg->type == CONST_IMM) | ||
1985 | reg_set_min_max_inv(&other_branch->regs[insn->src_reg], | ||
1986 | ®s[insn->src_reg], dst_reg->imm, | ||
1987 | opcode); | ||
1988 | } else { | ||
1989 | reg_set_min_max(&other_branch->regs[insn->dst_reg], | ||
1990 | dst_reg, insn->imm, opcode); | ||
1991 | } | ||
1992 | |||
1711 | /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ | 1993 | /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ |
1712 | if (BPF_SRC(insn->code) == BPF_K && | 1994 | if (BPF_SRC(insn->code) == BPF_K && |
1713 | insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && | 1995 | insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && |
@@ -2144,7 +2426,8 @@ static bool compare_ptrs_to_packet(struct bpf_reg_state *old, | |||
2144 | * whereas register type in current state is meaningful, it means that | 2426 | * whereas register type in current state is meaningful, it means that |
2145 | * the current state will reach 'bpf_exit' instruction safely | 2427 | * the current state will reach 'bpf_exit' instruction safely |
2146 | */ | 2428 | */ |
2147 | static bool states_equal(struct bpf_verifier_state *old, | 2429 | static bool states_equal(struct bpf_verifier_env *env, |
2430 | struct bpf_verifier_state *old, | ||
2148 | struct bpf_verifier_state *cur) | 2431 | struct bpf_verifier_state *cur) |
2149 | { | 2432 | { |
2150 | struct bpf_reg_state *rold, *rcur; | 2433 | struct bpf_reg_state *rold, *rcur; |
@@ -2157,6 +2440,13 @@ static bool states_equal(struct bpf_verifier_state *old, | |||
2157 | if (memcmp(rold, rcur, sizeof(*rold)) == 0) | 2440 | if (memcmp(rold, rcur, sizeof(*rold)) == 0) |
2158 | continue; | 2441 | continue; |
2159 | 2442 | ||
2443 | /* If the ranges were not the same, but everything else was and | ||
2444 | * we didn't do a variable access into a map then we are a-ok. | ||
2445 | */ | ||
2446 | if (!env->varlen_map_value_access && | ||
2447 | rold->type == rcur->type && rold->imm == rcur->imm) | ||
2448 | continue; | ||
2449 | |||
2160 | if (rold->type == NOT_INIT || | 2450 | if (rold->type == NOT_INIT || |
2161 | (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT)) | 2451 | (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT)) |
2162 | continue; | 2452 | continue; |
@@ -2213,7 +2503,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) | |||
2213 | return 0; | 2503 | return 0; |
2214 | 2504 | ||
2215 | while (sl != STATE_LIST_MARK) { | 2505 | while (sl != STATE_LIST_MARK) { |
2216 | if (states_equal(&sl->state, &env->cur_state)) | 2506 | if (states_equal(env, &sl->state, &env->cur_state)) |
2217 | /* reached equivalent register/stack state, | 2507 | /* reached equivalent register/stack state, |
2218 | * prune the search | 2508 | * prune the search |
2219 | */ | 2509 | */ |
@@ -2259,6 +2549,7 @@ static int do_check(struct bpf_verifier_env *env) | |||
2259 | 2549 | ||
2260 | init_reg_state(regs); | 2550 | init_reg_state(regs); |
2261 | insn_idx = 0; | 2551 | insn_idx = 0; |
2552 | env->varlen_map_value_access = false; | ||
2262 | for (;;) { | 2553 | for (;;) { |
2263 | struct bpf_insn *insn; | 2554 | struct bpf_insn *insn; |
2264 | u8 class; | 2555 | u8 class; |
@@ -2339,6 +2630,7 @@ static int do_check(struct bpf_verifier_env *env) | |||
2339 | if (err) | 2630 | if (err) |
2340 | return err; | 2631 | return err; |
2341 | 2632 | ||
2633 | reset_reg_range_values(regs, insn->dst_reg); | ||
2342 | if (BPF_SIZE(insn->code) != BPF_W && | 2634 | if (BPF_SIZE(insn->code) != BPF_W && |
2343 | BPF_SIZE(insn->code) != BPF_DW) { | 2635 | BPF_SIZE(insn->code) != BPF_DW) { |
2344 | insn_idx++; | 2636 | insn_idx++; |
@@ -2509,6 +2801,7 @@ process_bpf_exit: | |||
2509 | verbose("invalid BPF_LD mode\n"); | 2801 | verbose("invalid BPF_LD mode\n"); |
2510 | return -EINVAL; | 2802 | return -EINVAL; |
2511 | } | 2803 | } |
2804 | reset_reg_range_values(regs, insn->dst_reg); | ||
2512 | } else { | 2805 | } else { |
2513 | verbose("unknown insn class %d\n", class); | 2806 | verbose("unknown insn class %d\n", class); |
2514 | return -EINVAL; | 2807 | return -EINVAL; |
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h index 364582b77888..ac6edb61b64a 100644 --- a/samples/bpf/libbpf.h +++ b/samples/bpf/libbpf.h | |||
@@ -85,6 +85,14 @@ extern char bpf_log_buf[LOG_BUF_SIZE]; | |||
85 | .off = 0, \ | 85 | .off = 0, \ |
86 | .imm = IMM }) | 86 | .imm = IMM }) |
87 | 87 | ||
88 | #define BPF_MOV32_IMM(DST, IMM) \ | ||
89 | ((struct bpf_insn) { \ | ||
90 | .code = BPF_ALU | BPF_MOV | BPF_K, \ | ||
91 | .dst_reg = DST, \ | ||
92 | .src_reg = 0, \ | ||
93 | .off = 0, \ | ||
94 | .imm = IMM }) | ||
95 | |||
88 | /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ | 96 | /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ |
89 | #define BPF_LD_IMM64(DST, IMM) \ | 97 | #define BPF_LD_IMM64(DST, IMM) \ |
90 | BPF_LD_IMM64_RAW(DST, 0, IMM) | 98 | BPF_LD_IMM64_RAW(DST, 0, IMM) |
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index ac590d4b7f02..369ffaad3799 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c | |||
@@ -29,6 +29,7 @@ struct bpf_test { | |||
29 | struct bpf_insn insns[MAX_INSNS]; | 29 | struct bpf_insn insns[MAX_INSNS]; |
30 | int fixup[MAX_FIXUPS]; | 30 | int fixup[MAX_FIXUPS]; |
31 | int prog_array_fixup[MAX_FIXUPS]; | 31 | int prog_array_fixup[MAX_FIXUPS]; |
32 | int test_val_map_fixup[MAX_FIXUPS]; | ||
32 | const char *errstr; | 33 | const char *errstr; |
33 | const char *errstr_unpriv; | 34 | const char *errstr_unpriv; |
34 | enum { | 35 | enum { |
@@ -39,6 +40,19 @@ struct bpf_test { | |||
39 | enum bpf_prog_type prog_type; | 40 | enum bpf_prog_type prog_type; |
40 | }; | 41 | }; |
41 | 42 | ||
43 | /* Note we want this to be 64 bit aligned so that the end of our array is | ||
44 | * actually the end of the structure. | ||
45 | */ | ||
46 | #define MAX_ENTRIES 11 | ||
47 | struct test_val { | ||
48 | unsigned index; | ||
49 | int foo[MAX_ENTRIES]; | ||
50 | }; | ||
51 | |||
52 | struct other_val { | ||
53 | unsigned int action[32]; | ||
54 | }; | ||
55 | |||
42 | static struct bpf_test tests[] = { | 56 | static struct bpf_test tests[] = { |
43 | { | 57 | { |
44 | "add+sub+mul", | 58 | "add+sub+mul", |
@@ -2163,6 +2177,212 @@ static struct bpf_test tests[] = { | |||
2163 | .errstr = "invalid access to packet", | 2177 | .errstr = "invalid access to packet", |
2164 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | 2178 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, |
2165 | }, | 2179 | }, |
2180 | { | ||
2181 | "valid map access into an array with a constant", | ||
2182 | .insns = { | ||
2183 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2184 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2185 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2186 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2187 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2188 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), | ||
2189 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), | ||
2190 | BPF_EXIT_INSN(), | ||
2191 | }, | ||
2192 | .test_val_map_fixup = {3}, | ||
2193 | .errstr_unpriv = "R0 leaks addr", | ||
2194 | .result_unpriv = REJECT, | ||
2195 | .result = ACCEPT, | ||
2196 | }, | ||
2197 | { | ||
2198 | "valid map access into an array with a register", | ||
2199 | .insns = { | ||
2200 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2201 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2202 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2203 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2204 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2205 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), | ||
2206 | BPF_MOV64_IMM(BPF_REG_1, 4), | ||
2207 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), | ||
2208 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), | ||
2209 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), | ||
2210 | BPF_EXIT_INSN(), | ||
2211 | }, | ||
2212 | .test_val_map_fixup = {3}, | ||
2213 | .errstr_unpriv = "R0 leaks addr", | ||
2214 | .result_unpriv = REJECT, | ||
2215 | .result = ACCEPT, | ||
2216 | }, | ||
2217 | { | ||
2218 | "valid map access into an array with a variable", | ||
2219 | .insns = { | ||
2220 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2221 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2222 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2223 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2224 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2225 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), | ||
2226 | BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), | ||
2227 | BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3), | ||
2228 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), | ||
2229 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), | ||
2230 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), | ||
2231 | BPF_EXIT_INSN(), | ||
2232 | }, | ||
2233 | .test_val_map_fixup = {3}, | ||
2234 | .errstr_unpriv = "R0 leaks addr", | ||
2235 | .result_unpriv = REJECT, | ||
2236 | .result = ACCEPT, | ||
2237 | }, | ||
2238 | { | ||
2239 | "valid map access into an array with a signed variable", | ||
2240 | .insns = { | ||
2241 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2242 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2243 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2244 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2245 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2246 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), | ||
2247 | BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), | ||
2248 | BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), | ||
2249 | BPF_MOV32_IMM(BPF_REG_1, 0), | ||
2250 | BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), | ||
2251 | BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), | ||
2252 | BPF_MOV32_IMM(BPF_REG_1, 0), | ||
2253 | BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), | ||
2254 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), | ||
2255 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), | ||
2256 | BPF_EXIT_INSN(), | ||
2257 | }, | ||
2258 | .test_val_map_fixup = {3}, | ||
2259 | .errstr_unpriv = "R0 leaks addr", | ||
2260 | .result_unpriv = REJECT, | ||
2261 | .result = ACCEPT, | ||
2262 | }, | ||
2263 | { | ||
2264 | "invalid map access into an array with a constant", | ||
2265 | .insns = { | ||
2266 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2267 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2268 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2269 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2270 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2271 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), | ||
2272 | BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2, | ||
2273 | offsetof(struct test_val, foo)), | ||
2274 | BPF_EXIT_INSN(), | ||
2275 | }, | ||
2276 | .test_val_map_fixup = {3}, | ||
2277 | .errstr = "invalid access to map value, value_size=48 off=48 size=8", | ||
2278 | .result = REJECT, | ||
2279 | }, | ||
2280 | { | ||
2281 | "invalid map access into an array with a register", | ||
2282 | .insns = { | ||
2283 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2284 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2285 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2286 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2287 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2288 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), | ||
2289 | BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1), | ||
2290 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), | ||
2291 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), | ||
2292 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), | ||
2293 | BPF_EXIT_INSN(), | ||
2294 | }, | ||
2295 | .test_val_map_fixup = {3}, | ||
2296 | .errstr = "R0 min value is outside of the array range", | ||
2297 | .result = REJECT, | ||
2298 | }, | ||
2299 | { | ||
2300 | "invalid map access into an array with a variable", | ||
2301 | .insns = { | ||
2302 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2303 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2304 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2305 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2306 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2307 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), | ||
2308 | BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), | ||
2309 | BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), | ||
2310 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), | ||
2311 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), | ||
2312 | BPF_EXIT_INSN(), | ||
2313 | }, | ||
2314 | .test_val_map_fixup = {3}, | ||
2315 | .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", | ||
2316 | .result = REJECT, | ||
2317 | }, | ||
2318 | { | ||
2319 | "invalid map access into an array with no floor check", | ||
2320 | .insns = { | ||
2321 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2322 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2323 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2324 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2325 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2326 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), | ||
2327 | BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), | ||
2328 | BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), | ||
2329 | BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), | ||
2330 | BPF_MOV32_IMM(BPF_REG_1, 0), | ||
2331 | BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), | ||
2332 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), | ||
2333 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), | ||
2334 | BPF_EXIT_INSN(), | ||
2335 | }, | ||
2336 | .test_val_map_fixup = {3}, | ||
2337 | .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", | ||
2338 | .result = REJECT, | ||
2339 | }, | ||
2340 | { | ||
2341 | "invalid map access into an array with a invalid max check", | ||
2342 | .insns = { | ||
2343 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2344 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2345 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2346 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2347 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2348 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), | ||
2349 | BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), | ||
2350 | BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1), | ||
2351 | BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), | ||
2352 | BPF_MOV32_IMM(BPF_REG_1, 0), | ||
2353 | BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), | ||
2354 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), | ||
2355 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), | ||
2356 | BPF_EXIT_INSN(), | ||
2357 | }, | ||
2358 | .test_val_map_fixup = {3}, | ||
2359 | .errstr = "invalid access to map value, value_size=48 off=44 size=8", | ||
2360 | .result = REJECT, | ||
2361 | }, | ||
2362 | { | ||
2363 | "invalid map access into an array with a invalid max check", | ||
2364 | .insns = { | ||
2365 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2366 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2367 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2368 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2369 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2370 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), | ||
2371 | BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), | ||
2372 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
2373 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
2374 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
2375 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
2376 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), | ||
2377 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), | ||
2378 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), | ||
2379 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct test_val, foo)), | ||
2380 | BPF_EXIT_INSN(), | ||
2381 | }, | ||
2382 | .test_val_map_fixup = {3, 11}, | ||
2383 | .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", | ||
2384 | .result = REJECT, | ||
2385 | }, | ||
2166 | }; | 2386 | }; |
2167 | 2387 | ||
2168 | static int probe_filter_length(struct bpf_insn *fp) | 2388 | static int probe_filter_length(struct bpf_insn *fp) |
@@ -2176,12 +2396,12 @@ static int probe_filter_length(struct bpf_insn *fp) | |||
2176 | return len + 1; | 2396 | return len + 1; |
2177 | } | 2397 | } |
2178 | 2398 | ||
2179 | static int create_map(void) | 2399 | static int create_map(size_t val_size, int num) |
2180 | { | 2400 | { |
2181 | int map_fd; | 2401 | int map_fd; |
2182 | 2402 | ||
2183 | map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, | 2403 | map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, |
2184 | sizeof(long long), sizeof(long long), 1024, 0); | 2404 | sizeof(long long), val_size, num, 0); |
2185 | if (map_fd < 0) | 2405 | if (map_fd < 0) |
2186 | printf("failed to create map '%s'\n", strerror(errno)); | 2406 | printf("failed to create map '%s'\n", strerror(errno)); |
2187 | 2407 | ||
@@ -2211,12 +2431,13 @@ static int test(void) | |||
2211 | int prog_len = probe_filter_length(prog); | 2431 | int prog_len = probe_filter_length(prog); |
2212 | int *fixup = tests[i].fixup; | 2432 | int *fixup = tests[i].fixup; |
2213 | int *prog_array_fixup = tests[i].prog_array_fixup; | 2433 | int *prog_array_fixup = tests[i].prog_array_fixup; |
2434 | int *test_val_map_fixup = tests[i].test_val_map_fixup; | ||
2214 | int expected_result; | 2435 | int expected_result; |
2215 | const char *expected_errstr; | 2436 | const char *expected_errstr; |
2216 | int map_fd = -1, prog_array_fd = -1; | 2437 | int map_fd = -1, prog_array_fd = -1, test_val_map_fd = -1; |
2217 | 2438 | ||
2218 | if (*fixup) { | 2439 | if (*fixup) { |
2219 | map_fd = create_map(); | 2440 | map_fd = create_map(sizeof(long long), 1024); |
2220 | 2441 | ||
2221 | do { | 2442 | do { |
2222 | prog[*fixup].imm = map_fd; | 2443 | prog[*fixup].imm = map_fd; |
@@ -2231,6 +2452,18 @@ static int test(void) | |||
2231 | prog_array_fixup++; | 2452 | prog_array_fixup++; |
2232 | } while (*prog_array_fixup); | 2453 | } while (*prog_array_fixup); |
2233 | } | 2454 | } |
2455 | if (*test_val_map_fixup) { | ||
2456 | /* Unprivileged can't create a hash map.*/ | ||
2457 | if (unpriv) | ||
2458 | continue; | ||
2459 | test_val_map_fd = create_map(sizeof(struct test_val), | ||
2460 | 256); | ||
2461 | do { | ||
2462 | prog[*test_val_map_fixup].imm = test_val_map_fd; | ||
2463 | test_val_map_fixup++; | ||
2464 | } while (*test_val_map_fixup); | ||
2465 | } | ||
2466 | |||
2234 | printf("#%d %s ", i, tests[i].descr); | 2467 | printf("#%d %s ", i, tests[i].descr); |
2235 | 2468 | ||
2236 | prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER, | 2469 | prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER, |
@@ -2277,6 +2510,8 @@ fail: | |||
2277 | close(map_fd); | 2510 | close(map_fd); |
2278 | if (prog_array_fd >= 0) | 2511 | if (prog_array_fd >= 0) |
2279 | close(prog_array_fd); | 2512 | close(prog_array_fd); |
2513 | if (test_val_map_fd >= 0) | ||
2514 | close(test_val_map_fd); | ||
2280 | close(prog_fd); | 2515 | close(prog_fd); |
2281 | 2516 | ||
2282 | } | 2517 | } |