diff options
author | Alexei Starovoitov <ast@plumgrid.com> | 2014-10-28 18:11:41 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-10-30 15:44:37 -0400 |
commit | 9c3997601d51069ec08d7d06cf31a17884056cc2 (patch) | |
tree | 640f601a6df8b0cd19d33af5c47691e3294db143 | |
parent | d3627795d074d3877e0818ab533c86139ea31413 (diff) |
bpf: reduce verifier memory consumption
verifier keeps track of register state spilled to stack.
registers are 8-byte wide and always aligned, so instead of tracking them
in every byte-sized stack slot, use MAX_BPF_STACK / 8 array to track
spilled register state.
Though verifier runs in user context and its state is freed immediately
after verification, it makes sense to reduce its memory usage.
This optimization reduces sizeof(struct verifier_state)
from 12464 to 1712 on 64-bit and from 6232 to 1112 on 32-bit.
Note, this patch doesn't change existing limits, which are there to bound
time and memory during verification: 4k total number of insns in a program,
1k number of jumps (states to visit) and 32k number of processed insn
(since an insn may be visited multiple times). Theoretical worst case memory
during verification is 1712 * 1k = 17Mbyte. Out-of-memory situation triggers
cleanup and rejects the program.
Suggested-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | kernel/bpf/verifier.c | 101 |
1 files changed, 57 insertions, 44 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9f81818f2941..b6a1f7c14a67 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -153,22 +153,19 @@ struct reg_state { | |||
153 | 153 | ||
154 | enum bpf_stack_slot_type { | 154 | enum bpf_stack_slot_type { |
155 | STACK_INVALID, /* nothing was stored in this stack slot */ | 155 | STACK_INVALID, /* nothing was stored in this stack slot */ |
156 | STACK_SPILL, /* 1st byte of register spilled into stack */ | 156 | STACK_SPILL, /* register spilled into stack */ |
157 | STACK_SPILL_PART, /* other 7 bytes of register spill */ | ||
158 | STACK_MISC /* BPF program wrote some data into this slot */ | 157 | STACK_MISC /* BPF program wrote some data into this slot */ |
159 | }; | 158 | }; |
160 | 159 | ||
161 | struct bpf_stack_slot { | 160 | #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ |
162 | enum bpf_stack_slot_type stype; | ||
163 | struct reg_state reg_st; | ||
164 | }; | ||
165 | 161 | ||
166 | /* state of the program: | 162 | /* state of the program: |
167 | * type of all registers and stack info | 163 | * type of all registers and stack info |
168 | */ | 164 | */ |
169 | struct verifier_state { | 165 | struct verifier_state { |
170 | struct reg_state regs[MAX_BPF_REG]; | 166 | struct reg_state regs[MAX_BPF_REG]; |
171 | struct bpf_stack_slot stack[MAX_BPF_STACK]; | 167 | u8 stack_slot_type[MAX_BPF_STACK]; |
168 | struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; | ||
172 | }; | 169 | }; |
173 | 170 | ||
174 | /* linked list of verifier states used to prune search */ | 171 | /* linked list of verifier states used to prune search */ |
@@ -259,10 +256,10 @@ static void print_verifier_state(struct verifier_env *env) | |||
259 | env->cur_state.regs[i].map_ptr->key_size, | 256 | env->cur_state.regs[i].map_ptr->key_size, |
260 | env->cur_state.regs[i].map_ptr->value_size); | 257 | env->cur_state.regs[i].map_ptr->value_size); |
261 | } | 258 | } |
262 | for (i = 0; i < MAX_BPF_STACK; i++) { | 259 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { |
263 | if (env->cur_state.stack[i].stype == STACK_SPILL) | 260 | if (env->cur_state.stack_slot_type[i] == STACK_SPILL) |
264 | verbose(" fp%d=%s", -MAX_BPF_STACK + i, | 261 | verbose(" fp%d=%s", -MAX_BPF_STACK + i, |
265 | reg_type_str[env->cur_state.stack[i].reg_st.type]); | 262 | reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]); |
266 | } | 263 | } |
267 | verbose("\n"); | 264 | verbose("\n"); |
268 | } | 265 | } |
@@ -539,8 +536,10 @@ static int bpf_size_to_bytes(int bpf_size) | |||
539 | static int check_stack_write(struct verifier_state *state, int off, int size, | 536 | static int check_stack_write(struct verifier_state *state, int off, int size, |
540 | int value_regno) | 537 | int value_regno) |
541 | { | 538 | { |
542 | struct bpf_stack_slot *slot; | ||
543 | int i; | 539 | int i; |
540 | /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, | ||
541 | * so it's aligned access and [off, off + size) are within stack limits | ||
542 | */ | ||
544 | 543 | ||
545 | if (value_regno >= 0 && | 544 | if (value_regno >= 0 && |
546 | (state->regs[value_regno].type == PTR_TO_MAP_VALUE || | 545 | (state->regs[value_regno].type == PTR_TO_MAP_VALUE || |
@@ -548,30 +547,24 @@ static int check_stack_write(struct verifier_state *state, int off, int size, | |||
548 | state->regs[value_regno].type == PTR_TO_CTX)) { | 547 | state->regs[value_regno].type == PTR_TO_CTX)) { |
549 | 548 | ||
550 | /* register containing pointer is being spilled into stack */ | 549 | /* register containing pointer is being spilled into stack */ |
551 | if (size != 8) { | 550 | if (size != BPF_REG_SIZE) { |
552 | verbose("invalid size of register spill\n"); | 551 | verbose("invalid size of register spill\n"); |
553 | return -EACCES; | 552 | return -EACCES; |
554 | } | 553 | } |
555 | 554 | ||
556 | slot = &state->stack[MAX_BPF_STACK + off]; | ||
557 | slot->stype = STACK_SPILL; | ||
558 | /* save register state */ | 555 | /* save register state */ |
559 | slot->reg_st = state->regs[value_regno]; | 556 | state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = |
560 | for (i = 1; i < 8; i++) { | 557 | state->regs[value_regno]; |
561 | slot = &state->stack[MAX_BPF_STACK + off + i]; | ||
562 | slot->stype = STACK_SPILL_PART; | ||
563 | slot->reg_st.type = UNKNOWN_VALUE; | ||
564 | slot->reg_st.map_ptr = NULL; | ||
565 | } | ||
566 | } else { | ||
567 | 558 | ||
559 | for (i = 0; i < BPF_REG_SIZE; i++) | ||
560 | state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; | ||
561 | } else { | ||
568 | /* regular write of data into stack */ | 562 | /* regular write of data into stack */ |
569 | for (i = 0; i < size; i++) { | 563 | state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = |
570 | slot = &state->stack[MAX_BPF_STACK + off + i]; | 564 | (struct reg_state) {}; |
571 | slot->stype = STACK_MISC; | 565 | |
572 | slot->reg_st.type = UNKNOWN_VALUE; | 566 | for (i = 0; i < size; i++) |
573 | slot->reg_st.map_ptr = NULL; | 567 | state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; |
574 | } | ||
575 | } | 568 | } |
576 | return 0; | 569 | return 0; |
577 | } | 570 | } |
@@ -579,19 +572,18 @@ static int check_stack_write(struct verifier_state *state, int off, int size, | |||
579 | static int check_stack_read(struct verifier_state *state, int off, int size, | 572 | static int check_stack_read(struct verifier_state *state, int off, int size, |
580 | int value_regno) | 573 | int value_regno) |
581 | { | 574 | { |
575 | u8 *slot_type; | ||
582 | int i; | 576 | int i; |
583 | struct bpf_stack_slot *slot; | ||
584 | 577 | ||
585 | slot = &state->stack[MAX_BPF_STACK + off]; | 578 | slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; |
586 | 579 | ||
587 | if (slot->stype == STACK_SPILL) { | 580 | if (slot_type[0] == STACK_SPILL) { |
588 | if (size != 8) { | 581 | if (size != BPF_REG_SIZE) { |
589 | verbose("invalid size of register spill\n"); | 582 | verbose("invalid size of register spill\n"); |
590 | return -EACCES; | 583 | return -EACCES; |
591 | } | 584 | } |
592 | for (i = 1; i < 8; i++) { | 585 | for (i = 1; i < BPF_REG_SIZE; i++) { |
593 | if (state->stack[MAX_BPF_STACK + off + i].stype != | 586 | if (slot_type[i] != STACK_SPILL) { |
594 | STACK_SPILL_PART) { | ||
595 | verbose("corrupted spill memory\n"); | 587 | verbose("corrupted spill memory\n"); |
596 | return -EACCES; | 588 | return -EACCES; |
597 | } | 589 | } |
@@ -599,12 +591,12 @@ static int check_stack_read(struct verifier_state *state, int off, int size, | |||
599 | 591 | ||
600 | if (value_regno >= 0) | 592 | if (value_regno >= 0) |
601 | /* restore register state from stack */ | 593 | /* restore register state from stack */ |
602 | state->regs[value_regno] = slot->reg_st; | 594 | state->regs[value_regno] = |
595 | state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; | ||
603 | return 0; | 596 | return 0; |
604 | } else { | 597 | } else { |
605 | for (i = 0; i < size; i++) { | 598 | for (i = 0; i < size; i++) { |
606 | if (state->stack[MAX_BPF_STACK + off + i].stype != | 599 | if (slot_type[i] != STACK_MISC) { |
607 | STACK_MISC) { | ||
608 | verbose("invalid read from stack off %d+%d size %d\n", | 600 | verbose("invalid read from stack off %d+%d size %d\n", |
609 | off, i, size); | 601 | off, i, size); |
610 | return -EACCES; | 602 | return -EACCES; |
@@ -747,7 +739,7 @@ static int check_stack_boundary(struct verifier_env *env, | |||
747 | } | 739 | } |
748 | 740 | ||
749 | for (i = 0; i < access_size; i++) { | 741 | for (i = 0; i < access_size; i++) { |
750 | if (state->stack[MAX_BPF_STACK + off + i].stype != STACK_MISC) { | 742 | if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { |
751 | verbose("invalid indirect read from stack off %d+%d size %d\n", | 743 | verbose("invalid indirect read from stack off %d+%d size %d\n", |
752 | off, i, access_size); | 744 | off, i, access_size); |
753 | return -EACCES; | 745 | return -EACCES; |
@@ -1417,12 +1409,33 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur) | |||
1417 | } | 1409 | } |
1418 | 1410 | ||
1419 | for (i = 0; i < MAX_BPF_STACK; i++) { | 1411 | for (i = 0; i < MAX_BPF_STACK; i++) { |
1420 | if (memcmp(&old->stack[i], &cur->stack[i], | 1412 | if (old->stack_slot_type[i] == STACK_INVALID) |
1421 | sizeof(old->stack[0])) != 0) { | 1413 | continue; |
1422 | if (old->stack[i].stype == STACK_INVALID) | 1414 | if (old->stack_slot_type[i] != cur->stack_slot_type[i]) |
1423 | continue; | 1415 | /* Ex: old explored (safe) state has STACK_SPILL in |
1417 | * this stack slot, but current has STACK_MISC -> | ||
1418 | * these verifier states are not equivalent, | ||
1418 | * return false to continue verification of this path | ||
1419 | */ | ||
1424 | return false; | 1420 | return false; |
1425 | } | 1421 | if (i % BPF_REG_SIZE) |
1422 | continue; | ||
1423 | if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE], | ||
1424 | &cur->spilled_regs[i / BPF_REG_SIZE], | ||
1425 | sizeof(old->spilled_regs[0]))) | ||
1426 | /* when explored and current stack slot types are | ||
1427 | * the same, check that stored pointer types | ||
1428 | * are the same as well. | ||
1429 | * Ex: explored safe path could have stored | ||
1430 | * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8} | ||
1431 | * but current path has stored: | ||
1432 | * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16} | ||
1433 | * such verifier states are not equivalent. | ||
1434 | * return false to continue verification of this path | ||
1435 | */ | ||
1436 | return false; | ||
1437 | else | ||
1438 | continue; | ||
1426 | } | 1439 | } |
1427 | return true; | 1440 | return true; |
1428 | } | 1441 | } |