author    Daniel Borkmann <daniel@iogearbox.net>    2016-05-13 13:08:30 -0400
committer David S. Miller <davem@davemloft.net>     2016-05-16 13:49:32 -0400
commit    c237ee5eb33bf19fe0591c04ff8db19da7323a83 (patch)
tree      8db90add83d5becc20b69434523448970ec16351 /kernel
parent    93a73d442d370e20ed1009cd79cb29c4d7c0ee86 (diff)
bpf: add bpf_patch_insn_single helper
Move the functionality to patch instructions out of the verifier code and
into the core as the new bpf_patch_insn_single() helper will be needed
later on for blinding as well. No changes in functionality.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c     | 71
-rw-r--r--  kernel/bpf/verifier.c | 53
2 files changed, 80 insertions(+), 44 deletions(-)
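
Before the per-file diffs, a minimal usage sketch of the new helper (not part of this patch: the wrapper function and the replacement instructions below are invented for illustration, and it assumes the helper's prototype is visible via linux/filter.h). It mirrors the calling pattern convert_ctx_accesses() adopts in the verifier hunk further down.

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

/* Illustrative only: replace the single instruction at position @off in
 * @prog with a two-instruction sequence.  bpf_patch_insn_single() may
 * return a larger, reallocated program; branch offsets crossing @off are
 * fixed up internally, so the caller just adopts the returned program.
 */
static struct bpf_prog *patch_example(struct bpf_prog *prog, u32 off)
{
	struct bpf_insn patch[] = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	/* arbitrary example insns */
		BPF_MOV64_IMM(BPF_REG_0, 0),
	};
	struct bpf_prog *new_prog;

	new_prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
	if (!new_prog)
		return NULL;	/* allocation failure */

	return new_prog;
}

When len == 1 the helper patches the instruction in place and returns the original program; only for len > 1 does it reallocate and shift the tail, which is why callers must always adopt the returned pointer rather than assume it equals the one passed in.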
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5313d09d4b62..49b5538a5301 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -136,6 +136,77 @@ void __bpf_prog_free(struct bpf_prog *fp)
 	vfree(fp);
 }
 
+static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
+{
+	return BPF_CLASS(insn->code) == BPF_JMP &&
+	       /* Call and Exit are both special jumps with no
+		* target inside the BPF instruction image.
+		*/
+	       BPF_OP(insn->code) != BPF_CALL &&
+	       BPF_OP(insn->code) != BPF_EXIT;
+}
+
+static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
+{
+	struct bpf_insn *insn = prog->insnsi;
+	u32 i, insn_cnt = prog->len;
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (!bpf_is_jmp_and_has_target(insn))
+			continue;
+
+		/* Adjust offset of jmps if we cross boundaries. */
+		if (i < pos && i + insn->off + 1 > pos)
+			insn->off += delta;
+		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
+			insn->off -= delta;
+	}
+}
+
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+				       const struct bpf_insn *patch, u32 len)
+{
+	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
+	struct bpf_prog *prog_adj;
+
+	/* Since our patchlet doesn't expand the image, we're done. */
+	if (insn_delta == 0) {
+		memcpy(prog->insnsi + off, patch, sizeof(*patch));
+		return prog;
+	}
+
+	insn_adj_cnt = prog->len + insn_delta;
+
+	/* Several new instructions need to be inserted. Make room
+	 * for them. Likely, there's no need for a new allocation as
+	 * last page could have large enough tailroom.
+	 */
+	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
+				    GFP_USER);
+	if (!prog_adj)
+		return NULL;
+
+	prog_adj->len = insn_adj_cnt;
+
+	/* Patching happens in 3 steps:
+	 *
+	 * 1) Move over tail of insnsi from next instruction onwards,
+	 *    so we can patch the single target insn with one or more
+	 *    new ones (patching is always from 1 to n insns, n > 0).
+	 * 2) Inject new instructions at the target location.
+	 * 3) Adjust branch offsets if necessary.
+	 */
+	insn_rest = insn_adj_cnt - off - len;
+
+	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
+		sizeof(*patch) * insn_rest);
+	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
+
+	bpf_adj_branches(prog_adj, off, insn_delta);
+
+	return prog_adj;
+}
+
 #ifdef CONFIG_BPF_JIT
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
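
A quick worked example of the branch adjustment above, on a hypothetical instruction stream (not taken from the patch): suppose insn 2 is a jump with off = +3, i.e. it targets insn 2 + 3 + 1 = 6, and the single insn at pos = 4 is replaced by three insns (delta = 2). Since i = 2 < pos and i + off + 1 = 6 > pos, the jump crosses the patched region, so its offset is bumped to +5; the target has shifted from index 6 to index 8, and 2 + 5 + 1 = 8 still lands on it. The else branch handles the mirror case: a jump sitting after the patched region whose target lies at or before it has moved forward by delta while its target has not, so its (negative) offset is reduced by delta.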
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 84bff68cf80e..a08d66215245 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2587,26 +2587,6 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
 			insn->src_reg = 0;
 }
 
-static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
-{
-	struct bpf_insn *insn = prog->insnsi;
-	int insn_cnt = prog->len;
-	int i;
-
-	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (BPF_CLASS(insn->code) != BPF_JMP ||
-		    BPF_OP(insn->code) == BPF_CALL ||
-		    BPF_OP(insn->code) == BPF_EXIT)
-			continue;
-
-		/* adjust offset of jmps if necessary */
-		if (i < pos && i + insn->off + 1 > pos)
-			insn->off += delta;
-		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
-			insn->off -= delta;
-	}
-}
-
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -2616,14 +2596,15 @@ static int convert_ctx_accesses(struct verifier_env *env)
 	int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16];
 	struct bpf_prog *new_prog;
-	u32 cnt;
-	int i;
 	enum bpf_access_type type;
+	int i;
 
 	if (!env->prog->aux->ops->convert_ctx_access)
 		return 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
+		u32 insn_delta, cnt;
+
 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
 			type = BPF_READ;
 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
@@ -2645,34 +2626,18 @@ static int convert_ctx_accesses(struct verifier_env *env)
 			return -EINVAL;
 		}
 
-		if (cnt == 1) {
-			memcpy(insn, insn_buf, sizeof(*insn));
-			continue;
-		}
-
-		/* several new insns need to be inserted. Make room for them */
-		insn_cnt += cnt - 1;
-		new_prog = bpf_prog_realloc(env->prog,
-					    bpf_prog_size(insn_cnt),
-					    GFP_USER);
+		new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
 		if (!new_prog)
 			return -ENOMEM;
 
-		new_prog->len = insn_cnt;
-
-		memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
-			sizeof(*insn) * (insn_cnt - i - cnt));
-
-		/* copy substitute insns in place of load instruction */
-		memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
-
-		/* adjust branches in the whole program */
-		adjust_branches(new_prog, i, cnt - 1);
+		insn_delta = cnt - 1;
 
 		/* keep walking new program and skip insns we just inserted */
 		env->prog = new_prog;
-		insn = new_prog->insnsi + i + cnt - 1;
-		i += cnt - 1;
+		insn = new_prog->insnsi + i + insn_delta;
+
+		insn_cnt += insn_delta;
+		i += insn_delta;
 	}
 
 	return 0;