author    Daniel Borkmann <daniel@iogearbox.net>    2016-05-13 13:08:30 -0400
committer David S. Miller <davem@davemloft.net>     2016-05-16 13:49:32 -0400
commit    c237ee5eb33bf19fe0591c04ff8db19da7323a83 (patch)
tree      8db90add83d5becc20b69434523448970ec16351 /kernel/bpf/core.c
parent    93a73d442d370e20ed1009cd79cb29c4d7c0ee86 (diff)
bpf: add bpf_patch_insn_single helper
Move the functionality to patch instructions out of the verifier code and
into the core as the new bpf_patch_insn_single() helper will be needed
later on for blinding as well. No changes in functionality.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
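As an aside, a minimal caller sketch of the new helper (not part of this commit; the function name, the chosen patch instructions and the surrounding context are illustrative assumptions, only the helper's signature comes from the diff below) showing how verifier-side code might replace the single instruction at index 'off' with a longer sequence:

/* Hypothetical usage sketch of bpf_patch_insn_single(); relies on
 * <linux/filter.h> for the BPF_MOV64_* insn macros.  The two patch
 * instructions are placeholders.
 */
static struct bpf_prog *example_rewrite(struct bpf_prog *prog, u32 off)
{
        struct bpf_insn patch[] = {
                BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
                BPF_MOV64_IMM(BPF_REG_2, 0),
        };
        struct bpf_prog *new_prog;

        /* The helper returns the (possibly reallocated) program on
         * success and NULL if growing the image failed.
         */
        new_prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
        if (!new_prog)
                return NULL;

        return new_prog;
}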
Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--  kernel/bpf/core.c  71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5313d09d4b62..49b5538a5301 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -136,6 +136,77 @@ void __bpf_prog_free(struct bpf_prog *fp)
 	vfree(fp);
 }
 
+static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
+{
+	return BPF_CLASS(insn->code) == BPF_JMP &&
+	       /* Call and Exit are both special jumps with no
+		* target inside the BPF instruction image.
+		*/
+	       BPF_OP(insn->code) != BPF_CALL &&
+	       BPF_OP(insn->code) != BPF_EXIT;
+}
+
+static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
+{
+	struct bpf_insn *insn = prog->insnsi;
+	u32 i, insn_cnt = prog->len;
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (!bpf_is_jmp_and_has_target(insn))
+			continue;
+
+		/* Adjust offset of jmps if we cross boundaries. */
+		if (i < pos && i + insn->off + 1 > pos)
+			insn->off += delta;
+		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
+			insn->off -= delta;
+	}
+}
+
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+				       const struct bpf_insn *patch, u32 len)
+{
+	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
+	struct bpf_prog *prog_adj;
+
+	/* Since our patchlet doesn't expand the image, we're done. */
+	if (insn_delta == 0) {
+		memcpy(prog->insnsi + off, patch, sizeof(*patch));
+		return prog;
+	}
+
+	insn_adj_cnt = prog->len + insn_delta;
+
+	/* Several new instructions need to be inserted. Make room
+	 * for them. Likely, there's no need for a new allocation as
+	 * last page could have large enough tailroom.
+	 */
+	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
+				    GFP_USER);
+	if (!prog_adj)
+		return NULL;
+
+	prog_adj->len = insn_adj_cnt;
+
+	/* Patching happens in 3 steps:
+	 *
+	 * 1) Move over tail of insnsi from next instruction onwards,
+	 *    so we can patch the single target insn with one or more
+	 *    new ones (patching is always from 1 to n insns, n > 0).
+	 * 2) Inject new instructions at the target location.
+	 * 3) Adjust branch offsets if necessary.
+	 */
+	insn_rest = insn_adj_cnt - off - len;
+
+	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
+		sizeof(*patch) * insn_rest);
+	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
+
+	bpf_adj_branches(prog_adj, off, insn_delta);
+
+	return prog_adj;
+}
+
 #ifdef CONFIG_BPF_JIT
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
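
The boundary check in bpf_adj_branches() above is easier to follow with a concrete case. Below is a minimal, hypothetical userspace model of the forward-crossing adjustment (plain integers instead of struct bpf_insn; not part of the commit): a jump at index i lands at i + off + 1, so only a jump whose source lies before the patch point and whose target lies past it needs to grow by delta = len - 1.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the forward-crossing case in bpf_adj_branches(): a jump at
 * index i with offset 'off' targets index i + off + 1.  If 'delta'
 * extra instructions are inserted at index 'pos', a jump that starts
 * before pos but targets past pos must grow by delta to keep landing
 * on the same instruction.
 */
static int16_t adjust_fwd(uint32_t i, int16_t off, uint32_t pos, uint32_t delta)
{
        if (i < pos && i + off + 1 > pos)
                return off + delta;
        return off;
}

int main(void)
{
        /* Jump at index 2 with off = 2 targets index 5; inserting 3
         * extra insns at index 4 pushes that target to index 8, so
         * the offset must become 5.
         */
        assert(adjust_fwd(2, 2, 4, 3) == 5);

        /* A jump entirely before the insertion point is untouched. */
        assert(adjust_fwd(0, 1, 4, 3) == 1);

        printf("offset adjustment model OK\n");
        return 0;
}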