aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2016-12-17 19:52:57 -0500
committerDavid S. Miller <davem@davemloft.net>2016-12-17 21:27:44 -0500
commitaafe6ae9cee32df85eb5e8bb6dd1d918e6807b09 (patch)
tree4aa9958d1ddf599f4ed32d491f8ab217c9747056 /kernel
parent40e972ab652f3e9b84a8f24f517345b460962c29 (diff)
bpf: dynamically allocate digest scratch buffer
Geert rightfully complained that 7bd509e311f4 ("bpf: add prog_digest and expose it via fdinfo/netlink") added a too large allocation of variable 'raw' from the bss section, and that it should instead be done dynamically: # ./scripts/bloat-o-meter kernel/bpf/core.o.1 kernel/bpf/core.o.2 add/remove: 3/0 grow/shrink: 0/0 up/down: 33291/0 (33291) function old new delta raw - 32832 +32832 [...] Since this is only relevant during the program creation path, which can be considered slow-path anyway, let's allocate that dynamically and not be implicitly dependent on the verifier mutex. Move bpf_prog_calc_digest() to the beginning of replace_map_fd_with_map_ptr(); error handling also stays straightforward. Reported-by: Geert Uytterhoeven <geert@linux-m68k.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/bpf/core.c27
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/bpf/verifier.c6
3 files changed, 21 insertions, 14 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 83e0d153b0b4..75c08b8068d6 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -136,28 +136,29 @@ void __bpf_prog_free(struct bpf_prog *fp)
 	vfree(fp);
 }
 
-#define SHA_BPF_RAW_SIZE	\
-	round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
-
-/* Called under verifier mutex. */
-void bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_digest(struct bpf_prog *fp)
 {
 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
-	static u32 ws[SHA_WORKSPACE_WORDS];
-	static u8 raw[SHA_BPF_RAW_SIZE];
-	struct bpf_insn *dst = (void *)raw;
+	u32 raw_size = bpf_prog_digest_scratch_size(fp);
+	u32 ws[SHA_WORKSPACE_WORDS];
 	u32 i, bsize, psize, blocks;
+	struct bpf_insn *dst;
 	bool was_ld_map;
-	u8 *todo = raw;
+	u8 *raw, *todo;
 	__be32 *result;
 	__be64 *bits;
 
+	raw = vmalloc(raw_size);
+	if (!raw)
+		return -ENOMEM;
+
 	sha_init(fp->digest);
 	memset(ws, 0, sizeof(ws));
 
 	/* We need to take out the map fd for the digest calculation
 	 * since they are unstable from user space side.
 	 */
+	dst = (void *)raw;
 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 		dst[i] = fp->insnsi[i];
 		if (!was_ld_map &&
@@ -177,12 +178,13 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 		}
 	}
 
-	psize = fp->len * sizeof(struct bpf_insn);
-	memset(&raw[psize], 0, sizeof(raw) - psize);
+	psize = bpf_prog_insn_size(fp);
+	memset(&raw[psize], 0, raw_size - psize);
 	raw[psize++] = 0x80;
 
 	bsize = round_up(psize, SHA_MESSAGE_BYTES);
 	blocks = bsize / SHA_MESSAGE_BYTES;
+	todo  = raw;
 	if (bsize - psize >= sizeof(__be64)) {
 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 	} else {
@@ -199,6 +201,9 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 	result = (__force __be32 *)fp->digest;
 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
 		result[i] = cpu_to_be32(fp->digest[i]);
+
+	vfree(raw);
+	return 0;
 }
 
 static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4819ec9d95f6..35d674c1f12e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -811,7 +811,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 
 	err = -EFAULT;
 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
-			   prog->len * sizeof(struct bpf_insn)) != 0)
+			   bpf_prog_insn_size(prog)) != 0)
 		goto free_prog;
 
 	prog->orig_prog = NULL;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 81e267bc4640..64b7b1abe087 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2931,6 +2931,10 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 	int insn_cnt = env->prog->len;
 	int i, j, err;
 
+	err = bpf_prog_calc_digest(env->prog);
+	if (err)
+		return err;
+
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		if (BPF_CLASS(insn->code) == BPF_LDX &&
 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
@@ -3178,8 +3182,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 		log_level = 0;
 	}
 
-	bpf_prog_calc_digest(env->prog);
-
 	ret = replace_map_fd_with_map_ptr(env);
 	if (ret < 0)
 		goto skip_full_check;