Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--	kernel/bpf/core.c	27
1 file changed, 16 insertions, 11 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 83e0d153b0b4..75c08b8068d6 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -136,28 +136,29 @@ void __bpf_prog_free(struct bpf_prog *fp)
 	vfree(fp);
 }
 
-#define SHA_BPF_RAW_SIZE	\
-	round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
-
-/* Called under verifier mutex. */
-void bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_digest(struct bpf_prog *fp)
 {
 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
-	static u32 ws[SHA_WORKSPACE_WORDS];
-	static u8 raw[SHA_BPF_RAW_SIZE];
-	struct bpf_insn *dst = (void *)raw;
+	u32 raw_size = bpf_prog_digest_scratch_size(fp);
+	u32 ws[SHA_WORKSPACE_WORDS];
 	u32 i, bsize, psize, blocks;
+	struct bpf_insn *dst;
 	bool was_ld_map;
-	u8 *todo = raw;
+	u8 *raw, *todo;
 	__be32 *result;
 	__be64 *bits;
 
+	raw = vmalloc(raw_size);
+	if (!raw)
+		return -ENOMEM;
+
 	sha_init(fp->digest);
 	memset(ws, 0, sizeof(ws));
 
 	/* We need to take out the map fd for the digest calculation
 	 * since they are unstable from user space side.
 	 */
+	dst = (void *)raw;
 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 		dst[i] = fp->insnsi[i];
 		if (!was_ld_map &&
@@ -177,12 +178,13 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 		}
 	}
 
-	psize = fp->len * sizeof(struct bpf_insn);
-	memset(&raw[psize], 0, sizeof(raw) - psize);
+	psize = bpf_prog_insn_size(fp);
+	memset(&raw[psize], 0, raw_size - psize);
 	raw[psize++] = 0x80;
 
 	bsize = round_up(psize, SHA_MESSAGE_BYTES);
 	blocks = bsize / SHA_MESSAGE_BYTES;
+	todo = raw;
 	if (bsize - psize >= sizeof(__be64)) {
 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 	} else {
@@ -199,6 +201,9 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 	result = (__force __be32 *)fp->digest;
 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
 		result[i] = cpu_to_be32(fp->digest[i]);
+
+	vfree(raw);
+	return 0;
 }
 
 static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
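Note: this view is limited to kernel/bpf/core.c, so the include/linux/bpf.h side of the change, where the helpers used above are introduced, is not shown. As a rough sketch only (an assumption inferred from the removed SHA_BPF_RAW_SIZE macro and the removed fp->len * sizeof(struct bpf_insn) expression, not the verbatim header hunk), the two helpers would look roughly like:

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	/* size of the instruction image that gets hashed */
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
{
	/* per-program counterpart of the removed SHA_BPF_RAW_SIZE:
	 * instructions + __be64 length field + 0x80 pad byte,
	 * rounded up to a whole SHA1 message block
	 */
	return round_up(bpf_prog_insn_size(prog) + sizeof(__be64) + 1,
			SHA_MESSAGE_BYTES);
}

Since bpf_prog_calc_digest() can now fail with -ENOMEM instead of writing into static scratch buffers, its caller under the verifier mutex has to propagate the error; a hypothetical call site would look like:

	err = bpf_prog_calc_digest(prog);
	if (err)
		return err;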