Diffstat (limited to 'arch/x86/net/bpf_jit_comp.c')
-rw-r--r--  arch/x86/net/bpf_jit_comp.c  129
1 file changed, 77 insertions(+), 52 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5c8cb8043c5a..d56cd1f515bd 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -8,12 +8,10 @@
  * as published by the Free Software Foundation; version 2
  * of the License.
  */
-#include <linux/moduleloader.h>
-#include <asm/cacheflush.h>
 #include <linux/netdevice.h>
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
-#include <linux/random.h>
+#include <asm/cacheflush.h>
 
 int bpf_jit_enable __read_mostly;
 
@@ -109,39 +107,6 @@ static inline void bpf_flush_icache(void *start, void *end)
 #define CHOOSE_LOAD_FUNC(K, func) \
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
-struct bpf_binary_header {
-	unsigned int pages;
-	/* Note : for security reasons, bpf code will follow a randomly
-	 * sized amount of int3 instructions
-	 */
-	u8 image[];
-};
-
-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
-						  u8 **image_ptr)
-{
-	unsigned int sz, hole;
-	struct bpf_binary_header *header;
-
-	/* Most of BPF filters are really small,
-	 * but if some of them fill a page, allow at least
-	 * 128 extra bytes to insert a random section of int3
-	 */
-	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
-	header = module_alloc(sz);
-	if (!header)
-		return NULL;
-
-	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
-
-	header->pages = sz / PAGE_SIZE;
-	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
-
-	/* insert a random number of int3 instructions before BPF code */
-	*image_ptr = &header->image[prandom_u32() % hole];
-	return header;
-}
-
 /* pick a register outside of BPF range for JIT internal work */
 #define AUX_REG	(MAX_BPF_REG + 1)
 
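
[Note] The private allocator deleted above was consolidated into the generic
BPF core (kernel/bpf/core.c) as bpf_jit_binary_alloc(), so all JITs share one
hardened image allocator. The following is a sketch of that consolidated
helper consistent with this diff's call sites; the exact body in the tree may
differ slightly:

	struct bpf_binary_header *
	bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
			     unsigned int alignment,
			     bpf_jit_fill_hole_t bpf_fill_ill_insns)
	{
		struct bpf_binary_header *hdr;
		unsigned int size, hole, start;

		/* Most BPF filters are really small, but if some of them
		 * fill a page, allow at least 128 extra bytes for the
		 * randomized gap in front of the program.
		 */
		size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
		hdr = module_alloc(size);
		if (hdr == NULL)
			return NULL;

		/* arch callback poisons the whole area, e.g. with int3 */
		bpf_fill_ill_insns(hdr, size);

		hdr->pages = size / PAGE_SIZE;
		hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
			     PAGE_SIZE - sizeof(*hdr));
		/* start the image at a random, alignment-rounded offset */
		start = (prandom_u32() % hole) & ~(alignment - 1);

		*image_ptr = &hdr->image[start];
		return hdr;
	}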
@@ -206,6 +171,12 @@ static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 }
 
+static void jit_fill_hole(void *area, unsigned int size)
+{
+	/* fill whole space with int3 instructions */
+	memset(area, 0xcc, size);
+}
+
 struct jit_context {
 	unsigned int cleanup_addr; /* epilogue code offset */
 	bool seen_ld_abs;
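
[Note] jit_fill_hole() is the arch callback the core allocator invokes to
poison the unused space around the image; 0xcc is the x86 int3 breakpoint
opcode, so a stray jump into the gap traps instead of executing leftover
data. Its function-pointer type, presumably declared in linux/filter.h next
to the allocator:

	typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);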
@@ -393,6 +364,23 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
 		break;
 
+	case BPF_LD | BPF_IMM | BPF_DW:
+		if (insn[1].code != 0 || insn[1].src_reg != 0 ||
+		    insn[1].dst_reg != 0 || insn[1].off != 0) {
+			/* verifier must catch invalid insns */
+			pr_err("invalid BPF_LD_IMM64 insn\n");
+			return -EINVAL;
+		}
+
+		/* movabsq %rax, imm64 */
+		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
+		EMIT(insn[0].imm, 4);
+		EMIT(insn[1].imm, 4);
+
+		insn++;
+		i++;
+		break;
+
 		/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU | BPF_DIV | BPF_X:
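
[Note] BPF_LD_IMM64 is the only 16-byte eBPF instruction: the low 32 bits of
the constant travel in insn[0].imm and the high 32 bits in the pseudo second
half's insn[1].imm, which is why the JIT consumes two slots (insn++, i++) and
emits a single movabs. Illustrative bytes, assuming dst is BPF_REG_0 which
this JIT maps to %rax (REX.W prefix plus opcode B8+rd, then the raw imm64):

	/* BPF_LD_IMM64 r0, 0x1122334455667788 emits:
	 *   48 b8 88 77 66 55 44 33 22 11   movabs $0x1122334455667788,%rax
	 */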
@@ -515,6 +503,48 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
 		break;
 
+	case BPF_ALU | BPF_LSH | BPF_X:
+	case BPF_ALU | BPF_RSH | BPF_X:
+	case BPF_ALU | BPF_ARSH | BPF_X:
+	case BPF_ALU64 | BPF_LSH | BPF_X:
+	case BPF_ALU64 | BPF_RSH | BPF_X:
+	case BPF_ALU64 | BPF_ARSH | BPF_X:
+
+		/* check for bad case when dst_reg == rcx */
+		if (dst_reg == BPF_REG_4) {
+			/* mov r11, dst_reg */
+			EMIT_mov(AUX_REG, dst_reg);
+			dst_reg = AUX_REG;
+		}
+
+		if (src_reg != BPF_REG_4) { /* common case */
+			EMIT1(0x51); /* push rcx */
+
+			/* mov rcx, src_reg */
+			EMIT_mov(BPF_REG_4, src_reg);
+		}
+
+		/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
+		if (BPF_CLASS(insn->code) == BPF_ALU64)
+			EMIT1(add_1mod(0x48, dst_reg));
+		else if (is_ereg(dst_reg))
+			EMIT1(add_1mod(0x40, dst_reg));
+
+		switch (BPF_OP(insn->code)) {
+		case BPF_LSH: b3 = 0xE0; break;
+		case BPF_RSH: b3 = 0xE8; break;
+		case BPF_ARSH: b3 = 0xF8; break;
+		}
+		EMIT2(0xD3, add_1reg(b3, dst_reg));
+
+		if (src_reg != BPF_REG_4)
+			EMIT1(0x59); /* pop rcx */
+
+		if (insn->dst_reg == BPF_REG_4)
+			/* mov dst_reg, r11 */
+			EMIT_mov(insn->dst_reg, AUX_REG);
+		break;
+
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
 		switch (imm32) {
 		case 16:
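
[Note] x86 variable-count shifts hard-code the count register: shl/shr/sar
take the count only in %cl. Hence the dance above: src_reg is staged into
rcx (after saving the caller's rcx, which backs BPF_REG_4), and a dst_reg
that itself lives in rcx is detoured through the scratch register r11
(AUX_REG). Assuming src_reg is BPF_REG_2 (%rsi) and dst_reg is BPF_REG_0
(%rax), a BPF_ALU64 | BPF_LSH | BPF_X comes out roughly as:

	push %rcx		/* 51: preserve BPF_REG_4 */
	mov  %rsi,%rcx		/* stage the shift count in %cl */
	shl  %cl,%rax		/* 48 d3 e0: REX.W + D3 /4 */
	pop  %rcx		/* 59: restore BPF_REG_4 */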
@@ -900,7 +930,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	if (proglen <= 0) {
 		image = NULL;
 		if (header)
-			module_free(NULL, header);
+			bpf_jit_binary_free(header);
 		goto out;
 	}
 	if (image) {
@@ -910,7 +940,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 			break;
 		}
 		if (proglen == oldproglen) {
-			header = bpf_alloc_binary(proglen, &image);
+			header = bpf_jit_binary_alloc(proglen, &image,
+						      1, jit_fill_hole);
 			if (!header)
 				goto out;
 		}
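
[Note] The two extra arguments select the image alignment and the fill
callback: x86 instructions have no alignment requirement, so 1 is passed and
only the int3 filler matters. An arch with fixed-width instructions would
presumably pass its instruction size instead, e.g. (hypothetical call):

	header = bpf_jit_binary_alloc(proglen, &image,
				      sizeof(u32), jit_fill_hole);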
@@ -924,29 +955,23 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 		bpf_flush_icache(header, image + proglen);
 		set_memory_ro((unsigned long)header, header->pages);
 		prog->bpf_func = (void *)image;
-		prog->jited = 1;
+		prog->jited = true;
 	}
 out:
 	kfree(addrs);
 }
 
-static void bpf_jit_free_deferred(struct work_struct *work)
+void bpf_jit_free(struct bpf_prog *fp)
 {
-	struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
 	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
 	struct bpf_binary_header *header = (void *)addr;
 
+	if (!fp->jited)
+		goto free_filter;
+
 	set_memory_rw(addr, header->pages);
-	module_free(NULL, header);
-	kfree(fp);
-}
+	bpf_jit_binary_free(header);
 
-void bpf_jit_free(struct bpf_prog *fp)
-{
-	if (fp->jited) {
-		INIT_WORK(&fp->work, bpf_jit_free_deferred);
-		schedule_work(&fp->work);
-	} else {
-		kfree(fp);
-	}
-}
+free_filter:
+	bpf_prog_unlock_free(fp);
 }
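
[Note] With allocation centralized, freeing becomes symmetric: the arch only
flips the pages back to read-write and hands the header to the core.
bpf_jit_binary_free() is presumably the thin core-side counterpart:

	void bpf_jit_binary_free(struct bpf_binary_header *hdr)
	{
		module_free(NULL, hdr);
	}

bpf_prog_unlock_free() then drops the read-only protection on struct
bpf_prog and frees it; the work-queue deferral previously open-coded here
is presumably handled once in the core's free path rather than per arch.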