author     Daniel Borkmann <daniel@iogearbox.net>   2016-05-13 13:08:33 -0400
committer  David S. Miller <davem@davemloft.net>    2016-05-16 13:49:32 -0400
commit     959a7579160349d222cc5da30db3b138139b6fbc (patch)
tree       60cc7d0a58a032b5894766c92218ab2a2b453a7c /arch/x86/net
parent     4f3446bb809f20ad56cadf712e6006815ae7a8f9 (diff)
bpf, x86: add support for constant blinding
This patch wires the recently added constant blinding helpers
into the x86 eBPF JIT. In the bpf_int_jit_compile() path, this
requires using the bpf_jit_blind_constants()/
bpf_jit_prog_release_other() pair to rewrite the program into a
blinded one, and mapping the BPF_REG_AX register to a CPU
register. BPF_REG_AX is mapped to the non-callee-saved register
r10, and is thus shared with the cached skb->data used for
ld_abs/ind, which is not needed by every program type. When
blinding is not used, there is zero additional overhead in the
generated image.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
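
For context (not introduced by this patch, but relied upon by it): the
blinding rewrite done by bpf_jit_blind_constants() replaces each
instruction carrying an immediate with a short sequence that reconstructs
the immediate through BPF_REG_AX at runtime, so the constant never appears
verbatim in the JITed image. A simplified sketch of the ALU-with-immediate
case, adapted from bpf_jit_blind_insn() in kernel/bpf/core.c (other opcode
classes are handled analogously):

	/* dst = dst <op> imm becomes, with a per-program random imm_rnd: */
	*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); /* blinded constant */
	*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);             /* recover imm at runtime */
	*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);    /* original op, now reg-reg */

This is why every JIT that supports blinding must back BPF_REG_AX with a
real scratch register, as done below.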
Diffstat (limited to 'arch/x86/net')
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 66
1 file changed, 53 insertions(+), 13 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 6b2d23ea3590..fe04a04dab8e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -110,11 +110,16 @@ static void bpf_flush_icache(void *start, void *end)
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 /* pick a register outside of BPF range for JIT internal work */
-#define AUX_REG (MAX_BPF_REG + 1)
+#define AUX_REG (MAX_BPF_JIT_REG + 1)
 
-/* the following table maps BPF registers to x64 registers.
- * x64 register r12 is unused, since if used as base address register
- * in load/store instructions, it always needs an extra byte of encoding
+/* The following table maps BPF registers to x64 registers.
+ *
+ * x64 register r12 is unused, since if used as base address
+ * register in load/store instructions, it always needs an
+ * extra byte of encoding and is callee saved.
+ *
+ * r9 caches skb->len - skb->data_len
+ * r10 caches skb->data, and used for blinding (if enabled)
  */
 static const int reg2hex[] = {
 	[BPF_REG_0] = 0,  /* rax */
@@ -128,6 +133,7 @@ static const int reg2hex[] = {
 	[BPF_REG_8] = 6,  /* r14 callee saved */
 	[BPF_REG_9] = 7,  /* r15 callee saved */
 	[BPF_REG_FP] = 5, /* rbp readonly */
+	[BPF_REG_AX] = 2, /* r10 temp register */
 	[AUX_REG] = 3,    /* r11 temp register */
 };
 
@@ -141,7 +147,8 @@ static bool is_ereg(u32 reg)
 			     BIT(AUX_REG) |
 			     BIT(BPF_REG_7) |
 			     BIT(BPF_REG_8) |
-			     BIT(BPF_REG_9));
+			     BIT(BPF_REG_9) |
+			     BIT(BPF_REG_AX));
 }
 
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
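
Since BPF_REG_AX maps to r10, is_ereg() must report it so that
instructions touching it get the proper REX prefix bits. For reference,
this feeds the existing encoding helpers in bpf_jit_comp.c, which look
roughly like this (reg2hex[] stores only the low three bits of the
hardware register number):

	/* add modifiers if 'reg' maps to x64 registers r8..r15 */
	static u8 add_1mod(u8 byte, u32 reg)
	{
		if (is_ereg(reg))
			byte |= 1;	/* set the REX.B bit for the r/m operand */
		return byte;
	}

	/* encode 'dst_reg' register into x64 opcode 'byte' */
	static u8 add_1reg(u8 byte, u32 dst_reg)
	{
		return byte + reg2hex[dst_reg];	/* low 3 bits of the encoding */
	}

Without the BPF_REG_AX bit in is_ereg(), r10 would be encoded as rdx
(both share the low bits 010).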
@@ -182,6 +189,7 @@ static void jit_fill_hole(void *area, unsigned int size)
 struct jit_context {
 	int cleanup_addr; /* epilogue code offset */
 	bool seen_ld_abs;
+	bool seen_ax_reg;
 };
 
 /* maximum number of bytes emitted while JITing one eBPF insn */
@@ -345,6 +353,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	struct bpf_insn *insn = bpf_prog->insnsi;
 	int insn_cnt = bpf_prog->len;
 	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
 	bool seen_exit = false;
 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 	int i, cnt = 0;
@@ -367,6 +376,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		int ilen;
 		u8 *func;
 
+		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
+			ctx->seen_ax_reg = seen_ax_reg = true;
+
 		switch (insn->code) {
 			/* ALU */
 		case BPF_ALU | BPF_ADD | BPF_X:
@@ -1002,6 +1014,10 @@ common_load:
 			 * sk_load_* helpers also use %r10 and %r9d.
 			 * See bpf_jit.S
 			 */
+			if (seen_ax_reg)
+				/* r10 = skb->data, mov %r10, off32(%rbx) */
+				EMIT3_off32(0x4c, 0x8b, 0x93,
+					    offsetof(struct sk_buff, data));
 			EMIT1_off32(0xE8, jmp_offset); /* call */
 			break;
 
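The newly emitted bytes reload the cached skb->data pointer before each
ld_abs/ind helper call, since a program using BPF_REG_AX shares r10 and
may have clobbered the cache. A rough decode of the emitted instruction
(rbx holds the skb pointer, per the JIT convention noted in the comment
above):

	4c 8b 93 <off32>	/* mov r10, [rbx + off32]:
				 * 0x4c = REX.W|REX.R (64-bit op, reg field -> r10)
				 * 0x8b = MOV r64, r/m64 (load form)
				 * 0x93 = ModRM mod=10 (disp32), reg=010, rm=011 (rbx)
				 * off32 = offsetof(struct sk_buff, data)
				 */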
@@ -1076,19 +1092,34 @@ void bpf_jit_compile(struct bpf_prog *prog)
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
+	struct bpf_prog *tmp, *orig_prog = prog;
 	int proglen, oldproglen = 0;
 	struct jit_context ctx = {};
+	bool tmp_blinded = false;
 	u8 *image = NULL;
 	int *addrs;
 	int pass;
 	int i;
 
 	if (!bpf_jit_enable)
-		return prog;
+		return orig_prog;
+
+	tmp = bpf_jit_blind_constants(prog);
+	/* If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
 
 	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
-	if (!addrs)
-		return prog;
+	if (!addrs) {
+		prog = orig_prog;
+		goto out;
+	}
 
 	/* Before first pass, make a rough estimation of addrs[]
 	 * each bpf instruction is translated to less than 64 bytes
@@ -1110,21 +1141,25 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 			image = NULL;
 			if (header)
 				bpf_jit_binary_free(header);
-			goto out;
+			prog = orig_prog;
+			goto out_addrs;
 		}
 		if (image) {
 			if (proglen != oldproglen) {
 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
 				       proglen, oldproglen);
-				goto out;
+				prog = orig_prog;
+				goto out_addrs;
 			}
 			break;
 		}
 		if (proglen == oldproglen) {
 			header = bpf_jit_binary_alloc(proglen, &image,
 						      1, jit_fill_hole);
-			if (!header)
-				goto out;
+			if (!header) {
+				prog = orig_prog;
+				goto out_addrs;
+			}
 		}
 		oldproglen = proglen;
 	}
@@ -1138,8 +1173,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;
 	}
-out:
+
+out_addrs:
 	kfree(addrs);
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
 	return prog;
 }
 
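
Taken together, the error paths above implement the contract named in the
commit message: exactly one of the two programs (the blinded copy or the
original) is returned, and bpf_jit_prog_release_other() frees the other.
A minimal sketch of that calling pattern as another arch JIT might adopt
it (my_arch_jit_compile() is a hypothetical name, not part of this patch):

	struct bpf_prog *my_arch_jit_compile(struct bpf_prog *prog)
	{
		struct bpf_prog *tmp, *orig_prog = prog;
		bool tmp_blinded = false;

		tmp = bpf_jit_blind_constants(prog);
		if (IS_ERR(tmp))
			return orig_prog;	/* blinding requested but failed: use interpreter */
		if (tmp != prog) {
			tmp_blinded = true;	/* JIT the blinded copy instead */
			prog = tmp;
		}

		/* ... JIT passes; on any failure, set prog = orig_prog ... */

		if (tmp_blinded)
			/* release whichever program is not handed back */
			bpf_jit_prog_release_other(prog, prog == orig_prog ?
						   tmp : orig_prog);
		return prog;
	}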