about | summary | refs | log | tree | commit | diff | stats
path: root/arch/s390/net
diff options
context:
space:
mode:
author	Daniel Borkmann <daniel@iogearbox.net>	2016-05-13 13:08:35 -0400
committer	David S. Miller <davem@davemloft.net>	2016-05-16 13:49:33 -0400
commitd93a47f735f3455a896e46b18d0ac26fa19639e6 (patch)
treeec3eef1f80ae2f1d5fdbb9a5c12df5bf06025ad7 /arch/s390/net
parent26eb042ee4c7845aa395c41c4e125c240b82b984 (diff)
bpf, s390: add support for constant blinding
This patch adds recently added constant blinding helpers into the s390 eBPF JIT. In the bpf_int_jit_compile() path, requirements are to utilize bpf_jit_blind_constants()/bpf_jit_prog_release_other() pair for rewriting the program into a blinded one, and to map the BPF_REG_AX register to a CPU register. The mapping of BPF_REG_AX is at r12 and similarly like in x86 case performs reloading when ld_abs/ind is used. When blinding is not used, there's no additional overhead in the generated image. When BPF_REG_AX is used, we don't need to emit skb->data reload when helper function changed skb->data, as this will be reloaded later on anyway from stack on ld_abs/ind, where skb->data is needed. s390 allows for this w/o much additional complexity unlike f.e. x86. Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/s390/net')
-rw-r--r--	arch/s390/net/bpf_jit_comp.c	| 73
1 file changed, 56 insertions(+), 17 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index fcf301a889e7..9133b0ec000b 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -54,16 +54,17 @@ struct bpf_jit {
54#define SEEN_FUNC 16 /* calls C functions */ 54#define SEEN_FUNC 16 /* calls C functions */
55#define SEEN_TAIL_CALL 32 /* code uses tail calls */ 55#define SEEN_TAIL_CALL 32 /* code uses tail calls */
56#define SEEN_SKB_CHANGE 64 /* code changes skb data */ 56#define SEEN_SKB_CHANGE 64 /* code changes skb data */
57#define SEEN_REG_AX 128 /* code uses constant blinding */
57#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) 58#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
58 59
59/* 60/*
60 * s390 registers 61 * s390 registers
61 */ 62 */
62#define REG_W0 (__MAX_BPF_REG+0) /* Work register 1 (even) */ 63#define REG_W0 (MAX_BPF_JIT_REG + 0) /* Work register 1 (even) */
63#define REG_W1 (__MAX_BPF_REG+1) /* Work register 2 (odd) */ 64#define REG_W1 (MAX_BPF_JIT_REG + 1) /* Work register 2 (odd) */
64#define REG_SKB_DATA (__MAX_BPF_REG+2) /* SKB data register */ 65#define REG_SKB_DATA (MAX_BPF_JIT_REG + 2) /* SKB data register */
65#define REG_L (__MAX_BPF_REG+3) /* Literal pool register */ 66#define REG_L (MAX_BPF_JIT_REG + 3) /* Literal pool register */
66#define REG_15 (__MAX_BPF_REG+4) /* Register 15 */ 67#define REG_15 (MAX_BPF_JIT_REG + 4) /* Register 15 */
67#define REG_0 REG_W0 /* Register 0 */ 68#define REG_0 REG_W0 /* Register 0 */
68#define REG_1 REG_W1 /* Register 1 */ 69#define REG_1 REG_W1 /* Register 1 */
69#define REG_2 BPF_REG_1 /* Register 2 */ 70#define REG_2 BPF_REG_1 /* Register 2 */
@@ -88,6 +89,8 @@ static const int reg2hex[] = {
88 [BPF_REG_9] = 10, 89 [BPF_REG_9] = 10,
89 /* BPF stack pointer */ 90 /* BPF stack pointer */
90 [BPF_REG_FP] = 13, 91 [BPF_REG_FP] = 13,
92 /* Register for blinding (shared with REG_SKB_DATA) */
93 [BPF_REG_AX] = 12,
91 /* SKB data pointer */ 94 /* SKB data pointer */
92 [REG_SKB_DATA] = 12, 95 [REG_SKB_DATA] = 12,
93 /* Work registers for s390x backend */ 96 /* Work registers for s390x backend */
@@ -385,7 +388,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
385/* 388/*
386 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S" 389 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
387 * we store the SKB header length on the stack and the SKB data 390 * we store the SKB header length on the stack and the SKB data
388 * pointer in REG_SKB_DATA. 391 * pointer in REG_SKB_DATA if BPF_REG_AX is not used.
389 */ 392 */
390static void emit_load_skb_data_hlen(struct bpf_jit *jit) 393static void emit_load_skb_data_hlen(struct bpf_jit *jit)
391{ 394{
@@ -397,9 +400,10 @@ static void emit_load_skb_data_hlen(struct bpf_jit *jit)
397 offsetof(struct sk_buff, data_len)); 400 offsetof(struct sk_buff, data_len));
398 /* stg %w1,ST_OFF_HLEN(%r0,%r15) */ 401 /* stg %w1,ST_OFF_HLEN(%r0,%r15) */
399 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN); 402 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
400 /* lg %skb_data,data_off(%b1) */ 403 if (!(jit->seen & SEEN_REG_AX))
401 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, 404 /* lg %skb_data,data_off(%b1) */
402 BPF_REG_1, offsetof(struct sk_buff, data)); 405 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
406 BPF_REG_1, offsetof(struct sk_buff, data));
403} 407}
404 408
405/* 409/*
@@ -487,6 +491,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
487 s32 imm = insn->imm; 491 s32 imm = insn->imm;
488 s16 off = insn->off; 492 s16 off = insn->off;
489 493
494 if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
495 jit->seen |= SEEN_REG_AX;
490 switch (insn->code) { 496 switch (insn->code) {
491 /* 497 /*
492 * BPF_MOV 498 * BPF_MOV
@@ -1188,7 +1194,7 @@ call_fn:
1188 /* 1194 /*
1189 * Implicit input: 1195 * Implicit input:
1190 * BPF_REG_6 (R7) : skb pointer 1196 * BPF_REG_6 (R7) : skb pointer
1191 * REG_SKB_DATA (R12): skb data pointer 1197 * REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX)
1192 * 1198 *
1193 * Calculated input: 1199 * Calculated input:
1194 * BPF_REG_2 (R3) : offset of byte(s) to fetch in skb 1200 * BPF_REG_2 (R3) : offset of byte(s) to fetch in skb
@@ -1209,6 +1215,11 @@ call_fn:
1209 /* agfr %b2,%src (%src is s32 here) */ 1215 /* agfr %b2,%src (%src is s32 here) */
1210 EMIT4(0xb9180000, BPF_REG_2, src_reg); 1216 EMIT4(0xb9180000, BPF_REG_2, src_reg);
1211 1217
1218 /* Reload REG_SKB_DATA if BPF_REG_AX is used */
1219 if (jit->seen & SEEN_REG_AX)
1220 /* lg %skb_data,data_off(%b6) */
1221 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
1222 BPF_REG_6, offsetof(struct sk_buff, data));
1212 /* basr %b5,%w1 (%b5 is call saved) */ 1223 /* basr %b5,%w1 (%b5 is call saved) */
1213 EMIT2(0x0d00, BPF_REG_5, REG_W1); 1224 EMIT2(0x0d00, BPF_REG_5, REG_W1);
1214 1225
@@ -1264,36 +1275,60 @@ void bpf_jit_compile(struct bpf_prog *fp)
1264 */ 1275 */
1265struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) 1276struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1266{ 1277{
1278 struct bpf_prog *tmp, *orig_fp = fp;
1267 struct bpf_binary_header *header; 1279 struct bpf_binary_header *header;
1280 bool tmp_blinded = false;
1268 struct bpf_jit jit; 1281 struct bpf_jit jit;
1269 int pass; 1282 int pass;
1270 1283
1271 if (!bpf_jit_enable) 1284 if (!bpf_jit_enable)
1272 return fp; 1285 return orig_fp;
1286
1287 tmp = bpf_jit_blind_constants(fp);
1288 /*
1289 * If blinding was requested and we failed during blinding,
1290 * we must fall back to the interpreter.
1291 */
1292 if (IS_ERR(tmp))
1293 return orig_fp;
1294 if (tmp != fp) {
1295 tmp_blinded = true;
1296 fp = tmp;
1297 }
1273 1298
1274 memset(&jit, 0, sizeof(jit)); 1299 memset(&jit, 0, sizeof(jit));
1275 jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); 1300 jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1276 if (jit.addrs == NULL) 1301 if (jit.addrs == NULL) {
1277 return fp; 1302 fp = orig_fp;
1303 goto out;
1304 }
1278 /* 1305 /*
1279 * Three initial passes: 1306 * Three initial passes:
1280 * - 1/2: Determine clobbered registers 1307 * - 1/2: Determine clobbered registers
1281 * - 3: Calculate program size and addrs arrray 1308 * - 3: Calculate program size and addrs arrray
1282 */ 1309 */
1283 for (pass = 1; pass <= 3; pass++) { 1310 for (pass = 1; pass <= 3; pass++) {
1284 if (bpf_jit_prog(&jit, fp)) 1311 if (bpf_jit_prog(&jit, fp)) {
1312 fp = orig_fp;
1285 goto free_addrs; 1313 goto free_addrs;
1314 }
1286 } 1315 }
1287 /* 1316 /*
1288 * Final pass: Allocate and generate program 1317 * Final pass: Allocate and generate program
1289 */ 1318 */
1290 if (jit.size >= BPF_SIZE_MAX) 1319 if (jit.size >= BPF_SIZE_MAX) {
1320 fp = orig_fp;
1291 goto free_addrs; 1321 goto free_addrs;
1322 }
1292 header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole); 1323 header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
1293 if (!header) 1324 if (!header) {
1325 fp = orig_fp;
1294 goto free_addrs; 1326 goto free_addrs;
1295 if (bpf_jit_prog(&jit, fp)) 1327 }
1328 if (bpf_jit_prog(&jit, fp)) {
1329 fp = orig_fp;
1296 goto free_addrs; 1330 goto free_addrs;
1331 }
1297 if (bpf_jit_enable > 1) { 1332 if (bpf_jit_enable > 1) {
1298 bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf); 1333 bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1299 if (jit.prg_buf) 1334 if (jit.prg_buf)
@@ -1306,6 +1341,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1306 } 1341 }
1307free_addrs: 1342free_addrs:
1308 kfree(jit.addrs); 1343 kfree(jit.addrs);
1344out:
1345 if (tmp_blinded)
1346 bpf_jit_prog_release_other(fp, fp == orig_fp ?
1347 tmp : orig_fp);
1309 return fp; 1348 return fp;
1310} 1349}
1311 1350