Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--	kernel/bpf/core.c	69
1 file changed, 62 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 9f1493705f40..a9e6c04d0f4a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 	return prog_adj;
 }
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+	int i;
+
+	for (i = 0; i < fp->aux->func_cnt; i++)
+		bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+	bpf_prog_kallsyms_del_subprogs(fp);
+	bpf_prog_kallsyms_del(fp);
+}
+
 #ifdef CONFIG_BPF_JIT
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
@@ -584,6 +598,8 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	bpf_fill_ill_insns(hdr, size);
 
 	hdr->pages = size / PAGE_SIZE;
+	hdr->locked = 0;
+
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
 	start = (get_random_int() % hole) & ~(alignment - 1);
@@ -1434,6 +1450,33 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 	return 0;
 }
 
+static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
+{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+	int i, err;
+
+	for (i = 0; i < fp->aux->func_cnt; i++) {
+		err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
+		if (err)
+			return err;
+	}
+
+	return bpf_prog_check_pages_ro_single(fp);
+#endif
+	return 0;
+}
+
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+	fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
 /**
  * bpf_prog_select_runtime - select exec runtime for BPF program
  * @fp: bpf_prog populated with internal BPF program
@@ -1444,13 +1487,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+	/* In case of BPF to BPF calls, verifier did all the prep
+	 * work with regards to JITing, etc.
+	 */
+	if (fp->bpf_func)
+		goto finalize;
 
-	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-	fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+	bpf_prog_select_func(fp);
 
 	/* eBPF JITs can rewrite the program in case constant
 	 * blinding is active. However, in case of error during
@@ -1471,6 +1514,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 		if (*err)
 			return fp;
 	}
+
+finalize:
 	bpf_prog_lock_ro(fp);
 
 	/* The tail call compatibility check can only be done at
@@ -1479,7 +1524,17 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	 * all eBPF JITs might immediately support all features.
 	 */
 	*err = bpf_check_tail_call(fp);
-
+	if (*err)
+		return fp;
+
+	/* Checkpoint: at this point onwards any cBPF -> eBPF or
+	 * native eBPF program is read-only. If we failed to change
+	 * the page attributes (e.g. allocation failure from
+	 * splitting large pages), then reject the whole program
+	 * in order to guarantee not ending up with any W+X pages
+	 * from BPF side in kernel.
+	 */
+	*err = bpf_prog_check_pages_ro_locked(fp);
 	return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
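
Note: the new bpf_prog_check_pages_ro_locked() helper calls bpf_prog_check_pages_ro_single() for each subprogram and then for the main program, but that function's definition lies outside this file's diff (the diffstat above is limited to kernel/bpf/core.c). As a rough sketch only, assuming the companion change records lock success in an fp->locked flag (mirroring the hdr->locked field initialized in bpf_jit_binary_alloc() above) and provides a bpf_jit_binary_hdr() helper mapping fp->bpf_func back to its struct bpf_binary_header, the per-program check could look like:

/* Sketch only, not a hunk from this patch: verify that both the prog
 * itself and, for a JITed prog, its binary image were successfully
 * locked read-only. fp->locked and bpf_jit_binary_hdr() are assumed
 * to come from the companion change outside this file.
 */
static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	if (!fp->locked)
		return -ENOLCK;	/* prog pages were never locked read-only */
	if (fp->jited) {
		const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		if (!hdr->locked)
			return -ENOLCK;	/* JIT image lock failed */
	}
#endif
	return 0;
}

Returning an error here (the sketch uses -ENOLCK) rather than merely warning is what backs the "Checkpoint" comment in the last hunk: a program whose pages could not be made read-only is rejected outright, so no BPF-owned page is ever left both writable and executable.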