 arch/arm/net/bpf_jit_32.c    |  2 +-
 arch/s390/net/bpf_jit_comp.c |  1 +
 include/linux/filter.h       | 56 ++++++----------------------------------
 kernel/bpf/core.c            | 30 +-----------------------
 4 files changed, 11 insertions(+), 78 deletions(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6e8b71613039..f6a62ae44a65 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		/* there are 2 passes here */
 		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-	set_memory_ro((unsigned long)header, header->pages);
+	bpf_jit_binary_lock_ro(header);
 	prog->bpf_func = (void *)ctx.target;
 	prog->jited = 1;
 	prog->jited_len = image_size;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d2db8acb1a55..5f0234ec8038 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		goto free_addrs;
 	}
 	if (bpf_jit_prog(&jit, fp)) {
+		bpf_jit_binary_free(header);
 		fp = orig_fp;
 		goto free_addrs;
 	}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 20f2659dd829..300baad62c88 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -470,9 +470,7 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-	u16 pages;
-	u16 locked:1;
-
+	u32 pages;
 	/* Some arches need word alignment for their instructions */
 	u8 image[] __aligned(4);
 };
@@ -481,7 +479,7 @@ struct bpf_prog {
 	u16 pages;		/* Number of allocated pages */
 	u16 jited:1,		/* Is our filter JIT'ed? */
 	    jit_requested:1,	/* archs need to JIT the prog */
-	    locked:1,		/* Program image locked? */
+	    undo_set_mem:1,	/* Passed set_memory_ro() checkpoint */
 	    gpl_compatible:1,	/* Is filter GPL compatible? */
 	    cb_access:1,	/* Is control block accessed? */
 	    dst_needed:1,	/* Do we need dst entry? */
@@ -677,46 +675,24 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-	fp->locked = 1;
-	if (set_memory_ro((unsigned long)fp, fp->pages))
-		fp->locked = 0;
-#endif
+	fp->undo_set_mem = 1;
+	set_memory_ro((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-	if (fp->locked) {
-		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
-		/* In case set_memory_rw() fails, we want to be the first
-		 * to crash here instead of some random place later on.
-		 */
-		fp->locked = 0;
-	}
-#endif
+	if (fp->undo_set_mem)
+		set_memory_rw((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-	hdr->locked = 1;
-	if (set_memory_ro((unsigned long)hdr, hdr->pages))
-		hdr->locked = 0;
-#endif
+	set_memory_ro((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-	if (hdr->locked) {
-		WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-		/* In case set_memory_rw() fails, we want to be the first
-		 * to crash here instead of some random place later on.
-		 */
-		hdr->locked = 0;
-	}
-#endif
+	set_memory_rw((unsigned long)hdr, hdr->pages);
 }
 
@@ -728,22 +704,6 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp)
 	return (void *)addr;
 }
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
-{
-	if (!fp->locked)
-		return -ENOLCK;
-	if (fp->jited) {
-		const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
-
-		if (!hdr->locked)
-			return -ENOLCK;
-	}
-
-	return 0;
-}
-#endif
-
 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 {
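The filter.h hunks are the heart of the patch: the lock/unlock helpers lose their CONFIG_ARCH_HAS_SET_MEMORY failure handling and collapse to plain set_memory_ro()/set_memory_rw() calls, while the per-image locked bits give way to a single undo_set_mem flag that merely records whether a program passed the lock_ro checkpoint, presumably so that free paths only undo page attributes for images that actually went through lock_ro. A minimal compilable userspace model of the new semantics follows (an illustration, not kernel code: mprotect() stands in for set_memory_ro()/set_memory_rw(), and struct prog for struct bpf_prog):

/* Userspace model of the patched lock/unlock helpers. */
#include <string.h>
#include <sys/mman.h>

struct prog {
	unsigned int pages;
	unsigned int undo_set_mem:1;	/* passed the set_memory_ro() checkpoint */
	unsigned char *image;
};

static void prog_lock_ro(struct prog *p)
{
	p->undo_set_mem = 1;
	/* Best effort: a failure is tolerated rather than propagated,
	 * matching the patched bpf_prog_lock_ro(). */
	mprotect(p->image, p->pages * 4096UL, PROT_READ);
}

static void prog_unlock_ro(struct prog *p)
{
	/* Only undo protections for images that went through lock_ro. */
	if (p->undo_set_mem)
		mprotect(p->image, p->pages * 4096UL, PROT_READ | PROT_WRITE);
}

int main(void)
{
	struct prog p = { .pages = 1 };

	p.image = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p.image == MAP_FAILED)
		return 1;
	memcpy(p.image, "\xb7\x00\x00\x00", 4);	/* pretend these are insns */

	prog_lock_ro(&p);	/* image now (best effort) read-only */
	prog_unlock_ro(&p);	/* undone only because undo_set_mem is set */
	munmap(p.image, 4096);
	return 0;
}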
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index a9e6c04d0f4a..1e5625d46414 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -598,8 +598,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	bpf_fill_ill_insns(hdr, size);
 
 	hdr->pages = size / PAGE_SIZE;
-	hdr->locked = 0;
-
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
 	start = (get_random_int() % hole) & ~(alignment - 1);
@@ -1450,22 +1448,6 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 	return 0;
 }
 
-static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
-{
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-	int i, err;
-
-	for (i = 0; i < fp->aux->func_cnt; i++) {
-		err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
-		if (err)
-			return err;
-	}
-
-	return bpf_prog_check_pages_ro_single(fp);
-#endif
-	return 0;
-}
-
 static void bpf_prog_select_func(struct bpf_prog *fp)
 {
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
@@ -1524,17 +1506,7 @@ finalize:
 	 * all eBPF JITs might immediately support all features.
 	 */
 	*err = bpf_check_tail_call(fp);
-	if (*err)
-		return fp;
-
-	/* Checkpoint: at this point onwards any cBPF -> eBPF or
-	 * native eBPF program is read-only. If we failed to change
-	 * the page attributes (e.g. allocation failure from
-	 * splitting large pages), then reject the whole program
-	 * in order to guarantee not ending up with any W+X pages
-	 * from BPF side in kernel.
-	 */
-	*err = bpf_prog_check_pages_ro_locked(fp);
+
 	return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
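The removed checkpoint comment captures the behavioural change: previously a set_memory_ro() failure (e.g. an allocation failure while splitting large pages) caused bpf_prog_select_runtime() to reject the whole program via bpf_prog_check_pages_ro_locked(); with this patch the lock is best effort again and the program is kept. A compilable sketch of the two policies, compressed into one step (hypothetical helper names; lock_fails stands in for the page-attribute call failing):

/* Sketch of the policy change in bpf_prog_select_runtime(). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool lock_image(bool lock_fails)
{
	return !lock_fails;	/* stand-in for set_memory_ro() */
}

/* Old policy (removed here): reject the program on lock failure. */
static int select_runtime_old(bool lock_fails)
{
	if (!lock_image(lock_fails))
		return -ENOLCK;	/* no W+X pages, but the prog is rejected */
	return 0;
}

/* New policy: best effort; a failed lock leaves the image writable
 * but the program still runs. */
static int select_runtime_new(bool lock_fails)
{
	lock_image(lock_fails);
	return 0;
}

int main(void)
{
	printf("old policy, lock fails: %d\n", select_runtime_old(true));	/* -ENOLCK */
	printf("new policy, lock fails: %d\n", select_runtime_new(true));	/* 0 */
	return 0;
}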