author		Daniel Borkmann <dborkman@redhat.com>		2014-09-16 03:48:50 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2014-10-20 12:47:03 -0400
commit		b569c1c622c5e60c960a6ae5bd0880e0cdbd56b1 (patch)
tree		f2497f02c9435911adbdb66fd95e0dc6051acd78
parent		c0260ba906c4dfbcccd6414c3e2c0e73a7d7e35a (diff)
net: bpf: arm64: address randomize and write protect JIT code
This is the ARM64 variant for 314beb9bcab ("x86: bpf_jit_comp: secure bpf
jit against spraying attacks").

Thanks to commit 11d91a770f1f ("arm64: Add CONFIG_DEBUG_SET_MODULE_RONX
support") which added necessary infrastructure, we can now implement RO
marking of eBPF generated JIT image pages and randomize start offset for
the JIT code, so that it does not reside directly on a page boundary
anymore. Likewise, the holes are filled with illegal instructions: here
we use BRK #0x100 (opcode 0xd4202000) to trigger a fault in the kernel
(unallocated BRKs would trigger a fault through do_debug_exception).
This seems more reliable as we don't have a guaranteed undefined
instruction space on ARM64.

This is basically the ARM64 variant of what we already have in ARM via
commit 55309dd3d4cd ("net: bpf: arm: address randomize and write protect
JIT code"). Moreover, this commit also presents a merge resolution due
to conflicts with commit 60a3b2253c41 ("net: bpf: make eBPF interpreter
images read-only") as we don't use kfree() in bpf_jit_free() anymore to
release the locked bpf_prog structure, but instead bpf_prog_unlock_free()
through a different allocator.

JIT tested on aarch64 with BPF test suite.

Reference: http://mainisusuallyafunction.blogspot.com/2012/11/attacking-hardened-linux-systems-with.html

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Reviewed-by: Zi Shen Lim <zlim.lnx@gmail.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
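[Annotation] The randomized start offset and hole filling described above are not implemented in this file: they live in the generic allocator, bpf_jit_binary_alloc() in kernel/bpf/core.c, which this patch switches to; the arm64 side only supplies the image size, the instruction alignment, and a fill callback. A simplified sketch of that allocator follows, paraphrased from the 3.18-era code rather than quoted; the exact slack size and rounding are from memory and should be treated as approximate:

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are small; a little slack plus page
	 * rounding is usually enough. */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill the whole area with illegal instructions first; on
	 * arm64 this callback is jit_fill_hole() writing BRK #0x100. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));

	/* Random, alignment-respecting start offset: the JIT image no
	 * longer begins directly on a page boundary. */
	start = (prandom_u32() % hole) & ~(alignment - 1);
	*image_ptr = &hdr->image[start];

	return hdr;
}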
-rw-r--r--	arch/arm64/net/bpf_jit_comp.c	39
1 file changed, 30 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 7ae33545535b..71088952ed27 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -19,12 +19,13 @@
 #define pr_fmt(fmt) "bpf_jit: " fmt
 
 #include <linux/filter.h>
-#include <linux/moduleloader.h>
 #include <linux/printk.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
+
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
 
 #include "bpf_jit.h"
 
@@ -119,6 +120,14 @@ static inline int bpf2a64_offset(int bpf_to, int bpf_from,
 	return to - from;
 }
 
+static void jit_fill_hole(void *area, unsigned int size)
+{
+	u32 *ptr;
+	/* We are guaranteed to have aligned memory. */
+	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
+}
+
 static inline int epilogue_offset(const struct jit_ctx *ctx)
 {
 	int to = ctx->offset[ctx->prog->len - 1];
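[Annotation] The AARCH64_BREAK_FAULT constant used in jit_fill_hole() comes from <asm/debug-monitors.h>, which the first hunk adds to the includes, and encodes the BRK #0x100 named in the commit message. The arithmetic behind the quoted opcode 0xd4202000 works out as below; the macro names are as I recall them from the era's header and should be treated as illustrative:

/* A64 BRK: base opcode 0xd4200000, 16-bit immediate in bits [20:5]. */
#define AARCH64_BREAK_MON	0xd4200000
#define FAULT_BRK_IMM		0x100
/* 0xd4200000 | (0x100 << 5) == 0xd4202000, i.e. BRK #0x100 */
#define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))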
@@ -613,8 +622,10 @@ void bpf_jit_compile(struct bpf_prog *prog)
 
 void bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	struct bpf_binary_header *header;
 	struct jit_ctx ctx;
 	int image_size;
+	u8 *image_ptr;
 
 	if (!bpf_jit_enable)
 		return;
@@ -636,23 +647,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 		goto out;
 
 	build_prologue(&ctx);
-
 	build_epilogue(&ctx);
 
 	/* Now we know the actual image size. */
 	image_size = sizeof(u32) * ctx.idx;
-	ctx.image = module_alloc(image_size);
-	if (unlikely(ctx.image == NULL))
+	header = bpf_jit_binary_alloc(image_size, &image_ptr,
+				      sizeof(u32), jit_fill_hole);
+	if (header == NULL)
 		goto out;
 
 	/* 2. Now, the actual pass. */
 
+	ctx.image = (u32 *)image_ptr;
 	ctx.idx = 0;
+
 	build_prologue(&ctx);
 
 	ctx.body_offset = ctx.idx;
 	if (build_body(&ctx)) {
-		module_free(NULL, ctx.image);
+		bpf_jit_binary_free(header);
 		goto out;
 	}
 
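[Annotation] This hunk keeps the driver's existing two-pass shape: pass one runs with ctx.image == NULL purely to count instructions, which yields image_size; pass two re-emits into the freshly allocated buffer, which is why ctx.idx is reset and build_prologue() runs again. The emit() helper elsewhere in this file that makes this work looks roughly like the following (reconstructed from memory, not part of this diff):

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	/* Pass 1: no image yet, only advance the instruction counter.
	 * Pass 2: store the instruction, then advance. */
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}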
@@ -663,17 +676,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 		bpf_jit_dump(prog->len, image_size, 2, ctx.image);
 
 	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+
+	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
-
 out:
 	kfree(ctx.offset);
 }
 
 void bpf_jit_free(struct bpf_prog *prog)
 {
-	if (prog->jited)
-		module_free(NULL, prog->bpf_func);
+	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
+
+	if (!prog->jited)
+		goto free_filter;
+
+	set_memory_rw(addr, header->pages);
+	bpf_jit_binary_free(header);
 
-	kfree(prog);
+free_filter:
+	bpf_prog_unlock_free(prog);
 }
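[Annotation] The PAGE_MASK trick in bpf_jit_free() works because the allocator caps the random hole at PAGE_SIZE - sizeof(*hdr), so the image always starts inside the first page of the allocation; masking the function pointer down to a page boundary therefore lands exactly on the header. Illustratively, with made-up addresses and PAGE_SIZE == 4096:

/* Illustration only; the addresses are hypothetical.
 *
 *   header          at 0xffffffbffc000000  (page-aligned allocation)
 *   prog->bpf_func  at 0xffffffbffc000a6c  (random offset 0xa6c in page 0)
 *
 *   0xffffffbffc000a6c & PAGE_MASK == 0xffffffbffc000000 == header
 */
unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
struct bpf_binary_header *header = (void *)addr;

Note also that the pages are flipped back to read-write with set_memory_rw() before bpf_jit_binary_free(), undoing the set_memory_ro() applied in bpf_int_jit_compile() so the area is freed in its normal writable state.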