author     Heiko Carstens <heiko.carstens@de.ibm.com>	2013-07-16 07:25:49 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-07-18 06:44:37 -0400
commit     aa2d2c73c21f22ce4c643128b671aa7e7bbff54f (patch)
tree       2f30547e740f0931dce2b4d864ee952ef05a297a /arch
parent     fee1b5488d76396d8f95989624d37f436b3fba44 (diff)
s390/bpf,jit: address randomize and write protect jit code
This is the s390 variant of 314beb9b "x86: bpf_jit_comp: secure bpf jit
against spraying attacks". With this change the whole jit code and literal
pool will be write protected after creation. In addition, the start address
of the jit code will no longer always be on a page boundary.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
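The arithmetic behind the randomized start address is small enough to sketch in isolation. The following userspace program is only an illustration of the patch below, not kernel code: PAGE_SIZE, round_up_pow2() and pick_start_offset() are local stand-ins, and the numbers in main() are arbitrary. It shows how the allocation is rounded up to a whole page, how the slack ("hole") between header plus JIT image and the allocation end is computed, and how a random even offset inside that hole is chosen so the code no longer starts at a predictable, page-aligned address. The even alignment mirrors the s390 requirement that instructions start on halfword boundaries.

/* Userspace sketch of the offset randomization done by the patch below.
 * All names are local stand-ins; only the arithmetic mirrors the patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096u

/* Round x up to the next multiple of align (align must be a power of two). */
static unsigned int round_up_pow2(unsigned int x, unsigned int align)
{
	return (x + align - 1) & ~(align - 1);
}

/* Mimic bpf_alloc_binary(): given the JIT image size and the header size,
 * compute the padded allocation size and a random, even start offset.
 */
static unsigned int pick_start_offset(unsigned int bpfsize,
				      unsigned int header_size)
{
	unsigned int sz   = round_up_pow2(bpfsize + header_size + 128, PAGE_SIZE);
	unsigned int hole = sz - (bpfsize + header_size);

	/* Random offset inside the hole, forced even because s390
	 * instructions must start on halfword boundaries.
	 */
	return ((unsigned int)rand() % hole) & ~1u;
}

int main(void)
{
	srand((unsigned int)time(NULL));
	for (int i = 0; i < 4; i++)
		printf("start offset: %u\n", pick_start_offset(600, 4));
	return 0;
}

Each run prints different even offsets, which is the point: an attacker spraying the module area can no longer predict where the JITed filter begins.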
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/net/bpf_jit_comp.c	51
1 file changed, 46 insertions(+), 5 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 80828bfee2ec..788e22395acd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -9,6 +9,7 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/filter.h>
+#include <linux/random.h>
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/facility.h>
@@ -738,8 +739,41 @@ out:
 	return -1;
 }
 
+/*
+ * Note: for security reasons, bpf code will follow a randomly
+ * sized amount of illegal instructions.
+ */
+struct bpf_binary_header {
+	unsigned int pages;
+	u8 image[];
+};
+
+static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
+						  u8 **image_ptr)
+{
+	struct bpf_binary_header *header;
+	unsigned int sz, hole;
+
+	/* Most BPF filters are really small, but if some of them fill a page,
+	 * allow at least 128 extra bytes for illegal instructions.
+	 */
+	sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
+	header = module_alloc(sz);
+	if (!header)
+		return NULL;
+	memset(header, 0, sz);
+	header->pages = sz / PAGE_SIZE;
+	hole = sz - (bpfsize + sizeof(*header));
+	/* Insert random number of illegal instructions before BPF code
+	 * and make sure the first instruction starts at an even address.
+	 */
+	*image_ptr = &header->image[(prandom_u32() % hole) & -2];
+	return header;
+}
+
 void bpf_jit_compile(struct sk_filter *fp)
 {
+	struct bpf_binary_header *header = NULL;
 	unsigned long size, prg_len, lit_len;
 	struct bpf_jit jit, cjit;
 	unsigned int *addrs;
@@ -775,8 +809,8 @@ void bpf_jit_compile(struct sk_filter *fp)
 	size = prg_len + lit_len;
 	if (size >= BPF_SIZE_MAX)
 		goto out;
-	jit.start = module_alloc(size);
-	if (!jit.start)
+	header = bpf_alloc_binary(size, &jit.start);
+	if (!header)
 		goto out;
 	jit.prg = jit.mid = jit.start + prg_len;
 	jit.lit = jit.end = jit.start + prg_len + lit_len;
@@ -791,14 +825,21 @@ void bpf_jit_compile(struct sk_filter *fp)
 		if (jit.start)
 			print_fn_code(jit.start, jit.mid - jit.start);
 	}
-	if (jit.start)
+	if (jit.start) {
+		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *) jit.start;
+	}
 out:
 	kfree(addrs);
 }
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-	if (fp->bpf_func != sk_run_filter)
-		module_free(NULL, fp->bpf_func);
+	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
+
+	if (fp->bpf_func == sk_run_filter)
+		return;
+	set_memory_rw(addr, header->pages);
+	module_free(NULL, header);
 }
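The free path in the hunk above relies on module_alloc() returning page-aligned memory: the bpf_binary_header sits at the very start of the allocation, so rounding the randomized function pointer down with PAGE_MASK lands back on the header, whose pages field says how much to unprotect and free. The userspace sketch below only demonstrates that masking trick; PAGE_SIZE, fake_binary_header and the fixed offset 42 are stand-ins chosen for illustration, and it assumes the randomized start stays within the first page of the allocation.

/* Sketch of the header-recovery trick used in bpf_jit_free() above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

struct fake_binary_header {
	unsigned int pages;
	uint8_t image[];
};

int main(void)
{
	/* Page-aligned allocation standing in for module_alloc(). */
	struct fake_binary_header *header = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!header)
		return 1;
	header->pages = 1;

	/* Code starts at some even offset inside the image. */
	uint8_t *code = &header->image[42 & ~1];

	/* What bpf_jit_free() does: mask the code address down to the page. */
	struct fake_binary_header *found =
		(struct fake_binary_header *)((uintptr_t)code & PAGE_MASK);

	printf("recovered header: %s (pages=%u)\n",
	       found == header ? "yes" : "no", found->pages);
	free(header);
	return 0;
}

Note also the symmetry in the patch itself: bpf_jit_compile() flips the pages to read-only with set_memory_ro() once the image is final, and bpf_jit_free() flips them back with set_memory_rw() before handing the allocation to module_free().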