Diffstat (limited to 'arch/s390/net/bpf_jit_comp.c')
-rw-r--r--  arch/s390/net/bpf_jit_comp.c  113
1 file changed, 88 insertions(+), 25 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f8078c..d5f10a43a58f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -9,6 +9,8 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/filter.h>
+#include <linux/random.h>
+#include <linux/init.h>
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/facility.h>
@@ -221,6 +223,37 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
 	EMIT2(0x07fe);
 }
 
+/* Helper to find the offset of pkt_type in sk_buff.
+ * Make sure it's still a 3-bit field starting at the MSBs within a byte.
+ */
+#define PKT_TYPE_MAX 0xe0
+static int pkt_type_offset;
+
+static int __init bpf_pkt_type_offset_init(void)
+{
+	struct sk_buff skb_probe = {
+		.pkt_type = ~0,
+	};
+	char *ct = (char *)&skb_probe;
+	int off;
+
+	pkt_type_offset = -1;
+	for (off = 0; off < sizeof(struct sk_buff); off++) {
+		if (!ct[off])
+			continue;
+		if (ct[off] == PKT_TYPE_MAX)
+			pkt_type_offset = off;
+		else {
+			/* Found non-matching bit pattern, fix needed. */
+			WARN_ON_ONCE(1);
+			pkt_type_offset = -1;
+			return -1;
+		}
+	}
+	return 0;
+}
+device_initcall(bpf_pkt_type_offset_init);
+
 /*
  * make sure we don't leak kernel information to user
  */
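The probe works because C leaves bitfield placement to the ABI: writing all ones to skb->pkt_type and scanning the raw bytes finds the single byte that reads 0xe0, i.e. three set bits at the MSBs, which is the layout s390's big-endian ABI produces. Below is a minimal userspace sketch of the same technique; struct demo and its fields are hypothetical stand-ins for struct sk_buff and pkt_type.

	#include <stdio.h>
	#include <string.h>

	struct demo {
		unsigned long before;
		unsigned char pkt_type:3;	/* 3-bit field, like skb->pkt_type */
		unsigned char flags:5;
		unsigned long after;
	};

	int main(void)
	{
		struct demo probe;
		unsigned char *ct = (unsigned char *)&probe;
		size_t off;

		memset(&probe, 0, sizeof(probe));
		probe.pkt_type = ~0;		/* set all three bits */
		for (off = 0; off < sizeof(probe); off++)
			if (ct[off] == 0xe0)	/* MSB-first layout, as on s390; */
				printf("offset %zu\n", off); /* LSB-first ABIs read 0x07 */
		return 0;
	}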
@@ -720,6 +753,16 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		EMIT4_DISP(0x88500000, 12);
 	}
 	break;
+	case BPF_S_ANC_PKTTYPE:
+		if (pkt_type_offset < 0)
+			goto out;
+		/* lhi %r5,0 */
+		EMIT4(0xa7580000);
+		/* ic %r5,<d(pkt_type_offset)>(%r2) */
+		EMIT4_DISP(0x43502000, pkt_type_offset);
+		/* srl %r5,5 */
+		EMIT4_DISP(0x88500000, 5);
+		break;
 	case BPF_S_ANC_CPU: /* A = smp_processor_id() */
 #ifdef CONFIG_SMP
 		/* l %r5,<d(cpu_nr)> */
@@ -738,8 +781,41 @@ out:
 	return -1;
 }
 
+/*
+ * Note: for security reasons, bpf code will follow a randomly
+ * sized amount of illegal instructions.
+ */
+struct bpf_binary_header {
+	unsigned int pages;
+	u8 image[];
+};
+
+static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
+						  u8 **image_ptr)
+{
+	struct bpf_binary_header *header;
+	unsigned int sz, hole;
+
+	/* Most BPF filters are really small, but if some of them fill a page,
+	 * allow at least 128 extra bytes for illegal instructions.
+	 */
+	sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
+	header = module_alloc(sz);
+	if (!header)
+		return NULL;
+	memset(header, 0, sz);
+	header->pages = sz / PAGE_SIZE;
+	hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
+	/* Insert a random number of illegal instructions before the BPF code
+	 * and make sure the first instruction starts at an even address.
+	 */
+	*image_ptr = &header->image[(prandom_u32() % hole) & -2];
+	return header;
+}
+
 void bpf_jit_compile(struct sk_filter *fp)
 {
+	struct bpf_binary_header *header = NULL;
 	unsigned long size, prg_len, lit_len;
 	struct bpf_jit jit, cjit;
 	unsigned int *addrs;
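A worked example of the sizing arithmetic in bpf_alloc_binary(), assuming 4 KB pages and a hypothetical 512-byte JIT image:

	bpfsize = 512, sizeof(*header) = 8:

	sz   = round_up(512 + 8 + 128, 4096)    = 4096   (1 page)
	hole = min(4096 - (512 + 8), 4096 - 8)  = 3576

The image then starts at header->image + ((prandom_u32() % 3576) & -2), a random even offset that always lies within the first page; capping the hole at PAGE_SIZE - sizeof(*header) is what lets bpf_jit_free() recover the header with "& PAGE_MASK".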
@@ -772,12 +848,11 @@ void bpf_jit_compile(struct sk_filter *fp)
 	} else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
 		prg_len = jit.prg - jit.start;
 		lit_len = jit.lit - jit.mid;
-		size = max_t(unsigned long, prg_len + lit_len,
-			     sizeof(struct work_struct));
+		size = prg_len + lit_len;
 		if (size >= BPF_SIZE_MAX)
 			goto out;
-		jit.start = module_alloc(size);
-		if (!jit.start)
+		header = bpf_alloc_binary(size, &jit.start);
+		if (!header)
 			goto out;
 		jit.prg = jit.mid = jit.start + prg_len;
 		jit.lit = jit.end = jit.start + prg_len + lit_len;
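The dropped max_t() lower bound existed only because the old free path overlaid a work_struct on the start of the JIT image to defer module_free() to process context, as in the code removed from bpf_jit_free() further down:

	/* Old scheme the minimum size guarded: the image itself had to be
	 * big enough to host a work_struct.
	 */
	work = (struct work_struct *)fp->bpf_func;
	INIT_WORK(work, jit_free_defer);
	schedule_work(work);

With the header-based allocator the header is freed directly, so the image no longer doubles as a work item.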
@@ -788,37 +863,25 @@ void bpf_jit_compile(struct sk_filter *fp)
 		cjit = jit;
 	}
 	if (bpf_jit_enable > 1) {
-		pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
-		       fp->len, jit.end - jit.start, pass, jit.start);
-		if (jit.start) {
-			printk(KERN_ERR "JIT code:\n");
+		bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
+		if (jit.start)
 			print_fn_code(jit.start, jit.mid - jit.start);
-			print_hex_dump(KERN_ERR, "JIT literals:\n",
-				       DUMP_PREFIX_ADDRESS, 16, 1,
-				       jit.mid, jit.end - jit.mid, false);
-		}
 	}
-	if (jit.start)
+	if (jit.start) {
+		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *) jit.start;
+	}
 out:
 	kfree(addrs);
 }
 
-static void jit_free_defer(struct work_struct *arg)
-{
-	module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
 void bpf_jit_free(struct sk_filter *fp)
 {
-	struct work_struct *work;
+	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
 
 	if (fp->bpf_func == sk_run_filter)
 		return;
-	work = (struct work_struct *)fp->bpf_func;
-	INIT_WORK(work, jit_free_defer);
-	schedule_work(work);
+	set_memory_rw(addr, header->pages);
+	module_free(NULL, header);
 }
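The free path recovers the header purely by address arithmetic. A worked example, assuming 4 KB pages and a hypothetical image placed 0x2a8 bytes into the allocation:

	fp->bpf_func                = 0x3ff800002a8   (hypothetical)
	addr = bpf_func & PAGE_MASK = 0x3ff80000000   (allocation start = header)

header->pages then says how many pages to flip back to read-write and hand to module_free(). This only works because bpf_alloc_binary() caps the random hole so the image never starts past the first page.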