author    Linus Torvalds <torvalds@linux-foundation.org>  2013-07-19 18:08:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-07-19 18:08:12 -0400
commit    89a8c5940d5cb43e6bede51bf4b3a7516b0ca622 (patch)
tree      417f3eacd4f059d3a787911ec907a9046b0ae9bc /arch
parent    b8a33fc7258f9e6e42b15571d0284b7c0ef0d0a9 (diff)
parent    9da3545d827cdb9163697a1dc4471fbb5540e85f (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:
 "An update for the BPF jit to the latest and greatest, two patches to
  get kdump working again, the random-abort ptrace extension for
  transactional execution, the z90crypt module alias for ap and a tiny
  cleanup"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/zcrypt: Alias for new zcrypt device driver base module
  s390/kdump: Allow copy_oldmem_page() copy to virtual memory
  s390/kdump: Disable mmap for s390
  s390/bpf,jit: add pkt_type support
  s390/bpf,jit: address randomize and write protect jit code
  s390/bpf,jit: use generic jit dumper
  s390/bpf,jit: call module_free() from any context
  s390/qdio: remove unused variable
  s390/ptrace: PTRACE_TE_ABORT_RAND
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/include/asm/processor.h    |  10
-rw-r--r--  arch/s390/include/asm/switch_to.h    |   4
-rw-r--r--  arch/s390/include/uapi/asm/ptrace.h  |   1
-rw-r--r--  arch/s390/kernel/crash_dump.c        |  51
-rw-r--r--  arch/s390/kernel/ptrace.c            |  50
-rw-r--r--  arch/s390/net/bpf_jit_comp.c         | 113
6 files changed, 186 insertions, 43 deletions
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6b499870662f..b0e6435b2f02 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -91,7 +91,15 @@ struct thread_struct {
 #endif
 };
 
-#define PER_FLAG_NO_TE	1UL	/* Flag to disable transactions. */
+/* Flag to disable transactions. */
+#define PER_FLAG_NO_TE			1UL
+/* Flag to enable random transaction aborts. */
+#define PER_FLAG_TE_ABORT_RAND		2UL
+/* Flag to specify random transaction abort mode:
+ * - abort each transaction at a random instruction before TEND if set.
+ * - abort random transactions at a random instruction if cleared.
+ */
+#define PER_FLAG_TE_ABORT_RAND_TEND	4UL
 
 typedef struct thread_struct thread_struct;
 
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index f3a9e0f92704..80b6f11263c4 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -10,7 +10,7 @@
 #include <linux/thread_info.h>
 
 extern struct task_struct *__switch_to(void *, void *);
-extern void update_per_regs(struct task_struct *task);
+extern void update_cr_regs(struct task_struct *task);
 
 static inline void save_fp_regs(s390_fp_regs *fpregs)
 {
@@ -86,7 +86,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 	restore_fp_regs(&next->thread.fp_regs);			\
 	restore_access_regs(&next->thread.acrs[0]);		\
 	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
-	update_per_regs(next);					\
+	update_cr_regs(next);					\
 	}							\
 	prev = __switch_to(prev,next);				\
 } while (0)
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 3aa9f1ec5b29..7a84619e315e 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -400,6 +400,7 @@ typedef struct
 #define PTRACE_POKE_SYSTEM_CALL	0x5008
 #define PTRACE_ENABLE_TE		0x5009
 #define PTRACE_DISABLE_TE		0x5010
+#define PTRACE_TE_ABORT_RAND		0x5011
 
 /*
  * PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f703d91bf720..d8f355657171 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -21,6 +21,48 @@
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+
+/*
+ * Return physical address for virtual address
+ */
+static inline void *load_real_addr(void *addr)
+{
+	unsigned long real_addr;
+
+	asm volatile(
+		"	lra	%0,0(%1)\n"
+		"	jz	0f\n"
+		"	la	%0,0\n"
+		"0:"
+		: "=a" (real_addr) : "a" (addr) : "cc");
+	return (void *)real_addr;
+}
+
+/*
+ * Copy up to one page to vmalloc or real memory
+ */
+static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+{
+	size_t size;
+
+	if (is_vmalloc_addr(buf)) {
+		BUG_ON(csize >= PAGE_SIZE);
+		/* If buf is not page aligned, copy first part */
+		size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
+		if (size) {
+			if (memcpy_real(load_real_addr(buf), src, size))
+				return -EFAULT;
+			buf += size;
+			src += size;
+		}
+		/* Copy second part */
+		size = csize - size;
+		return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
+	} else {
+		return memcpy_real(buf, src, csize);
+	}
+}
+
 /*
  * Copy one page from "oldmem"
  *
@@ -32,6 +74,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 			 size_t csize, unsigned long offset, int userbuf)
 {
 	unsigned long src;
+	int rc;
 
 	if (!csize)
 		return 0;
@@ -43,11 +86,11 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	    src < OLDMEM_BASE + OLDMEM_SIZE)
 		src -= OLDMEM_BASE;
 	if (userbuf)
-		copy_to_user_real((void __force __user *) buf, (void *) src,
-				  csize);
+		rc = copy_to_user_real((void __force __user *) buf,
+				       (void *) src, csize);
 	else
-		memcpy_real(buf, (void *) src, csize);
-	return csize;
+		rc = copy_page_real(buf, (void *) src, csize);
+	return (rc == 0) ? csize : rc;
 }
 
 /*
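
Note on the new copy_page_real(): a vmalloc'ed destination buffer that is not
page aligned can map to two discontiguous physical pages, so the copy is split
into at most two memcpy_real() calls, one up to the next page boundary and one
for the remainder. A minimal userspace sketch of the same split arithmetic
(PAGE_SIZE, the address, and the sizes are illustrative, not kernel API):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Round up to the next page boundary, as the kernel's roundup() does. */
	static unsigned long roundup_page(unsigned long x)
	{
		return (x + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
	}

	int main(void)
	{
		unsigned long pa = 0x12345f00UL; /* hypothetical physical address of buf */
		unsigned long csize = 512;       /* bytes to copy, always < PAGE_SIZE here */

		/* First part: up to the next page boundary, capped at csize. */
		unsigned long first = roundup_page(pa) - pa;
		if (first > csize)
			first = csize;
		/* Second part: whatever remains lands on the following page. */
		unsigned long second = csize - first;

		printf("copy %lu bytes to the first page, %lu to the second\n",
		       first, second);
		return 0;
	}
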
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a314c57f4e94..e9fadb04e3c6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -47,7 +47,7 @@ enum s390_regset {
 	REGSET_GENERAL_EXTENDED,
 };
 
-void update_per_regs(struct task_struct *task)
+void update_cr_regs(struct task_struct *task)
 {
 	struct pt_regs *regs = task_pt_regs(task);
 	struct thread_struct *thread = &task->thread;
@@ -56,17 +56,25 @@ void update_per_regs(struct task_struct *task)
 #ifdef CONFIG_64BIT
 	/* Take care of the enable/disable of transactional execution. */
 	if (MACHINE_HAS_TE) {
-		unsigned long cr0, cr0_new;
+		unsigned long cr[3], cr_new[3];
 
-		__ctl_store(cr0, 0, 0);
-		/* set or clear transaction execution bits 8 and 9. */
+		__ctl_store(cr, 0, 2);
+		cr_new[1] = cr[1];
+		/* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
 		if (task->thread.per_flags & PER_FLAG_NO_TE)
-			cr0_new = cr0 & ~(3UL << 54);
+			cr_new[0] = cr[0] & ~(3UL << 54);
 		else
-			cr0_new = cr0 | (3UL << 54);
-		/* Only load control register 0 if necessary. */
-		if (cr0 != cr0_new)
-			__ctl_load(cr0_new, 0, 0);
+			cr_new[0] = cr[0] | (3UL << 54);
+		/* Set or clear transaction execution TDC bits 62 and 63. */
+		cr_new[2] = cr[2] & ~3UL;
+		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
+			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
+				cr_new[2] |= 1UL;
+			else
+				cr_new[2] |= 2UL;
+		}
+		if (memcmp(&cr_new, &cr, sizeof(cr)))
+			__ctl_load(cr_new, 0, 2);
 	}
 #endif
 	/* Copy user specified PER registers */
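
The bit numbers in the comments follow the s390 convention of counting from
the most significant bit, so bit n of a 64-bit control register has the value
1UL << (63 - n). That is why the TXC/PIFO "bits 8 and 9" appear in the code as
3UL << 54 and the TDC "bits 62 and 63" as plain 3UL. A quick standalone check
of that arithmetic:

	#include <stdio.h>

	/* s390 numbers bits from the MSB: bit n is 1UL << (63 - n). */
	static unsigned long msb_bit(int n)
	{
		return 1UL << (63 - n);
	}

	int main(void)
	{
		/* CR0 transactional-execution controls, bits 8 and 9. */
		printf("bits 8+9:   %#lx == %#lx\n",
		       msb_bit(8) | msb_bit(9), 3UL << 54);
		/* CR2 transaction diagnostic control, bits 62 and 63. */
		printf("bits 62+63: %#lx == %#lx\n",
		       msb_bit(62) | msb_bit(63), 3UL);
		return 0;
	}
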
@@ -100,14 +108,14 @@ void user_enable_single_step(struct task_struct *task)
 {
 	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
 	if (task == current)
-		update_per_regs(task);
+		update_cr_regs(task);
 }
 
 void user_disable_single_step(struct task_struct *task)
 {
 	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
 	if (task == current)
-		update_per_regs(task);
+		update_cr_regs(task);
 }
 
 /*
@@ -447,6 +455,26 @@ long arch_ptrace(struct task_struct *child, long request,
 		if (!MACHINE_HAS_TE)
 			return -EIO;
 		child->thread.per_flags |= PER_FLAG_NO_TE;
+		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
+		return 0;
+	case PTRACE_TE_ABORT_RAND:
+		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
+			return -EIO;
+		switch (data) {
+		case 0UL:
+			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
+			break;
+		case 1UL:
+			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
+			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
+			break;
+		case 2UL:
+			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
+			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
+			break;
+		default:
+			return -EINVAL;
+		}
 		return 0;
 	default:
 		/* Removing high order bit from addr (only for 31 bit). */
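
From userspace the new request takes the mode in the data argument: 0 switches
random aborts off, 1 aborts every transaction at a random instruction before
TEND, 2 aborts randomly selected transactions. A sketch of the tracer side,
assuming the child is already attached and stopped (error handling trimmed):

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	#ifndef PTRACE_TE_ABORT_RAND
	#define PTRACE_TE_ABORT_RAND 0x5011	/* from asm/ptrace.h */
	#endif

	/* mode: 0 = off, 1 = abort each transaction before TEND,
	 * 2 = abort random transactions. */
	static long te_abort_rand(pid_t child, unsigned long mode)
	{
		long rc = ptrace(PTRACE_TE_ABORT_RAND, child, NULL, (void *) mode);
		if (rc == -1)
			perror("PTRACE_TE_ABORT_RAND"); /* EIO without TE, EINVAL on bad mode */
		return rc;
	}
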
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f8078c..d5f10a43a58f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -9,6 +9,8 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/filter.h>
+#include <linux/random.h>
+#include <linux/init.h>
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/facility.h>
@@ -221,6 +223,37 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
 	EMIT2(0x07fe);
 }
 
+/* Helper to find the offset of pkt_type in sk_buff
+ * Make sure its still a 3bit field starting at the MSBs within a byte.
+ */
+#define PKT_TYPE_MAX 0xe0
+static int pkt_type_offset;
+
+static int __init bpf_pkt_type_offset_init(void)
+{
+	struct sk_buff skb_probe = {
+		.pkt_type = ~0,
+	};
+	char *ct = (char *)&skb_probe;
+	int off;
+
+	pkt_type_offset = -1;
+	for (off = 0; off < sizeof(struct sk_buff); off++) {
+		if (!ct[off])
+			continue;
+		if (ct[off] == PKT_TYPE_MAX)
+			pkt_type_offset = off;
+		else {
+			/* Found non matching bit pattern, fix needed. */
+			WARN_ON_ONCE(1);
+			pkt_type_offset = -1;
+			return -1;
+		}
+	}
+	return 0;
+}
+device_initcall(bpf_pkt_type_offset_init);
+
 /*
  * make sure we dont leak kernel information to user
  */
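
Since pkt_type is a C bitfield, its byte offset inside struct sk_buff cannot
be taken with offsetof(); the initcall above finds it empirically by setting
the field to all ones and checking that exactly one byte reads back as the
0xe0 pattern the JIT relies on. The same probing trick on a hypothetical
struct in userspace (on s390 bitfields are allocated MSB-first, hence 0xe0; a
little-endian host sees 0x07 instead):

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for the bitfield layout in struct sk_buff. */
	struct probe {
		unsigned int len;
		unsigned char pkt_type:3,	/* the field to locate */
			      other:5;
		unsigned int csum;
	};

	int main(void)
	{
		struct probe p;
		unsigned char *ct = (unsigned char *) &p;
		int off, found = -1;

		memset(&p, 0, sizeof(p));
		p.pkt_type = ~0;	/* light up only the bits of pkt_type */
		for (off = 0; off < (int) sizeof(p); off++) {
			if (!ct[off])
				continue;
			/* 0xe0 on MSB-first (s390), 0x07 on LSB-first ABIs. */
			if (ct[off] == 0xe0 || ct[off] == 0x07)
				found = off;
			else
				return 1;	/* unexpected layout, probe invalid */
		}
		printf("pkt_type lives at byte offset %d\n", found);
		return 0;
	}

The JIT case in the next hunk then loads that byte and shifts it right by
five, which on this big-endian layout is equivalent to A = skb->pkt_type.
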
@@ -720,6 +753,16 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		EMIT4_DISP(0x88500000, 12);
 		}
 		break;
+	case BPF_S_ANC_PKTTYPE:
+		if (pkt_type_offset < 0)
+			goto out;
+		/* lhi %r5,0 */
+		EMIT4(0xa7580000);
+		/* ic %r5,<d(pkt_type_offset)>(%r2) */
+		EMIT4_DISP(0x43502000, pkt_type_offset);
+		/* srl %r5,5 */
+		EMIT4_DISP(0x88500000, 5);
+		break;
 	case BPF_S_ANC_CPU: /* A = smp_processor_id() */
 #ifdef CONFIG_SMP
 		/* l %r5,<d(cpu_nr)> */
@@ -738,8 +781,41 @@ out:
 	return -1;
 }
 
+/*
+ * Note: for security reasons, bpf code will follow a randomly
+ * sized amount of illegal instructions.
+ */
+struct bpf_binary_header {
+	unsigned int pages;
+	u8 image[];
+};
+
+static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
+						  u8 **image_ptr)
+{
+	struct bpf_binary_header *header;
+	unsigned int sz, hole;
+
+	/* Most BPF filters are really small, but if some of them fill a page,
+	 * allow at least 128 extra bytes for illegal instructions.
+	 */
+	sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
+	header = module_alloc(sz);
+	if (!header)
+		return NULL;
+	memset(header, 0, sz);
+	header->pages = sz / PAGE_SIZE;
+	hole = sz - bpfsize + sizeof(*header);
+	/* Insert random number of illegal instructions before BPF code
+	 * and make sure the first instruction starts at an even address.
+	 */
+	*image_ptr = &header->image[(prandom_u32() % hole) & -2];
+	return header;
+}
+
 void bpf_jit_compile(struct sk_filter *fp)
 {
+	struct bpf_binary_header *header = NULL;
 	unsigned long size, prg_len, lit_len;
 	struct bpf_jit jit, cjit;
 	unsigned int *addrs;
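
The randomization in bpf_alloc_binary() works because the allocation is
rounded up to whole pages with at least 128 spare bytes: the JITed image is
placed at a random, even offset inside that hole, and the slack is left filled
with illegal instructions. A standalone sketch of the placement arithmetic,
mirroring the expressions above (the sizes are hypothetical, and rand()
stands in for prandom_u32()):

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE 4096U

	static unsigned int round_up_page(unsigned int x)
	{
		return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	}

	int main(void)
	{
		unsigned int hdr = sizeof(unsigned int);	/* header->pages field */
		unsigned int bpfsize = 200;			/* JITed program size */

		/* Page-rounded allocation with at least 128 spare bytes. */
		unsigned int sz = round_up_page(bpfsize + hdr + 128);
		unsigned int hole = sz - bpfsize + hdr;

		/* Random start offset inside the hole; "& -2" clears bit 0 so
		 * the first instruction starts at an even (halfword) address. */
		unsigned int off = ((unsigned int) rand() % hole) & -2;

		printf("alloc %u bytes, image at offset %u of a %u byte hole\n",
		       sz, off, hole);
		return 0;
	}
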
@@ -772,12 +848,11 @@ void bpf_jit_compile(struct sk_filter *fp)
 	} else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
 		prg_len = jit.prg - jit.start;
 		lit_len = jit.lit - jit.mid;
-		size = max_t(unsigned long, prg_len + lit_len,
-			     sizeof(struct work_struct));
+		size = prg_len + lit_len;
 		if (size >= BPF_SIZE_MAX)
 			goto out;
-		jit.start = module_alloc(size);
-		if (!jit.start)
+		header = bpf_alloc_binary(size, &jit.start);
+		if (!header)
 			goto out;
 		jit.prg = jit.mid = jit.start + prg_len;
 		jit.lit = jit.end = jit.start + prg_len + lit_len;
@@ -788,37 +863,25 @@ void bpf_jit_compile(struct sk_filter *fp)
 		cjit = jit;
 	}
 	if (bpf_jit_enable > 1) {
-		pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
-		       fp->len, jit.end - jit.start, pass, jit.start);
-		if (jit.start) {
-			printk(KERN_ERR "JIT code:\n");
+		bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
+		if (jit.start)
 			print_fn_code(jit.start, jit.mid - jit.start);
-			print_hex_dump(KERN_ERR, "JIT literals:\n",
-				       DUMP_PREFIX_ADDRESS, 16, 1,
-				       jit.mid, jit.end - jit.mid, false);
-		}
 	}
-	if (jit.start)
+	if (jit.start) {
+		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *) jit.start;
+	}
 out:
 	kfree(addrs);
 }
 
-static void jit_free_defer(struct work_struct *arg)
-{
-	module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
 void bpf_jit_free(struct sk_filter *fp)
 {
-	struct work_struct *work;
+	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
 
 	if (fp->bpf_func == sk_run_filter)
 		return;
-	work = (struct work_struct *)fp->bpf_func;
-	INIT_WORK(work, jit_free_defer);
-	schedule_work(work);
+	set_memory_rw(addr, header->pages);
+	module_free(NULL, header);
 }
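
bpf_jit_free() can now call module_free() directly instead of deferring to a
work item: fp->bpf_func points somewhere inside the bpf_binary_header
allocation, and masking with PAGE_MASK walks back to the page start where the
header (with the page count needed for set_memory_rw()) lives; this assumes
the randomized image start stays within the first page of the allocation. The
pointer arithmetic in a small sketch (the address is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	int main(void)
	{
		/* Hypothetical address of the JITed code: page start + random offset. */
		uintptr_t bpf_func = 0x3ff80251000UL + 0x40e;

		/* Masking off the in-page bits recovers the allocation start,
		 * where bpf_alloc_binary() placed struct bpf_binary_header. */
		uintptr_t header = bpf_func & PAGE_MASK;

		printf("bpf_func=%#lx -> header=%#lx\n",
		       (unsigned long) bpf_func, (unsigned long) header);
		return 0;
	}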