Diffstat (limited to 'arch/s390')
 arch/s390/include/asm/processor.h   |  10
 arch/s390/include/asm/switch_to.h   |   4
 arch/s390/include/uapi/asm/ptrace.h |   1
 arch/s390/include/uapi/asm/socket.h |   2
 arch/s390/kernel/cache.c            |  15
 arch/s390/kernel/crash_dump.c       |  51
 arch/s390/kernel/perf_cpum_cf.c     |   4
 arch/s390/kernel/processor.c        |   2
 arch/s390/kernel/ptrace.c           |  50
 arch/s390/kernel/smp.c              |  17
 arch/s390/kernel/sysinfo.c          |   2
 arch/s390/kernel/vtime.c            |   6
 arch/s390/mm/fault.c                |   4
 arch/s390/mm/mmap.c                 |   4
 arch/s390/net/bpf_jit_comp.c        | 113
 15 files changed, 212 insertions(+), 73 deletions(-)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6b499870662f..b0e6435b2f02 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -91,7 +91,15 @@ struct thread_struct {
 #endif
 };
 
-#define PER_FLAG_NO_TE		1UL	/* Flag to disable transactions. */
+/* Flag to disable transactions. */
+#define PER_FLAG_NO_TE			1UL
+/* Flag to enable random transaction aborts. */
+#define PER_FLAG_TE_ABORT_RAND		2UL
+/* Flag to specify random transaction abort mode:
+ * - abort each transaction at a random instruction before TEND if set.
+ * - abort random transactions at a random instruction if cleared.
+ */
+#define PER_FLAG_TE_ABORT_RAND_TEND	4UL
 
 typedef struct thread_struct thread_struct;
 
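A minimal sketch (plain C with the flag values inlined for illustration, not kernel code) of how the three flags are meant to combine: PER_FLAG_NO_TE excludes the abort-random mode, and PER_FLAG_TE_ABORT_RAND_TEND only matters while PER_FLAG_TE_ABORT_RAND is set.

static const char *te_mode(unsigned long per_flags)
{
	if (per_flags & 1UL)		/* PER_FLAG_NO_TE */
		return "transactional execution disabled";
	if (!(per_flags & 2UL))		/* PER_FLAG_TE_ABORT_RAND */
		return "normal transactional execution";
	return (per_flags & 4UL)	/* PER_FLAG_TE_ABORT_RAND_TEND */
		? "abort each transaction at a random instruction before TEND"
		: "abort random transactions at a random instruction";
}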
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index f3a9e0f92704..80b6f11263c4 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -10,7 +10,7 @@
 #include <linux/thread_info.h>
 
 extern struct task_struct *__switch_to(void *, void *);
-extern void update_per_regs(struct task_struct *task);
+extern void update_cr_regs(struct task_struct *task);
 
 static inline void save_fp_regs(s390_fp_regs *fpregs)
 {
@@ -86,7 +86,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 	restore_fp_regs(&next->thread.fp_regs);			\
 	restore_access_regs(&next->thread.acrs[0]);		\
 	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
-	update_per_regs(next);					\
+	update_cr_regs(next);					\
 	}							\
 	prev = __switch_to(prev,next);				\
 } while (0)
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 3aa9f1ec5b29..7a84619e315e 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -400,6 +400,7 @@ typedef struct
 #define PTRACE_POKE_SYSTEM_CALL	0x5008
 #define PTRACE_ENABLE_TE	0x5009
 #define PTRACE_DISABLE_TE	0x5010
+#define PTRACE_TE_ABORT_RAND	0x5011
 
 /*
  * PT_PROT definition is loosely based on hppa bsd definition in
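These requests are s390-specific and glibc defines no symbolic names for them, so a tracer passes the raw request value; a minimal usage sketch (the helper name is hypothetical):

#include <sys/ptrace.h>
#include <sys/types.h>

#define PTRACE_TE_ABORT_RAND	0x5011	/* value from the header above */

/* mode 0: no random aborts, 1: abort each transaction at a random
 * instruction before TEND, 2: abort random transactions (see the
 * arch_ptrace() hunk further down). */
static long set_te_abort_rand(pid_t pid, unsigned long mode)
{
	return ptrace(PTRACE_TE_ABORT_RAND, pid, 0UL, mode);
}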
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 2dacb306835c..92494494692e 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -80,4 +80,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
+#define SO_BUSY_POLL		46
+
 #endif /* _ASM_SOCKET_H */
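SO_BUSY_POLL takes a timeout in microseconds for which the kernel may busy-poll the device queue on blocking receives; a minimal sketch of setting it (generic socket code, not s390-specific; setting it may require CAP_NET_ADMIN):

#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46		/* s390 value from the hunk above */
#endif

static int enable_busy_poll(int fd)
{
	int usec = 50;	/* busy-poll for up to 50 microseconds */

	return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec));
}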
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 64b24650e4f8..dd62071624be 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -173,7 +173,7 @@ error:
 	}
 }
 
-static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
+static struct cache_dir *cache_create_cache_dir(int cpu)
 {
 	struct cache_dir *cache_dir;
 	struct kobject *kobj = NULL;
@@ -289,9 +289,8 @@ static struct kobj_type cache_index_type = {
 	.default_attrs = cache_index_default_attrs,
 };
 
-static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
-					    struct cache *cache, int index,
-					    int cpu)
+static int cache_create_index_dir(struct cache_dir *cache_dir,
+				  struct cache *cache, int index, int cpu)
 {
 	struct cache_index_dir *index_dir;
 	int rc;
@@ -313,7 +312,7 @@ out:
 	return rc;
 }
 
-static int __cpuinit cache_add_cpu(int cpu)
+static int cache_add_cpu(int cpu)
 {
 	struct cache_dir *cache_dir;
 	struct cache *cache;
@@ -335,7 +334,7 @@ static int __cpuinit cache_add_cpu(int cpu)
 	return 0;
 }
 
-static void __cpuinit cache_remove_cpu(int cpu)
+static void cache_remove_cpu(int cpu)
 {
 	struct cache_index_dir *index, *next;
 	struct cache_dir *cache_dir;
@@ -354,8 +353,8 @@ static void __cpuinit cache_remove_cpu(int cpu)
 	cache_dir_cpu[cpu] = NULL;
 }
 
-static int __cpuinit cache_hotplug(struct notifier_block *nfb,
-				   unsigned long action, void *hcpu)
+static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
+			 void *hcpu)
 {
 	int cpu = (long)hcpu;
 	int rc = 0;
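cache_hotplug() follows the classic CPU-hotplug notifier shape; a generic skeleton of that pattern as used in kernels of this era (all example_* names are hypothetical, only the notifier API is real):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_add_cpu(int cpu)
{
	/* create per-cpu objects, return 0 or -errno */
	return 0;
}

static void example_remove_cpu(int cpu)
{
	/* tear down per-cpu objects */
}

static int example_cpu_notify(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = example_add_cpu(cpu);
		break;
	case CPU_DEAD:
		example_remove_cpu(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_notify,
};
/* registered once at init time: register_cpu_notifier(&example_cpu_nb); */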
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f703d91bf720..d8f355657171 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -21,6 +21,48 @@
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+
+/*
+ * Return physical address for virtual address
+ */
+static inline void *load_real_addr(void *addr)
+{
+	unsigned long real_addr;
+
+	asm volatile(
+		"	lra	%0,0(%1)\n"
+		"	jz	0f\n"
+		"	la	%0,0\n"
+		"0:"
+		: "=a" (real_addr) : "a" (addr) : "cc");
+	return (void *)real_addr;
+}
+
+/*
+ * Copy up to one page to vmalloc or real memory
+ */
+static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+{
+	size_t size;
+
+	if (is_vmalloc_addr(buf)) {
+		BUG_ON(csize >= PAGE_SIZE);
+		/* If buf is not page aligned, copy first part */
+		size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
+		if (size) {
+			if (memcpy_real(load_real_addr(buf), src, size))
+				return -EFAULT;
+			buf += size;
+			src += size;
+		}
+		/* Copy second part */
+		size = csize - size;
+		return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
+	} else {
+		return memcpy_real(buf, src, csize);
+	}
+}
+
 /*
  * Copy one page from "oldmem"
  *
@@ -32,6 +74,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 		  size_t csize, unsigned long offset, int userbuf)
 {
 	unsigned long src;
+	int rc;
 
 	if (!csize)
 		return 0;
@@ -43,11 +86,11 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	    src < OLDMEM_BASE + OLDMEM_SIZE)
 		src -= OLDMEM_BASE;
 	if (userbuf)
-		copy_to_user_real((void __force __user *) buf, (void *) src,
-				  csize);
+		rc = copy_to_user_real((void __force __user *) buf,
+				       (void *) src, csize);
 	else
-		memcpy_real(buf, (void *) src, csize);
-	return csize;
+		rc = copy_page_real(buf, (void *) src, csize);
 	return (rc == 0) ? csize : rc;
 }
 
 /*
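copy_page_real() splits a non-page-aligned vmalloc buffer at the page boundary because the two halves may live on different physical pages; a standalone model of the size arithmetic (addresses are hypothetical):

#include <assert.h>

#define EX_PAGE_SIZE 0x1000UL

int main(void)
{
	unsigned long pa = 0x12345e00UL;	/* hypothetical __pa(buf) */
	unsigned long csize = 0x300UL;		/* bytes to copy, < PAGE_SIZE */
	unsigned long first, second;

	/* bytes up to the next page boundary, capped at csize */
	first = ((pa + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1)) - pa;
	if (first > csize)
		first = csize;
	second = csize - first;
	assert(first == 0x200 && second == 0x100);
	return 0;
}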
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 390d9ae57bb2..fb99c2057b85 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -639,8 +639,8 @@ static struct pmu cpumf_pmu = {
 	.cancel_txn   = cpumf_pmu_cancel_txn,
 };
 
-static int __cpuinit cpumf_pmu_notifier(struct notifier_block *self,
-					unsigned long action, void *hcpu)
+static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
+			      void *hcpu)
 {
 	unsigned int cpu = (long) hcpu;
 	int flags;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 753c41d0ffd3..24612029f450 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
 /*
  * cpu_init - initializes state that is per-CPU.
  */
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct cpuid *id = &__get_cpu_var(cpu_id);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a314c57f4e94..e9fadb04e3c6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -47,7 +47,7 @@ enum s390_regset {
 	REGSET_GENERAL_EXTENDED,
 };
 
-void update_per_regs(struct task_struct *task)
+void update_cr_regs(struct task_struct *task)
 {
 	struct pt_regs *regs = task_pt_regs(task);
 	struct thread_struct *thread = &task->thread;
@@ -56,17 +56,25 @@ void update_per_regs(struct task_struct *task)
 #ifdef CONFIG_64BIT
 	/* Take care of the enable/disable of transactional execution. */
 	if (MACHINE_HAS_TE) {
-		unsigned long cr0, cr0_new;
+		unsigned long cr[3], cr_new[3];
 
-		__ctl_store(cr0, 0, 0);
-		/* set or clear transaction execution bits 8 and 9. */
+		__ctl_store(cr, 0, 2);
+		cr_new[1] = cr[1];
+		/* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
 		if (task->thread.per_flags & PER_FLAG_NO_TE)
-			cr0_new = cr0 & ~(3UL << 54);
+			cr_new[0] = cr[0] & ~(3UL << 54);
 		else
-			cr0_new = cr0 | (3UL << 54);
-		/* Only load control register 0 if necessary. */
-		if (cr0 != cr0_new)
-			__ctl_load(cr0_new, 0, 0);
+			cr_new[0] = cr[0] | (3UL << 54);
+		/* Set or clear transaction execution TDC bits 62 and 63. */
+		cr_new[2] = cr[2] & ~3UL;
+		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
+			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
+				cr_new[2] |= 1UL;
+			else
+				cr_new[2] |= 2UL;
+		}
+		if (memcmp(&cr_new, &cr, sizeof(cr)))
+			__ctl_load(cr_new, 0, 2);
 	}
 #endif
 	/* Copy user specified PER registers */
@@ -100,14 +108,14 @@ void user_enable_single_step(struct task_struct *task)
 {
 	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
 	if (task == current)
-		update_per_regs(task);
+		update_cr_regs(task);
 }
 
 void user_disable_single_step(struct task_struct *task)
 {
 	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
 	if (task == current)
-		update_per_regs(task);
+		update_cr_regs(task);
 }
 
 /*
@@ -447,6 +455,26 @@ long arch_ptrace(struct task_struct *child, long request,
 		if (!MACHINE_HAS_TE)
 			return -EIO;
 		child->thread.per_flags |= PER_FLAG_NO_TE;
+		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
+		return 0;
+	case PTRACE_TE_ABORT_RAND:
+		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
+			return -EIO;
+		switch (data) {
+		case 0UL:
+			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
+			break;
+		case 1UL:
+			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
+			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
+			break;
+		case 2UL:
+			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
+			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
+			break;
+		default:
+			return -EINVAL;
+		}
 		return 0;
 	default:
 		/* Removing high order bit from addr (only for 31 bit). */
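Taken together with the update_cr_regs() hunk above, the data argument of PTRACE_TE_ABORT_RAND maps onto the per_flags and the CR2 transaction-diagnostic-control (TDC) bits roughly as follows (a summary of the two hunks, not code):

  data 0: clear TE_ABORT_RAND                    -> TDC 00, no random aborts
  data 1: set TE_ABORT_RAND and ..._RAND_TEND    -> TDC 01, abort each
          transaction at a random instruction before TEND
  data 2: set TE_ABORT_RAND, clear ..._RAND_TEND -> TDC 10, abort random
          transactions at a random instruction
  other : -EINVAL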
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 15a016c10563..d386c4e9d2e5 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -165,7 +165,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 	pcpu_sigp_retry(pcpu, order, 0);
 }
 
-static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
+static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
 	struct _lowcore *lc;
 
@@ -616,10 +616,9 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
 	return info;
 }
 
-static int __cpuinit smp_add_present_cpu(int cpu);
+static int smp_add_present_cpu(int cpu);
 
-static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
-				       int sysfs_add)
+static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
 {
 	struct pcpu *pcpu;
 	cpumask_t avail;
@@ -685,7 +684,7 @@ static void __init smp_detect_cpus(void)
 /*
  * Activate a secondary processor.
  */
-static void __cpuinit smp_start_secondary(void *cpuvoid)
+static void smp_start_secondary(void *cpuvoid)
 {
 	S390_lowcore.last_update_clock = get_tod_clock();
 	S390_lowcore.restart_stack = (unsigned long) restart_stack;
@@ -708,7 +707,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
 }
 
 /* Upping and downing of CPUs */
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	struct pcpu *pcpu;
 	int rc;
@@ -964,8 +963,8 @@ static struct attribute_group cpu_online_attr_group = {
 	.attrs = cpu_online_attrs,
 };
 
-static int __cpuinit smp_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
 	struct cpu *c = &pcpu_devices[cpu].cpu;
@@ -983,7 +982,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 	return notifier_from_errno(err);
 }
 
-static int __cpuinit smp_add_present_cpu(int cpu)
+static int smp_add_present_cpu(int cpu)
 {
 	struct cpu *c = &pcpu_devices[cpu].cpu;
 	struct device *s = &c->dev;
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 62f89d98e880..811f542b8ed4 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -418,7 +418,7 @@ void s390_adjust_jiffies(void)
 /*
  * calibrate the delay loop
  */
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
 {
 	s390_adjust_jiffies();
 	/* Print the good old Bogomips line .. */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3fb09359eda6..9b9c1b78ec67 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -371,14 +371,14 @@ EXPORT_SYMBOL(del_virt_timer);
 /*
  * Start the virtual CPU timer on the current CPU.
  */
-void __cpuinit init_cpu_vtimer(void)
+void init_cpu_vtimer(void)
 {
 	/* set initial cpu timer */
 	set_vtimer(VTIMER_MAX_SLICE);
 }
 
-static int __cpuinit s390_nohz_notify(struct notifier_block *self,
-				      unsigned long action, void *hcpu)
+static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
+			    void *hcpu)
 {
 	struct s390_idle_data *idle;
 	long cpu = (long) hcpu;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 047c3e4c59a2..f00aefb66a4e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -639,8 +639,8 @@ out:
 	put_task_struct(tsk);
 }
 
-static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
-				       unsigned long action, void *hcpu)
+static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
+			     void *hcpu)
 {
 	struct thread_struct *thread, *next;
 	struct task_struct *tsk;
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 06bafec00278..40023290ee5b 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -91,11 +91,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
 
@@ -176,11 +174,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = s390_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
 
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f8078c..d5f10a43a58f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -9,6 +9,8 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/filter.h>
+#include <linux/random.h>
+#include <linux/init.h>
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/facility.h>
@@ -221,6 +223,37 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
 	EMIT2(0x07fe);
 }
 
+/* Helper to find the offset of pkt_type in sk_buff
+ * Make sure its still a 3bit field starting at the MSBs within a byte.
+ */
+#define PKT_TYPE_MAX 0xe0
+static int pkt_type_offset;
+
+static int __init bpf_pkt_type_offset_init(void)
+{
+	struct sk_buff skb_probe = {
+		.pkt_type = ~0,
+	};
+	char *ct = (char *)&skb_probe;
+	int off;
+
+	pkt_type_offset = -1;
+	for (off = 0; off < sizeof(struct sk_buff); off++) {
+		if (!ct[off])
+			continue;
+		if (ct[off] == PKT_TYPE_MAX)
+			pkt_type_offset = off;
+		else {
+			/* Found non matching bit pattern, fix needed. */
+			WARN_ON_ONCE(1);
+			pkt_type_offset = -1;
+			return -1;
+		}
+	}
+	return 0;
+}
+device_initcall(bpf_pkt_type_offset_init);
+
 /*
  * make sure we dont leak kernel information to user
  */
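On big-endian s390 the 3-bit skb->pkt_type bitfield occupies the most-significant bits of its byte, which is why the probe above looks for the 0xe0 pattern; the JIT can then recover the value with one byte load and a shift. A plain-C model of that assumption (not kernel code):

static unsigned int pkt_type_from_byte(unsigned char b)
{
	/* mirrors the emitted sequence in the next hunk:
	 * lhi %r5,0; ic %r5,<pkt_type_offset>(%r2); srl %r5,5 */
	return (unsigned int)b >> 5;
}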
@@ -720,6 +753,16 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
 		EMIT4_DISP(0x88500000, 12);
 		}
 		break;
+	case BPF_S_ANC_PKTTYPE:
+		if (pkt_type_offset < 0)
+			goto out;
+		/* lhi %r5,0 */
+		EMIT4(0xa7580000);
+		/* ic %r5,<d(pkt_type_offset)>(%r2) */
+		EMIT4_DISP(0x43502000, pkt_type_offset);
+		/* srl %r5,5 */
+		EMIT4_DISP(0x88500000, 5);
+		break;
 	case BPF_S_ANC_CPU: /* A = smp_processor_id() */
 #ifdef CONFIG_SMP
 		/* l %r5,<d(cpu_nr)> */
@@ -738,8 +781,41 @@ out:
 	return -1;
 }
 
+/*
+ * Note: for security reasons, bpf code will follow a randomly
+ * sized amount of illegal instructions.
+ */
+struct bpf_binary_header {
+	unsigned int pages;
+	u8 image[];
+};
+
+static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
+						  u8 **image_ptr)
+{
+	struct bpf_binary_header *header;
+	unsigned int sz, hole;
+
+	/* Most BPF filters are really small, but if some of them fill a page,
+	 * allow at least 128 extra bytes for illegal instructions.
+	 */
+	sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
+	header = module_alloc(sz);
+	if (!header)
+		return NULL;
+	memset(header, 0, sz);
+	header->pages = sz / PAGE_SIZE;
+	hole = sz - bpfsize + sizeof(*header);
+	/* Insert random number of illegal instructions before BPF code
+	 * and make sure the first instruction starts at an even address.
+	 */
+	*image_ptr = &header->image[(prandom_u32() % hole) & -2];
+	return header;
+}
+
 void bpf_jit_compile(struct sk_filter *fp)
 {
+	struct bpf_binary_header *header = NULL;
 	unsigned long size, prg_len, lit_len;
 	struct bpf_jit jit, cjit;
 	unsigned int *addrs;
@@ -772,12 +848,11 @@ void bpf_jit_compile(struct sk_filter *fp)
 	} else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
 		prg_len = jit.prg - jit.start;
 		lit_len = jit.lit - jit.mid;
-		size = max_t(unsigned long, prg_len + lit_len,
-			     sizeof(struct work_struct));
+		size = prg_len + lit_len;
 		if (size >= BPF_SIZE_MAX)
 			goto out;
-		jit.start = module_alloc(size);
-		if (!jit.start)
+		header = bpf_alloc_binary(size, &jit.start);
+		if (!header)
 			goto out;
 		jit.prg = jit.mid = jit.start + prg_len;
 		jit.lit = jit.end = jit.start + prg_len + lit_len;
@@ -788,37 +863,25 @@ void bpf_jit_compile(struct sk_filter *fp)
 		cjit = jit;
 	}
 	if (bpf_jit_enable > 1) {
-		pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
-		       fp->len, jit.end - jit.start, pass, jit.start);
-		if (jit.start) {
-			printk(KERN_ERR "JIT code:\n");
+		bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
+		if (jit.start)
 			print_fn_code(jit.start, jit.mid - jit.start);
-			print_hex_dump(KERN_ERR, "JIT literals:\n",
-				       DUMP_PREFIX_ADDRESS, 16, 1,
-				       jit.mid, jit.end - jit.mid, false);
-		}
 	}
-	if (jit.start)
+	if (jit.start) {
+		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *) jit.start;
+	}
 out:
 	kfree(addrs);
 }
 
-static void jit_free_defer(struct work_struct *arg)
-{
-	module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
 void bpf_jit_free(struct sk_filter *fp)
 {
-	struct work_struct *work;
+	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
 
 	if (fp->bpf_func == sk_run_filter)
 		return;
-	work = (struct work_struct *)fp->bpf_func;
-	INIT_WORK(work, jit_free_defer);
-	schedule_work(work);
+	set_memory_rw(addr, header->pages);
+	module_free(NULL, header);
 }
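bpf_alloc_binary() rounds the allocation up to whole pages and starts the image at a random even offset inside the leftover space. A standalone model of that layout math (all names hypothetical; assuming the leftover space is meant to be sz - (bpfsize + sizeof(*header))):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct jit_header {
	unsigned int pages;
	uint8_t image[];
};

#define EX_PAGE_SIZE 4096UL

int main(void)
{
	size_t bpfsize = 1000;	/* hypothetical JIT image size */
	size_t need = bpfsize + sizeof(struct jit_header) + 128;
	size_t sz = (need + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1);
	size_t hole = sz - (bpfsize + sizeof(struct jit_header));
	uint32_t rnd;

	for (rnd = 0; rnd < 100000; rnd++) {
		/* even offset, as s390 instructions are 2-byte aligned */
		size_t off = (rnd % hole) & ~(size_t)1;

		/* the image never runs past the page-rounded allocation */
		assert(sizeof(struct jit_header) + off + bpfsize <= sz);
	}
	return 0;
}

Note that bpf_jit_free() recovers the header by masking fp->bpf_func with PAGE_MASK, i.e. it assumes the image always starts within the first page of the allocation.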