aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 21:47:00 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-13 21:47:00 -0400
commit1ee07ef6b5db7235b133ee257a3adf507697e6b3 (patch)
tree9c7a00cf98462c2a70610da9d09770c835ef8fcd /arch/s390
parent77654908ff1a58cee4886298968b5262884aff0b (diff)
parent0cccdda8d1512af4d3f6913044e8c8e58e15ef37 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky: "This patch set contains the main portion of the changes for 3.18 in regard to the s390 architecture. It is a bit bigger than usual, mainly because of a new driver and the vector extension patches. The interesting bits are: - Quite a bit of work on the tracing front. Uprobes is enabled and the ftrace code is reworked to get some of the lost performance back if CONFIG_FTRACE is enabled. - To improve boot time with CONFIG_DEBUG_PAGEALLOC, support for the IPTE range facility is added. - The rwlock code is re-factored to improve writer fairness and to be able to use the interlocked-access instructions. - The kernel part for the support of the vector extension is added. - The device driver to access the CD/DVD on the HMC is added, this will hopefully come in handy to improve the installation process. - Add support for control-unit initiated reconfiguration. - The crypto device driver is enhanced to enable the additional AP domains and to allow the new crypto hardware to be used. 
- Bug fixes" * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (39 commits) s390/ftrace: simplify enabling/disabling of ftrace_graph_caller s390/ftrace: remove 31 bit ftrace support s390/kdump: add support for vector extension s390/disassembler: add vector instructions s390: add support for vector extension s390/zcrypt: Toleration of new crypto hardware s390/idle: consolidate idle functions and definitions s390/nohz: use a per-cpu flag for arch_needs_cpu s390/vtime: do not reset idle data on CPU hotplug s390/dasd: add support for control unit initiated reconfiguration s390/dasd: fix infinite loop during format s390/mm: make use of ipte range facility s390/setup: correct 4-level kernel page table detection s390/topology: call set_sched_topology early s390/uprobes: architecture backend for uprobes s390/uprobes: common library for kprobes and uprobes s390/rwlock: use the interlocked-access facility 1 instructions s390/rwlock: improve writer fairness s390/rwlock: remove interrupt-enabling rwlock variant. s390/mm: remove change bit override support ...
Diffstat (limited to 'arch/s390')
-rw-r--r--arch/s390/Kconfig11
-rw-r--r--arch/s390/Makefile17
-rw-r--r--arch/s390/include/asm/barrier.h6
-rw-r--r--arch/s390/include/asm/cputime.h26
-rw-r--r--arch/s390/include/asm/dis.h13
-rw-r--r--arch/s390/include/asm/elf.h3
-rw-r--r--arch/s390/include/asm/ftrace.h9
-rw-r--r--arch/s390/include/asm/idle.h26
-rw-r--r--arch/s390/include/asm/ipl.h4
-rw-r--r--arch/s390/include/asm/irq.h1
-rw-r--r--arch/s390/include/asm/kprobes.h4
-rw-r--r--arch/s390/include/asm/lowcore.h21
-rw-r--r--arch/s390/include/asm/nmi.h2
-rw-r--r--arch/s390/include/asm/pgtable.h25
-rw-r--r--arch/s390/include/asm/processor.h12
-rw-r--r--arch/s390/include/asm/ptrace.h6
-rw-r--r--arch/s390/include/asm/setup.h6
-rw-r--r--arch/s390/include/asm/sigp.h6
-rw-r--r--arch/s390/include/asm/smp.h2
-rw-r--r--arch/s390/include/asm/spinlock.h135
-rw-r--r--arch/s390/include/asm/spinlock_types.h1
-rw-r--r--arch/s390/include/asm/switch_to.h61
-rw-r--r--arch/s390/include/asm/thread_info.h3
-rw-r--r--arch/s390/include/asm/uprobes.h42
-rw-r--r--arch/s390/include/asm/vdso.h18
-rw-r--r--arch/s390/include/asm/vtimer.h2
-rw-r--r--arch/s390/include/uapi/asm/sigcontext.h20
-rw-r--r--arch/s390/include/uapi/asm/types.h4
-rw-r--r--arch/s390/include/uapi/asm/ucontext.h15
-rw-r--r--arch/s390/kernel/Makefile8
-rw-r--r--arch/s390/kernel/asm-offsets.c9
-rw-r--r--arch/s390/kernel/compat_linux.h9
-rw-r--r--arch/s390/kernel/compat_signal.c212
-rw-r--r--arch/s390/kernel/crash_dump.c58
-rw-r--r--arch/s390/kernel/dis.c245
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/entry.h6
-rw-r--r--arch/s390/kernel/entry64.S17
-rw-r--r--arch/s390/kernel/ftrace.c139
-rw-r--r--arch/s390/kernel/head.S2
-rw-r--r--arch/s390/kernel/idle.c124
-rw-r--r--arch/s390/kernel/irq.c3
-rw-r--r--arch/s390/kernel/kprobes.c159
-rw-r--r--arch/s390/kernel/machine_kexec.c8
-rw-r--r--arch/s390/kernel/mcount.S86
-rw-r--r--arch/s390/kernel/mcount64.S62
-rw-r--r--arch/s390/kernel/nmi.c16
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/process.c24
-rw-r--r--arch/s390/kernel/processor.c4
-rw-r--r--arch/s390/kernel/ptrace.c254
-rw-r--r--arch/s390/kernel/setup.c13
-rw-r--r--arch/s390/kernel/signal.c296
-rw-r--r--arch/s390/kernel/smp.c80
-rw-r--r--arch/s390/kernel/time.c13
-rw-r--r--arch/s390/kernel/topology.c18
-rw-r--r--arch/s390/kernel/traps.c115
-rw-r--r--arch/s390/kernel/uprobes.c332
-rw-r--r--arch/s390/kernel/vdso32/clock_getres.S11
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S32
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S4
-rw-r--r--arch/s390/kernel/vdso64/clock_getres.S8
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S32
-rw-r--r--arch/s390/kernel/vdso64/gettimeofday.S4
-rw-r--r--arch/s390/kernel/vtime.c77
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/delay.c4
-rw-r--r--arch/s390/lib/probes.c159
-rw-r--r--arch/s390/lib/spinlock.c105
-rw-r--r--arch/s390/mm/dump_pagetables.c5
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/pageattr.c38
-rw-r--r--arch/s390/mm/vmem.c8
73 files changed, 2399 insertions, 911 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 296391395b95..f2cf1f90295b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -58,6 +58,9 @@ config NO_IOPORT_MAP
58config PCI_QUIRKS 58config PCI_QUIRKS
59 def_bool n 59 def_bool n
60 60
61config ARCH_SUPPORTS_UPROBES
62 def_bool 64BIT
63
61config S390 64config S390
62 def_bool y 65 def_bool y
63 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 66 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -97,6 +100,7 @@ config S390
97 select ARCH_WANT_IPC_PARSE_VERSION 100 select ARCH_WANT_IPC_PARSE_VERSION
98 select BUILDTIME_EXTABLE_SORT 101 select BUILDTIME_EXTABLE_SORT
99 select CLONE_BACKWARDS2 102 select CLONE_BACKWARDS2
103 select DYNAMIC_FTRACE if FUNCTION_TRACER
100 select GENERIC_CLOCKEVENTS 104 select GENERIC_CLOCKEVENTS
101 select GENERIC_CPU_DEVICES if !SMP 105 select GENERIC_CPU_DEVICES if !SMP
102 select GENERIC_FIND_FIRST_BIT 106 select GENERIC_FIND_FIRST_BIT
@@ -113,10 +117,11 @@ config S390
113 select HAVE_CMPXCHG_LOCAL 117 select HAVE_CMPXCHG_LOCAL
114 select HAVE_C_RECORDMCOUNT 118 select HAVE_C_RECORDMCOUNT
115 select HAVE_DEBUG_KMEMLEAK 119 select HAVE_DEBUG_KMEMLEAK
116 select HAVE_DYNAMIC_FTRACE 120 select HAVE_DYNAMIC_FTRACE if 64BIT
121 select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
117 select HAVE_FTRACE_MCOUNT_RECORD 122 select HAVE_FTRACE_MCOUNT_RECORD
118 select HAVE_FUNCTION_GRAPH_TRACER 123 select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
119 select HAVE_FUNCTION_TRACER 124 select HAVE_FUNCTION_TRACER if 64BIT
120 select HAVE_FUTEX_CMPXCHG if FUTEX 125 select HAVE_FUTEX_CMPXCHG if FUTEX
121 select HAVE_KERNEL_BZIP2 126 select HAVE_KERNEL_BZIP2
122 select HAVE_KERNEL_GZIP 127 select HAVE_KERNEL_GZIP
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 874e6d6e9c5f..878e67973151 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -35,13 +35,16 @@ endif
35 35
36export LD_BFD 36export LD_BFD
37 37
38cflags-$(CONFIG_MARCH_G5) += -march=g5 38mflags-$(CONFIG_MARCH_G5) := -march=g5
39cflags-$(CONFIG_MARCH_Z900) += -march=z900 39mflags-$(CONFIG_MARCH_Z900) := -march=z900
40cflags-$(CONFIG_MARCH_Z990) += -march=z990 40mflags-$(CONFIG_MARCH_Z990) := -march=z990
41cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109 41mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
42cflags-$(CONFIG_MARCH_Z10) += -march=z10 42mflags-$(CONFIG_MARCH_Z10) := -march=z10
43cflags-$(CONFIG_MARCH_Z196) += -march=z196 43mflags-$(CONFIG_MARCH_Z196) := -march=z196
44cflags-$(CONFIG_MARCH_ZEC12) += -march=zEC12 44mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
45
46aflags-y += $(mflags-y)
47cflags-y += $(mflags-y)
45 48
46cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5 49cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
47cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 50cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 19ff956b752b..b5dce6544d76 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -15,11 +15,13 @@
15 15
16#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 16#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
17/* Fast-BCR without checkpoint synchronization */ 17/* Fast-BCR without checkpoint synchronization */
18#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0) 18#define __ASM_BARRIER "bcr 14,0\n"
19#else 19#else
20#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0) 20#define __ASM_BARRIER "bcr 15,0\n"
21#endif 21#endif
22 22
23#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
24
23#define rmb() mb() 25#define rmb() mb()
24#define wmb() mb() 26#define wmb() mb()
25#define read_barrier_depends() do { } while(0) 27#define read_barrier_depends() do { } while(0)
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 3001887f94b7..f8c196984853 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -8,8 +8,6 @@
8#define _S390_CPUTIME_H 8#define _S390_CPUTIME_H
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/percpu.h>
12#include <linux/spinlock.h>
13#include <asm/div64.h> 11#include <asm/div64.h>
14 12
15 13
@@ -167,28 +165,8 @@ static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
167 return clock; 165 return clock;
168} 166}
169 167
170struct s390_idle_data { 168cputime64_t arch_cpu_idle_time(int cpu);
171 int nohz_delay;
172 unsigned int sequence;
173 unsigned long long idle_count;
174 unsigned long long idle_time;
175 unsigned long long clock_idle_enter;
176 unsigned long long clock_idle_exit;
177 unsigned long long timer_idle_enter;
178 unsigned long long timer_idle_exit;
179};
180 169
181DECLARE_PER_CPU(struct s390_idle_data, s390_idle); 170#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
182
183cputime64_t s390_get_idle_time(int cpu);
184
185#define arch_idle_time(cpu) s390_get_idle_time(cpu)
186
187static inline int s390_nohz_delay(int cpu)
188{
189 return __get_cpu_var(s390_idle).nohz_delay != 0;
190}
191
192#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
193 171
194#endif /* _S390_CPUTIME_H */ 172#endif /* _S390_CPUTIME_H */
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 04a83f5773cd..60323c21938b 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -13,12 +13,13 @@
13#define OPERAND_FPR 0x2 /* Operand printed as %fx */ 13#define OPERAND_FPR 0x2 /* Operand printed as %fx */
14#define OPERAND_AR 0x4 /* Operand printed as %ax */ 14#define OPERAND_AR 0x4 /* Operand printed as %ax */
15#define OPERAND_CR 0x8 /* Operand printed as %cx */ 15#define OPERAND_CR 0x8 /* Operand printed as %cx */
16#define OPERAND_DISP 0x10 /* Operand printed as displacement */ 16#define OPERAND_VR 0x10 /* Operand printed as %vx */
17#define OPERAND_BASE 0x20 /* Operand printed as base register */ 17#define OPERAND_DISP 0x20 /* Operand printed as displacement */
18#define OPERAND_INDEX 0x40 /* Operand printed as index register */ 18#define OPERAND_BASE 0x40 /* Operand printed as base register */
19#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ 19#define OPERAND_INDEX 0x80 /* Operand printed as index register */
20#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ 20#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
21#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ 21#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
22#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
22 23
23 24
24struct s390_operand { 25struct s390_operand {
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 78f4f8711d58..f6e43d39e3d8 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -102,6 +102,7 @@
102#define HWCAP_S390_ETF3EH 256 102#define HWCAP_S390_ETF3EH 256
103#define HWCAP_S390_HIGH_GPRS 512 103#define HWCAP_S390_HIGH_GPRS 512
104#define HWCAP_S390_TE 1024 104#define HWCAP_S390_TE 1024
105#define HWCAP_S390_VXRS 2048
105 106
106/* 107/*
107 * These are used to set parameters in the core dumps. 108 * These are used to set parameters in the core dumps.
@@ -225,6 +226,6 @@ int arch_setup_additional_pages(struct linux_binprm *, int);
225extern unsigned long arch_randomize_brk(struct mm_struct *mm); 226extern unsigned long arch_randomize_brk(struct mm_struct *mm);
226#define arch_randomize_brk arch_randomize_brk 227#define arch_randomize_brk arch_randomize_brk
227 228
228void *fill_cpu_elf_notes(void *ptr, struct save_area *sa); 229void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
229 230
230#endif 231#endif
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index bf246dae1367..3aef8afec336 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,6 +4,7 @@
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6extern void _mcount(void); 6extern void _mcount(void);
7extern char ftrace_graph_caller_end;
7 8
8struct dyn_arch_ftrace { }; 9struct dyn_arch_ftrace { };
9 10
@@ -17,10 +18,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
17 18
18#endif /* __ASSEMBLY__ */ 19#endif /* __ASSEMBLY__ */
19 20
20#ifdef CONFIG_64BIT 21#define MCOUNT_INSN_SIZE 18
21#define MCOUNT_INSN_SIZE 12 22
22#else 23#define ARCH_SUPPORTS_FTRACE_OPS 1
23#define MCOUNT_INSN_SIZE 22
24#endif
25 24
26#endif /* _ASM_S390_FTRACE_H */ 25#endif /* _ASM_S390_FTRACE_H */
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
new file mode 100644
index 000000000000..6af037f574b8
--- /dev/null
+++ b/arch/s390/include/asm/idle.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright IBM Corp. 2014
3 *
4 * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
5 */
6
7#ifndef _S390_IDLE_H
8#define _S390_IDLE_H
9
10#include <linux/types.h>
11#include <linux/device.h>
12
13struct s390_idle_data {
14 unsigned int sequence;
15 unsigned long long idle_count;
16 unsigned long long idle_time;
17 unsigned long long clock_idle_enter;
18 unsigned long long clock_idle_exit;
19 unsigned long long timer_idle_enter;
20 unsigned long long timer_idle_exit;
21};
22
23extern struct device_attribute dev_attr_idle_count;
24extern struct device_attribute dev_attr_idle_time_us;
25
26#endif /* _S390_IDLE_H */
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index c81661e756a0..ece606c2ee86 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -89,12 +89,12 @@ extern u32 ipl_flags;
89extern u32 dump_prefix_page; 89extern u32 dump_prefix_page;
90 90
91struct dump_save_areas { 91struct dump_save_areas {
92 struct save_area **areas; 92 struct save_area_ext **areas;
93 int count; 93 int count;
94}; 94};
95 95
96extern struct dump_save_areas dump_save_areas; 96extern struct dump_save_areas dump_save_areas;
97struct save_area *dump_save_area_create(int cpu); 97struct save_area_ext *dump_save_area_create(int cpu);
98 98
99extern void do_reipl(void); 99extern void do_reipl(void);
100extern void do_halt(void); 100extern void do_halt(void);
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index c4dd400a2791..e787cc1bff8f 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -51,6 +51,7 @@ enum interruption_class {
51 IRQEXT_CMS, 51 IRQEXT_CMS,
52 IRQEXT_CMC, 52 IRQEXT_CMC,
53 IRQEXT_CMR, 53 IRQEXT_CMR,
54 IRQEXT_FTP,
54 IRQIO_CIO, 55 IRQIO_CIO,
55 IRQIO_QAI, 56 IRQIO_QAI,
56 IRQIO_DAS, 57 IRQIO_DAS,
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 4176dfe0fba1..98629173ce3b 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -84,6 +84,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
84int kprobe_exceptions_notify(struct notifier_block *self, 84int kprobe_exceptions_notify(struct notifier_block *self,
85 unsigned long val, void *data); 85 unsigned long val, void *data);
86 86
87int probe_is_prohibited_opcode(u16 *insn);
88int probe_get_fixup_type(u16 *insn);
89int probe_is_insn_relative_long(u16 *insn);
90
87#define flush_insn_slot(p) do { } while (0) 91#define flush_insn_slot(p) do { } while (0)
88 92
89#endif /* _ASM_S390_KPROBES_H */ 93#endif /* _ASM_S390_KPROBES_H */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 4349197ab9df..6cc51fe84410 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -11,6 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13#include <asm/cpu.h> 13#include <asm/cpu.h>
14#include <asm/types.h>
14 15
15#ifdef CONFIG_32BIT 16#ifdef CONFIG_32BIT
16 17
@@ -31,6 +32,11 @@ struct save_area {
31 u32 ctrl_regs[16]; 32 u32 ctrl_regs[16];
32} __packed; 33} __packed;
33 34
35struct save_area_ext {
36 struct save_area sa;
37 __vector128 vx_regs[32];
38};
39
34struct _lowcore { 40struct _lowcore {
35 psw_t restart_psw; /* 0x0000 */ 41 psw_t restart_psw; /* 0x0000 */
36 psw_t restart_old_psw; /* 0x0008 */ 42 psw_t restart_old_psw; /* 0x0008 */
@@ -183,6 +189,11 @@ struct save_area {
183 u64 ctrl_regs[16]; 189 u64 ctrl_regs[16];
184} __packed; 190} __packed;
185 191
192struct save_area_ext {
193 struct save_area sa;
194 __vector128 vx_regs[32];
195};
196
186struct _lowcore { 197struct _lowcore {
187 __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */ 198 __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
188 __u32 ipl_parmblock_ptr; /* 0x0014 */ 199 __u32 ipl_parmblock_ptr; /* 0x0014 */
@@ -310,7 +321,10 @@ struct _lowcore {
310 321
311 /* Extended facility list */ 322 /* Extended facility list */
312 __u64 stfle_fac_list[32]; /* 0x0f00 */ 323 __u64 stfle_fac_list[32]; /* 0x0f00 */
313 __u8 pad_0x1000[0x11b8-0x1000]; /* 0x1000 */ 324 __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
325
326 /* Pointer to vector register save area */
327 __u64 vector_save_area_addr; /* 0x11b0 */
314 328
315 /* 64 bit extparam used for pfault/diag 250: defined by architecture */ 329 /* 64 bit extparam used for pfault/diag 250: defined by architecture */
316 __u64 ext_params2; /* 0x11B8 */ 330 __u64 ext_params2; /* 0x11B8 */
@@ -334,9 +348,10 @@ struct _lowcore {
334 348
335 /* Transaction abort diagnostic block */ 349 /* Transaction abort diagnostic block */
336 __u8 pgm_tdb[256]; /* 0x1800 */ 350 __u8 pgm_tdb[256]; /* 0x1800 */
351 __u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */
337 352
338 /* align to the top of the prefix area */ 353 /* Software defined save area for vector registers */
339 __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */ 354 __u8 vector_save_area[1024]; /* 0x1c00 */
340} __packed; 355} __packed;
341 356
342#endif /* CONFIG_32BIT */ 357#endif /* CONFIG_32BIT */
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 35f8ec185616..3027a5a72b74 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -38,7 +38,7 @@ struct mci {
38 __u32 pm : 1; /* 22 psw program mask and cc validity */ 38 __u32 pm : 1; /* 22 psw program mask and cc validity */
39 __u32 ia : 1; /* 23 psw instruction address validity */ 39 __u32 ia : 1; /* 23 psw instruction address validity */
40 __u32 fa : 1; /* 24 failing storage address validity */ 40 __u32 fa : 1; /* 24 failing storage address validity */
41 __u32 : 1; /* 25 */ 41 __u32 vr : 1; /* 25 vector register validity */
42 __u32 ec : 1; /* 26 external damage code validity */ 42 __u32 ec : 1; /* 26 external damage code validity */
43 __u32 fp : 1; /* 27 floating point register validity */ 43 __u32 fp : 1; /* 27 floating point register validity */
44 __u32 gr : 1; /* 28 general register validity */ 44 __u32 gr : 1; /* 28 general register validity */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b7054356cc98..57c882761dea 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -217,7 +217,6 @@ extern unsigned long MODULES_END;
217 */ 217 */
218 218
219/* Hardware bits in the page table entry */ 219/* Hardware bits in the page table entry */
220#define _PAGE_CO 0x100 /* HW Change-bit override */
221#define _PAGE_PROTECT 0x200 /* HW read-only bit */ 220#define _PAGE_PROTECT 0x200 /* HW read-only bit */
222#define _PAGE_INVALID 0x400 /* HW invalid bit */ 221#define _PAGE_INVALID 0x400 /* HW invalid bit */
223#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ 222#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
@@ -234,8 +233,8 @@ extern unsigned long MODULES_END;
234#define __HAVE_ARCH_PTE_SPECIAL 233#define __HAVE_ARCH_PTE_SPECIAL
235 234
236/* Set of bits not changed in pte_modify */ 235/* Set of bits not changed in pte_modify */
237#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ 236#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
238 _PAGE_DIRTY | _PAGE_YOUNG) 237 _PAGE_YOUNG)
239 238
240/* 239/*
241 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the 240 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
@@ -354,7 +353,6 @@ extern unsigned long MODULES_END;
354 353
355#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */ 354#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
356#define _REGION3_ENTRY_RO 0x200 /* page protection bit */ 355#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
357#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
358 356
359/* Bits in the segment table entry */ 357/* Bits in the segment table entry */
360#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL 358#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
@@ -371,7 +369,6 @@ extern unsigned long MODULES_END;
371#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */ 369#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
372#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */ 370#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */
373#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ 371#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
374#define _SEGMENT_ENTRY_CO 0x0100 /* change-recording override */
375#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */ 372#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
376#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */ 373#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
377 374
@@ -873,8 +870,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
873 pgste = pgste_set_pte(ptep, pgste, entry); 870 pgste = pgste_set_pte(ptep, pgste, entry);
874 pgste_set_unlock(ptep, pgste); 871 pgste_set_unlock(ptep, pgste);
875 } else { 872 } else {
876 if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
877 pte_val(entry) |= _PAGE_CO;
878 *ptep = entry; 873 *ptep = entry;
879 } 874 }
880} 875}
@@ -1044,6 +1039,22 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
1044 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); 1039 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
1045} 1040}
1046 1041
1042static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
1043{
1044 unsigned long pto = (unsigned long) ptep;
1045
1046#ifndef CONFIG_64BIT
1047 /* pto in ESA mode must point to the start of the segment table */
1048 pto &= 0x7ffffc00;
1049#endif
1050 /* Invalidate a range of ptes + global TLB flush of the ptes */
1051 do {
1052 asm volatile(
1053 " .insn rrf,0xb2210000,%2,%0,%1,0"
1054 : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
1055 } while (nr != 255);
1056}
1057
1047static inline void ptep_flush_direct(struct mm_struct *mm, 1058static inline void ptep_flush_direct(struct mm_struct *mm,
1048 unsigned long address, pte_t *ptep) 1059 unsigned long address, pte_t *ptep)
1049{ 1060{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e568fc8a7250..d559bdb03d18 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -13,9 +13,11 @@
13 13
14#define CIF_MCCK_PENDING 0 /* machine check handling is pending */ 14#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
15#define CIF_ASCE 1 /* user asce needs fixup / uaccess */ 15#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
16#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
16 17
17#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) 18#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
18#define _CIF_ASCE (1<<CIF_ASCE) 19#define _CIF_ASCE (1<<CIF_ASCE)
20#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
19 21
20 22
21#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
@@ -43,6 +45,8 @@ static inline int test_cpu_flag(int flag)
43 return !!(S390_lowcore.cpu_flags & (1U << flag)); 45 return !!(S390_lowcore.cpu_flags & (1U << flag));
44} 46}
45 47
48#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
49
46/* 50/*
47 * Default implementation of macro that returns current 51 * Default implementation of macro that returns current
48 * instruction pointer ("program counter"). 52 * instruction pointer ("program counter").
@@ -113,6 +117,7 @@ struct thread_struct {
113 int ri_signum; 117 int ri_signum;
114#ifdef CONFIG_64BIT 118#ifdef CONFIG_64BIT
115 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 119 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
120 __vector128 *vxrs; /* Vector register save area */
116#endif 121#endif
117}; 122};
118 123
@@ -285,7 +290,12 @@ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
285 return (psw.addr - ilc) & mask; 290 return (psw.addr - ilc) & mask;
286#endif 291#endif
287} 292}
288 293
294/*
295 * Function to stop a processor until the next interrupt occurs
296 */
297void enabled_wait(void);
298
289/* 299/*
290 * Function to drop a processor into disabled wait state 300 * Function to drop a processor into disabled wait state
291 */ 301 */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 55d69dd7473c..be317feff7ac 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -161,6 +161,12 @@ static inline long regs_return_value(struct pt_regs *regs)
161 return regs->gprs[2]; 161 return regs->gprs[2];
162} 162}
163 163
164static inline void instruction_pointer_set(struct pt_regs *regs,
165 unsigned long val)
166{
167 regs->psw.addr = val | PSW_ADDR_AMODE;
168}
169
164int regs_query_register_offset(const char *name); 170int regs_query_register_offset(const char *name);
165const char *regs_query_register_name(unsigned int offset); 171const char *regs_query_register_name(unsigned int offset);
166unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); 172unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 089a49814c50..7736fdd72595 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -55,8 +55,8 @@ extern void detect_memory_memblock(void);
55#define MACHINE_FLAG_LPP (1UL << 13) 55#define MACHINE_FLAG_LPP (1UL << 13)
56#define MACHINE_FLAG_TOPOLOGY (1UL << 14) 56#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
57#define MACHINE_FLAG_TE (1UL << 15) 57#define MACHINE_FLAG_TE (1UL << 15)
58#define MACHINE_FLAG_RRBM (1UL << 16)
59#define MACHINE_FLAG_TLB_LC (1UL << 17) 58#define MACHINE_FLAG_TLB_LC (1UL << 17)
59#define MACHINE_FLAG_VX (1UL << 18)
60 60
61#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 61#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
62#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 62#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -78,8 +78,8 @@ extern void detect_memory_memblock(void);
78#define MACHINE_HAS_LPP (0) 78#define MACHINE_HAS_LPP (0)
79#define MACHINE_HAS_TOPOLOGY (0) 79#define MACHINE_HAS_TOPOLOGY (0)
80#define MACHINE_HAS_TE (0) 80#define MACHINE_HAS_TE (0)
81#define MACHINE_HAS_RRBM (0)
82#define MACHINE_HAS_TLB_LC (0) 81#define MACHINE_HAS_TLB_LC (0)
82#define MACHINE_HAS_VX (0)
83#else /* CONFIG_64BIT */ 83#else /* CONFIG_64BIT */
84#define MACHINE_HAS_IEEE (1) 84#define MACHINE_HAS_IEEE (1)
85#define MACHINE_HAS_CSP (1) 85#define MACHINE_HAS_CSP (1)
@@ -91,8 +91,8 @@ extern void detect_memory_memblock(void);
91#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) 91#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
92#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 92#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
93#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) 93#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
94#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
95#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) 94#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
95#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
96#endif /* CONFIG_64BIT */ 96#endif /* CONFIG_64BIT */
97 97
98/* 98/*
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index bf9c823d4020..49576115dbb7 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -15,6 +15,7 @@
15#define SIGP_SET_ARCHITECTURE 18 15#define SIGP_SET_ARCHITECTURE 18
16#define SIGP_COND_EMERGENCY_SIGNAL 19 16#define SIGP_COND_EMERGENCY_SIGNAL 19
17#define SIGP_SENSE_RUNNING 21 17#define SIGP_SENSE_RUNNING 21
18#define SIGP_STORE_ADDITIONAL_STATUS 23
18 19
19/* SIGP condition codes */ 20/* SIGP condition codes */
20#define SIGP_CC_ORDER_CODE_ACCEPTED 0 21#define SIGP_CC_ORDER_CODE_ACCEPTED 0
@@ -33,9 +34,10 @@
33 34
34#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
35 36
36static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status) 37static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
38 u32 *status)
37{ 39{
38 register unsigned int reg1 asm ("1") = parm; 40 register unsigned long reg1 asm ("1") = parm;
39 int cc; 41 int cc;
40 42
41 asm volatile( 43 asm volatile(
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 4f1307962a95..762d4f88af5a 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
29extern int smp_store_status(int cpu); 29extern int smp_store_status(int cpu);
30extern int smp_vcpu_scheduled(int cpu); 30extern int smp_vcpu_scheduled(int cpu);
31extern void smp_yield_cpu(int cpu); 31extern void smp_yield_cpu(int cpu);
32extern void smp_yield(void);
33extern void smp_cpu_set_polarization(int cpu, int val); 32extern void smp_cpu_set_polarization(int cpu, int val);
34extern int smp_cpu_get_polarization(int cpu); 33extern int smp_cpu_get_polarization(int cpu);
35extern void smp_fill_possible_mask(void); 34extern void smp_fill_possible_mask(void);
@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
50static inline int smp_store_status(int cpu) { return 0; } 49static inline int smp_store_status(int cpu) { return 0; }
51static inline int smp_vcpu_scheduled(int cpu) { return 1; } 50static inline int smp_vcpu_scheduled(int cpu) { return 1; }
52static inline void smp_yield_cpu(int cpu) { } 51static inline void smp_yield_cpu(int cpu) { }
53static inline void smp_yield(void) { }
54static inline void smp_fill_possible_mask(void) { } 52static inline void smp_fill_possible_mask(void) { }
55 53
56#endif /* CONFIG_SMP */ 54#endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 96879f7ad6da..d6bdf906caa5 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
37 * (the type definitions are in asm/spinlock_types.h) 37 * (the type definitions are in asm/spinlock_types.h)
38 */ 38 */
39 39
40void arch_lock_relax(unsigned int cpu);
41
40void arch_spin_lock_wait(arch_spinlock_t *); 42void arch_spin_lock_wait(arch_spinlock_t *);
41int arch_spin_trylock_retry(arch_spinlock_t *); 43int arch_spin_trylock_retry(arch_spinlock_t *);
42void arch_spin_relax(arch_spinlock_t *);
43void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); 44void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
44 45
46static inline void arch_spin_relax(arch_spinlock_t *lock)
47{
48 arch_lock_relax(lock->lock);
49}
50
45static inline u32 arch_spin_lockval(int cpu) 51static inline u32 arch_spin_lockval(int cpu)
46{ 52{
47 return ~cpu; 53 return ~cpu;
@@ -64,11 +70,6 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
64 _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL)); 70 _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
65} 71}
66 72
67static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
68{
69 return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
70}
71
72static inline void arch_spin_lock(arch_spinlock_t *lp) 73static inline void arch_spin_lock(arch_spinlock_t *lp)
73{ 74{
74 if (!arch_spin_trylock_once(lp)) 75 if (!arch_spin_trylock_once(lp))
@@ -91,7 +92,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
91 92
92static inline void arch_spin_unlock(arch_spinlock_t *lp) 93static inline void arch_spin_unlock(arch_spinlock_t *lp)
93{ 94{
94 arch_spin_tryrelease_once(lp); 95 typecheck(unsigned int, lp->lock);
96 asm volatile(
97 __ASM_BARRIER
98 "st %1,%0\n"
99 : "+Q" (lp->lock)
100 : "d" (0)
101 : "cc", "memory");
95} 102}
96 103
97static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 104static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
@@ -123,13 +130,12 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
123 */ 130 */
124#define arch_write_can_lock(x) ((x)->lock == 0) 131#define arch_write_can_lock(x) ((x)->lock == 0)
125 132
126extern void _raw_read_lock_wait(arch_rwlock_t *lp);
127extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
128extern int _raw_read_trylock_retry(arch_rwlock_t *lp); 133extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
129extern void _raw_write_lock_wait(arch_rwlock_t *lp);
130extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
131extern int _raw_write_trylock_retry(arch_rwlock_t *lp); 134extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
132 135
136#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
137#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
138
133static inline int arch_read_trylock_once(arch_rwlock_t *rw) 139static inline int arch_read_trylock_once(arch_rwlock_t *rw)
134{ 140{
135 unsigned int old = ACCESS_ONCE(rw->lock); 141 unsigned int old = ACCESS_ONCE(rw->lock);
@@ -144,16 +150,82 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
144 _raw_compare_and_swap(&rw->lock, 0, 0x80000000)); 150 _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
145} 151}
146 152
153#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
154
155#define __RAW_OP_OR "lao"
156#define __RAW_OP_AND "lan"
157#define __RAW_OP_ADD "laa"
158
159#define __RAW_LOCK(ptr, op_val, op_string) \
160({ \
161 unsigned int old_val; \
162 \
163 typecheck(unsigned int *, ptr); \
164 asm volatile( \
165 op_string " %0,%2,%1\n" \
166 "bcr 14,0\n" \
167 : "=d" (old_val), "+Q" (*ptr) \
168 : "d" (op_val) \
169 : "cc", "memory"); \
170 old_val; \
171})
172
173#define __RAW_UNLOCK(ptr, op_val, op_string) \
174({ \
175 unsigned int old_val; \
176 \
177 typecheck(unsigned int *, ptr); \
178 asm volatile( \
179 "bcr 14,0\n" \
180 op_string " %0,%2,%1\n" \
181 : "=d" (old_val), "+Q" (*ptr) \
182 : "d" (op_val) \
183 : "cc", "memory"); \
184 old_val; \
185})
186
187extern void _raw_read_lock_wait(arch_rwlock_t *lp);
188extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
189
147static inline void arch_read_lock(arch_rwlock_t *rw) 190static inline void arch_read_lock(arch_rwlock_t *rw)
148{ 191{
149 if (!arch_read_trylock_once(rw)) 192 unsigned int old;
193
194 old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
195 if ((int) old < 0)
150 _raw_read_lock_wait(rw); 196 _raw_read_lock_wait(rw);
151} 197}
152 198
153static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) 199static inline void arch_read_unlock(arch_rwlock_t *rw)
200{
201 __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
202}
203
204static inline void arch_write_lock(arch_rwlock_t *rw)
205{
206 unsigned int old;
207
208 old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
209 if (old != 0)
210 _raw_write_lock_wait(rw, old);
211 rw->owner = SPINLOCK_LOCKVAL;
212}
213
214static inline void arch_write_unlock(arch_rwlock_t *rw)
215{
216 rw->owner = 0;
217 __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
218}
219
220#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
221
222extern void _raw_read_lock_wait(arch_rwlock_t *lp);
223extern void _raw_write_lock_wait(arch_rwlock_t *lp);
224
225static inline void arch_read_lock(arch_rwlock_t *rw)
154{ 226{
155 if (!arch_read_trylock_once(rw)) 227 if (!arch_read_trylock_once(rw))
156 _raw_read_lock_wait_flags(rw, flags); 228 _raw_read_lock_wait(rw);
157} 229}
158 230
159static inline void arch_read_unlock(arch_rwlock_t *rw) 231static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -169,19 +241,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
169{ 241{
170 if (!arch_write_trylock_once(rw)) 242 if (!arch_write_trylock_once(rw))
171 _raw_write_lock_wait(rw); 243 _raw_write_lock_wait(rw);
172} 244 rw->owner = SPINLOCK_LOCKVAL;
173
174static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
175{
176 if (!arch_write_trylock_once(rw))
177 _raw_write_lock_wait_flags(rw, flags);
178} 245}
179 246
180static inline void arch_write_unlock(arch_rwlock_t *rw) 247static inline void arch_write_unlock(arch_rwlock_t *rw)
181{ 248{
182 _raw_compare_and_swap(&rw->lock, 0x80000000, 0); 249 typecheck(unsigned int, rw->lock);
250
251 rw->owner = 0;
252 asm volatile(
253 __ASM_BARRIER
254 "st %1,%0\n"
255 : "+Q" (rw->lock)
256 : "d" (0)
257 : "cc", "memory");
183} 258}
184 259
260#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
261
185static inline int arch_read_trylock(arch_rwlock_t *rw) 262static inline int arch_read_trylock(arch_rwlock_t *rw)
186{ 263{
187 if (!arch_read_trylock_once(rw)) 264 if (!arch_read_trylock_once(rw))
@@ -191,12 +268,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
191 268
192static inline int arch_write_trylock(arch_rwlock_t *rw) 269static inline int arch_write_trylock(arch_rwlock_t *rw)
193{ 270{
194 if (!arch_write_trylock_once(rw)) 271 if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
195 return _raw_write_trylock_retry(rw); 272 return 0;
273 rw->owner = SPINLOCK_LOCKVAL;
196 return 1; 274 return 1;
197} 275}
198 276
199#define arch_read_relax(lock) cpu_relax() 277static inline void arch_read_relax(arch_rwlock_t *rw)
200#define arch_write_relax(lock) cpu_relax() 278{
279 arch_lock_relax(rw->owner);
280}
281
282static inline void arch_write_relax(arch_rwlock_t *rw)
283{
284 arch_lock_relax(rw->owner);
285}
201 286
202#endif /* __ASM_SPINLOCK_H */ 287#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b2cd6ff7c2c5..d84b6939237c 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,6 +13,7 @@ typedef struct {
13 13
14typedef struct { 14typedef struct {
15 unsigned int lock; 15 unsigned int lock;
16 unsigned int owner;
16} arch_rwlock_t; 17} arch_rwlock_t;
17 18
18#define __ARCH_RW_LOCK_UNLOCKED { 0 } 19#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 18ea9e3f8142..2542a7e4c8b4 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -103,6 +103,61 @@ static inline void restore_fp_regs(freg_t *fprs)
103 asm volatile("ld 15,%0" : : "Q" (fprs[15])); 103 asm volatile("ld 15,%0" : : "Q" (fprs[15]));
104} 104}
105 105
106static inline void save_vx_regs(__vector128 *vxrs)
107{
108 typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
109
110 asm volatile(
111 " la 1,%0\n"
112 " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
113 " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
114 : "=Q" (*(addrtype *) vxrs) : : "1");
115}
116
117static inline void save_vx_regs_safe(__vector128 *vxrs)
118{
119 unsigned long cr0, flags;
120
121 flags = arch_local_irq_save();
122 __ctl_store(cr0, 0, 0);
123 __ctl_set_bit(0, 17);
124 __ctl_set_bit(0, 18);
125 save_vx_regs(vxrs);
126 __ctl_load(cr0, 0, 0);
127 arch_local_irq_restore(flags);
128}
129
130static inline void restore_vx_regs(__vector128 *vxrs)
131{
132 typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
133
134 asm volatile(
135 " la 1,%0\n"
136 " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
137 " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
138 : : "Q" (*(addrtype *) vxrs) : "1");
139}
140
141static inline void save_fp_vx_regs(struct task_struct *task)
142{
143#ifdef CONFIG_64BIT
144 if (task->thread.vxrs)
145 save_vx_regs(task->thread.vxrs);
146 else
147#endif
148 save_fp_regs(task->thread.fp_regs.fprs);
149}
150
151static inline void restore_fp_vx_regs(struct task_struct *task)
152{
153#ifdef CONFIG_64BIT
154 if (task->thread.vxrs)
155 restore_vx_regs(task->thread.vxrs);
156 else
157#endif
158 restore_fp_regs(task->thread.fp_regs.fprs);
159}
160
106static inline void save_access_regs(unsigned int *acrs) 161static inline void save_access_regs(unsigned int *acrs)
107{ 162{
108 typedef struct { int _[NUM_ACRS]; } acrstype; 163 typedef struct { int _[NUM_ACRS]; } acrstype;
@@ -120,16 +175,16 @@ static inline void restore_access_regs(unsigned int *acrs)
120#define switch_to(prev,next,last) do { \ 175#define switch_to(prev,next,last) do { \
121 if (prev->mm) { \ 176 if (prev->mm) { \
122 save_fp_ctl(&prev->thread.fp_regs.fpc); \ 177 save_fp_ctl(&prev->thread.fp_regs.fpc); \
123 save_fp_regs(prev->thread.fp_regs.fprs); \ 178 save_fp_vx_regs(prev); \
124 save_access_regs(&prev->thread.acrs[0]); \ 179 save_access_regs(&prev->thread.acrs[0]); \
125 save_ri_cb(prev->thread.ri_cb); \ 180 save_ri_cb(prev->thread.ri_cb); \
126 } \ 181 } \
127 if (next->mm) { \ 182 if (next->mm) { \
183 update_cr_regs(next); \
128 restore_fp_ctl(&next->thread.fp_regs.fpc); \ 184 restore_fp_ctl(&next->thread.fp_regs.fpc); \
129 restore_fp_regs(next->thread.fp_regs.fprs); \ 185 restore_fp_vx_regs(next); \
130 restore_access_regs(&next->thread.acrs[0]); \ 186 restore_access_regs(&next->thread.acrs[0]); \
131 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ 187 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
132 update_cr_regs(next); \
133 } \ 188 } \
134 prev = __switch_to(prev,next); \ 189 prev = __switch_to(prev,next); \
135} while (0) 190} while (0)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index b833e9c0bfbf..4d62fd5b56e5 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -84,11 +84,13 @@ static inline struct thread_info *current_thread_info(void)
84#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 84#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
85#define TIF_SECCOMP 5 /* secure computing */ 85#define TIF_SECCOMP 5 /* secure computing */
86#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 86#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
87#define TIF_UPROBE 7 /* breakpointed or single-stepping */
87#define TIF_31BIT 16 /* 32bit process */ 88#define TIF_31BIT 16 /* 32bit process */
88#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 89#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
89#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ 90#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
90#define TIF_SINGLE_STEP 19 /* This task is single stepped */ 91#define TIF_SINGLE_STEP 19 /* This task is single stepped */
91#define TIF_BLOCK_STEP 20 /* This task is block stepped */ 92#define TIF_BLOCK_STEP 20 /* This task is block stepped */
93#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
92 94
93#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 95#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
94#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 96#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -97,6 +99,7 @@ static inline struct thread_info *current_thread_info(void)
97#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 99#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
98#define _TIF_SECCOMP (1<<TIF_SECCOMP) 100#define _TIF_SECCOMP (1<<TIF_SECCOMP)
99#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 101#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
102#define _TIF_UPROBE (1<<TIF_UPROBE)
100#define _TIF_31BIT (1<<TIF_31BIT) 103#define _TIF_31BIT (1<<TIF_31BIT)
101#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 104#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
102 105
diff --git a/arch/s390/include/asm/uprobes.h b/arch/s390/include/asm/uprobes.h
new file mode 100644
index 000000000000..1411dff7fea7
--- /dev/null
+++ b/arch/s390/include/asm/uprobes.h
@@ -0,0 +1,42 @@
1/*
2 * User-space Probes (UProbes) for s390
3 *
4 * Copyright IBM Corp. 2014
5 * Author(s): Jan Willeke,
6 */
7
8#ifndef _ASM_UPROBES_H
9#define _ASM_UPROBES_H
10
11#include <linux/notifier.h>
12
13typedef u16 uprobe_opcode_t;
14
15#define UPROBE_XOL_SLOT_BYTES 256 /* cache aligned */
16
17#define UPROBE_SWBP_INSN 0x0002
18#define UPROBE_SWBP_INSN_SIZE 2
19
20struct arch_uprobe {
21 union{
22 uprobe_opcode_t insn[3];
23 uprobe_opcode_t ixol[3];
24 };
25 unsigned int saved_per : 1;
26 unsigned int saved_int_code;
27};
28
29struct arch_uprobe_task {
30};
31
32int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
33 unsigned long addr);
34int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
35int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
36bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
37int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
38 void *data);
39void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
40unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
41 struct pt_regs *regs);
42#endif /* _ASM_UPROBES_H */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index bc9746a7d47c..a62526d09201 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -22,13 +22,17 @@ struct vdso_data {
22 __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */ 22 __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
23 __u64 xtime_clock_sec; /* Kernel time 0x10 */ 23 __u64 xtime_clock_sec; /* Kernel time 0x10 */
24 __u64 xtime_clock_nsec; /* 0x18 */ 24 __u64 xtime_clock_nsec; /* 0x18 */
25 __u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */ 25 __u64 xtime_coarse_sec; /* Coarse kernel time 0x20 */
26 __u64 wtom_clock_nsec; /* 0x28 */ 26 __u64 xtime_coarse_nsec; /* 0x28 */
27 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ 27 __u64 wtom_clock_sec; /* Wall to monotonic clock 0x30 */
28 __u32 tz_dsttime; /* Type of dst correction 0x34 */ 28 __u64 wtom_clock_nsec; /* 0x38 */
29 __u32 ectg_available; /* ECTG instruction present 0x38 */ 29 __u64 wtom_coarse_sec; /* Coarse wall to monotonic 0x40 */
30 __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */ 30 __u64 wtom_coarse_nsec; /* 0x48 */
31 __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */ 31 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x50 */
32 __u32 tz_dsttime; /* Type of dst correction 0x54 */
33 __u32 ectg_available; /* ECTG instruction present 0x58 */
34 __u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
35 __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
32}; 36};
33 37
34struct vdso_per_cpu_data { 38struct vdso_per_cpu_data {
diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h
index bfe25d513ad2..10a179af62d8 100644
--- a/arch/s390/include/asm/vtimer.h
+++ b/arch/s390/include/asm/vtimer.h
@@ -28,6 +28,4 @@ extern int del_virt_timer(struct vtimer_list *timer);
28extern void init_cpu_vtimer(void); 28extern void init_cpu_vtimer(void);
29extern void vtime_init(void); 29extern void vtime_init(void);
30 30
31extern void vtime_stop_cpu(void);
32
33#endif /* _ASM_S390_TIMER_H */ 31#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h
index b30de9c01bbe..5f0b8d7ddb0b 100644
--- a/arch/s390/include/uapi/asm/sigcontext.h
+++ b/arch/s390/include/uapi/asm/sigcontext.h
@@ -7,10 +7,14 @@
7#define _ASM_S390_SIGCONTEXT_H 7#define _ASM_S390_SIGCONTEXT_H
8 8
9#include <linux/compiler.h> 9#include <linux/compiler.h>
10#include <linux/types.h>
10 11
11#define __NUM_GPRS 16 12#define __NUM_GPRS 16
12#define __NUM_FPRS 16 13#define __NUM_FPRS 16
13#define __NUM_ACRS 16 14#define __NUM_ACRS 16
15#define __NUM_VXRS 32
16#define __NUM_VXRS_LOW 16
17#define __NUM_VXRS_HIGH 16
14 18
15#ifndef __s390x__ 19#ifndef __s390x__
16 20
@@ -59,6 +63,16 @@ typedef struct
59 _s390_fp_regs fpregs; 63 _s390_fp_regs fpregs;
60} _sigregs; 64} _sigregs;
61 65
66typedef struct
67{
68#ifndef __s390x__
69 unsigned long gprs_high[__NUM_GPRS];
70#endif
71 unsigned long long vxrs_low[__NUM_VXRS_LOW];
72 __vector128 vxrs_high[__NUM_VXRS_HIGH];
73 unsigned char __reserved[128];
74} _sigregs_ext;
75
62struct sigcontext 76struct sigcontext
63{ 77{
64 unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS]; 78 unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS];
diff --git a/arch/s390/include/uapi/asm/types.h b/arch/s390/include/uapi/asm/types.h
index 038f2b9178a4..3c3951e3415b 100644
--- a/arch/s390/include/uapi/asm/types.h
+++ b/arch/s390/include/uapi/asm/types.h
@@ -17,6 +17,10 @@
17typedef unsigned long addr_t; 17typedef unsigned long addr_t;
18typedef __signed__ long saddr_t; 18typedef __signed__ long saddr_t;
19 19
20typedef struct {
21 __u32 u[4];
22} __vector128;
23
20#endif /* __ASSEMBLY__ */ 24#endif /* __ASSEMBLY__ */
21 25
22#endif /* _UAPI_S390_TYPES_H */ 26#endif /* _UAPI_S390_TYPES_H */
diff --git a/arch/s390/include/uapi/asm/ucontext.h b/arch/s390/include/uapi/asm/ucontext.h
index 3e077b2a4705..64a69aa5dde0 100644
--- a/arch/s390/include/uapi/asm/ucontext.h
+++ b/arch/s390/include/uapi/asm/ucontext.h
@@ -7,10 +7,15 @@
7#ifndef _ASM_S390_UCONTEXT_H 7#ifndef _ASM_S390_UCONTEXT_H
8#define _ASM_S390_UCONTEXT_H 8#define _ASM_S390_UCONTEXT_H
9 9
10#define UC_EXTENDED 0x00000001 10#define UC_GPRS_HIGH 1 /* uc_mcontext_ext has valid high gprs */
11 11#define UC_VXRS 2 /* uc_mcontext_ext has valid vector regs */
12#ifndef __s390x__
13 12
13/*
14 * The struct ucontext_extended describes how the registers are stored
15 * on a rt signal frame. Please note that the structure is not fixed,
16 * if new CPU registers are added to the user state the size of the
17 * struct ucontext_extended will increase.
18 */
14struct ucontext_extended { 19struct ucontext_extended {
15 unsigned long uc_flags; 20 unsigned long uc_flags;
16 struct ucontext *uc_link; 21 struct ucontext *uc_link;
@@ -19,11 +24,9 @@ struct ucontext_extended {
19 sigset_t uc_sigmask; 24 sigset_t uc_sigmask;
20 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ 25 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
21 unsigned char __unused[128 - sizeof(sigset_t)]; 26 unsigned char __unused[128 - sizeof(sigset_t)];
22 unsigned long uc_gprs_high[16]; 27 _sigregs_ext uc_mcontext_ext;
23}; 28};
24 29
25#endif
26
27struct ucontext { 30struct ucontext {
28 unsigned long uc_flags; 31 unsigned long uc_flags;
29 struct ucontext *uc_link; 32 struct ucontext *uc_link;
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index a95c4ca99617..204c43a4c245 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
28 28
29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
30 30
31obj-y := traps.o time.o process.o base.o early.o setup.o vtime.o 31obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o 33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
@@ -52,11 +52,9 @@ obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y)
52 52
53obj-$(CONFIG_STACKTRACE) += stacktrace.o 53obj-$(CONFIG_STACKTRACE) += stacktrace.o
54obj-$(CONFIG_KPROBES) += kprobes.o 54obj-$(CONFIG_KPROBES) += kprobes.o
55obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) 55obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
56obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
57obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
58obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
59obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 56obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
57obj-$(CONFIG_UPROBES) += uprobes.o
60 58
61ifdef CONFIG_64BIT 59ifdef CONFIG_64BIT
62obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ 60obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index afe1715a4eb7..ef279a136801 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,7 +9,7 @@
9#include <linux/kbuild.h> 9#include <linux/kbuild.h>
10#include <linux/kvm_host.h> 10#include <linux/kvm_host.h>
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <asm/cputime.h> 12#include <asm/idle.h>
13#include <asm/vdso.h> 13#include <asm/vdso.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15 15
@@ -62,8 +62,12 @@ int main(void)
62 DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp)); 62 DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
63 DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec)); 63 DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
64 DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); 64 DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
65 DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
66 DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
65 DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); 67 DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
66 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 68 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
69 DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec));
70 DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec));
67 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 71 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
68 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 72 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
69 DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); 73 DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
@@ -73,8 +77,11 @@ int main(void)
73 /* constants used by the vdso */ 77 /* constants used by the vdso */
74 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); 78 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
75 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); 79 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
80 DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
81 DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
76 DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); 82 DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
77 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 83 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
84 DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
78 BLANK(); 85 BLANK();
79 /* idle data offsets */ 86 /* idle data offsets */
80 DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter)); 87 DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 70d4b7c4beaa..a0a886c04977 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -50,6 +50,14 @@ typedef struct
50 _s390_fp_regs32 fpregs; 50 _s390_fp_regs32 fpregs;
51} _sigregs32; 51} _sigregs32;
52 52
53typedef struct
54{
55 __u32 gprs_high[__NUM_GPRS];
56 __u64 vxrs_low[__NUM_VXRS_LOW];
57 __vector128 vxrs_high[__NUM_VXRS_HIGH];
58 __u8 __reserved[128];
59} _sigregs_ext32;
60
53#define _SIGCONTEXT_NSIG32 64 61#define _SIGCONTEXT_NSIG32 64
54#define _SIGCONTEXT_NSIG_BPW32 32 62#define _SIGCONTEXT_NSIG_BPW32 32
55#define __SIGNAL_FRAMESIZE32 96 63#define __SIGNAL_FRAMESIZE32 96
@@ -72,6 +80,7 @@ struct ucontext32 {
72 compat_sigset_t uc_sigmask; 80 compat_sigset_t uc_sigmask;
73 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ 81 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
74 unsigned char __unused[128 - sizeof(compat_sigset_t)]; 82 unsigned char __unused[128 - sizeof(compat_sigset_t)];
83 _sigregs_ext32 uc_mcontext_ext;
75}; 84};
76 85
77struct stat64_emu31; 86struct stat64_emu31;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 598b0b42668b..009f5eb11125 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -36,17 +36,16 @@ typedef struct
36 struct sigcontext32 sc; 36 struct sigcontext32 sc;
37 _sigregs32 sregs; 37 _sigregs32 sregs;
38 int signo; 38 int signo;
39 __u32 gprs_high[NUM_GPRS]; 39 _sigregs_ext32 sregs_ext;
40 __u8 retcode[S390_SYSCALL_SIZE]; 40 __u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
41} sigframe32; 41} sigframe32;
42 42
43typedef struct 43typedef struct
44{ 44{
45 __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; 45 __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
46 __u8 retcode[S390_SYSCALL_SIZE]; 46 __u16 svc_insn;
47 compat_siginfo_t info; 47 compat_siginfo_t info;
48 struct ucontext32 uc; 48 struct ucontext32 uc;
49 __u32 gprs_high[NUM_GPRS];
50} rt_sigframe32; 49} rt_sigframe32;
51 50
52int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) 51int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
@@ -151,6 +150,38 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
151 return err ? -EFAULT : 0; 150 return err ? -EFAULT : 0;
152} 151}
153 152
153/* Store registers needed to create the signal frame */
154static void store_sigregs(void)
155{
156 int i;
157
158 save_access_regs(current->thread.acrs);
159 save_fp_ctl(&current->thread.fp_regs.fpc);
160 if (current->thread.vxrs) {
161 save_vx_regs(current->thread.vxrs);
162 for (i = 0; i < __NUM_FPRS; i++)
163 current->thread.fp_regs.fprs[i] =
164 *(freg_t *)(current->thread.vxrs + i);
165 } else
166 save_fp_regs(current->thread.fp_regs.fprs);
167}
168
169/* Load registers after signal return */
170static void load_sigregs(void)
171{
172 int i;
173
174 restore_access_regs(current->thread.acrs);
175 /* restore_fp_ctl is done in restore_sigregs */
176 if (current->thread.vxrs) {
177 for (i = 0; i < __NUM_FPRS; i++)
178 *(freg_t *)(current->thread.vxrs + i) =
179 current->thread.fp_regs.fprs[i];
180 restore_vx_regs(current->thread.vxrs);
181 } else
182 restore_fp_regs(current->thread.fp_regs.fprs);
183}
184
154static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) 185static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
155{ 186{
156 _sigregs32 user_sregs; 187 _sigregs32 user_sregs;
@@ -163,11 +194,8 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
163 (__u32)(regs->psw.mask & PSW_MASK_BA); 194 (__u32)(regs->psw.mask & PSW_MASK_BA);
164 for (i = 0; i < NUM_GPRS; i++) 195 for (i = 0; i < NUM_GPRS; i++)
165 user_sregs.regs.gprs[i] = (__u32) regs->gprs[i]; 196 user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
166 save_access_regs(current->thread.acrs);
167 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 197 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
168 sizeof(user_sregs.regs.acrs)); 198 sizeof(user_sregs.regs.acrs));
169 save_fp_ctl(&current->thread.fp_regs.fpc);
170 save_fp_regs(current->thread.fp_regs.fprs);
171 memcpy(&user_sregs.fpregs, &current->thread.fp_regs, 199 memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
172 sizeof(user_sregs.fpregs)); 200 sizeof(user_sregs.fpregs));
173 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32))) 201 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
@@ -207,37 +235,67 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
207 regs->gprs[i] = (__u64) user_sregs.regs.gprs[i]; 235 regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
208 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, 236 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
209 sizeof(current->thread.acrs)); 237 sizeof(current->thread.acrs));
210 restore_access_regs(current->thread.acrs);
211 238
212 memcpy(&current->thread.fp_regs, &user_sregs.fpregs, 239 memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
213 sizeof(current->thread.fp_regs)); 240 sizeof(current->thread.fp_regs));
214 241
215 restore_fp_regs(current->thread.fp_regs.fprs);
216 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ 242 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
217 return 0; 243 return 0;
218} 244}
219 245
220static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs) 246static int save_sigregs_ext32(struct pt_regs *regs,
247 _sigregs_ext32 __user *sregs_ext)
221{ 248{
222 __u32 gprs_high[NUM_GPRS]; 249 __u32 gprs_high[NUM_GPRS];
250 __u64 vxrs[__NUM_VXRS_LOW];
223 int i; 251 int i;
224 252
253 /* Save high gprs to signal stack */
225 for (i = 0; i < NUM_GPRS; i++) 254 for (i = 0; i < NUM_GPRS; i++)
226 gprs_high[i] = regs->gprs[i] >> 32; 255 gprs_high[i] = regs->gprs[i] >> 32;
227 if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high))) 256 if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
257 sizeof(sregs_ext->gprs_high)))
228 return -EFAULT; 258 return -EFAULT;
259
260 /* Save vector registers to signal stack */
261 if (current->thread.vxrs) {
262 for (i = 0; i < __NUM_VXRS_LOW; i++)
263 vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
264 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
265 sizeof(sregs_ext->vxrs_low)) ||
266 __copy_to_user(&sregs_ext->vxrs_high,
267 current->thread.vxrs + __NUM_VXRS_LOW,
268 sizeof(sregs_ext->vxrs_high)))
269 return -EFAULT;
270 }
229 return 0; 271 return 0;
230} 272}
231 273
232static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs) 274static int restore_sigregs_ext32(struct pt_regs *regs,
275 _sigregs_ext32 __user *sregs_ext)
233{ 276{
234 __u32 gprs_high[NUM_GPRS]; 277 __u32 gprs_high[NUM_GPRS];
278 __u64 vxrs[__NUM_VXRS_LOW];
235 int i; 279 int i;
236 280
237 if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high))) 281 /* Restore high gprs from signal stack */
282 if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
283 sizeof(&sregs_ext->gprs_high)))
238 return -EFAULT; 284 return -EFAULT;
239 for (i = 0; i < NUM_GPRS; i++) 285 for (i = 0; i < NUM_GPRS; i++)
240 *(__u32 *)&regs->gprs[i] = gprs_high[i]; 286 *(__u32 *)&regs->gprs[i] = gprs_high[i];
287
288 /* Restore vector registers from signal stack */
289 if (current->thread.vxrs) {
290 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
291 sizeof(sregs_ext->vxrs_low)) ||
292 __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
293 &sregs_ext->vxrs_high,
294 sizeof(sregs_ext->vxrs_high)))
295 return -EFAULT;
296 for (i = 0; i < __NUM_VXRS_LOW; i++)
297 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
298 }
241 return 0; 299 return 0;
242} 300}
243 301
@@ -252,8 +310,9 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
252 set_current_blocked(&set); 310 set_current_blocked(&set);
253 if (restore_sigregs32(regs, &frame->sregs)) 311 if (restore_sigregs32(regs, &frame->sregs))
254 goto badframe; 312 goto badframe;
255 if (restore_sigregs_gprs_high(regs, frame->gprs_high)) 313 if (restore_sigregs_ext32(regs, &frame->sregs_ext))
256 goto badframe; 314 goto badframe;
315 load_sigregs();
257 return regs->gprs[2]; 316 return regs->gprs[2];
258badframe: 317badframe:
259 force_sig(SIGSEGV, current); 318 force_sig(SIGSEGV, current);
@@ -269,12 +328,13 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
269 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 328 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
270 goto badframe; 329 goto badframe;
271 set_current_blocked(&set); 330 set_current_blocked(&set);
331 if (compat_restore_altstack(&frame->uc.uc_stack))
332 goto badframe;
272 if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) 333 if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
273 goto badframe; 334 goto badframe;
274 if (restore_sigregs_gprs_high(regs, frame->gprs_high)) 335 if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
275 goto badframe; 336 goto badframe;
276 if (compat_restore_altstack(&frame->uc.uc_stack)) 337 load_sigregs();
277 goto badframe;
278 return regs->gprs[2]; 338 return regs->gprs[2];
279badframe: 339badframe:
280 force_sig(SIGSEGV, current); 340 force_sig(SIGSEGV, current);
@@ -324,37 +384,64 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
324 struct pt_regs *regs) 384 struct pt_regs *regs)
325{ 385{
326 int sig = ksig->sig; 386 int sig = ksig->sig;
327 sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(sigframe32)); 387 sigframe32 __user *frame;
328 388 struct sigcontext32 sc;
389 unsigned long restorer;
390 size_t frame_size;
391
392 /*
393 * gprs_high are always present for 31-bit compat tasks.
394 * The space for vector registers is only allocated if
395 * the machine supports it
396 */
397 frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
398 if (!MACHINE_HAS_VX)
399 frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
400 sizeof(frame->sregs_ext.vxrs_high);
401 frame = get_sigframe(&ksig->ka, regs, frame_size);
329 if (frame == (void __user *) -1UL) 402 if (frame == (void __user *) -1UL)
330 return -EFAULT; 403 return -EFAULT;
331 404
332 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) 405 /* Set up backchain. */
406 if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
407 return -EFAULT;
408
409 /* Create struct sigcontext32 on the signal stack */
410 memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
411 sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
412 if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
333 return -EFAULT; 413 return -EFAULT;
334 414
415 /* Store registers needed to create the signal frame */
416 store_sigregs();
417
418 /* Create _sigregs32 on the signal stack */
335 if (save_sigregs32(regs, &frame->sregs)) 419 if (save_sigregs32(regs, &frame->sregs))
336 return -EFAULT; 420 return -EFAULT;
337 if (save_sigregs_gprs_high(regs, frame->gprs_high)) 421
422 /* Place signal number on stack to allow backtrace from handler. */
423 if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
338 return -EFAULT; 424 return -EFAULT;
339 if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs)) 425
426 /* Create _sigregs_ext32 on the signal stack */
427 if (save_sigregs_ext32(regs, &frame->sregs_ext))
340 return -EFAULT; 428 return -EFAULT;
341 429
342 /* Set up to return from userspace. If provided, use a stub 430 /* Set up to return from userspace. If provided, use a stub
343 already in userspace. */ 431 already in userspace. */
344 if (ksig->ka.sa.sa_flags & SA_RESTORER) { 432 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
345 regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; 433 restorer = (unsigned long __force)
434 ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
346 } else { 435 } else {
347 regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; 436 /* Signal frames without vectors registers are short ! */
348 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, 437 __u16 __user *svc = (void *) frame + frame_size - 2;
349 (u16 __force __user *)(frame->retcode))) 438 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
350 return -EFAULT; 439 return -EFAULT;
440 restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
351 } 441 }
352 442
353 /* Set up backchain. */
354 if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
355 return -EFAULT;
356
357 /* Set up registers for signal handler */ 443 /* Set up registers for signal handler */
444 regs->gprs[14] = restorer;
358 regs->gprs[15] = (__force __u64) frame; 445 regs->gprs[15] = (__force __u64) frame;
359 /* Force 31 bit amode and default user address space control. */ 446 /* Force 31 bit amode and default user address space control. */
360 regs->psw.mask = PSW_MASK_BA | 447 regs->psw.mask = PSW_MASK_BA |
@@ -375,50 +462,69 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
375 regs->gprs[6] = task_thread_info(current)->last_break; 462 regs->gprs[6] = task_thread_info(current)->last_break;
376 } 463 }
377 464
378 /* Place signal number on stack to allow backtrace from handler. */
379 if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
380 return -EFAULT;
381 return 0; 465 return 0;
382} 466}
383 467
384static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, 468static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
385 struct pt_regs *regs) 469 struct pt_regs *regs)
386{ 470{
387 int err = 0; 471 rt_sigframe32 __user *frame;
388 rt_sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe32)); 472 unsigned long restorer;
389 473 size_t frame_size;
474 u32 uc_flags;
475
476 frame_size = sizeof(*frame) -
477 sizeof(frame->uc.uc_mcontext_ext.__reserved);
478 /*
479 * gprs_high are always present for 31-bit compat tasks.
480 * The space for vector registers is only allocated if
481 * the machine supports it
482 */
483 uc_flags = UC_GPRS_HIGH;
484 if (MACHINE_HAS_VX) {
485 if (current->thread.vxrs)
486 uc_flags |= UC_VXRS;
487 } else
488 frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
489 sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
490 frame = get_sigframe(&ksig->ka, regs, frame_size);
390 if (frame == (void __user *) -1UL) 491 if (frame == (void __user *) -1UL)
391 return -EFAULT; 492 return -EFAULT;
392 493
393 if (copy_siginfo_to_user32(&frame->info, &ksig->info)) 494 /* Set up backchain. */
394 return -EFAULT; 495 if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
395
396 /* Create the ucontext. */
397 err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags);
398 err |= __put_user(0, &frame->uc.uc_link);
399 err |= __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
400 err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
401 err |= save_sigregs_gprs_high(regs, frame->gprs_high);
402 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
403 if (err)
404 return -EFAULT; 496 return -EFAULT;
405 497
406 /* Set up to return from userspace. If provided, use a stub 498 /* Set up to return from userspace. If provided, use a stub
407 already in userspace. */ 499 already in userspace. */
408 if (ksig->ka.sa.sa_flags & SA_RESTORER) { 500 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
409 regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; 501 restorer = (unsigned long __force)
502 ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
410 } else { 503 } else {
411 regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; 504 __u16 __user *svc = &frame->svc_insn;
412 if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, 505 if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
413 (u16 __force __user *)(frame->retcode)))
414 return -EFAULT; 506 return -EFAULT;
507 restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
415 } 508 }
416 509
417 /* Set up backchain. */ 510 /* Create siginfo on the signal stack */
418 if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) 511 if (copy_siginfo_to_user32(&frame->info, &ksig->info))
512 return -EFAULT;
513
514 /* Store registers needed to create the signal frame */
515 store_sigregs();
516
517 /* Create ucontext on the signal stack. */
518 if (__put_user(uc_flags, &frame->uc.uc_flags) ||
519 __put_user(0, &frame->uc.uc_link) ||
520 __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
521 save_sigregs32(regs, &frame->uc.uc_mcontext) ||
522 __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
523 save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
419 return -EFAULT; 524 return -EFAULT;
420 525
421 /* Set up registers for signal handler */ 526 /* Set up registers for signal handler */
527 regs->gprs[14] = restorer;
422 regs->gprs[15] = (__force __u64) frame; 528 regs->gprs[15] = (__force __u64) frame;
423 /* Force 31 bit amode and default user address space control. */ 529 /* Force 31 bit amode and default user address space control. */
424 regs->psw.mask = PSW_MASK_BA | 530 regs->psw.mask = PSW_MASK_BA |
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index a3b9150e6802..9f73c8059022 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -46,9 +46,9 @@ struct dump_save_areas dump_save_areas;
46/* 46/*
47 * Allocate and add a save area for a CPU 47 * Allocate and add a save area for a CPU
48 */ 48 */
49struct save_area *dump_save_area_create(int cpu) 49struct save_area_ext *dump_save_area_create(int cpu)
50{ 50{
51 struct save_area **save_areas, *save_area; 51 struct save_area_ext **save_areas, *save_area;
52 52
53 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); 53 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
54 if (!save_area) 54 if (!save_area)
@@ -386,9 +386,45 @@ static void *nt_s390_prefix(void *ptr, struct save_area *sa)
386} 386}
387 387
388/* 388/*
389 * Initialize vxrs high note (full 128 bit VX registers 16-31)
390 */
391static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
392{
393 return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
394 16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME);
395}
396
397/*
398 * Initialize vxrs low note (lower halves of VX registers 0-15)
399 */
400static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
401{
402 Elf64_Nhdr *note;
403 u64 len;
404 int i;
405
406 note = (Elf64_Nhdr *)ptr;
407 note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1;
408 note->n_descsz = 16 * 8;
409 note->n_type = NT_S390_VXRS_LOW;
410 len = sizeof(Elf64_Nhdr);
411
412 memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz);
413 len = roundup(len + note->n_namesz, 4);
414
415 ptr += len;
416 /* Copy lower halves of SIMD registers 0-15 */
417 for (i = 0; i < 16; i++) {
418 memcpy(ptr, &vx_regs[i], 8);
419 ptr += 8;
420 }
421 return ptr;
422}
423
424/*
389 * Fill ELF notes for one CPU with save area registers 425 * Fill ELF notes for one CPU with save area registers
390 */ 426 */
391void *fill_cpu_elf_notes(void *ptr, struct save_area *sa) 427void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs)
392{ 428{
393 ptr = nt_prstatus(ptr, sa); 429 ptr = nt_prstatus(ptr, sa);
394 ptr = nt_fpregset(ptr, sa); 430 ptr = nt_fpregset(ptr, sa);
@@ -397,6 +433,10 @@ void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
397 ptr = nt_s390_tod_preg(ptr, sa); 433 ptr = nt_s390_tod_preg(ptr, sa);
398 ptr = nt_s390_ctrs(ptr, sa); 434 ptr = nt_s390_ctrs(ptr, sa);
399 ptr = nt_s390_prefix(ptr, sa); 435 ptr = nt_s390_prefix(ptr, sa);
436 if (MACHINE_HAS_VX && vx_regs) {
437 ptr = nt_s390_vx_low(ptr, vx_regs);
438 ptr = nt_s390_vx_high(ptr, vx_regs);
439 }
400 return ptr; 440 return ptr;
401} 441}
402 442
@@ -484,7 +524,7 @@ static int get_cpu_cnt(void)
484 int i, cpus = 0; 524 int i, cpus = 0;
485 525
486 for (i = 0; i < dump_save_areas.count; i++) { 526 for (i = 0; i < dump_save_areas.count; i++) {
487 if (dump_save_areas.areas[i]->pref_reg == 0) 527 if (dump_save_areas.areas[i]->sa.pref_reg == 0)
488 continue; 528 continue;
489 cpus++; 529 cpus++;
490 } 530 }
@@ -530,17 +570,17 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
530 */ 570 */
531static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) 571static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
532{ 572{
533 struct save_area *sa; 573 struct save_area_ext *sa_ext;
534 void *ptr_start = ptr; 574 void *ptr_start = ptr;
535 int i; 575 int i;
536 576
537 ptr = nt_prpsinfo(ptr); 577 ptr = nt_prpsinfo(ptr);
538 578
539 for (i = 0; i < dump_save_areas.count; i++) { 579 for (i = 0; i < dump_save_areas.count; i++) {
540 sa = dump_save_areas.areas[i]; 580 sa_ext = dump_save_areas.areas[i];
541 if (sa->pref_reg == 0) 581 if (sa_ext->sa.pref_reg == 0)
542 continue; 582 continue;
543 ptr = fill_cpu_elf_notes(ptr, sa); 583 ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
544 } 584 }
545 ptr = nt_vmcoreinfo(ptr); 585 ptr = nt_vmcoreinfo(ptr);
546 memset(phdr, 0, sizeof(*phdr)); 586 memset(phdr, 0, sizeof(*phdr));
@@ -581,7 +621,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
581 621
582 mem_chunk_cnt = get_mem_chunk_cnt(); 622 mem_chunk_cnt = get_mem_chunk_cnt();
583 623
584 alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + 624 alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
585 mem_chunk_cnt * sizeof(Elf64_Phdr); 625 mem_chunk_cnt * sizeof(Elf64_Phdr);
586 hdr = kzalloc_panic(alloc_size); 626 hdr = kzalloc_panic(alloc_size);
587 /* Init elf header */ 627 /* Init elf header */
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 993efe6a887c..f3762937dd82 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -60,6 +60,11 @@ enum {
60 A_28, /* Access reg. starting at position 28 */ 60 A_28, /* Access reg. starting at position 28 */
61 C_8, /* Control reg. starting at position 8 */ 61 C_8, /* Control reg. starting at position 8 */
62 C_12, /* Control reg. starting at position 12 */ 62 C_12, /* Control reg. starting at position 12 */
63 V_8, /* Vector reg. starting at position 8, extension bit at 36 */
64 V_12, /* Vector reg. starting at position 12, extension bit at 37 */
65 V_16, /* Vector reg. starting at position 16, extension bit at 38 */
66 V_32, /* Vector reg. starting at position 32, extension bit at 39 */
67 W_12, /* Vector reg. at bit 12, extension at bit 37, used as index */
63 B_16, /* Base register starting at position 16 */ 68 B_16, /* Base register starting at position 16 */
64 B_32, /* Base register starting at position 32 */ 69 B_32, /* Base register starting at position 32 */
65 X_12, /* Index register starting at position 12 */ 70 X_12, /* Index register starting at position 12 */
@@ -82,6 +87,8 @@ enum {
82 U8_24, /* 8 bit unsigned value starting at 24 */ 87 U8_24, /* 8 bit unsigned value starting at 24 */
83 U8_32, /* 8 bit unsigned value starting at 32 */ 88 U8_32, /* 8 bit unsigned value starting at 32 */
84 I8_8, /* 8 bit signed value starting at 8 */ 89 I8_8, /* 8 bit signed value starting at 8 */
90 I8_16, /* 8 bit signed value starting at 16 */
91 I8_24, /* 8 bit signed value starting at 24 */
85 I8_32, /* 8 bit signed value starting at 32 */ 92 I8_32, /* 8 bit signed value starting at 32 */
86 J12_12, /* PC relative offset at 12 */ 93 J12_12, /* PC relative offset at 12 */
87 I16_16, /* 16 bit signed value starting at 16 */ 94 I16_16, /* 16 bit signed value starting at 16 */
@@ -96,6 +103,9 @@ enum {
96 U32_16, /* 32 bit unsigned value starting at 16 */ 103 U32_16, /* 32 bit unsigned value starting at 16 */
97 M_16, /* 4 bit optional mask starting at 16 */ 104 M_16, /* 4 bit optional mask starting at 16 */
98 M_20, /* 4 bit optional mask starting at 20 */ 105 M_20, /* 4 bit optional mask starting at 20 */
106 M_24, /* 4 bit optional mask starting at 24 */
107 M_28, /* 4 bit optional mask starting at 28 */
108 M_32, /* 4 bit optional mask starting at 32 */
99 RO_28, /* optional GPR starting at position 28 */ 109 RO_28, /* optional GPR starting at position 28 */
100}; 110};
101 111
@@ -130,7 +140,7 @@ enum {
130 INSTR_RSY_RDRM, 140 INSTR_RSY_RDRM,
131 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, 141 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
132 INSTR_RS_RURD, 142 INSTR_RS_RURD,
133 INSTR_RXE_FRRD, INSTR_RXE_RRRD, 143 INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
134 INSTR_RXF_FRRDF, 144 INSTR_RXF_FRRDF,
135 INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD, 145 INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
136 INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD, 146 INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
@@ -143,6 +153,17 @@ enum {
143 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, 153 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
144 INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, 154 INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
145 INSTR_S_00, INSTR_S_RD, 155 INSTR_S_00, INSTR_S_RD,
156 INSTR_VRI_V0IM, INSTR_VRI_V0I0, INSTR_VRI_V0IIM, INSTR_VRI_VVIM,
157 INSTR_VRI_VVV0IM, INSTR_VRI_VVV0I0, INSTR_VRI_VVIMM,
158 INSTR_VRR_VV00MMM, INSTR_VRR_VV000MM, INSTR_VRR_VV0000M,
159 INSTR_VRR_VV00000, INSTR_VRR_VVV0M0M, INSTR_VRR_VV00M0M,
160 INSTR_VRR_VVV000M, INSTR_VRR_VVV000V, INSTR_VRR_VVV0000,
161 INSTR_VRR_VVV0MMM, INSTR_VRR_VVV00MM, INSTR_VRR_VVVMM0V,
162 INSTR_VRR_VVVM0MV, INSTR_VRR_VVVM00V, INSTR_VRR_VRR0000,
163 INSTR_VRS_VVRDM, INSTR_VRS_VVRD0, INSTR_VRS_VRRDM, INSTR_VRS_VRRD0,
164 INSTR_VRS_RVRDM,
165 INSTR_VRV_VVRDM, INSTR_VRV_VWRDM,
166 INSTR_VRX_VRRDM, INSTR_VRX_VRRD0,
146}; 167};
147 168
148static const struct s390_operand operands[] = 169static const struct s390_operand operands[] =
@@ -168,6 +189,11 @@ static const struct s390_operand operands[] =
168 [A_28] = { 4, 28, OPERAND_AR }, 189 [A_28] = { 4, 28, OPERAND_AR },
169 [C_8] = { 4, 8, OPERAND_CR }, 190 [C_8] = { 4, 8, OPERAND_CR },
170 [C_12] = { 4, 12, OPERAND_CR }, 191 [C_12] = { 4, 12, OPERAND_CR },
192 [V_8] = { 4, 8, OPERAND_VR },
193 [V_12] = { 4, 12, OPERAND_VR },
194 [V_16] = { 4, 16, OPERAND_VR },
195 [V_32] = { 4, 32, OPERAND_VR },
196 [W_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
171 [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, 197 [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
172 [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, 198 [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
173 [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, 199 [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
@@ -190,6 +216,11 @@ static const struct s390_operand operands[] =
190 [U8_24] = { 8, 24, 0 }, 216 [U8_24] = { 8, 24, 0 },
191 [U8_32] = { 8, 32, 0 }, 217 [U8_32] = { 8, 32, 0 },
192 [J12_12] = { 12, 12, OPERAND_PCREL }, 218 [J12_12] = { 12, 12, OPERAND_PCREL },
219 [I8_8] = { 8, 8, OPERAND_SIGNED },
220 [I8_16] = { 8, 16, OPERAND_SIGNED },
221 [I8_24] = { 8, 24, OPERAND_SIGNED },
222 [I8_32] = { 8, 32, OPERAND_SIGNED },
223 [I16_32] = { 16, 32, OPERAND_SIGNED },
193 [I16_16] = { 16, 16, OPERAND_SIGNED }, 224 [I16_16] = { 16, 16, OPERAND_SIGNED },
194 [U16_16] = { 16, 16, 0 }, 225 [U16_16] = { 16, 16, 0 },
195 [U16_32] = { 16, 32, 0 }, 226 [U16_32] = { 16, 32, 0 },
@@ -202,6 +233,9 @@ static const struct s390_operand operands[] =
202 [U32_16] = { 32, 16, 0 }, 233 [U32_16] = { 32, 16, 0 },
203 [M_16] = { 4, 16, 0 }, 234 [M_16] = { 4, 16, 0 },
204 [M_20] = { 4, 20, 0 }, 235 [M_20] = { 4, 20, 0 },
236 [M_24] = { 4, 24, 0 },
237 [M_28] = { 4, 28, 0 },
238 [M_32] = { 4, 32, 0 },
205 [RO_28] = { 4, 28, OPERAND_GPR } 239 [RO_28] = { 4, 28, OPERAND_GPR }
206}; 240};
207 241
@@ -283,6 +317,7 @@ static const unsigned char formats[][7] = {
283 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, 317 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
284 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, 318 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
285 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, 319 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
320 [INSTR_RXE_RRRDM] = { 0xff, R_8,D_20,X_12,B_16,M_32,0 },
286 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, 321 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
287 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 }, 322 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
288 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 }, 323 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
@@ -307,6 +342,37 @@ static const unsigned char formats[][7] = {
307 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, 342 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
308 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, 343 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
309 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, 344 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
345 [INSTR_VRI_V0IM] = { 0xff, V_8,I16_16,M_32,0,0,0 },
346 [INSTR_VRI_V0I0] = { 0xff, V_8,I16_16,0,0,0,0 },
347 [INSTR_VRI_V0IIM] = { 0xff, V_8,I8_16,I8_24,M_32,0,0 },
348 [INSTR_VRI_VVIM] = { 0xff, V_8,I16_16,V_12,M_32,0,0 },
349 [INSTR_VRI_VVV0IM]= { 0xff, V_8,V_12,V_16,I8_24,M_32,0 },
350 [INSTR_VRI_VVV0I0]= { 0xff, V_8,V_12,V_16,I8_24,0,0 },
351 [INSTR_VRI_VVIMM] = { 0xff, V_8,V_12,I16_16,M_32,M_28,0 },
352 [INSTR_VRR_VV00MMM]={ 0xff, V_8,V_12,M_32,M_28,M_24,0 },
353 [INSTR_VRR_VV000MM]={ 0xff, V_8,V_12,M_32,M_28,0,0 },
354 [INSTR_VRR_VV0000M]={ 0xff, V_8,V_12,M_32,0,0,0 },
355 [INSTR_VRR_VV00000]={ 0xff, V_8,V_12,0,0,0,0 },
356 [INSTR_VRR_VVV0M0M]={ 0xff, V_8,V_12,V_16,M_32,M_24,0 },
357 [INSTR_VRR_VV00M0M]={ 0xff, V_8,V_12,M_32,M_24,0,0 },
358 [INSTR_VRR_VVV000M]={ 0xff, V_8,V_12,V_16,M_32,0,0 },
359 [INSTR_VRR_VVV000V]={ 0xff, V_8,V_12,V_16,V_32,0,0 },
360 [INSTR_VRR_VVV0000]={ 0xff, V_8,V_12,V_16,0,0,0 },
361 [INSTR_VRR_VVV0MMM]={ 0xff, V_8,V_12,V_16,M_32,M_28,M_24 },
362 [INSTR_VRR_VVV00MM]={ 0xff, V_8,V_12,V_16,M_32,M_28,0 },
363 [INSTR_VRR_VVVMM0V]={ 0xff, V_8,V_12,V_16,V_32,M_20,M_24 },
364 [INSTR_VRR_VVVM0MV]={ 0xff, V_8,V_12,V_16,V_32,M_28,M_20 },
365 [INSTR_VRR_VVVM00V]={ 0xff, V_8,V_12,V_16,V_32,M_20,0 },
366 [INSTR_VRR_VRR0000]={ 0xff, V_8,R_12,R_16,0,0,0 },
367 [INSTR_VRS_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
368 [INSTR_VRS_VVRD0] = { 0xff, V_8,V_12,D_20,B_16,0,0 },
369 [INSTR_VRS_VRRDM] = { 0xff, V_8,R_12,D_20,B_16,M_32,0 },
370 [INSTR_VRS_VRRD0] = { 0xff, V_8,R_12,D_20,B_16,0,0 },
371 [INSTR_VRS_RVRDM] = { 0xff, R_8,V_12,D_20,B_16,M_32,0 },
372 [INSTR_VRV_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
373 [INSTR_VRV_VWRDM] = { 0xff, V_8,D_20,W_12,B_16,M_32,0 },
374 [INSTR_VRX_VRRDM] = { 0xff, V_8,D_20,X_12,B_16,M_32,0 },
375 [INSTR_VRX_VRRD0] = { 0xff, V_8,D_20,X_12,B_16,0,0 },
310}; 376};
311 377
312enum { 378enum {
@@ -381,6 +447,11 @@ enum {
381 LONG_INSN_MPCIFC, 447 LONG_INSN_MPCIFC,
382 LONG_INSN_STPCIFC, 448 LONG_INSN_STPCIFC,
383 LONG_INSN_PCISTB, 449 LONG_INSN_PCISTB,
450 LONG_INSN_VPOPCT,
451 LONG_INSN_VERLLV,
452 LONG_INSN_VESRAV,
453 LONG_INSN_VESRLV,
454 LONG_INSN_VSBCBI
384}; 455};
385 456
386static char *long_insn_name[] = { 457static char *long_insn_name[] = {
@@ -455,6 +526,11 @@ static char *long_insn_name[] = {
455 [LONG_INSN_MPCIFC] = "mpcifc", 526 [LONG_INSN_MPCIFC] = "mpcifc",
456 [LONG_INSN_STPCIFC] = "stpcifc", 527 [LONG_INSN_STPCIFC] = "stpcifc",
457 [LONG_INSN_PCISTB] = "pcistb", 528 [LONG_INSN_PCISTB] = "pcistb",
529 [LONG_INSN_VPOPCT] = "vpopct",
530 [LONG_INSN_VERLLV] = "verllv",
531 [LONG_INSN_VESRAV] = "vesrav",
532 [LONG_INSN_VESRLV] = "vesrlv",
533 [LONG_INSN_VSBCBI] = "vsbcbi",
458}; 534};
459 535
460static struct s390_insn opcode[] = { 536static struct s390_insn opcode[] = {
@@ -1369,6 +1445,150 @@ static struct s390_insn opcode_e5[] = {
1369 { "", 0, INSTR_INVALID } 1445 { "", 0, INSTR_INVALID }
1370}; 1446};
1371 1447
1448static struct s390_insn opcode_e7[] = {
1449#ifdef CONFIG_64BIT
1450 { "lcbb", 0x27, INSTR_RXE_RRRDM },
1451 { "vgef", 0x13, INSTR_VRV_VVRDM },
1452 { "vgeg", 0x12, INSTR_VRV_VVRDM },
1453 { "vgbm", 0x44, INSTR_VRI_V0I0 },
1454 { "vgm", 0x46, INSTR_VRI_V0IIM },
1455 { "vl", 0x06, INSTR_VRX_VRRD0 },
1456 { "vlr", 0x56, INSTR_VRR_VV00000 },
1457 { "vlrp", 0x05, INSTR_VRX_VRRDM },
1458 { "vleb", 0x00, INSTR_VRX_VRRDM },
1459 { "vleh", 0x01, INSTR_VRX_VRRDM },
1460 { "vlef", 0x03, INSTR_VRX_VRRDM },
1461 { "vleg", 0x02, INSTR_VRX_VRRDM },
1462 { "vleib", 0x40, INSTR_VRI_V0IM },
1463 { "vleih", 0x41, INSTR_VRI_V0IM },
1464 { "vleif", 0x43, INSTR_VRI_V0IM },
1465 { "vleig", 0x42, INSTR_VRI_V0IM },
1466 { "vlgv", 0x21, INSTR_VRS_RVRDM },
1467 { "vllez", 0x04, INSTR_VRX_VRRDM },
1468 { "vlm", 0x36, INSTR_VRS_VVRD0 },
1469 { "vlbb", 0x07, INSTR_VRX_VRRDM },
1470 { "vlvg", 0x22, INSTR_VRS_VRRDM },
1471 { "vlvgp", 0x62, INSTR_VRR_VRR0000 },
1472 { "vll", 0x37, INSTR_VRS_VRRD0 },
1473 { "vmrh", 0x61, INSTR_VRR_VVV000M },
1474 { "vmrl", 0x60, INSTR_VRR_VVV000M },
1475 { "vpk", 0x94, INSTR_VRR_VVV000M },
1476 { "vpks", 0x97, INSTR_VRR_VVV0M0M },
1477 { "vpkls", 0x95, INSTR_VRR_VVV0M0M },
1478 { "vperm", 0x8c, INSTR_VRR_VVV000V },
1479 { "vpdi", 0x84, INSTR_VRR_VVV000M },
1480 { "vrep", 0x4d, INSTR_VRI_VVIM },
1481 { "vrepi", 0x45, INSTR_VRI_V0IM },
1482 { "vscef", 0x1b, INSTR_VRV_VWRDM },
1483 { "vsceg", 0x1a, INSTR_VRV_VWRDM },
1484 { "vsel", 0x8d, INSTR_VRR_VVV000V },
1485 { "vseg", 0x5f, INSTR_VRR_VV0000M },
1486 { "vst", 0x0e, INSTR_VRX_VRRD0 },
1487 { "vsteb", 0x08, INSTR_VRX_VRRDM },
1488 { "vsteh", 0x09, INSTR_VRX_VRRDM },
1489 { "vstef", 0x0b, INSTR_VRX_VRRDM },
1490 { "vsteg", 0x0a, INSTR_VRX_VRRDM },
1491 { "vstm", 0x3e, INSTR_VRS_VVRD0 },
1492 { "vstl", 0x3f, INSTR_VRS_VRRD0 },
1493 { "vuph", 0xd7, INSTR_VRR_VV0000M },
1494 { "vuplh", 0xd5, INSTR_VRR_VV0000M },
1495 { "vupl", 0xd6, INSTR_VRR_VV0000M },
1496 { "vupll", 0xd4, INSTR_VRR_VV0000M },
1497 { "va", 0xf3, INSTR_VRR_VVV000M },
1498 { "vacc", 0xf1, INSTR_VRR_VVV000M },
1499 { "vac", 0xbb, INSTR_VRR_VVVM00V },
1500 { "vaccc", 0xb9, INSTR_VRR_VVVM00V },
1501 { "vn", 0x68, INSTR_VRR_VVV0000 },
1502 { "vnc", 0x69, INSTR_VRR_VVV0000 },
1503 { "vavg", 0xf2, INSTR_VRR_VVV000M },
1504 { "vavgl", 0xf0, INSTR_VRR_VVV000M },
1505 { "vcksm", 0x66, INSTR_VRR_VVV0000 },
1506 { "vec", 0xdb, INSTR_VRR_VV0000M },
1507 { "vecl", 0xd9, INSTR_VRR_VV0000M },
1508 { "vceq", 0xf8, INSTR_VRR_VVV0M0M },
1509 { "vch", 0xfb, INSTR_VRR_VVV0M0M },
1510 { "vchl", 0xf9, INSTR_VRR_VVV0M0M },
1511 { "vclz", 0x53, INSTR_VRR_VV0000M },
1512 { "vctz", 0x52, INSTR_VRR_VV0000M },
1513 { "vx", 0x6d, INSTR_VRR_VVV0000 },
1514 { "vgfm", 0xb4, INSTR_VRR_VVV000M },
1515 { "vgfma", 0xbc, INSTR_VRR_VVVM00V },
1516 { "vlc", 0xde, INSTR_VRR_VV0000M },
1517 { "vlp", 0xdf, INSTR_VRR_VV0000M },
1518 { "vmx", 0xff, INSTR_VRR_VVV000M },
1519 { "vmxl", 0xfd, INSTR_VRR_VVV000M },
1520 { "vmn", 0xfe, INSTR_VRR_VVV000M },
1521 { "vmnl", 0xfc, INSTR_VRR_VVV000M },
1522 { "vmal", 0xaa, INSTR_VRR_VVVM00V },
1523 { "vmae", 0xae, INSTR_VRR_VVVM00V },
1524 { "vmale", 0xac, INSTR_VRR_VVVM00V },
1525 { "vmah", 0xab, INSTR_VRR_VVVM00V },
1526 { "vmalh", 0xa9, INSTR_VRR_VVVM00V },
1527 { "vmao", 0xaf, INSTR_VRR_VVVM00V },
1528 { "vmalo", 0xad, INSTR_VRR_VVVM00V },
1529 { "vmh", 0xa3, INSTR_VRR_VVV000M },
1530 { "vmlh", 0xa1, INSTR_VRR_VVV000M },
1531 { "vml", 0xa2, INSTR_VRR_VVV000M },
1532 { "vme", 0xa6, INSTR_VRR_VVV000M },
1533 { "vmle", 0xa4, INSTR_VRR_VVV000M },
1534 { "vmo", 0xa7, INSTR_VRR_VVV000M },
1535 { "vmlo", 0xa5, INSTR_VRR_VVV000M },
1536 { "vno", 0x6b, INSTR_VRR_VVV0000 },
1537 { "vo", 0x6a, INSTR_VRR_VVV0000 },
1538 { { 0, LONG_INSN_VPOPCT }, 0x50, INSTR_VRR_VV0000M },
1539 { { 0, LONG_INSN_VERLLV }, 0x73, INSTR_VRR_VVV000M },
1540 { "verll", 0x33, INSTR_VRS_VVRDM },
1541 { "verim", 0x72, INSTR_VRI_VVV0IM },
1542 { "veslv", 0x70, INSTR_VRR_VVV000M },
1543 { "vesl", 0x30, INSTR_VRS_VVRDM },
1544 { { 0, LONG_INSN_VESRAV }, 0x7a, INSTR_VRR_VVV000M },
1545 { "vesra", 0x3a, INSTR_VRS_VVRDM },
1546 { { 0, LONG_INSN_VESRLV }, 0x78, INSTR_VRR_VVV000M },
1547 { "vesrl", 0x38, INSTR_VRS_VVRDM },
1548 { "vsl", 0x74, INSTR_VRR_VVV0000 },
1549 { "vslb", 0x75, INSTR_VRR_VVV0000 },
1550 { "vsldb", 0x77, INSTR_VRI_VVV0I0 },
1551 { "vsra", 0x7e, INSTR_VRR_VVV0000 },
1552 { "vsrab", 0x7f, INSTR_VRR_VVV0000 },
1553 { "vsrl", 0x7c, INSTR_VRR_VVV0000 },
1554 { "vsrlb", 0x7d, INSTR_VRR_VVV0000 },
1555 { "vs", 0xf7, INSTR_VRR_VVV000M },
1556 { "vscb", 0xf5, INSTR_VRR_VVV000M },
1557 { "vsb", 0xbf, INSTR_VRR_VVVM00V },
1558 { { 0, LONG_INSN_VSBCBI }, 0xbd, INSTR_VRR_VVVM00V },
1559 { "vsumg", 0x65, INSTR_VRR_VVV000M },
1560 { "vsumq", 0x67, INSTR_VRR_VVV000M },
1561 { "vsum", 0x64, INSTR_VRR_VVV000M },
1562 { "vtm", 0xd8, INSTR_VRR_VV00000 },
1563 { "vfae", 0x82, INSTR_VRR_VVV0M0M },
1564 { "vfee", 0x80, INSTR_VRR_VVV0M0M },
1565 { "vfene", 0x81, INSTR_VRR_VVV0M0M },
1566 { "vistr", 0x5c, INSTR_VRR_VV00M0M },
1567 { "vstrc", 0x8a, INSTR_VRR_VVVMM0V },
1568 { "vfa", 0xe3, INSTR_VRR_VVV00MM },
1569 { "wfc", 0xcb, INSTR_VRR_VV000MM },
1570 { "wfk", 0xca, INSTR_VRR_VV000MM },
1571 { "vfce", 0xe8, INSTR_VRR_VVV0MMM },
1572 { "vfch", 0xeb, INSTR_VRR_VVV0MMM },
1573 { "vfche", 0xea, INSTR_VRR_VVV0MMM },
1574 { "vcdg", 0xc3, INSTR_VRR_VV00MMM },
1575 { "vcdlg", 0xc1, INSTR_VRR_VV00MMM },
1576 { "vcgd", 0xc2, INSTR_VRR_VV00MMM },
1577 { "vclgd", 0xc0, INSTR_VRR_VV00MMM },
1578 { "vfd", 0xe5, INSTR_VRR_VVV00MM },
1579 { "vfi", 0xc7, INSTR_VRR_VV00MMM },
1580 { "vlde", 0xc4, INSTR_VRR_VV000MM },
1581 { "vled", 0xc5, INSTR_VRR_VV00MMM },
1582 { "vfm", 0xe7, INSTR_VRR_VVV00MM },
1583 { "vfma", 0x8f, INSTR_VRR_VVVM0MV },
1584 { "vfms", 0x8e, INSTR_VRR_VVVM0MV },
1585 { "vfpso", 0xcc, INSTR_VRR_VV00MMM },
1586 { "vfsq", 0xce, INSTR_VRR_VV000MM },
1587 { "vfs", 0xe2, INSTR_VRR_VVV00MM },
1588 { "vftci", 0x4a, INSTR_VRI_VVIMM },
1589#endif
1590};
1591
1372static struct s390_insn opcode_eb[] = { 1592static struct s390_insn opcode_eb[] = {
1373#ifdef CONFIG_64BIT 1593#ifdef CONFIG_64BIT
1374 { "lmg", 0x04, INSTR_RSY_RRRD }, 1594 { "lmg", 0x04, INSTR_RSY_RRRD },
@@ -1552,16 +1772,17 @@ static struct s390_insn opcode_ed[] = {
1552static unsigned int extract_operand(unsigned char *code, 1772static unsigned int extract_operand(unsigned char *code,
1553 const struct s390_operand *operand) 1773 const struct s390_operand *operand)
1554{ 1774{
1775 unsigned char *cp;
1555 unsigned int val; 1776 unsigned int val;
1556 int bits; 1777 int bits;
1557 1778
1558 /* Extract fragments of the operand byte for byte. */ 1779 /* Extract fragments of the operand byte for byte. */
1559 code += operand->shift / 8; 1780 cp = code + operand->shift / 8;
1560 bits = (operand->shift & 7) + operand->bits; 1781 bits = (operand->shift & 7) + operand->bits;
1561 val = 0; 1782 val = 0;
1562 do { 1783 do {
1563 val <<= 8; 1784 val <<= 8;
1564 val |= (unsigned int) *code++; 1785 val |= (unsigned int) *cp++;
1565 bits -= 8; 1786 bits -= 8;
1566 } while (bits > 0); 1787 } while (bits > 0);
1567 val >>= -bits; 1788 val >>= -bits;
@@ -1571,6 +1792,18 @@ static unsigned int extract_operand(unsigned char *code,
1571 if (operand->bits == 20 && operand->shift == 20) 1792 if (operand->bits == 20 && operand->shift == 20)
1572 val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; 1793 val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
1573 1794
1795 /* Check for register extensions bits for vector registers. */
1796 if (operand->flags & OPERAND_VR) {
1797 if (operand->shift == 8)
1798 val |= (code[4] & 8) << 1;
1799 else if (operand->shift == 12)
1800 val |= (code[4] & 4) << 2;
1801 else if (operand->shift == 16)
1802 val |= (code[4] & 2) << 3;
1803 else if (operand->shift == 32)
1804 val |= (code[4] & 1) << 4;
1805 }
1806
1574 /* Sign extend value if the operand is signed or pc relative. */ 1807 /* Sign extend value if the operand is signed or pc relative. */
1575 if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && 1808 if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
1576 (val & (1U << (operand->bits - 1)))) 1809 (val & (1U << (operand->bits - 1))))
@@ -1639,6 +1872,10 @@ struct s390_insn *find_insn(unsigned char *code)
1639 case 0xe5: 1872 case 0xe5:
1640 table = opcode_e5; 1873 table = opcode_e5;
1641 break; 1874 break;
1875 case 0xe7:
1876 table = opcode_e7;
1877 opfrag = code[5];
1878 break;
1642 case 0xeb: 1879 case 0xeb:
1643 table = opcode_eb; 1880 table = opcode_eb;
1644 opfrag = code[5]; 1881 opfrag = code[5];
@@ -1734,6 +1971,8 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1734 ptr += sprintf(ptr, "%%a%i", value); 1971 ptr += sprintf(ptr, "%%a%i", value);
1735 else if (operand->flags & OPERAND_CR) 1972 else if (operand->flags & OPERAND_CR)
1736 ptr += sprintf(ptr, "%%c%i", value); 1973 ptr += sprintf(ptr, "%%c%i", value);
1974 else if (operand->flags & OPERAND_VR)
1975 ptr += sprintf(ptr, "%%v%i", value);
1737 else if (operand->flags & OPERAND_PCREL) 1976 else if (operand->flags & OPERAND_PCREL)
1738 ptr += sprintf(ptr, "%lx", (signed int) value 1977 ptr += sprintf(ptr, "%lx", (signed int) value
1739 + addr); 1978 + addr);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 0dff972a169c..cef2879edff3 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -390,10 +390,10 @@ static __init void detect_machine_facilities(void)
390 S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; 390 S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
391 if (test_facility(50) && test_facility(73)) 391 if (test_facility(50) && test_facility(73))
392 S390_lowcore.machine_flags |= MACHINE_FLAG_TE; 392 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
393 if (test_facility(66))
394 S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
395 if (test_facility(51)) 393 if (test_facility(51))
396 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 394 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
395 if (test_facility(129))
396 S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
397#endif 397#endif
398} 398}
399 399
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 1aad48398d06..0554b9771c9f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -4,7 +4,7 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/signal.h> 5#include <linux/signal.h>
6#include <asm/ptrace.h> 6#include <asm/ptrace.h>
7#include <asm/cputime.h> 7#include <asm/idle.h>
8 8
9extern void *restart_stack; 9extern void *restart_stack;
10extern unsigned long suspend_zero_pages; 10extern unsigned long suspend_zero_pages;
@@ -21,6 +21,8 @@ void psw_idle(struct s390_idle_data *, unsigned long);
21asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 21asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
22asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); 22asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
23 23
24int alloc_vector_registers(struct task_struct *tsk);
25
24void do_protection_exception(struct pt_regs *regs); 26void do_protection_exception(struct pt_regs *regs);
25void do_dat_exception(struct pt_regs *regs); 27void do_dat_exception(struct pt_regs *regs);
26 28
@@ -43,8 +45,10 @@ void special_op_exception(struct pt_regs *regs);
43void specification_exception(struct pt_regs *regs); 45void specification_exception(struct pt_regs *regs);
44void transaction_exception(struct pt_regs *regs); 46void transaction_exception(struct pt_regs *regs);
45void translation_exception(struct pt_regs *regs); 47void translation_exception(struct pt_regs *regs);
48void vector_exception(struct pt_regs *regs);
46 49
47void do_per_trap(struct pt_regs *regs); 50void do_per_trap(struct pt_regs *regs);
51void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
48void syscall_trace(struct pt_regs *regs, int entryexit); 52void syscall_trace(struct pt_regs *regs, int entryexit);
49void kernel_stack_overflow(struct pt_regs * regs); 53void kernel_stack_overflow(struct pt_regs * regs);
50void do_signal(struct pt_regs *regs); 54void do_signal(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f2e674c702e1..7b2e03afd017 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -42,7 +42,8 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
42STACK_SIZE = 1 << STACK_SHIFT 42STACK_SIZE = 1 << STACK_SHIFT
43STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 43STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
44 44
45_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) 45_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
46 _TIF_UPROBE)
46_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 47_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
47 _TIF_SYSCALL_TRACEPOINT) 48 _TIF_SYSCALL_TRACEPOINT)
48_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) 49_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
@@ -265,6 +266,10 @@ sysc_work:
265 jo sysc_mcck_pending 266 jo sysc_mcck_pending
266 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 267 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
267 jo sysc_reschedule 268 jo sysc_reschedule
269#ifdef CONFIG_UPROBES
270 tm __TI_flags+7(%r12),_TIF_UPROBE
271 jo sysc_uprobe_notify
272#endif
268 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP 273 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
269 jo sysc_singlestep 274 jo sysc_singlestep
270 tm __TI_flags+7(%r12),_TIF_SIGPENDING 275 tm __TI_flags+7(%r12),_TIF_SIGPENDING
@@ -323,6 +328,16 @@ sysc_notify_resume:
323 jg do_notify_resume 328 jg do_notify_resume
324 329
325# 330#
331# _TIF_UPROBE is set, call uprobe_notify_resume
332#
333#ifdef CONFIG_UPROBES
334sysc_uprobe_notify:
335 lgr %r2,%r11 # pass pointer to pt_regs
336 larl %r14,sysc_return
337 jg uprobe_notify_resume
338#endif
339
340#
326# _PIF_PER_TRAP is set, call do_per_trap 341# _PIF_PER_TRAP is set, call do_per_trap
327# 342#
328sysc_singlestep: 343sysc_singlestep:
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 54d6493c4a56..51d14fe5eb9a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Dynamic function tracer architecture backend. 2 * Dynamic function tracer architecture backend.
3 * 3 *
4 * Copyright IBM Corp. 2009 4 * Copyright IBM Corp. 2009,2014
5 * 5 *
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -17,100 +17,76 @@
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18#include "entry.h" 18#include "entry.h"
19 19
20#ifdef CONFIG_DYNAMIC_FTRACE 20void mcount_replace_code(void);
21
22void ftrace_disable_code(void); 21void ftrace_disable_code(void);
23void ftrace_enable_insn(void); 22void ftrace_enable_insn(void);
24 23
25#ifdef CONFIG_64BIT
26/* 24/*
27 * The 64-bit mcount code looks like this: 25 * The mcount code looks like this:
28 * stg %r14,8(%r15) # offset 0 26 * stg %r14,8(%r15) # offset 0
29 * > larl %r1,<&counter> # offset 6 27 * larl %r1,<&counter> # offset 6
30 * > brasl %r14,_mcount # offset 12 28 * brasl %r14,_mcount # offset 12
31 * lg %r14,8(%r15) # offset 18 29 * lg %r14,8(%r15) # offset 18
32 * Total length is 24 bytes. The middle two instructions of the mcount 30 * Total length is 24 bytes. The complete mcount block initially gets replaced
33 * block get overwritten by ftrace_make_nop / ftrace_make_call. 31 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
34 * The 64-bit enabled ftrace code block looks like this: 32 * only patch the jg/lg instruction within the block.
35 * stg %r14,8(%r15) # offset 0 33 * Note: we do not patch the first instruction to an unconditional branch,
34 * since that would break kprobes/jprobes. It is easier to leave the larl
35 * instruction in and only modify the second instruction.
36 * The enabled ftrace code block looks like this:
37 * larl %r0,.+24 # offset 0
36 * > lg %r1,__LC_FTRACE_FUNC # offset 6 38 * > lg %r1,__LC_FTRACE_FUNC # offset 6
37 * > lgr %r0,%r0 # offset 12 39 * br %r1 # offset 12
38 * > basr %r14,%r1 # offset 16 40 * brcl 0,0 # offset 14
39 * lg %r14,8(%15) # offset 18 41 * brc 0,0 # offset 20
40 * The return points of the mcount/ftrace function have the same offset 18. 42 * The ftrace function gets called with a non-standard C function call ABI
41 * The 64-bit disable ftrace code block looks like this: 43 * where r0 contains the return address. It is also expected that the called
42 * stg %r14,8(%r15) # offset 0 44 * function only clobbers r0 and r1, but restores r2-r15.
45 * The return point of the ftrace function has offset 24, so execution
46 * continues behind the mcount block.
47 * larl %r0,.+24 # offset 0
43 * > jg .+18 # offset 6 48 * > jg .+18 # offset 6
44 * > lgr %r0,%r0 # offset 12 49 * br %r1 # offset 12
45 * > basr %r14,%r1 # offset 16 50 * brcl 0,0 # offset 14
46 * lg %r14,8(%15) # offset 18 51 * brc 0,0 # offset 20
47 * The jg instruction branches to offset 24 to skip as many instructions 52 * The jg instruction branches to offset 24 to skip as many instructions
48 * as possible. 53 * as possible.
49 */ 54 */
50asm( 55asm(
51 " .align 4\n" 56 " .align 4\n"
57 "mcount_replace_code:\n"
58 " larl %r0,0f\n"
52 "ftrace_disable_code:\n" 59 "ftrace_disable_code:\n"
53 " jg 0f\n" 60 " jg 0f\n"
54 " lgr %r0,%r0\n" 61 " br %r1\n"
55 " basr %r14,%r1\n" 62 " brcl 0,0\n"
63 " brc 0,0\n"
56 "0:\n" 64 "0:\n"
57 " .align 4\n" 65 " .align 4\n"
58 "ftrace_enable_insn:\n" 66 "ftrace_enable_insn:\n"
59 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"); 67 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
60 68
69#define MCOUNT_BLOCK_SIZE 24
70#define MCOUNT_INSN_OFFSET 6
61#define FTRACE_INSN_SIZE 6 71#define FTRACE_INSN_SIZE 6
62 72
63#else /* CONFIG_64BIT */ 73int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
64/* 74 unsigned long addr)
65 * The 31-bit mcount code looks like this: 75{
66 * st %r14,4(%r15) # offset 0 76 return 0;
67 * > bras %r1,0f # offset 4 77}
68 * > .long _mcount # offset 8
69 * > .long <&counter> # offset 12
70 * > 0: l %r14,0(%r1) # offset 16
71 * > l %r1,4(%r1) # offset 20
72 * basr %r14,%r14 # offset 24
73 * l %r14,4(%r15) # offset 26
74 * Total length is 30 bytes. The twenty bytes starting from offset 4
75 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
76 * The 31-bit enabled ftrace code block looks like this:
77 * st %r14,4(%r15) # offset 0
78 * > l %r14,__LC_FTRACE_FUNC # offset 4
79 * > j 0f # offset 8
80 * > .fill 12,1,0x07 # offset 12
81 * 0: basr %r14,%r14 # offset 24
82 * l %r14,4(%r14) # offset 26
83 * The return points of the mcount/ftrace function have the same offset 26.
84 * The 31-bit disabled ftrace code block looks like this:
85 * st %r14,4(%r15) # offset 0
86 * > j .+26 # offset 4
87 * > j 0f # offset 8
88 * > .fill 12,1,0x07 # offset 12
89 * 0: basr %r14,%r14 # offset 24
90 * l %r14,4(%r14) # offset 26
91 * The j instruction branches to offset 30 to skip as many instructions
92 * as possible.
93 */
94asm(
95 " .align 4\n"
96 "ftrace_disable_code:\n"
97 " j 1f\n"
98 " j 0f\n"
99 " .fill 12,1,0x07\n"
100 "0: basr %r14,%r14\n"
101 "1:\n"
102 " .align 4\n"
103 "ftrace_enable_insn:\n"
104 " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
105
106#define FTRACE_INSN_SIZE 4
107
108#endif /* CONFIG_64BIT */
109
110 78
111int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 79int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
112 unsigned long addr) 80 unsigned long addr)
113{ 81{
82 /* Initial replacement of the whole mcount block */
83 if (addr == MCOUNT_ADDR) {
84 if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
85 mcount_replace_code,
86 MCOUNT_BLOCK_SIZE))
87 return -EPERM;
88 return 0;
89 }
114 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, 90 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
115 MCOUNT_INSN_SIZE)) 91 MCOUNT_INSN_SIZE))
116 return -EPERM; 92 return -EPERM;
@@ -135,8 +111,6 @@ int __init ftrace_dyn_arch_init(void)
135 return 0; 111 return 0;
136} 112}
137 113
138#endif /* CONFIG_DYNAMIC_FTRACE */
139
140#ifdef CONFIG_FUNCTION_GRAPH_TRACER 114#ifdef CONFIG_FUNCTION_GRAPH_TRACER
141/* 115/*
142 * Hook the return address and push it in the stack of return addresses 116 * Hook the return address and push it in the stack of return addresses
@@ -162,31 +136,26 @@ out:
162 return parent; 136 return parent;
163} 137}
164 138
165#ifdef CONFIG_DYNAMIC_FTRACE
166/* 139/*
167 * Patch the kernel code at ftrace_graph_caller location. The instruction 140 * Patch the kernel code at ftrace_graph_caller location. The instruction
168 * there is branch relative and save to prepare_ftrace_return. To disable 141 * there is branch relative on condition. To enable the ftrace graph code
169 * the call to prepare_ftrace_return we patch the bras offset to point 142 * block, we simply patch the mask field of the instruction to zero and
170 * directly after the instructions. To enable the call we calculate 143 * turn the instruction into a nop.
171 * the original offset to prepare_ftrace_return and put it back. 144 * To disable the ftrace graph code the mask field will be patched to
145 * all ones, which turns the instruction into an unconditional branch.
172 */ 146 */
173int ftrace_enable_ftrace_graph_caller(void) 147int ftrace_enable_ftrace_graph_caller(void)
174{ 148{
175 unsigned short offset; 149 u8 op = 0x04; /* set mask field to zero */
176 150
177 offset = ((void *) prepare_ftrace_return - 151 return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
178 (void *) ftrace_graph_caller) / 2;
179 return probe_kernel_write((void *) ftrace_graph_caller + 2,
180 &offset, sizeof(offset));
181} 152}
182 153
183int ftrace_disable_ftrace_graph_caller(void) 154int ftrace_disable_ftrace_graph_caller(void)
184{ 155{
185 static unsigned short offset = 0x0002; 156 u8 op = 0xf4; /* set mask field to all ones */
186 157
187 return probe_kernel_write((void *) ftrace_graph_caller + 2, 158 return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
188 &offset, sizeof(offset));
189} 159}
190 160
191#endif /* CONFIG_DYNAMIC_FTRACE */
192#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 161#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index e88d35d74950..d62eee11f0b5 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -398,7 +398,7 @@ ENTRY(startup_kdump)
398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
399#ifndef CONFIG_MARCH_G5 399#ifndef CONFIG_MARCH_G5
400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
401 .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list 401 .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
403 jz 0f 403 jz 0f
404 la %r0,1 404 la %r0,1
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
new file mode 100644
index 000000000000..c846aee7372f
--- /dev/null
+++ b/arch/s390/kernel/idle.c
@@ -0,0 +1,124 @@
1/*
2 * Idle functions for s390.
3 *
4 * Copyright IBM Corp. 2014
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <linux/kernel.h>
10#include <linux/kernel_stat.h>
11#include <linux/kprobes.h>
12#include <linux/notifier.h>
13#include <linux/init.h>
14#include <linux/cpu.h>
15#include <asm/cputime.h>
16#include <asm/nmi.h>
17#include <asm/smp.h>
18#include "entry.h"
19
20static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
21
22void __kprobes enabled_wait(void)
23{
24 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
25 unsigned long long idle_time;
26 unsigned long psw_mask;
27
28 trace_hardirqs_on();
29
30 /* Wait for external, I/O or machine check interrupt. */
31 psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
32 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
33 clear_cpu_flag(CIF_NOHZ_DELAY);
34
35 /* Call the assembler magic in entry.S */
36 psw_idle(idle, psw_mask);
37
38 /* Account time spent with enabled wait psw loaded as idle time. */
39 idle->sequence++;
40 smp_wmb();
41 idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
42 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
43 idle->idle_time += idle_time;
44 idle->idle_count++;
45 account_idle_time(idle_time);
46 smp_wmb();
47 idle->sequence++;
48}
49
50static ssize_t show_idle_count(struct device *dev,
51 struct device_attribute *attr, char *buf)
52{
53 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
54 unsigned long long idle_count;
55 unsigned int sequence;
56
57 do {
58 sequence = ACCESS_ONCE(idle->sequence);
59 idle_count = ACCESS_ONCE(idle->idle_count);
60 if (ACCESS_ONCE(idle->clock_idle_enter))
61 idle_count++;
62 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
63 return sprintf(buf, "%llu\n", idle_count);
64}
65DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
66
67static ssize_t show_idle_time(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
71 unsigned long long now, idle_time, idle_enter, idle_exit;
72 unsigned int sequence;
73
74 do {
75 now = get_tod_clock();
76 sequence = ACCESS_ONCE(idle->sequence);
77 idle_time = ACCESS_ONCE(idle->idle_time);
78 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
79 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
80 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
81 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
82 return sprintf(buf, "%llu\n", idle_time >> 12);
83}
84DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
85
86cputime64_t arch_cpu_idle_time(int cpu)
87{
88 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
89 unsigned long long now, idle_enter, idle_exit;
90 unsigned int sequence;
91
92 do {
93 now = get_tod_clock();
94 sequence = ACCESS_ONCE(idle->sequence);
95 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
96 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
97 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
98 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
99}
100
101void arch_cpu_idle_enter(void)
102{
103 local_mcck_disable();
104}
105
106void arch_cpu_idle(void)
107{
108 if (!test_cpu_flag(CIF_MCCK_PENDING))
109 /* Halt the cpu and keep track of cpu time accounting. */
110 enabled_wait();
111 local_irq_enable();
112}
113
114void arch_cpu_idle_exit(void)
115{
116 local_mcck_enable();
117 if (test_cpu_flag(CIF_MCCK_PENDING))
118 s390_handle_mcck();
119}
120
121void arch_cpu_idle_dead(void)
122{
123 cpu_die();
124}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8eb82443cfbd..1b8a38ab7861 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -70,6 +70,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, 70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, 71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
72 {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, 72 {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
73 {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
73 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, 74 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
74 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, 75 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
75 {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"}, 76 {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
@@ -258,7 +259,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
258 259
259 ext_code = *(struct ext_code *) &regs->int_code; 260 ext_code = *(struct ext_code *) &regs->int_code;
260 if (ext_code.code != EXT_IRQ_CLK_COMP) 261 if (ext_code.code != EXT_IRQ_CLK_COMP)
261 __get_cpu_var(s390_idle).nohz_delay = 1; 262 set_cpu_flag(CIF_NOHZ_DELAY);
262 263
263 index = ext_hash(ext_code.code); 264 index = ext_hash(ext_code.code);
264 rcu_read_lock(); 265 rcu_read_lock();
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index bc71a7b95af5..27ae5433fe4d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -58,161 +58,13 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
58 .insn_size = MAX_INSN_SIZE, 58 .insn_size = MAX_INSN_SIZE,
59}; 59};
60 60
61static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
62{
63 if (!is_known_insn((unsigned char *)insn))
64 return -EINVAL;
65 switch (insn[0] >> 8) {
66 case 0x0c: /* bassm */
67 case 0x0b: /* bsm */
68 case 0x83: /* diag */
69 case 0x44: /* ex */
70 case 0xac: /* stnsm */
71 case 0xad: /* stosm */
72 return -EINVAL;
73 case 0xc6:
74 switch (insn[0] & 0x0f) {
75 case 0x00: /* exrl */
76 return -EINVAL;
77 }
78 }
79 switch (insn[0]) {
80 case 0x0101: /* pr */
81 case 0xb25a: /* bsa */
82 case 0xb240: /* bakr */
83 case 0xb258: /* bsg */
84 case 0xb218: /* pc */
85 case 0xb228: /* pt */
86 case 0xb98d: /* epsw */
87 return -EINVAL;
88 }
89 return 0;
90}
91
92static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
93{
94 /* default fixup method */
95 int fixup = FIXUP_PSW_NORMAL;
96
97 switch (insn[0] >> 8) {
98 case 0x05: /* balr */
99 case 0x0d: /* basr */
100 fixup = FIXUP_RETURN_REGISTER;
101 /* if r2 = 0, no branch will be taken */
102 if ((insn[0] & 0x0f) == 0)
103 fixup |= FIXUP_BRANCH_NOT_TAKEN;
104 break;
105 case 0x06: /* bctr */
106 case 0x07: /* bcr */
107 fixup = FIXUP_BRANCH_NOT_TAKEN;
108 break;
109 case 0x45: /* bal */
110 case 0x4d: /* bas */
111 fixup = FIXUP_RETURN_REGISTER;
112 break;
113 case 0x47: /* bc */
114 case 0x46: /* bct */
115 case 0x86: /* bxh */
116 case 0x87: /* bxle */
117 fixup = FIXUP_BRANCH_NOT_TAKEN;
118 break;
119 case 0x82: /* lpsw */
120 fixup = FIXUP_NOT_REQUIRED;
121 break;
122 case 0xb2: /* lpswe */
123 if ((insn[0] & 0xff) == 0xb2)
124 fixup = FIXUP_NOT_REQUIRED;
125 break;
126 case 0xa7: /* bras */
127 if ((insn[0] & 0x0f) == 0x05)
128 fixup |= FIXUP_RETURN_REGISTER;
129 break;
130 case 0xc0:
131 if ((insn[0] & 0x0f) == 0x05) /* brasl */
132 fixup |= FIXUP_RETURN_REGISTER;
133 break;
134 case 0xeb:
135 switch (insn[2] & 0xff) {
136 case 0x44: /* bxhg */
137 case 0x45: /* bxleg */
138 fixup = FIXUP_BRANCH_NOT_TAKEN;
139 break;
140 }
141 break;
142 case 0xe3: /* bctg */
143 if ((insn[2] & 0xff) == 0x46)
144 fixup = FIXUP_BRANCH_NOT_TAKEN;
145 break;
146 case 0xec:
147 switch (insn[2] & 0xff) {
148 case 0xe5: /* clgrb */
149 case 0xe6: /* cgrb */
150 case 0xf6: /* crb */
151 case 0xf7: /* clrb */
152 case 0xfc: /* cgib */
153 case 0xfd: /* cglib */
154 case 0xfe: /* cib */
155 case 0xff: /* clib */
156 fixup = FIXUP_BRANCH_NOT_TAKEN;
157 break;
158 }
159 break;
160 }
161 return fixup;
162}
163
164static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
165{
166 /* Check if we have a RIL-b or RIL-c format instruction which
167 * we need to modify in order to avoid instruction emulation. */
168 switch (insn[0] >> 8) {
169 case 0xc0:
170 if ((insn[0] & 0x0f) == 0x00) /* larl */
171 return true;
172 break;
173 case 0xc4:
174 switch (insn[0] & 0x0f) {
175 case 0x02: /* llhrl */
176 case 0x04: /* lghrl */
177 case 0x05: /* lhrl */
178 case 0x06: /* llghrl */
179 case 0x07: /* sthrl */
180 case 0x08: /* lgrl */
181 case 0x0b: /* stgrl */
182 case 0x0c: /* lgfrl */
183 case 0x0d: /* lrl */
184 case 0x0e: /* llgfrl */
185 case 0x0f: /* strl */
186 return true;
187 }
188 break;
189 case 0xc6:
190 switch (insn[0] & 0x0f) {
191 case 0x02: /* pfdrl */
192 case 0x04: /* cghrl */
193 case 0x05: /* chrl */
194 case 0x06: /* clghrl */
195 case 0x07: /* clhrl */
196 case 0x08: /* cgrl */
197 case 0x0a: /* clgrl */
198 case 0x0c: /* cgfrl */
199 case 0x0d: /* crl */
200 case 0x0e: /* clgfrl */
201 case 0x0f: /* clrl */
202 return true;
203 }
204 break;
205 }
206 return false;
207}
208
209static void __kprobes copy_instruction(struct kprobe *p) 61static void __kprobes copy_instruction(struct kprobe *p)
210{ 62{
211 s64 disp, new_disp; 63 s64 disp, new_disp;
212 u64 addr, new_addr; 64 u64 addr, new_addr;
213 65
214 memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8)); 66 memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
215 if (!is_insn_relative_long(p->ainsn.insn)) 67 if (!probe_is_insn_relative_long(p->ainsn.insn))
216 return; 68 return;
217 /* 69 /*
218 * For pc-relative instructions in RIL-b or RIL-c format patch the 70 * For pc-relative instructions in RIL-b or RIL-c format patch the
@@ -276,7 +128,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
276 if ((unsigned long) p->addr & 0x01) 128 if ((unsigned long) p->addr & 0x01)
277 return -EINVAL; 129 return -EINVAL;
278 /* Make sure the probe isn't going on a difficult instruction */ 130 /* Make sure the probe isn't going on a difficult instruction */
279 if (is_prohibited_opcode(p->addr)) 131 if (probe_is_prohibited_opcode(p->addr))
280 return -EINVAL; 132 return -EINVAL;
281 if (s390_get_insn_slot(p)) 133 if (s390_get_insn_slot(p))
282 return -ENOMEM; 134 return -ENOMEM;
@@ -605,7 +457,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
605{ 457{
606 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 458 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
607 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; 459 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
608 int fixup = get_fixup_type(p->ainsn.insn); 460 int fixup = probe_get_fixup_type(p->ainsn.insn);
609 461
610 if (fixup & FIXUP_PSW_NORMAL) 462 if (fixup & FIXUP_PSW_NORMAL)
611 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; 463 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
@@ -789,11 +641,6 @@ void __kprobes jprobe_return(void)
789 asm volatile(".word 0x0002"); 641 asm volatile(".word 0x0002");
790} 642}
791 643
792static void __used __kprobes jprobe_return_end(void)
793{
794 asm volatile("bcr 0,0");
795}
796
797int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 644int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
798{ 645{
799 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 646 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 719e27b2cf22..4685337fa7c6 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -25,6 +25,7 @@
25#include <asm/elf.h> 25#include <asm/elf.h>
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/os_info.h> 27#include <asm/os_info.h>
28#include <asm/switch_to.h>
28 29
29typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); 30typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
30 31
@@ -43,7 +44,7 @@ static void add_elf_notes(int cpu)
43 44
44 memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa)); 45 memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
45 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); 46 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
46 ptr = fill_cpu_elf_notes(ptr, sa); 47 ptr = fill_cpu_elf_notes(ptr, sa, NULL);
47 memset(ptr, 0, sizeof(struct elf_note)); 48 memset(ptr, 0, sizeof(struct elf_note));
48} 49}
49 50
@@ -53,8 +54,11 @@ static void add_elf_notes(int cpu)
53static void setup_regs(void) 54static void setup_regs(void)
54{ 55{
55 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; 56 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
57 struct _lowcore *lc;
56 int cpu, this_cpu; 58 int cpu, this_cpu;
57 59
60 /* Get lowcore pointer from store status of this CPU (absolute zero) */
61 lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
58 this_cpu = smp_find_processor_id(stap()); 62 this_cpu = smp_find_processor_id(stap());
59 add_elf_notes(this_cpu); 63 add_elf_notes(this_cpu);
60 for_each_online_cpu(cpu) { 64 for_each_online_cpu(cpu) {
@@ -64,6 +68,8 @@ static void setup_regs(void)
64 continue; 68 continue;
65 add_elf_notes(cpu); 69 add_elf_notes(cpu);
66 } 70 }
71 if (MACHINE_HAS_VX)
72 save_vx_regs_safe((void *) lc->vector_save_area_addr);
67 /* Copy dump CPU store status info to absolute zero */ 73 /* Copy dump CPU store status info to absolute zero */
68 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); 74 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
69} 75}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 433c6dbfa442..4300ea374826 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -8,62 +8,72 @@
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/asm-offsets.h> 9#include <asm/asm-offsets.h>
10#include <asm/ftrace.h> 10#include <asm/ftrace.h>
11#include <asm/ptrace.h>
11 12
12 .section .kprobes.text, "ax" 13 .section .kprobes.text, "ax"
13 14
14ENTRY(ftrace_stub) 15ENTRY(ftrace_stub)
15 br %r14 16 br %r14
16 17
18#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
19#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
20#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
21#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
22
17ENTRY(_mcount) 23ENTRY(_mcount)
18#ifdef CONFIG_DYNAMIC_FTRACE
19 br %r14 24 br %r14
20 25
21ENTRY(ftrace_caller) 26ENTRY(ftrace_caller)
27 .globl ftrace_regs_caller
28 .set ftrace_regs_caller,ftrace_caller
29 lgr %r1,%r15
30 aghi %r15,-STACK_FRAME_SIZE
31 stg %r1,__SF_BACKCHAIN(%r15)
32 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
33 stg %r0,(STACK_PTREGS_PSW+8)(%r15)
34 stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
35#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
36 aghik %r2,%r0,-MCOUNT_INSN_SIZE
37 lgrl %r4,function_trace_op
38 lgrl %r1,ftrace_trace_function
39#else
40 lgr %r2,%r0
41 aghi %r2,-MCOUNT_INSN_SIZE
42 larl %r4,function_trace_op
43 lg %r4,0(%r4)
44 larl %r1,ftrace_trace_function
45 lg %r1,0(%r1)
22#endif 46#endif
23 stm %r2,%r5,16(%r15) 47 lgr %r3,%r14
24 bras %r1,1f 48 la %r5,STACK_PTREGS(%r15)
250: .long ftrace_trace_function 49 basr %r14,%r1
261: st %r14,56(%r15)
27 lr %r0,%r15
28 ahi %r15,-96
29 l %r3,100(%r15)
30 la %r2,0(%r14)
31 st %r0,__SF_BACKCHAIN(%r15)
32 la %r3,0(%r3)
33 ahi %r2,-MCOUNT_INSN_SIZE
34 l %r14,0b-0b(%r1)
35 l %r14,0(%r14)
36 basr %r14,%r14
37#ifdef CONFIG_FUNCTION_GRAPH_TRACER 50#ifdef CONFIG_FUNCTION_GRAPH_TRACER
38 l %r2,100(%r15) 51# The j instruction gets runtime patched to a nop instruction.
39 l %r3,152(%r15) 52# See ftrace_enable_ftrace_graph_caller.
40ENTRY(ftrace_graph_caller) 53ENTRY(ftrace_graph_caller)
41# The bras instruction gets runtime patched to call prepare_ftrace_return. 54 j ftrace_graph_caller_end
42# See ftrace_enable_ftrace_graph_caller. The patched instruction is: 55 lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
43# bras %r14,prepare_ftrace_return 56 lg %r3,(STACK_PTREGS_PSW+8)(%r15)
44 bras %r14,0f 57 brasl %r14,prepare_ftrace_return
450: st %r2,100(%r15) 58 stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
59ftrace_graph_caller_end:
60 .globl ftrace_graph_caller_end
46#endif 61#endif
47 ahi %r15,96 62 lg %r1,(STACK_PTREGS_PSW+8)(%r15)
48 l %r14,56(%r15) 63 lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
49 lm %r2,%r5,16(%r15) 64 br %r1
50 br %r14
51 65
52#ifdef CONFIG_FUNCTION_GRAPH_TRACER 66#ifdef CONFIG_FUNCTION_GRAPH_TRACER
53 67
54ENTRY(return_to_handler) 68ENTRY(return_to_handler)
55 stm %r2,%r5,16(%r15) 69 stmg %r2,%r5,32(%r15)
56 st %r14,56(%r15) 70 lgr %r1,%r15
57 lr %r0,%r15 71 aghi %r15,-STACK_FRAME_OVERHEAD
58 ahi %r15,-96 72 stg %r1,__SF_BACKCHAIN(%r15)
59 st %r0,__SF_BACKCHAIN(%r15) 73 brasl %r14,ftrace_return_to_handler
60 bras %r1,0f 74 aghi %r15,STACK_FRAME_OVERHEAD
61 .long ftrace_return_to_handler 75 lgr %r14,%r2
620: l %r2,0b-0b(%r1) 76 lmg %r2,%r5,32(%r15)
63 basr %r14,%r2
64 lr %r14,%r2
65 ahi %r15,96
66 lm %r2,%r5,16(%r15)
67 br %r14 77 br %r14
68 78
69#endif 79#endif
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
deleted file mode 100644
index c67a8bf0fd9a..000000000000
--- a/arch/s390/kernel/mcount64.S
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright IBM Corp. 2008, 2009
3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5 *
6 */
7
8#include <linux/linkage.h>
9#include <asm/asm-offsets.h>
10#include <asm/ftrace.h>
11
12 .section .kprobes.text, "ax"
13
14ENTRY(ftrace_stub)
15 br %r14
16
17ENTRY(_mcount)
18#ifdef CONFIG_DYNAMIC_FTRACE
19 br %r14
20
21ENTRY(ftrace_caller)
22#endif
23 stmg %r2,%r5,32(%r15)
24 stg %r14,112(%r15)
25 lgr %r1,%r15
26 aghi %r15,-160
27 stg %r1,__SF_BACKCHAIN(%r15)
28 lgr %r2,%r14
29 lg %r3,168(%r15)
30 aghi %r2,-MCOUNT_INSN_SIZE
31 larl %r14,ftrace_trace_function
32 lg %r14,0(%r14)
33 basr %r14,%r14
34#ifdef CONFIG_FUNCTION_GRAPH_TRACER
35 lg %r2,168(%r15)
36 lg %r3,272(%r15)
37ENTRY(ftrace_graph_caller)
38# The bras instruction gets runtime patched to call prepare_ftrace_return.
39# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
40# bras %r14,prepare_ftrace_return
41 bras %r14,0f
420: stg %r2,168(%r15)
43#endif
44 aghi %r15,160
45 lmg %r2,%r5,32(%r15)
46 lg %r14,112(%r15)
47 br %r14
48
49#ifdef CONFIG_FUNCTION_GRAPH_TRACER
50
51ENTRY(return_to_handler)
52 stmg %r2,%r5,32(%r15)
53 lgr %r1,%r15
54 aghi %r15,-160
55 stg %r1,__SF_BACKCHAIN(%r15)
56 brasl %r14,ftrace_return_to_handler
57 aghi %r15,160
58 lgr %r14,%r2
59 lmg %r2,%r5,32(%r15)
60 br %r14
61
62#endif
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 210e1285f75a..db96b418160a 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -20,6 +20,7 @@
20#include <asm/cputime.h> 20#include <asm/cputime.h>
21#include <asm/nmi.h> 21#include <asm/nmi.h>
22#include <asm/crw.h> 22#include <asm/crw.h>
23#include <asm/switch_to.h>
23 24
24struct mcck_struct { 25struct mcck_struct {
25 int kill_task; 26 int kill_task;
@@ -163,6 +164,21 @@ static int notrace s390_revalidate_registers(struct mci *mci)
163 " ld 15,120(%0)\n" 164 " ld 15,120(%0)\n"
164 : : "a" (fpt_save_area)); 165 : : "a" (fpt_save_area));
165 } 166 }
167
168#ifdef CONFIG_64BIT
169 /* Revalidate vector registers */
170 if (MACHINE_HAS_VX && current->thread.vxrs) {
171 if (!mci->vr) {
172 /*
173 * Vector registers can't be restored and therefore
174 * the process needs to be terminated.
175 */
176 kill_task = 1;
177 }
178 restore_vx_regs((__vector128 *)
179 S390_lowcore.vector_save_area_addr);
180 }
181#endif
166 /* Revalidate access registers */ 182 /* Revalidate access registers */
167 asm volatile( 183 asm volatile(
168 " lam 0,15,0(%0)" 184 " lam 0,15,0(%0)"
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 813ec7260878..f6f8886399f6 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -49,7 +49,7 @@ PGM_CHECK_DEFAULT /* 17 */
49PGM_CHECK_64BIT(transaction_exception) /* 18 */ 49PGM_CHECK_64BIT(transaction_exception) /* 18 */
50PGM_CHECK_DEFAULT /* 19 */ 50PGM_CHECK_DEFAULT /* 19 */
51PGM_CHECK_DEFAULT /* 1a */ 51PGM_CHECK_DEFAULT /* 1a */
52PGM_CHECK_DEFAULT /* 1b */ 52PGM_CHECK_64BIT(vector_exception) /* 1b */
53PGM_CHECK(space_switch_exception) /* 1c */ 53PGM_CHECK(space_switch_exception) /* 1c */
54PGM_CHECK(hfp_sqrt_exception) /* 1d */ 54PGM_CHECK(hfp_sqrt_exception) /* 1d */
55PGM_CHECK_DEFAULT /* 1e */ 55PGM_CHECK_DEFAULT /* 1e */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 93b9ca42e5c0..ed84cc224899 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,30 +61,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
61 return sf->gprs[8]; 61 return sf->gprs[8];
62} 62}
63 63
64void arch_cpu_idle(void)
65{
66 local_mcck_disable();
67 if (test_cpu_flag(CIF_MCCK_PENDING)) {
68 local_mcck_enable();
69 local_irq_enable();
70 return;
71 }
72 /* Halt the cpu and keep track of cpu time accounting. */
73 vtime_stop_cpu();
74 local_irq_enable();
75}
76
77void arch_cpu_idle_exit(void)
78{
79 if (test_cpu_flag(CIF_MCCK_PENDING))
80 s390_handle_mcck();
81}
82
83void arch_cpu_idle_dead(void)
84{
85 cpu_die();
86}
87
88extern void __kprobes kernel_thread_starter(void); 64extern void __kprobes kernel_thread_starter(void);
89 65
90/* 66/*
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 24612029f450..edefead3b43a 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -23,7 +23,6 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
23 */ 23 */
24void cpu_init(void) 24void cpu_init(void)
25{ 25{
26 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
27 struct cpuid *id = &__get_cpu_var(cpu_id); 26 struct cpuid *id = &__get_cpu_var(cpu_id);
28 27
29 get_cpu_id(id); 28 get_cpu_id(id);
@@ -31,7 +30,6 @@ void cpu_init(void)
31 current->active_mm = &init_mm; 30 current->active_mm = &init_mm;
32 BUG_ON(current->mm); 31 BUG_ON(current->mm);
33 enter_lazy_tlb(&init_mm, current); 32 enter_lazy_tlb(&init_mm, current);
34 memset(idle, 0, sizeof(*idle));
35} 33}
36 34
37/* 35/*
@@ -41,7 +39,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
41{ 39{
42 static const char *hwcap_str[] = { 40 static const char *hwcap_str[] = {
43 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 41 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
44 "edat", "etf3eh", "highgprs", "te" 42 "edat", "etf3eh", "highgprs", "te", "vx"
45 }; 43 };
46 unsigned long n = (unsigned long) v - 1; 44 unsigned long n = (unsigned long) v - 1;
47 int i; 45 int i;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index bebacad48305..f537e937a988 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -38,15 +38,6 @@
38#define CREATE_TRACE_POINTS 38#define CREATE_TRACE_POINTS
39#include <trace/events/syscalls.h> 39#include <trace/events/syscalls.h>
40 40
41enum s390_regset {
42 REGSET_GENERAL,
43 REGSET_FP,
44 REGSET_LAST_BREAK,
45 REGSET_TDB,
46 REGSET_SYSTEM_CALL,
47 REGSET_GENERAL_EXTENDED,
48};
49
50void update_cr_regs(struct task_struct *task) 41void update_cr_regs(struct task_struct *task)
51{ 42{
52 struct pt_regs *regs = task_pt_regs(task); 43 struct pt_regs *regs = task_pt_regs(task);
@@ -55,27 +46,39 @@ void update_cr_regs(struct task_struct *task)
55 46
56#ifdef CONFIG_64BIT 47#ifdef CONFIG_64BIT
57 /* Take care of the enable/disable of transactional execution. */ 48 /* Take care of the enable/disable of transactional execution. */
58 if (MACHINE_HAS_TE) { 49 if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
59 unsigned long cr, cr_new; 50 unsigned long cr, cr_new;
60 51
61 __ctl_store(cr, 0, 0); 52 __ctl_store(cr, 0, 0);
62 /* Set or clear transaction execution TXC bit 8. */ 53 cr_new = cr;
63 cr_new = cr | (1UL << 55); 54 if (MACHINE_HAS_TE) {
64 if (task->thread.per_flags & PER_FLAG_NO_TE) 55 /* Set or clear transaction execution TXC bit 8. */
65 cr_new &= ~(1UL << 55); 56 cr_new |= (1UL << 55);
57 if (task->thread.per_flags & PER_FLAG_NO_TE)
58 cr_new &= ~(1UL << 55);
59 }
60 if (MACHINE_HAS_VX) {
61 /* Enable/disable of vector extension */
62 cr_new &= ~(1UL << 17);
63 if (task->thread.vxrs)
64 cr_new |= (1UL << 17);
65 }
66 if (cr_new != cr) 66 if (cr_new != cr)
67 __ctl_load(cr_new, 0, 0); 67 __ctl_load(cr_new, 0, 0);
68 /* Set or clear transaction execution TDC bits 62 and 63. */ 68 if (MACHINE_HAS_TE) {
69 __ctl_store(cr, 2, 2); 69 /* Set/clear transaction execution TDC bits 62/63. */
70 cr_new = cr & ~3UL; 70 __ctl_store(cr, 2, 2);
71 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { 71 cr_new = cr & ~3UL;
72 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) 72 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
73 cr_new |= 1UL; 73 if (task->thread.per_flags &
74 else 74 PER_FLAG_TE_ABORT_RAND_TEND)
75 cr_new |= 2UL; 75 cr_new |= 1UL;
76 else
77 cr_new |= 2UL;
78 }
79 if (cr_new != cr)
80 __ctl_load(cr_new, 2, 2);
76 } 81 }
77 if (cr_new != cr)
78 __ctl_load(cr_new, 2, 2);
79 } 82 }
80#endif 83#endif
81 /* Copy user specified PER registers */ 84 /* Copy user specified PER registers */
@@ -84,7 +87,8 @@ void update_cr_regs(struct task_struct *task)
84 new.end = thread->per_user.end; 87 new.end = thread->per_user.end;
85 88
86 /* merge TIF_SINGLE_STEP into user specified PER registers. */ 89 /* merge TIF_SINGLE_STEP into user specified PER registers. */
87 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { 90 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
91 test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
88 if (test_tsk_thread_flag(task, TIF_BLOCK_STEP)) 92 if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
89 new.control |= PER_EVENT_BRANCH; 93 new.control |= PER_EVENT_BRANCH;
90 else 94 else
@@ -93,6 +97,8 @@ void update_cr_regs(struct task_struct *task)
93 new.control |= PER_CONTROL_SUSPENSION; 97 new.control |= PER_CONTROL_SUSPENSION;
94 new.control |= PER_EVENT_TRANSACTION_END; 98 new.control |= PER_EVENT_TRANSACTION_END;
95#endif 99#endif
100 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
101 new.control |= PER_EVENT_IFETCH;
96 new.start = 0; 102 new.start = 0;
97 new.end = PSW_ADDR_INSN; 103 new.end = PSW_ADDR_INSN;
98 } 104 }
@@ -923,7 +929,15 @@ static int s390_fpregs_get(struct task_struct *target,
923 save_fp_ctl(&target->thread.fp_regs.fpc); 929 save_fp_ctl(&target->thread.fp_regs.fpc);
924 save_fp_regs(target->thread.fp_regs.fprs); 930 save_fp_regs(target->thread.fp_regs.fprs);
925 } 931 }
932#ifdef CONFIG_64BIT
933 else if (target->thread.vxrs) {
934 int i;
926 935
936 for (i = 0; i < __NUM_VXRS_LOW; i++)
937 target->thread.fp_regs.fprs[i] =
938 *(freg_t *)(target->thread.vxrs + i);
939 }
940#endif
927 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 941 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
928 &target->thread.fp_regs, 0, -1); 942 &target->thread.fp_regs, 0, -1);
929} 943}
@@ -957,9 +971,20 @@ static int s390_fpregs_set(struct task_struct *target,
957 target->thread.fp_regs.fprs, 971 target->thread.fp_regs.fprs,
958 offsetof(s390_fp_regs, fprs), -1); 972 offsetof(s390_fp_regs, fprs), -1);
959 973
960 if (rc == 0 && target == current) { 974 if (rc == 0) {
961 restore_fp_ctl(&target->thread.fp_regs.fpc); 975 if (target == current) {
962 restore_fp_regs(target->thread.fp_regs.fprs); 976 restore_fp_ctl(&target->thread.fp_regs.fpc);
977 restore_fp_regs(target->thread.fp_regs.fprs);
978 }
979#ifdef CONFIG_64BIT
980 else if (target->thread.vxrs) {
981 int i;
982
983 for (i = 0; i < __NUM_VXRS_LOW; i++)
984 *(freg_t *)(target->thread.vxrs + i) =
985 target->thread.fp_regs.fprs[i];
986 }
987#endif
963 } 988 }
964 989
965 return rc; 990 return rc;
@@ -1015,6 +1040,95 @@ static int s390_tdb_set(struct task_struct *target,
1015 return 0; 1040 return 0;
1016} 1041}
1017 1042
1043static int s390_vxrs_active(struct task_struct *target,
1044 const struct user_regset *regset)
1045{
1046 return !!target->thread.vxrs;
1047}
1048
1049static int s390_vxrs_low_get(struct task_struct *target,
1050 const struct user_regset *regset,
1051 unsigned int pos, unsigned int count,
1052 void *kbuf, void __user *ubuf)
1053{
1054 __u64 vxrs[__NUM_VXRS_LOW];
1055 int i;
1056
1057 if (target->thread.vxrs) {
1058 if (target == current)
1059 save_vx_regs(target->thread.vxrs);
1060 for (i = 0; i < __NUM_VXRS_LOW; i++)
1061 vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
1062 } else
1063 memset(vxrs, 0, sizeof(vxrs));
1064 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1065}
1066
1067static int s390_vxrs_low_set(struct task_struct *target,
1068 const struct user_regset *regset,
1069 unsigned int pos, unsigned int count,
1070 const void *kbuf, const void __user *ubuf)
1071{
1072 __u64 vxrs[__NUM_VXRS_LOW];
1073 int i, rc;
1074
1075 if (!target->thread.vxrs) {
1076 rc = alloc_vector_registers(target);
1077 if (rc)
1078 return rc;
1079 } else if (target == current)
1080 save_vx_regs(target->thread.vxrs);
1081
1082 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1083 if (rc == 0) {
1084 for (i = 0; i < __NUM_VXRS_LOW; i++)
1085 *((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
1086 if (target == current)
1087 restore_vx_regs(target->thread.vxrs);
1088 }
1089
1090 return rc;
1091}
1092
1093static int s390_vxrs_high_get(struct task_struct *target,
1094 const struct user_regset *regset,
1095 unsigned int pos, unsigned int count,
1096 void *kbuf, void __user *ubuf)
1097{
1098 __vector128 vxrs[__NUM_VXRS_HIGH];
1099
1100 if (target->thread.vxrs) {
1101 if (target == current)
1102 save_vx_regs(target->thread.vxrs);
1103 memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
1104 sizeof(vxrs));
1105 } else
1106 memset(vxrs, 0, sizeof(vxrs));
1107 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1108}
1109
1110static int s390_vxrs_high_set(struct task_struct *target,
1111 const struct user_regset *regset,
1112 unsigned int pos, unsigned int count,
1113 const void *kbuf, const void __user *ubuf)
1114{
1115 int rc;
1116
1117 if (!target->thread.vxrs) {
1118 rc = alloc_vector_registers(target);
1119 if (rc)
1120 return rc;
1121 } else if (target == current)
1122 save_vx_regs(target->thread.vxrs);
1123
1124 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1125 target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
1126 if (rc == 0 && target == current)
1127 restore_vx_regs(target->thread.vxrs);
1128
1129 return rc;
1130}
1131
1018#endif 1132#endif
1019 1133
1020static int s390_system_call_get(struct task_struct *target, 1134static int s390_system_call_get(struct task_struct *target,
@@ -1038,7 +1152,7 @@ static int s390_system_call_set(struct task_struct *target,
1038} 1152}
1039 1153
1040static const struct user_regset s390_regsets[] = { 1154static const struct user_regset s390_regsets[] = {
1041 [REGSET_GENERAL] = { 1155 {
1042 .core_note_type = NT_PRSTATUS, 1156 .core_note_type = NT_PRSTATUS,
1043 .n = sizeof(s390_regs) / sizeof(long), 1157 .n = sizeof(s390_regs) / sizeof(long),
1044 .size = sizeof(long), 1158 .size = sizeof(long),
@@ -1046,7 +1160,7 @@ static const struct user_regset s390_regsets[] = {
1046 .get = s390_regs_get, 1160 .get = s390_regs_get,
1047 .set = s390_regs_set, 1161 .set = s390_regs_set,
1048 }, 1162 },
1049 [REGSET_FP] = { 1163 {
1050 .core_note_type = NT_PRFPREG, 1164 .core_note_type = NT_PRFPREG,
1051 .n = sizeof(s390_fp_regs) / sizeof(long), 1165 .n = sizeof(s390_fp_regs) / sizeof(long),
1052 .size = sizeof(long), 1166 .size = sizeof(long),
@@ -1054,8 +1168,16 @@ static const struct user_regset s390_regsets[] = {
1054 .get = s390_fpregs_get, 1168 .get = s390_fpregs_get,
1055 .set = s390_fpregs_set, 1169 .set = s390_fpregs_set,
1056 }, 1170 },
1171 {
1172 .core_note_type = NT_S390_SYSTEM_CALL,
1173 .n = 1,
1174 .size = sizeof(unsigned int),
1175 .align = sizeof(unsigned int),
1176 .get = s390_system_call_get,
1177 .set = s390_system_call_set,
1178 },
1057#ifdef CONFIG_64BIT 1179#ifdef CONFIG_64BIT
1058 [REGSET_LAST_BREAK] = { 1180 {
1059 .core_note_type = NT_S390_LAST_BREAK, 1181 .core_note_type = NT_S390_LAST_BREAK,
1060 .n = 1, 1182 .n = 1,
1061 .size = sizeof(long), 1183 .size = sizeof(long),
@@ -1063,7 +1185,7 @@ static const struct user_regset s390_regsets[] = {
1063 .get = s390_last_break_get, 1185 .get = s390_last_break_get,
1064 .set = s390_last_break_set, 1186 .set = s390_last_break_set,
1065 }, 1187 },
1066 [REGSET_TDB] = { 1188 {
1067 .core_note_type = NT_S390_TDB, 1189 .core_note_type = NT_S390_TDB,
1068 .n = 1, 1190 .n = 1,
1069 .size = 256, 1191 .size = 256,
@@ -1071,15 +1193,25 @@ static const struct user_regset s390_regsets[] = {
1071 .get = s390_tdb_get, 1193 .get = s390_tdb_get,
1072 .set = s390_tdb_set, 1194 .set = s390_tdb_set,
1073 }, 1195 },
1074#endif 1196 {
1075 [REGSET_SYSTEM_CALL] = { 1197 .core_note_type = NT_S390_VXRS_LOW,
1076 .core_note_type = NT_S390_SYSTEM_CALL, 1198 .n = __NUM_VXRS_LOW,
1077 .n = 1, 1199 .size = sizeof(__u64),
1078 .size = sizeof(unsigned int), 1200 .align = sizeof(__u64),
1079 .align = sizeof(unsigned int), 1201 .active = s390_vxrs_active,
1080 .get = s390_system_call_get, 1202 .get = s390_vxrs_low_get,
1081 .set = s390_system_call_set, 1203 .set = s390_vxrs_low_set,
1082 }, 1204 },
1205 {
1206 .core_note_type = NT_S390_VXRS_HIGH,
1207 .n = __NUM_VXRS_HIGH,
1208 .size = sizeof(__vector128),
1209 .align = sizeof(__vector128),
1210 .active = s390_vxrs_active,
1211 .get = s390_vxrs_high_get,
1212 .set = s390_vxrs_high_set,
1213 },
1214#endif
1083}; 1215};
1084 1216
1085static const struct user_regset_view user_s390_view = { 1217static const struct user_regset_view user_s390_view = {
@@ -1244,7 +1376,7 @@ static int s390_compat_last_break_set(struct task_struct *target,
1244} 1376}
1245 1377
1246static const struct user_regset s390_compat_regsets[] = { 1378static const struct user_regset s390_compat_regsets[] = {
1247 [REGSET_GENERAL] = { 1379 {
1248 .core_note_type = NT_PRSTATUS, 1380 .core_note_type = NT_PRSTATUS,
1249 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t), 1381 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
1250 .size = sizeof(compat_long_t), 1382 .size = sizeof(compat_long_t),
@@ -1252,7 +1384,7 @@ static const struct user_regset s390_compat_regsets[] = {
1252 .get = s390_compat_regs_get, 1384 .get = s390_compat_regs_get,
1253 .set = s390_compat_regs_set, 1385 .set = s390_compat_regs_set,
1254 }, 1386 },
1255 [REGSET_FP] = { 1387 {
1256 .core_note_type = NT_PRFPREG, 1388 .core_note_type = NT_PRFPREG,
1257 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t), 1389 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
1258 .size = sizeof(compat_long_t), 1390 .size = sizeof(compat_long_t),
@@ -1260,7 +1392,15 @@ static const struct user_regset s390_compat_regsets[] = {
1260 .get = s390_fpregs_get, 1392 .get = s390_fpregs_get,
1261 .set = s390_fpregs_set, 1393 .set = s390_fpregs_set,
1262 }, 1394 },
1263 [REGSET_LAST_BREAK] = { 1395 {
1396 .core_note_type = NT_S390_SYSTEM_CALL,
1397 .n = 1,
1398 .size = sizeof(compat_uint_t),
1399 .align = sizeof(compat_uint_t),
1400 .get = s390_system_call_get,
1401 .set = s390_system_call_set,
1402 },
1403 {
1264 .core_note_type = NT_S390_LAST_BREAK, 1404 .core_note_type = NT_S390_LAST_BREAK,
1265 .n = 1, 1405 .n = 1,
1266 .size = sizeof(long), 1406 .size = sizeof(long),
@@ -1268,7 +1408,7 @@ static const struct user_regset s390_compat_regsets[] = {
1268 .get = s390_compat_last_break_get, 1408 .get = s390_compat_last_break_get,
1269 .set = s390_compat_last_break_set, 1409 .set = s390_compat_last_break_set,
1270 }, 1410 },
1271 [REGSET_TDB] = { 1411 {
1272 .core_note_type = NT_S390_TDB, 1412 .core_note_type = NT_S390_TDB,
1273 .n = 1, 1413 .n = 1,
1274 .size = 256, 1414 .size = 256,
@@ -1276,15 +1416,25 @@ static const struct user_regset s390_compat_regsets[] = {
1276 .get = s390_tdb_get, 1416 .get = s390_tdb_get,
1277 .set = s390_tdb_set, 1417 .set = s390_tdb_set,
1278 }, 1418 },
1279 [REGSET_SYSTEM_CALL] = { 1419 {
1280 .core_note_type = NT_S390_SYSTEM_CALL, 1420 .core_note_type = NT_S390_VXRS_LOW,
1281 .n = 1, 1421 .n = __NUM_VXRS_LOW,
1282 .size = sizeof(compat_uint_t), 1422 .size = sizeof(__u64),
1283 .align = sizeof(compat_uint_t), 1423 .align = sizeof(__u64),
1284 .get = s390_system_call_get, 1424 .active = s390_vxrs_active,
1285 .set = s390_system_call_set, 1425 .get = s390_vxrs_low_get,
1426 .set = s390_vxrs_low_set,
1427 },
1428 {
1429 .core_note_type = NT_S390_VXRS_HIGH,
1430 .n = __NUM_VXRS_HIGH,
1431 .size = sizeof(__vector128),
1432 .align = sizeof(__vector128),
1433 .active = s390_vxrs_active,
1434 .get = s390_vxrs_high_get,
1435 .set = s390_vxrs_high_set,
1286 }, 1436 },
1287 [REGSET_GENERAL_EXTENDED] = { 1437 {
1288 .core_note_type = NT_S390_HIGH_GPRS, 1438 .core_note_type = NT_S390_HIGH_GPRS,
1289 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1439 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
1290 .size = sizeof(compat_long_t), 1440 .size = sizeof(compat_long_t),
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 82bc113e8c1d..e80d9ff9a56d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -343,6 +343,9 @@ static void __init setup_lowcore(void)
343 __ctl_set_bit(14, 29); 343 __ctl_set_bit(14, 29);
344 } 344 }
345#else 345#else
346 if (MACHINE_HAS_VX)
347 lc->vector_save_area_addr =
348 (unsigned long) &lc->vector_save_area;
346 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 349 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
347#endif 350#endif
348 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 351 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -452,8 +455,8 @@ static void __init setup_memory_end(void)
452#ifdef CONFIG_64BIT 455#ifdef CONFIG_64BIT
453 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; 456 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
454 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; 457 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
455 tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size; 458 tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
456 if (tmp <= (1UL << 42)) 459 if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
457 vmax = 1UL << 42; /* 3-level kernel page table */ 460 vmax = 1UL << 42; /* 3-level kernel page table */
458 else 461 else
459 vmax = 1UL << 53; /* 4-level kernel page table */ 462 vmax = 1UL << 53; /* 4-level kernel page table */
@@ -765,6 +768,12 @@ static void __init setup_hwcaps(void)
765 */ 768 */
766 if (test_facility(50) && test_facility(73)) 769 if (test_facility(50) && test_facility(73))
767 elf_hwcap |= HWCAP_S390_TE; 770 elf_hwcap |= HWCAP_S390_TE;
771
772 /*
773 * Vector extension HWCAP_S390_VXRS is bit 11.
774 */
775 if (test_facility(129))
776 elf_hwcap |= HWCAP_S390_VXRS;
768#endif 777#endif
769 778
770 get_cpu_id(&cpu_id); 779 get_cpu_id(&cpu_id);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 469c4c6d9182..0c1a0ff0a558 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -31,30 +31,117 @@
31#include <asm/switch_to.h> 31#include <asm/switch_to.h>
32#include "entry.h" 32#include "entry.h"
33 33
34typedef struct 34/*
35 * Layout of an old-style signal-frame:
36 * -----------------------------------------
37 * | save area (_SIGNAL_FRAMESIZE) |
38 * -----------------------------------------
39 * | struct sigcontext |
40 * | oldmask |
41 * | _sigregs * |
42 * -----------------------------------------
43 * | _sigregs with |
44 * | _s390_regs_common |
45 * | _s390_fp_regs |
46 * -----------------------------------------
47 * | int signo |
48 * -----------------------------------------
49 * | _sigregs_ext with |
50 * | gprs_high 64 byte (opt) |
51 * | vxrs_low 128 byte (opt) |
52 * | vxrs_high 256 byte (opt) |
53 * | reserved 128 byte (opt) |
54 * -----------------------------------------
55 * | __u16 svc_insn |
56 * -----------------------------------------
57 * The svc_insn entry with the sigreturn system call opcode does not
58 * have a fixed position and moves if gprs_high or vxrs exist.
59 * Future extensions will be added to _sigregs_ext.
60 */
61struct sigframe
35{ 62{
36 __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; 63 __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
37 struct sigcontext sc; 64 struct sigcontext sc;
38 _sigregs sregs; 65 _sigregs sregs;
39 int signo; 66 int signo;
40 __u8 retcode[S390_SYSCALL_SIZE]; 67 _sigregs_ext sregs_ext;
41} sigframe; 68 __u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
69};
42 70
43typedef struct 71/*
72 * Layout of an rt signal-frame:
73 * -----------------------------------------
74 * | save area (_SIGNAL_FRAMESIZE) |
75 * -----------------------------------------
76 * | svc __NR_rt_sigreturn 2 byte |
77 * -----------------------------------------
78 * | struct siginfo |
79 * -----------------------------------------
80 * | struct ucontext_extended with |
81 * | unsigned long uc_flags |
82 * | struct ucontext *uc_link |
83 * | stack_t uc_stack |
84 * | _sigregs uc_mcontext with |
85 * | _s390_regs_common |
86 * | _s390_fp_regs |
87 * | sigset_t uc_sigmask |
88 * | _sigregs_ext uc_mcontext_ext |
89 * | gprs_high 64 byte (opt) |
90 * | vxrs_low 128 byte (opt) |
91 * | vxrs_high 256 byte (opt)|
92 * | reserved 128 byte (opt) |
93 * -----------------------------------------
94 * Future extensions will be added to _sigregs_ext.
95 */
96struct rt_sigframe
44{ 97{
45 __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; 98 __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
46 __u8 retcode[S390_SYSCALL_SIZE]; 99 __u16 svc_insn;
47 struct siginfo info; 100 struct siginfo info;
48 struct ucontext uc; 101 struct ucontext_extended uc;
49} rt_sigframe; 102};
103
104/* Store registers needed to create the signal frame */
105static void store_sigregs(void)
106{
107 save_access_regs(current->thread.acrs);
108 save_fp_ctl(&current->thread.fp_regs.fpc);
109#ifdef CONFIG_64BIT
110 if (current->thread.vxrs) {
111 int i;
112
113 save_vx_regs(current->thread.vxrs);
114 for (i = 0; i < __NUM_FPRS; i++)
115 current->thread.fp_regs.fprs[i] =
116 *(freg_t *)(current->thread.vxrs + i);
117 } else
118#endif
119 save_fp_regs(current->thread.fp_regs.fprs);
120}
121
122/* Load registers after signal return */
123static void load_sigregs(void)
124{
125 restore_access_regs(current->thread.acrs);
126 /* restore_fp_ctl is done in restore_sigregs */
127#ifdef CONFIG_64BIT
128 if (current->thread.vxrs) {
129 int i;
130
131 for (i = 0; i < __NUM_FPRS; i++)
132 *(freg_t *)(current->thread.vxrs + i) =
133 current->thread.fp_regs.fprs[i];
134 restore_vx_regs(current->thread.vxrs);
135 } else
136#endif
137 restore_fp_regs(current->thread.fp_regs.fprs);
138}
50 139
51/* Returns non-zero on fault. */ 140/* Returns non-zero on fault. */
52static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) 141static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
53{ 142{
54 _sigregs user_sregs; 143 _sigregs user_sregs;
55 144
56 save_access_regs(current->thread.acrs);
57
58 /* Copy a 'clean' PSW mask to the user to avoid leaking 145 /* Copy a 'clean' PSW mask to the user to avoid leaking
59 information about whether PER is currently on. */ 146 information about whether PER is currently on. */
60 user_sregs.regs.psw.mask = PSW_USER_BITS | 147 user_sregs.regs.psw.mask = PSW_USER_BITS |
@@ -63,12 +150,6 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
63 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 150 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
64 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 151 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
65 sizeof(user_sregs.regs.acrs)); 152 sizeof(user_sregs.regs.acrs));
66 /*
67 * We have to store the fp registers to current->thread.fp_regs
68 * to merge them with the emulated registers.
69 */
70 save_fp_ctl(&current->thread.fp_regs.fpc);
71 save_fp_regs(current->thread.fp_regs.fprs);
72 memcpy(&user_sregs.fpregs, &current->thread.fp_regs, 153 memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
73 sizeof(user_sregs.fpregs)); 154 sizeof(user_sregs.fpregs));
74 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs))) 155 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
@@ -107,20 +188,64 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
107 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); 188 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
108 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, 189 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
109 sizeof(current->thread.acrs)); 190 sizeof(current->thread.acrs));
110 restore_access_regs(current->thread.acrs);
111 191
112 memcpy(&current->thread.fp_regs, &user_sregs.fpregs, 192 memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
113 sizeof(current->thread.fp_regs)); 193 sizeof(current->thread.fp_regs));
114 194
115 restore_fp_regs(current->thread.fp_regs.fprs);
116 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ 195 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
117 return 0; 196 return 0;
118} 197}
119 198
199/* Returns non-zero on fault. */
200static int save_sigregs_ext(struct pt_regs *regs,
201 _sigregs_ext __user *sregs_ext)
202{
203#ifdef CONFIG_64BIT
204 __u64 vxrs[__NUM_VXRS_LOW];
205 int i;
206
207 /* Save vector registers to signal stack */
208 if (current->thread.vxrs) {
209 for (i = 0; i < __NUM_VXRS_LOW; i++)
210 vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
211 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
212 sizeof(sregs_ext->vxrs_low)) ||
213 __copy_to_user(&sregs_ext->vxrs_high,
214 current->thread.vxrs + __NUM_VXRS_LOW,
215 sizeof(sregs_ext->vxrs_high)))
216 return -EFAULT;
217 }
218#endif
219 return 0;
220}
221
222static int restore_sigregs_ext(struct pt_regs *regs,
223 _sigregs_ext __user *sregs_ext)
224{
225#ifdef CONFIG_64BIT
226 __u64 vxrs[__NUM_VXRS_LOW];
227 int i;
228
229 /* Restore vector registers from signal stack */
230 if (current->thread.vxrs) {
231 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
232 sizeof(sregs_ext->vxrs_low)) ||
233 __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
234 &sregs_ext->vxrs_high,
235 sizeof(sregs_ext->vxrs_high)))
236 return -EFAULT;
237 for (i = 0; i < __NUM_VXRS_LOW; i++)
238 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
239 }
240#endif
241 return 0;
242}
243
120SYSCALL_DEFINE0(sigreturn) 244SYSCALL_DEFINE0(sigreturn)
121{ 245{
122 struct pt_regs *regs = task_pt_regs(current); 246 struct pt_regs *regs = task_pt_regs(current);
123 sigframe __user *frame = (sigframe __user *)regs->gprs[15]; 247 struct sigframe __user *frame =
248 (struct sigframe __user *) regs->gprs[15];
124 sigset_t set; 249 sigset_t set;
125 250
126 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) 251 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
@@ -128,6 +253,9 @@ SYSCALL_DEFINE0(sigreturn)
128 set_current_blocked(&set); 253 set_current_blocked(&set);
129 if (restore_sigregs(regs, &frame->sregs)) 254 if (restore_sigregs(regs, &frame->sregs))
130 goto badframe; 255 goto badframe;
256 if (restore_sigregs_ext(regs, &frame->sregs_ext))
257 goto badframe;
258 load_sigregs();
131 return regs->gprs[2]; 259 return regs->gprs[2];
132badframe: 260badframe:
133 force_sig(SIGSEGV, current); 261 force_sig(SIGSEGV, current);
@@ -137,16 +265,20 @@ badframe:
137SYSCALL_DEFINE0(rt_sigreturn) 265SYSCALL_DEFINE0(rt_sigreturn)
138{ 266{
139 struct pt_regs *regs = task_pt_regs(current); 267 struct pt_regs *regs = task_pt_regs(current);
140 rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; 268 struct rt_sigframe __user *frame =
269 (struct rt_sigframe __user *)regs->gprs[15];
141 sigset_t set; 270 sigset_t set;
142 271
143 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set))) 272 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
144 goto badframe; 273 goto badframe;
145 set_current_blocked(&set); 274 set_current_blocked(&set);
275 if (restore_altstack(&frame->uc.uc_stack))
276 goto badframe;
146 if (restore_sigregs(regs, &frame->uc.uc_mcontext)) 277 if (restore_sigregs(regs, &frame->uc.uc_mcontext))
147 goto badframe; 278 goto badframe;
148 if (restore_altstack(&frame->uc.uc_stack)) 279 if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
149 goto badframe; 280 goto badframe;
281 load_sigregs();
150 return regs->gprs[2]; 282 return regs->gprs[2];
151badframe: 283badframe:
152 force_sig(SIGSEGV, current); 284 force_sig(SIGSEGV, current);
@@ -154,11 +286,6 @@ badframe:
154} 286}
155 287
156/* 288/*
157 * Set up a signal frame.
158 */
159
160
161/*
162 * Determine which stack to use.. 289 * Determine which stack to use..
163 */ 290 */
164static inline void __user * 291static inline void __user *
@@ -195,39 +322,63 @@ static inline int map_signal(int sig)
195static int setup_frame(int sig, struct k_sigaction *ka, 322static int setup_frame(int sig, struct k_sigaction *ka,
196 sigset_t *set, struct pt_regs * regs) 323 sigset_t *set, struct pt_regs * regs)
197{ 324{
198 sigframe __user *frame; 325 struct sigframe __user *frame;
199 326 struct sigcontext sc;
200 frame = get_sigframe(ka, regs, sizeof(sigframe)); 327 unsigned long restorer;
328 size_t frame_size;
201 329
330 /*
331 * gprs_high are only present for a 31-bit task running on
332 * a 64-bit kernel (see compat_signal.c) but the space for
333 * gprs_high need to be allocated if vector registers are
334 * included in the signal frame on a 31-bit system.
335 */
336 frame_size = sizeof(*frame) - sizeof(frame->sregs_ext);
337 if (MACHINE_HAS_VX)
338 frame_size += sizeof(frame->sregs_ext);
339 frame = get_sigframe(ka, regs, frame_size);
202 if (frame == (void __user *) -1UL) 340 if (frame == (void __user *) -1UL)
203 return -EFAULT; 341 return -EFAULT;
204 342
205 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) 343 /* Set up backchain. */
344 if (__put_user(regs->gprs[15], (addr_t __user *) frame))
206 return -EFAULT; 345 return -EFAULT;
207 346
347 /* Create struct sigcontext on the signal stack */
348 memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE);
349 sc.sregs = (_sigregs __user __force *) &frame->sregs;
350 if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
351 return -EFAULT;
352
353 /* Store registers needed to create the signal frame */
354 store_sigregs();
355
356 /* Create _sigregs on the signal stack */
208 if (save_sigregs(regs, &frame->sregs)) 357 if (save_sigregs(regs, &frame->sregs))
209 return -EFAULT; 358 return -EFAULT;
210 if (__put_user(&frame->sregs, &frame->sc.sregs)) 359
360 /* Place signal number on stack to allow backtrace from handler. */
361 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
362 return -EFAULT;
363
364 /* Create _sigregs_ext on the signal stack */
365 if (save_sigregs_ext(regs, &frame->sregs_ext))
211 return -EFAULT; 366 return -EFAULT;
212 367
213 /* Set up to return from userspace. If provided, use a stub 368 /* Set up to return from userspace. If provided, use a stub
214 already in userspace. */ 369 already in userspace. */
215 if (ka->sa.sa_flags & SA_RESTORER) { 370 if (ka->sa.sa_flags & SA_RESTORER) {
216 regs->gprs[14] = (unsigned long) 371 restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
217 ka->sa.sa_restorer | PSW_ADDR_AMODE;
218 } else { 372 } else {
219 regs->gprs[14] = (unsigned long) 373 /* Signal frame without vector registers are short ! */
220 frame->retcode | PSW_ADDR_AMODE; 374 __u16 __user *svc = (void *) frame + frame_size - 2;
221 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, 375 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
222 (u16 __user *)(frame->retcode)))
223 return -EFAULT; 376 return -EFAULT;
377 restorer = (unsigned long) svc | PSW_ADDR_AMODE;
224 } 378 }
225 379
226 /* Set up backchain. */
227 if (__put_user(regs->gprs[15], (addr_t __user *) frame))
228 return -EFAULT;
229
230 /* Set up registers for signal handler */ 380 /* Set up registers for signal handler */
381 regs->gprs[14] = restorer;
231 regs->gprs[15] = (unsigned long) frame; 382 regs->gprs[15] = (unsigned long) frame;
232 /* Force default amode and default user address space control. */ 383 /* Force default amode and default user address space control. */
233 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 384 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
@@ -247,54 +398,69 @@ static int setup_frame(int sig, struct k_sigaction *ka,
247 regs->gprs[5] = regs->int_parm_long; 398 regs->gprs[5] = regs->int_parm_long;
248 regs->gprs[6] = task_thread_info(current)->last_break; 399 regs->gprs[6] = task_thread_info(current)->last_break;
249 } 400 }
250
251 /* Place signal number on stack to allow backtrace from handler. */
252 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
253 return -EFAULT;
254 return 0; 401 return 0;
255} 402}
256 403
257static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, 404static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
258 struct pt_regs *regs) 405 struct pt_regs *regs)
259{ 406{
260 int err = 0; 407 struct rt_sigframe __user *frame;
261 rt_sigframe __user *frame; 408 unsigned long uc_flags, restorer;
262 409 size_t frame_size;
263 frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe));
264 410
411 frame_size = sizeof(struct rt_sigframe) - sizeof(_sigregs_ext);
412 /*
413 * gprs_high are only present for a 31-bit task running on
414 * a 64-bit kernel (see compat_signal.c) but the space for
415 * gprs_high need to be allocated if vector registers are
416 * included in the signal frame on a 31-bit system.
417 */
418 uc_flags = 0;
419#ifdef CONFIG_64BIT
420 if (MACHINE_HAS_VX) {
421 frame_size += sizeof(_sigregs_ext);
422 if (current->thread.vxrs)
423 uc_flags |= UC_VXRS;
424 }
425#endif
426 frame = get_sigframe(&ksig->ka, regs, frame_size);
265 if (frame == (void __user *) -1UL) 427 if (frame == (void __user *) -1UL)
266 return -EFAULT; 428 return -EFAULT;
267 429
268 if (copy_siginfo_to_user(&frame->info, &ksig->info)) 430 /* Set up backchain. */
269 return -EFAULT; 431 if (__put_user(regs->gprs[15], (addr_t __user *) frame))
270
271 /* Create the ucontext. */
272 err |= __put_user(0, &frame->uc.uc_flags);
273 err |= __put_user(NULL, &frame->uc.uc_link);
274 err |= __save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
275 err |= save_sigregs(regs, &frame->uc.uc_mcontext);
276 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
277 if (err)
278 return -EFAULT; 432 return -EFAULT;
279 433
280 /* Set up to return from userspace. If provided, use a stub 434 /* Set up to return from userspace. If provided, use a stub
281 already in userspace. */ 435 already in userspace. */
282 if (ksig->ka.sa.sa_flags & SA_RESTORER) { 436 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
283 regs->gprs[14] = (unsigned long) 437 restorer = (unsigned long)
284 ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE; 438 ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE;
285 } else { 439 } else {
286 regs->gprs[14] = (unsigned long) 440 __u16 __user *svc = &frame->svc_insn;
287 frame->retcode | PSW_ADDR_AMODE; 441 if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
288 if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
289 (u16 __user *)(frame->retcode)))
290 return -EFAULT; 442 return -EFAULT;
443 restorer = (unsigned long) svc | PSW_ADDR_AMODE;
291 } 444 }
292 445
293 /* Set up backchain. */ 446 /* Create siginfo on the signal stack */
294 if (__put_user(regs->gprs[15], (addr_t __user *) frame)) 447 if (copy_siginfo_to_user(&frame->info, &ksig->info))
448 return -EFAULT;
449
450 /* Store registers needed to create the signal frame */
451 store_sigregs();
452
453 /* Create ucontext on the signal stack. */
454 if (__put_user(uc_flags, &frame->uc.uc_flags) ||
455 __put_user(NULL, &frame->uc.uc_link) ||
456 __save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
457 save_sigregs(regs, &frame->uc.uc_mcontext) ||
458 __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
459 save_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
295 return -EFAULT; 460 return -EFAULT;
296 461
297 /* Set up registers for signal handler */ 462 /* Set up registers for signal handler */
463 regs->gprs[14] = restorer;
298 regs->gprs[15] = (unsigned long) frame; 464 regs->gprs[15] = (unsigned long) frame;
299 /* Force default amode and default user address space control. */ 465 /* Force default amode and default user address space control. */
300 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 466 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 243c7e512600..6fd9e60101f1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -45,6 +45,7 @@
45#include <asm/debug.h> 45#include <asm/debug.h>
46#include <asm/os_info.h> 46#include <asm/os_info.h>
47#include <asm/sigp.h> 47#include <asm/sigp.h>
48#include <asm/idle.h>
48#include "entry.h" 49#include "entry.h"
49 50
50enum { 51enum {
@@ -82,7 +83,8 @@ DEFINE_MUTEX(smp_cpu_state_mutex);
82/* 83/*
83 * Signal processor helper functions. 84 * Signal processor helper functions.
84 */ 85 */
85static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status) 86static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
87 u32 *status)
86{ 88{
87 int cc; 89 int cc;
88 90
@@ -178,6 +180,9 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
178 goto out; 180 goto out;
179 } 181 }
180#else 182#else
183 if (MACHINE_HAS_VX)
184 lc->vector_save_area_addr =
185 (unsigned long) &lc->vector_save_area;
181 if (vdso_alloc_per_cpu(lc)) 186 if (vdso_alloc_per_cpu(lc))
182 goto out; 187 goto out;
183#endif 188#endif
@@ -333,12 +338,6 @@ int smp_vcpu_scheduled(int cpu)
333 return pcpu_running(pcpu_devices + cpu); 338 return pcpu_running(pcpu_devices + cpu);
334} 339}
335 340
336void smp_yield(void)
337{
338 if (MACHINE_HAS_DIAG44)
339 asm volatile("diag 0,0,0x44");
340}
341
342void smp_yield_cpu(int cpu) 341void smp_yield_cpu(int cpu)
343{ 342{
344 if (MACHINE_HAS_DIAG9C) 343 if (MACHINE_HAS_DIAG9C)
@@ -517,35 +516,53 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
517static void __init smp_get_save_area(int cpu, u16 address) 516static void __init smp_get_save_area(int cpu, u16 address)
518{ 517{
519 void *lc = pcpu_devices[0].lowcore; 518 void *lc = pcpu_devices[0].lowcore;
520 struct save_area *save_area; 519 struct save_area_ext *sa_ext;
520 unsigned long vx_sa;
521 521
522 if (is_kdump_kernel()) 522 if (is_kdump_kernel())
523 return; 523 return;
524 if (!OLDMEM_BASE && (address == boot_cpu_address || 524 if (!OLDMEM_BASE && (address == boot_cpu_address ||
525 ipl_info.type != IPL_TYPE_FCP_DUMP)) 525 ipl_info.type != IPL_TYPE_FCP_DUMP))
526 return; 526 return;
527 save_area = dump_save_area_create(cpu); 527 sa_ext = dump_save_area_create(cpu);
528 if (!save_area) 528 if (!sa_ext)
529 panic("could not allocate memory for save area\n"); 529 panic("could not allocate memory for save area\n");
530 if (address == boot_cpu_address) { 530 if (address == boot_cpu_address) {
531 /* Copy the registers of the boot cpu. */ 531 /* Copy the registers of the boot cpu. */
532 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), 532 copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
533 SAVE_AREA_BASE - PAGE_SIZE, 0); 533 SAVE_AREA_BASE - PAGE_SIZE, 0);
534 if (MACHINE_HAS_VX)
535 save_vx_regs_safe(sa_ext->vx_regs);
534 return; 536 return;
535 } 537 }
536 /* Get the registers of a non-boot cpu. */ 538 /* Get the registers of a non-boot cpu. */
537 __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); 539 __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
538 memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); 540 memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
541 if (!MACHINE_HAS_VX)
542 return;
543 /* Get the VX registers */
544 vx_sa = __get_free_page(GFP_KERNEL);
545 if (!vx_sa)
546 panic("could not allocate memory for VX save area\n");
547 __pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
548 memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
549 free_page(vx_sa);
539} 550}
540 551
541int smp_store_status(int cpu) 552int smp_store_status(int cpu)
542{ 553{
554 unsigned long vx_sa;
543 struct pcpu *pcpu; 555 struct pcpu *pcpu;
544 556
545 pcpu = pcpu_devices + cpu; 557 pcpu = pcpu_devices + cpu;
546 if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS, 558 if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
547 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED) 559 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
548 return -EIO; 560 return -EIO;
561 if (!MACHINE_HAS_VX)
562 return 0;
563 vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
564 __pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
565 vx_sa, NULL);
549 return 0; 566 return 0;
550} 567}
551 568
@@ -667,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
667 cpu_init(); 684 cpu_init();
668 preempt_disable(); 685 preempt_disable();
669 init_cpu_timer(); 686 init_cpu_timer();
670 init_cpu_vtimer(); 687 vtime_init();
671 pfault_init(); 688 pfault_init();
672 notify_cpu_starting(smp_processor_id()); 689 notify_cpu_starting(smp_processor_id());
673 set_cpu_online(smp_processor_id(), true); 690 set_cpu_online(smp_processor_id(), true);
@@ -726,6 +743,7 @@ int __cpu_disable(void)
726 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */ 743 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
727 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */ 744 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
728 __ctl_load(cregs, 0, 15); 745 __ctl_load(cregs, 0, 15);
746 clear_cpu_flag(CIF_NOHZ_DELAY);
729 return 0; 747 return 0;
730} 748}
731 749
@@ -898,42 +916,6 @@ static struct attribute_group cpu_common_attr_group = {
898 .attrs = cpu_common_attrs, 916 .attrs = cpu_common_attrs,
899}; 917};
900 918
901static ssize_t show_idle_count(struct device *dev,
902 struct device_attribute *attr, char *buf)
903{
904 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
905 unsigned long long idle_count;
906 unsigned int sequence;
907
908 do {
909 sequence = ACCESS_ONCE(idle->sequence);
910 idle_count = ACCESS_ONCE(idle->idle_count);
911 if (ACCESS_ONCE(idle->clock_idle_enter))
912 idle_count++;
913 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
914 return sprintf(buf, "%llu\n", idle_count);
915}
916static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
917
918static ssize_t show_idle_time(struct device *dev,
919 struct device_attribute *attr, char *buf)
920{
921 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
922 unsigned long long now, idle_time, idle_enter, idle_exit;
923 unsigned int sequence;
924
925 do {
926 now = get_tod_clock();
927 sequence = ACCESS_ONCE(idle->sequence);
928 idle_time = ACCESS_ONCE(idle->idle_time);
929 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
930 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
931 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
932 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
933 return sprintf(buf, "%llu\n", idle_time >> 12);
934}
935static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
936
937static struct attribute *cpu_online_attrs[] = { 919static struct attribute *cpu_online_attrs[] = {
938 &dev_attr_idle_count.attr, 920 &dev_attr_idle_count.attr,
939 &dev_attr_idle_time_us.attr, 921 &dev_attr_idle_time_us.attr,
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 4cef607f3711..69e980de0f62 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -232,6 +232,19 @@ void update_vsyscall(struct timekeeper *tk)
232 vdso_data->wtom_clock_nsec -= nsecps; 232 vdso_data->wtom_clock_nsec -= nsecps;
233 vdso_data->wtom_clock_sec++; 233 vdso_data->wtom_clock_sec++;
234 } 234 }
235
236 vdso_data->xtime_coarse_sec = tk->xtime_sec;
237 vdso_data->xtime_coarse_nsec =
238 (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
239 vdso_data->wtom_coarse_sec =
240 vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
241 vdso_data->wtom_coarse_nsec =
242 vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
243 while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
244 vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
245 vdso_data->wtom_coarse_sec++;
246 }
247
235 vdso_data->tk_mult = tk->tkr.mult; 248 vdso_data->tk_mult = tk->tkr.mult;
236 vdso_data->tk_shift = tk->tkr.shift; 249 vdso_data->tk_shift = tk->tkr.shift;
237 smp_wmb(); 250 smp_wmb();
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 355a16c55702..b93bed76ea94 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -464,15 +464,17 @@ static struct sched_domain_topology_level s390_topology[] = {
464 464
465static int __init topology_init(void) 465static int __init topology_init(void)
466{ 466{
467 if (!MACHINE_HAS_TOPOLOGY) { 467 if (MACHINE_HAS_TOPOLOGY)
468 set_topology_timer();
469 else
468 topology_update_polarization_simple(); 470 topology_update_polarization_simple();
469 goto out;
470 }
471 set_topology_timer();
472out:
473
474 set_sched_topology(s390_topology);
475
476 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); 471 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
477} 472}
478device_initcall(topology_init); 473device_initcall(topology_init);
474
475static int __init early_topology_init(void)
476{
477 set_sched_topology(s390_topology);
478 return 0;
479}
480early_initcall(early_topology_init);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c5762324d9ee..9ff5ecba26ab 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -18,6 +18,8 @@
18#include <linux/ptrace.h> 18#include <linux/ptrace.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/slab.h>
22#include <asm/switch_to.h>
21#include "entry.h" 23#include "entry.h"
22 24
23int show_unhandled_signals = 1; 25int show_unhandled_signals = 1;
@@ -58,15 +60,10 @@ int is_valid_bugaddr(unsigned long addr)
58 return 1; 60 return 1;
59} 61}
60 62
61static void __kprobes do_trap(struct pt_regs *regs, 63void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
62 int si_signo, int si_code, char *str)
63{ 64{
64 siginfo_t info; 65 siginfo_t info;
65 66
66 if (notify_die(DIE_TRAP, str, regs, 0,
67 regs->int_code, si_signo) == NOTIFY_STOP)
68 return;
69
70 if (user_mode(regs)) { 67 if (user_mode(regs)) {
71 info.si_signo = si_signo; 68 info.si_signo = si_signo;
72 info.si_errno = 0; 69 info.si_errno = 0;
@@ -90,6 +87,15 @@ static void __kprobes do_trap(struct pt_regs *regs,
90 } 87 }
91} 88}
92 89
90static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code,
91 char *str)
92{
93 if (notify_die(DIE_TRAP, str, regs, 0,
94 regs->int_code, si_signo) == NOTIFY_STOP)
95 return;
96 do_report_trap(regs, si_signo, si_code, str);
97}
98
93void __kprobes do_per_trap(struct pt_regs *regs) 99void __kprobes do_per_trap(struct pt_regs *regs)
94{ 100{
95 siginfo_t info; 101 siginfo_t info;
@@ -178,6 +184,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
178 siginfo_t info; 184 siginfo_t info;
179 __u8 opcode[6]; 185 __u8 opcode[6];
180 __u16 __user *location; 186 __u16 __user *location;
187 int is_uprobe_insn = 0;
181 int signal = 0; 188 int signal = 0;
182 189
183 location = get_trap_ip(regs); 190 location = get_trap_ip(regs);
@@ -194,6 +201,10 @@ void __kprobes illegal_op(struct pt_regs *regs)
194 force_sig_info(SIGTRAP, &info, current); 201 force_sig_info(SIGTRAP, &info, current);
195 } else 202 } else
196 signal = SIGILL; 203 signal = SIGILL;
204#ifdef CONFIG_UPROBES
205 } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
206 is_uprobe_insn = 1;
207#endif
197#ifdef CONFIG_MATHEMU 208#ifdef CONFIG_MATHEMU
198 } else if (opcode[0] == 0xb3) { 209 } else if (opcode[0] == 0xb3) {
199 if (get_user(*((__u16 *) (opcode+2)), location+1)) 210 if (get_user(*((__u16 *) (opcode+2)), location+1))
@@ -219,11 +230,13 @@ void __kprobes illegal_op(struct pt_regs *regs)
219#endif 230#endif
220 } else 231 } else
221 signal = SIGILL; 232 signal = SIGILL;
222 } else { 233 }
223 /* 234 /*
224 * If we get an illegal op in kernel mode, send it through the 235 * We got either an illegal op in kernel mode, or user space trapped
225 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 236 * on a uprobes illegal instruction. See if kprobes or uprobes picks
226 */ 237 * it up. If not, SIGILL.
238 */
239 if (is_uprobe_insn || !user_mode(regs)) {
227 if (notify_die(DIE_BPT, "bpt", regs, 0, 240 if (notify_die(DIE_BPT, "bpt", regs, 0,
228 3, SIGTRAP) != NOTIFY_STOP) 241 3, SIGTRAP) != NOTIFY_STOP)
229 signal = SIGILL; 242 signal = SIGILL;
@@ -292,6 +305,74 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
292 "specification exception"); 305 "specification exception");
293#endif 306#endif
294 307
308#ifdef CONFIG_64BIT
309int alloc_vector_registers(struct task_struct *tsk)
310{
311 __vector128 *vxrs;
312 int i;
313
314 /* Allocate vector register save area. */
315 vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
316 GFP_KERNEL|__GFP_REPEAT);
317 if (!vxrs)
318 return -ENOMEM;
319 preempt_disable();
320 if (tsk == current)
321 save_fp_regs(tsk->thread.fp_regs.fprs);
322 /* Copy the 16 floating point registers */
323 for (i = 0; i < 16; i++)
324 *(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i];
325 tsk->thread.vxrs = vxrs;
326 if (tsk == current) {
327 __ctl_set_bit(0, 17);
328 restore_vx_regs(vxrs);
329 }
330 preempt_enable();
331 return 0;
332}
333
334void vector_exception(struct pt_regs *regs)
335{
336 int si_code, vic;
337
338 if (!MACHINE_HAS_VX) {
339 do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
340 return;
341 }
342
343 /* get vector interrupt code from fpc */
344 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
345 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
346 switch (vic) {
347 case 1: /* invalid vector operation */
348 si_code = FPE_FLTINV;
349 break;
350 case 2: /* division by zero */
351 si_code = FPE_FLTDIV;
352 break;
353 case 3: /* overflow */
354 si_code = FPE_FLTOVF;
355 break;
356 case 4: /* underflow */
357 si_code = FPE_FLTUND;
358 break;
359 case 5: /* inexact */
360 si_code = FPE_FLTRES;
361 break;
362 default: /* unknown cause */
363 si_code = 0;
364 }
365 do_trap(regs, SIGFPE, si_code, "vector exception");
366}
367
368static int __init disable_vector_extension(char *str)
369{
370 S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
371 return 1;
372}
373__setup("novx", disable_vector_extension);
374#endif
375
295void data_exception(struct pt_regs *regs) 376void data_exception(struct pt_regs *regs)
296{ 377{
297 __u16 __user *location; 378 __u16 __user *location;
@@ -357,6 +438,18 @@ void data_exception(struct pt_regs *regs)
357 } 438 }
358 } 439 }
359#endif 440#endif
441#ifdef CONFIG_64BIT
442 /* Check for vector register enablement */
443 if (MACHINE_HAS_VX && !current->thread.vxrs &&
444 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
445 alloc_vector_registers(current);
446 /* Vector data exception is suppressing, rewind psw. */
447 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
448 clear_pt_regs_flag(regs, PIF_PER_TRAP);
449 return;
450 }
451#endif
452
360 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 453 if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
361 signal = SIGFPE; 454 signal = SIGFPE;
362 else 455 else
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
new file mode 100644
index 000000000000..956f4f7a591c
--- /dev/null
+++ b/arch/s390/kernel/uprobes.c
@@ -0,0 +1,332 @@
1/*
2 * User-space Probes (UProbes) for s390
3 *
4 * Copyright IBM Corp. 2014
5 * Author(s): Jan Willeke,
6 */
7
8#include <linux/kprobes.h>
9#include <linux/uaccess.h>
10#include <linux/uprobes.h>
11#include <linux/compat.h>
12#include <linux/kdebug.h>
13#include <asm/switch_to.h>
14#include <asm/facility.h>
15#include <asm/dis.h>
16#include "entry.h"
17
18#define UPROBE_TRAP_NR UINT_MAX
19
20int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
21 unsigned long addr)
22{
23 return probe_is_prohibited_opcode(auprobe->insn);
24}
25
26int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
27{
28 if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT)
29 return -EINVAL;
30 if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT)
31 return -EINVAL;
32 clear_pt_regs_flag(regs, PIF_PER_TRAP);
33 auprobe->saved_per = psw_bits(regs->psw).r;
34 auprobe->saved_int_code = regs->int_code;
35 regs->int_code = UPROBE_TRAP_NR;
36 regs->psw.addr = current->utask->xol_vaddr;
37 set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
38 update_cr_regs(current);
39 return 0;
40}
41
42bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
43{
44 struct pt_regs *regs = task_pt_regs(tsk);
45
46 if (regs->int_code != UPROBE_TRAP_NR)
47 return true;
48 return false;
49}
50
51int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
52{
53 int fixup = probe_get_fixup_type(auprobe->insn);
54 struct uprobe_task *utask = current->utask;
55
56 clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
57 update_cr_regs(current);
58 psw_bits(regs->psw).r = auprobe->saved_per;
59 regs->int_code = auprobe->saved_int_code;
60
61 if (fixup & FIXUP_PSW_NORMAL)
62 regs->psw.addr += utask->vaddr - utask->xol_vaddr;
63 if (fixup & FIXUP_RETURN_REGISTER) {
64 int reg = (auprobe->insn[0] & 0xf0) >> 4;
65
66 regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
67 }
68 if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
69 int ilen = insn_length(auprobe->insn[0] >> 8);
70
71 if (regs->psw.addr - utask->xol_vaddr == ilen)
72 regs->psw.addr = utask->vaddr + ilen;
73 }
74 /* If per tracing was active generate trap */
75 if (regs->psw.mask & PSW_MASK_PER)
76 do_per_trap(regs);
77 return 0;
78}
79
80int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
81 void *data)
82{
83 struct die_args *args = data;
84 struct pt_regs *regs = args->regs;
85
86 if (!user_mode(regs))
87 return NOTIFY_DONE;
88 if (regs->int_code & 0x200) /* Trap during transaction */
89 return NOTIFY_DONE;
90 switch (val) {
91 case DIE_BPT:
92 if (uprobe_pre_sstep_notifier(regs))
93 return NOTIFY_STOP;
94 break;
95 case DIE_SSTEP:
96 if (uprobe_post_sstep_notifier(regs))
97 return NOTIFY_STOP;
98 default:
99 break;
100 }
101 return NOTIFY_DONE;
102}
103
104void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
105{
106 clear_thread_flag(TIF_UPROBE_SINGLESTEP);
107 regs->int_code = auprobe->saved_int_code;
108 regs->psw.addr = current->utask->vaddr;
109}
110
111unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
112 struct pt_regs *regs)
113{
114 unsigned long orig;
115
116 orig = regs->gprs[14];
117 regs->gprs[14] = trampoline;
118 return orig;
119}
120
121/* Instruction Emulation */
122
123static void adjust_psw_addr(psw_t *psw, unsigned long len)
124{
125 psw->addr = __rewind_psw(*psw, -len);
126}
127
128#define EMU_ILLEGAL_OP 1
129#define EMU_SPECIFICATION 2
130#define EMU_ADDRESSING 3
131
132#define emu_load_ril(ptr, output) \
133({ \
134 unsigned int mask = sizeof(*(ptr)) - 1; \
135 __typeof__(*(ptr)) input; \
136 int __rc = 0; \
137 \
138 if (!test_facility(34)) \
139 __rc = EMU_ILLEGAL_OP; \
140 else if ((u64 __force)ptr & mask) \
141 __rc = EMU_SPECIFICATION; \
142 else if (get_user(input, ptr)) \
143 __rc = EMU_ADDRESSING; \
144 else \
145 *(output) = input; \
146 __rc; \
147})
148
149#define emu_store_ril(ptr, input) \
150({ \
151 unsigned int mask = sizeof(*(ptr)) - 1; \
152 int __rc = 0; \
153 \
154 if (!test_facility(34)) \
155 __rc = EMU_ILLEGAL_OP; \
156 else if ((u64 __force)ptr & mask) \
157 __rc = EMU_SPECIFICATION; \
158 else if (put_user(*(input), ptr)) \
159 __rc = EMU_ADDRESSING; \
160 __rc; \
161})
162
163#define emu_cmp_ril(regs, ptr, cmp) \
164({ \
165 unsigned int mask = sizeof(*(ptr)) - 1; \
166 __typeof__(*(ptr)) input; \
167 int __rc = 0; \
168 \
169 if (!test_facility(34)) \
170 __rc = EMU_ILLEGAL_OP; \
171 else if ((u64 __force)ptr & mask) \
172 __rc = EMU_SPECIFICATION; \
173 else if (get_user(input, ptr)) \
174 __rc = EMU_ADDRESSING; \
175 else if (input > *(cmp)) \
176 psw_bits((regs)->psw).cc = 1; \
177 else if (input < *(cmp)) \
178 psw_bits((regs)->psw).cc = 2; \
179 else \
180 psw_bits((regs)->psw).cc = 0; \
181 __rc; \
182})
183
184struct insn_ril {
185 u8 opc0;
186 u8 reg : 4;
187 u8 opc1 : 4;
188 s32 disp;
189} __packed;
190
191union split_register {
192 u64 u64;
193 u32 u32[2];
194 u16 u16[4];
195 s64 s64;
196 s32 s32[2];
197 s16 s16[4];
198};
199
200/*
201 * pc relative instructions are emulated, since parameters may not be
202 * accessible from the xol area due to range limitations.
203 */
204static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
205{
206 union split_register *rx;
207 struct insn_ril *insn;
208 unsigned int ilen;
209 void *uptr;
210 int rc = 0;
211
212 insn = (struct insn_ril *) &auprobe->insn;
213 rx = (union split_register *) &regs->gprs[insn->reg];
214 uptr = (void *)(regs->psw.addr + (insn->disp * 2));
215 ilen = insn_length(insn->opc0);
216
217 switch (insn->opc0) {
218 case 0xc0:
219 switch (insn->opc1) {
220 case 0x00: /* larl */
221 rx->u64 = (unsigned long)uptr;
222 break;
223 }
224 break;
225 case 0xc4:
226 switch (insn->opc1) {
227 case 0x02: /* llhrl */
228 rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
229 break;
230 case 0x04: /* lghrl */
231 rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
232 break;
233 case 0x05: /* lhrl */
234 rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
235 break;
236 case 0x06: /* llghrl */
237 rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
238 break;
239 case 0x08: /* lgrl */
240 rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
241 break;
242 case 0x0c: /* lgfrl */
243 rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
244 break;
245 case 0x0d: /* lrl */
246 rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
247 break;
248 case 0x0e: /* llgfrl */
249 rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
250 break;
251 case 0x07: /* sthrl */
252 rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
253 break;
254 case 0x0b: /* stgrl */
255 rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
256 break;
257 case 0x0f: /* strl */
258 rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
259 break;
260 }
261 break;
262 case 0xc6:
263 switch (insn->opc1) {
264 case 0x02: /* pfdrl */
265 if (!test_facility(34))
266 rc = EMU_ILLEGAL_OP;
267 break;
268 case 0x04: /* cghrl */
269 rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
270 break;
271 case 0x05: /* chrl */
272 rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
273 break;
274 case 0x06: /* clghrl */
275 rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
276 break;
277 case 0x07: /* clhrl */
278 rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
279 break;
280 case 0x08: /* cgrl */
281 rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
282 break;
283 case 0x0a: /* clgrl */
284 rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
285 break;
286 case 0x0c: /* cgfrl */
287 rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
288 break;
289 case 0x0d: /* crl */
290 rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
291 break;
292 case 0x0e: /* clgfrl */
293 rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
294 break;
295 case 0x0f: /* clrl */
296 rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
297 break;
298 }
299 break;
300 }
301 adjust_psw_addr(&regs->psw, ilen);
302 switch (rc) {
303 case EMU_ILLEGAL_OP:
304 regs->int_code = ilen << 16 | 0x0001;
305 do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
306 break;
307 case EMU_SPECIFICATION:
308 regs->int_code = ilen << 16 | 0x0006;
309 do_report_trap(regs, SIGILL, ILL_ILLOPC , NULL);
310 break;
311 case EMU_ADDRESSING:
312 regs->int_code = ilen << 16 | 0x0005;
313 do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
314 break;
315 }
316}
317
318bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
319{
320 if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) ||
321 ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) &&
322 !is_compat_task())) {
323 regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
324 do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
325 return true;
326 }
327 if (probe_is_insn_relative_long(auprobe->insn)) {
328 handle_insn_ril(auprobe, regs);
329 return true;
330 }
331 return false;
332}
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
index 36aaa25d05da..eca3f001f081 100644
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -19,14 +19,20 @@
19 .type __kernel_clock_getres,@function 19 .type __kernel_clock_getres,@function
20__kernel_clock_getres: 20__kernel_clock_getres:
21 .cfi_startproc 21 .cfi_startproc
22 basr %r1,0
23 la %r1,4f-.(%r1)
22 chi %r2,__CLOCK_REALTIME 24 chi %r2,__CLOCK_REALTIME
23 je 0f 25 je 0f
24 chi %r2,__CLOCK_MONOTONIC 26 chi %r2,__CLOCK_MONOTONIC
27 je 0f
28 la %r1,5f-4f(%r1)
29 chi %r2,__CLOCK_REALTIME_COARSE
30 je 0f
31 chi %r2,__CLOCK_MONOTONIC_COARSE
25 jne 3f 32 jne 3f
260: ltr %r3,%r3 330: ltr %r3,%r3
27 jz 2f /* res == NULL */ 34 jz 2f /* res == NULL */
28 basr %r1,0 351: l %r0,0(%r1)
291: l %r0,4f-1b(%r1)
30 xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ 36 xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */
31 st %r0,4(%r3) /* store tp->tv_usec */ 37 st %r0,4(%r3) /* store tp->tv_usec */
322: lhi %r2,0 382: lhi %r2,0
@@ -35,5 +41,6 @@ __kernel_clock_getres:
35 svc 0 41 svc 0
36 br %r14 42 br %r14
374: .long __CLOCK_REALTIME_RES 434: .long __CLOCK_REALTIME_RES
445: .long __CLOCK_COARSE_RES
38 .cfi_endproc 45 .cfi_endproc
39 .size __kernel_clock_getres,.-__kernel_clock_getres 46 .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 7cf18f8d4cb4..48c2206a3956 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -21,8 +21,12 @@ __kernel_clock_gettime:
21 .cfi_startproc 21 .cfi_startproc
22 basr %r5,0 22 basr %r5,0
230: al %r5,21f-0b(%r5) /* get &_vdso_data */ 230: al %r5,21f-0b(%r5) /* get &_vdso_data */
24 chi %r2,__CLOCK_REALTIME_COARSE
25 je 10f
24 chi %r2,__CLOCK_REALTIME 26 chi %r2,__CLOCK_REALTIME
25 je 11f 27 je 11f
28 chi %r2,__CLOCK_MONOTONIC_COARSE
29 je 9f
26 chi %r2,__CLOCK_MONOTONIC 30 chi %r2,__CLOCK_MONOTONIC
27 jne 19f 31 jne 19f
28 32
@@ -30,8 +34,8 @@ __kernel_clock_gettime:
301: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 341: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
31 tml %r4,0x0001 /* pending update ? loop */ 35 tml %r4,0x0001 /* pending update ? loop */
32 jnz 1b 36 jnz 1b
33 stck 24(%r15) /* Store TOD clock */ 37 stcke 24(%r15) /* Store TOD clock */
34 lm %r0,%r1,24(%r15) 38 lm %r0,%r1,25(%r15)
35 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 39 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
36 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 40 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
37 brc 3,2f 41 brc 3,2f
@@ -68,12 +72,32 @@ __kernel_clock_gettime:
68 lhi %r2,0 72 lhi %r2,0
69 br %r14 73 br %r14
70 74
75 /* CLOCK_MONOTONIC_COARSE */
769: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
77 tml %r4,0x0001 /* pending update ? loop */
78 jnz 9b
79 l %r2,__VDSO_WTOM_CRS_SEC+4(%r5)
80 l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5)
81 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
82 jne 9b
83 j 8b
84
85 /* CLOCK_REALTIME_COARSE */
8610: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
87 tml %r4,0x0001 /* pending update ? loop */
88 jnz 10b
89 l %r2,__VDSO_XTIME_CRS_SEC+4(%r5)
90 l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5)
91 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
92 jne 10b
93 j 17f
94
71 /* CLOCK_REALTIME */ 95 /* CLOCK_REALTIME */
7211: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 9611: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
73 tml %r4,0x0001 /* pending update ? loop */ 97 tml %r4,0x0001 /* pending update ? loop */
74 jnz 11b 98 jnz 11b
75 stck 24(%r15) /* Store TOD clock */ 99 stcke 24(%r15) /* Store TOD clock */
76 lm %r0,%r1,24(%r15) 100 lm %r0,%r1,25(%r15)
77 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 101 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
78 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 102 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
79 brc 3,12f 103 brc 3,12f
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index fd621a950f7c..60def5f562db 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -29,8 +29,8 @@ __kernel_gettimeofday:
29 l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 29 l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
30 tml %r4,0x0001 /* pending update ? loop */ 30 tml %r4,0x0001 /* pending update ? loop */
31 jnz 1b 31 jnz 1b
32 stck 24(%r15) /* Store TOD clock */ 32 stcke 24(%r15) /* Store TOD clock */
33 lm %r0,%r1,24(%r15) 33 lm %r0,%r1,25(%r15)
34 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 34 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 35 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
36 brc 3,3f 36 brc 3,3f
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 34deba7c7ed1..c8513deb8c66 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -19,6 +19,12 @@
19 .type __kernel_clock_getres,@function 19 .type __kernel_clock_getres,@function
20__kernel_clock_getres: 20__kernel_clock_getres:
21 .cfi_startproc 21 .cfi_startproc
22 larl %r1,4f
23 cghi %r2,__CLOCK_REALTIME_COARSE
24 je 0f
25 cghi %r2,__CLOCK_MONOTONIC_COARSE
26 je 0f
27 larl %r1,3f
22 cghi %r2,__CLOCK_REALTIME 28 cghi %r2,__CLOCK_REALTIME
23 je 0f 29 je 0f
24 cghi %r2,__CLOCK_MONOTONIC 30 cghi %r2,__CLOCK_MONOTONIC
@@ -32,7 +38,6 @@ __kernel_clock_getres:
32 jz 2f 38 jz 2f
330: ltgr %r3,%r3 390: ltgr %r3,%r3
34 jz 1f /* res == NULL */ 40 jz 1f /* res == NULL */
35 larl %r1,3f
36 lg %r0,0(%r1) 41 lg %r0,0(%r1)
37 xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ 42 xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
38 stg %r0,8(%r3) /* store tp->tv_usec */ 43 stg %r0,8(%r3) /* store tp->tv_usec */
@@ -42,5 +47,6 @@ __kernel_clock_getres:
42 svc 0 47 svc 0
43 br %r14 48 br %r14
443: .quad __CLOCK_REALTIME_RES 493: .quad __CLOCK_REALTIME_RES
504: .quad __CLOCK_COARSE_RES
45 .cfi_endproc 51 .cfi_endproc
46 .size __kernel_clock_getres,.-__kernel_clock_getres 52 .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 3f34e09db5f4..9d9761f8e110 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -20,12 +20,16 @@
20__kernel_clock_gettime: 20__kernel_clock_gettime:
21 .cfi_startproc 21 .cfi_startproc
22 larl %r5,_vdso_data 22 larl %r5,_vdso_data
23 cghi %r2,__CLOCK_REALTIME_COARSE
24 je 4f
23 cghi %r2,__CLOCK_REALTIME 25 cghi %r2,__CLOCK_REALTIME
24 je 5f 26 je 5f
25 cghi %r2,__CLOCK_THREAD_CPUTIME_ID 27 cghi %r2,__CLOCK_THREAD_CPUTIME_ID
26 je 9f 28 je 9f
27 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ 29 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
28 je 9f 30 je 9f
31 cghi %r2,__CLOCK_MONOTONIC_COARSE
32 je 3f
29 cghi %r2,__CLOCK_MONOTONIC 33 cghi %r2,__CLOCK_MONOTONIC
30 jne 12f 34 jne 12f
31 35
@@ -33,10 +37,10 @@ __kernel_clock_gettime:
330: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 370: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
34 tmll %r4,0x0001 /* pending update ? loop */ 38 tmll %r4,0x0001 /* pending update ? loop */
35 jnz 0b 39 jnz 0b
36 stck 48(%r15) /* Store TOD clock */ 40 stcke 48(%r15) /* Store TOD clock */
37 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 41 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
38 lg %r0,__VDSO_WTOM_SEC(%r5) 42 lg %r0,__VDSO_WTOM_SEC(%r5)
39 lg %r1,48(%r15) 43 lg %r1,49(%r15)
40 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 44 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
41 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 45 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
42 alg %r1,__VDSO_WTOM_NSEC(%r5) 46 alg %r1,__VDSO_WTOM_NSEC(%r5)
@@ -54,13 +58,33 @@ __kernel_clock_gettime:
54 lghi %r2,0 58 lghi %r2,0
55 br %r14 59 br %r14
56 60
61 /* CLOCK_MONOTONIC_COARSE */
623: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
63 tmll %r4,0x0001 /* pending update ? loop */
64 jnz 3b
65 lg %r0,__VDSO_WTOM_CRS_SEC(%r5)
66 lg %r1,__VDSO_WTOM_CRS_NSEC(%r5)
67 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
68 jne 3b
69 j 2b
70
71 /* CLOCK_REALTIME_COARSE */
724: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
73 tmll %r4,0x0001 /* pending update ? loop */
74 jnz 4b
75 lg %r0,__VDSO_XTIME_CRS_SEC(%r5)
76 lg %r1,__VDSO_XTIME_CRS_NSEC(%r5)
77 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
78 jne 4b
79 j 7f
80
57 /* CLOCK_REALTIME */ 81 /* CLOCK_REALTIME */
585: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 825: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
59 tmll %r4,0x0001 /* pending update ? loop */ 83 tmll %r4,0x0001 /* pending update ? loop */
60 jnz 5b 84 jnz 5b
61 stck 48(%r15) /* Store TOD clock */ 85 stcke 48(%r15) /* Store TOD clock */
62 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 86 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
63 lg %r1,48(%r15) 87 lg %r1,49(%r15)
64 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 88 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
65 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 89 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
66 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 90 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index d0860d1d0ccc..7a344995a97f 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -28,8 +28,8 @@ __kernel_gettimeofday:
28 lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 28 lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
29 tmll %r4,0x0001 /* pending update ? loop */ 29 tmll %r4,0x0001 /* pending update ? loop */
30 jnz 0b 30 jnz 0b
31 stck 48(%r15) /* Store TOD clock */ 31 stcke 48(%r15) /* Store TOD clock */
32 lg %r1,48(%r15) 32 lg %r1,49(%r15)
33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
34 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 34 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
35 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 35 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 8c34363d6f1e..416f2a323ba5 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -6,27 +6,18 @@
6 */ 6 */
7 7
8#include <linux/kernel_stat.h> 8#include <linux/kernel_stat.h>
9#include <linux/notifier.h>
10#include <linux/kprobes.h>
11#include <linux/export.h> 9#include <linux/export.h>
12#include <linux/kernel.h> 10#include <linux/kernel.h>
13#include <linux/timex.h> 11#include <linux/timex.h>
14#include <linux/types.h> 12#include <linux/types.h>
15#include <linux/time.h> 13#include <linux/time.h>
16#include <linux/cpu.h>
17#include <linux/smp.h>
18 14
19#include <asm/irq_regs.h>
20#include <asm/cputime.h> 15#include <asm/cputime.h>
21#include <asm/vtimer.h> 16#include <asm/vtimer.h>
22#include <asm/vtime.h> 17#include <asm/vtime.h>
23#include <asm/irq.h>
24#include "entry.h"
25 18
26static void virt_timer_expire(void); 19static void virt_timer_expire(void);
27 20
28DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
29
30static LIST_HEAD(virt_timer_list); 21static LIST_HEAD(virt_timer_list);
31static DEFINE_SPINLOCK(virt_timer_lock); 22static DEFINE_SPINLOCK(virt_timer_lock);
32static atomic64_t virt_timer_current; 23static atomic64_t virt_timer_current;
@@ -152,49 +143,6 @@ void vtime_account_system(struct task_struct *tsk)
152__attribute__((alias("vtime_account_irq_enter"))); 143__attribute__((alias("vtime_account_irq_enter")));
153EXPORT_SYMBOL_GPL(vtime_account_system); 144EXPORT_SYMBOL_GPL(vtime_account_system);
154 145
155void __kprobes vtime_stop_cpu(void)
156{
157 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
158 unsigned long long idle_time;
159 unsigned long psw_mask;
160
161 trace_hardirqs_on();
162
163 /* Wait for external, I/O or machine check interrupt. */
164 psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
165 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
166 idle->nohz_delay = 0;
167
168 /* Call the assembler magic in entry.S */
169 psw_idle(idle, psw_mask);
170
171 /* Account time spent with enabled wait psw loaded as idle time. */
172 idle->sequence++;
173 smp_wmb();
174 idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
175 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
176 idle->idle_time += idle_time;
177 idle->idle_count++;
178 account_idle_time(idle_time);
179 smp_wmb();
180 idle->sequence++;
181}
182
183cputime64_t s390_get_idle_time(int cpu)
184{
185 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
186 unsigned long long now, idle_enter, idle_exit;
187 unsigned int sequence;
188
189 do {
190 now = get_tod_clock();
191 sequence = ACCESS_ONCE(idle->sequence);
192 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
193 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
194 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
195 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
196}
197
198/* 146/*
199 * Sorted add to a list. List is linear searched until first bigger 147 * Sorted add to a list. List is linear searched until first bigger
200 * element is found. 148 * element is found.
@@ -372,31 +320,8 @@ EXPORT_SYMBOL(del_virt_timer);
372/* 320/*
373 * Start the virtual CPU timer on the current CPU. 321 * Start the virtual CPU timer on the current CPU.
374 */ 322 */
375void init_cpu_vtimer(void) 323void vtime_init(void)
376{ 324{
377 /* set initial cpu timer */ 325 /* set initial cpu timer */
378 set_vtimer(VTIMER_MAX_SLICE); 326 set_vtimer(VTIMER_MAX_SLICE);
379} 327}
380
381static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
382 void *hcpu)
383{
384 struct s390_idle_data *idle;
385 long cpu = (long) hcpu;
386
387 idle = &per_cpu(s390_idle, cpu);
388 switch (action & ~CPU_TASKS_FROZEN) {
389 case CPU_DYING:
390 idle->nohz_delay = 0;
391 default:
392 break;
393 }
394 return NOTIFY_OK;
395}
396
397void __init vtime_init(void)
398{
399 /* Enable cpu timer interrupts on the boot cpu. */
400 init_cpu_vtimer();
401 cpu_notifier(s390_nohz_notify, 0);
402}
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index c6d752e8bf28..a01df233856f 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -6,3 +6,5 @@ lib-y += delay.o string.o uaccess.o find.o
6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
7obj-$(CONFIG_64BIT) += mem64.o 7obj-$(CONFIG_64BIT) += mem64.o
8lib-$(CONFIG_SMP) += spinlock.o 8lib-$(CONFIG_SMP) += spinlock.o
9lib-$(CONFIG_KPROBES) += probes.o
10lib-$(CONFIG_UPROBES) += probes.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index a9f3d0042d58..16dc42d83f93 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -43,7 +43,7 @@ static void __udelay_disabled(unsigned long long usecs)
43 lockdep_off(); 43 lockdep_off();
44 do { 44 do {
45 set_clock_comparator(end); 45 set_clock_comparator(end);
46 vtime_stop_cpu(); 46 enabled_wait();
47 } while (get_tod_clock_fast() < end); 47 } while (get_tod_clock_fast() < end);
48 lockdep_on(); 48 lockdep_on();
49 __ctl_load(cr0, 0, 0); 49 __ctl_load(cr0, 0, 0);
@@ -62,7 +62,7 @@ static void __udelay_enabled(unsigned long long usecs)
62 clock_saved = local_tick_disable(); 62 clock_saved = local_tick_disable();
63 set_clock_comparator(end); 63 set_clock_comparator(end);
64 } 64 }
65 vtime_stop_cpu(); 65 enabled_wait();
66 if (clock_saved) 66 if (clock_saved)
67 local_tick_enable(clock_saved); 67 local_tick_enable(clock_saved);
68 } while (get_tod_clock_fast() < end); 68 } while (get_tod_clock_fast() < end);
diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c
new file mode 100644
index 000000000000..c5d64a099719
--- /dev/null
+++ b/arch/s390/lib/probes.c
@@ -0,0 +1,159 @@
1/*
2 * Common helper functions for kprobes and uprobes
3 *
4 * Copyright IBM Corp. 2014
5 */
6
7#include <linux/kprobes.h>
8#include <asm/dis.h>
9
10int probe_is_prohibited_opcode(u16 *insn)
11{
12 if (!is_known_insn((unsigned char *)insn))
13 return -EINVAL;
14 switch (insn[0] >> 8) {
15 case 0x0c: /* bassm */
16 case 0x0b: /* bsm */
17 case 0x83: /* diag */
18 case 0x44: /* ex */
19 case 0xac: /* stnsm */
20 case 0xad: /* stosm */
21 return -EINVAL;
22 case 0xc6:
23 switch (insn[0] & 0x0f) {
24 case 0x00: /* exrl */
25 return -EINVAL;
26 }
27 }
28 switch (insn[0]) {
29 case 0x0101: /* pr */
30 case 0xb25a: /* bsa */
31 case 0xb240: /* bakr */
32 case 0xb258: /* bsg */
33 case 0xb218: /* pc */
34 case 0xb228: /* pt */
35 case 0xb98d: /* epsw */
36 case 0xe560: /* tbegin */
37 case 0xe561: /* tbeginc */
38 case 0xb2f8: /* tend */
39 return -EINVAL;
40 }
41 return 0;
42}
43
44int probe_get_fixup_type(u16 *insn)
45{
46 /* default fixup method */
47 int fixup = FIXUP_PSW_NORMAL;
48
49 switch (insn[0] >> 8) {
50 case 0x05: /* balr */
51 case 0x0d: /* basr */
52 fixup = FIXUP_RETURN_REGISTER;
53 /* if r2 = 0, no branch will be taken */
54 if ((insn[0] & 0x0f) == 0)
55 fixup |= FIXUP_BRANCH_NOT_TAKEN;
56 break;
57 case 0x06: /* bctr */
58 case 0x07: /* bcr */
59 fixup = FIXUP_BRANCH_NOT_TAKEN;
60 break;
61 case 0x45: /* bal */
62 case 0x4d: /* bas */
63 fixup = FIXUP_RETURN_REGISTER;
64 break;
65 case 0x47: /* bc */
66 case 0x46: /* bct */
67 case 0x86: /* bxh */
68 case 0x87: /* bxle */
69 fixup = FIXUP_BRANCH_NOT_TAKEN;
70 break;
71 case 0x82: /* lpsw */
72 fixup = FIXUP_NOT_REQUIRED;
73 break;
74 case 0xb2: /* lpswe */
75 if ((insn[0] & 0xff) == 0xb2)
76 fixup = FIXUP_NOT_REQUIRED;
77 break;
78 case 0xa7: /* bras */
79 if ((insn[0] & 0x0f) == 0x05)
80 fixup |= FIXUP_RETURN_REGISTER;
81 break;
82 case 0xc0:
83 if ((insn[0] & 0x0f) == 0x05) /* brasl */
84 fixup |= FIXUP_RETURN_REGISTER;
85 break;
86 case 0xeb:
87 switch (insn[2] & 0xff) {
88 case 0x44: /* bxhg */
89 case 0x45: /* bxleg */
90 fixup = FIXUP_BRANCH_NOT_TAKEN;
91 break;
92 }
93 break;
94 case 0xe3: /* bctg */
95 if ((insn[2] & 0xff) == 0x46)
96 fixup = FIXUP_BRANCH_NOT_TAKEN;
97 break;
98 case 0xec:
99 switch (insn[2] & 0xff) {
100 case 0xe5: /* clgrb */
101 case 0xe6: /* cgrb */
102 case 0xf6: /* crb */
103 case 0xf7: /* clrb */
104 case 0xfc: /* cgib */
105 case 0xfd: /* cglib */
106 case 0xfe: /* cib */
107 case 0xff: /* clib */
108 fixup = FIXUP_BRANCH_NOT_TAKEN;
109 break;
110 }
111 break;
112 }
113 return fixup;
114}
115
116int probe_is_insn_relative_long(u16 *insn)
117{
118 /* Check if we have a RIL-b or RIL-c format instruction which
119 * we need to modify in order to avoid instruction emulation. */
120 switch (insn[0] >> 8) {
121 case 0xc0:
122 if ((insn[0] & 0x0f) == 0x00) /* larl */
123 return true;
124 break;
125 case 0xc4:
126 switch (insn[0] & 0x0f) {
127 case 0x02: /* llhrl */
128 case 0x04: /* lghrl */
129 case 0x05: /* lhrl */
130 case 0x06: /* llghrl */
131 case 0x07: /* sthrl */
132 case 0x08: /* lgrl */
133 case 0x0b: /* stgrl */
134 case 0x0c: /* lgfrl */
135 case 0x0d: /* lrl */
136 case 0x0e: /* llgfrl */
137 case 0x0f: /* strl */
138 return true;
139 }
140 break;
141 case 0xc6:
142 switch (insn[0] & 0x0f) {
143 case 0x02: /* pfdrl */
144 case 0x04: /* cghrl */
145 case 0x05: /* chrl */
146 case 0x06: /* clghrl */
147 case 0x07: /* clhrl */
148 case 0x08: /* cgrl */
149 case 0x0a: /* clgrl */
150 case 0x0c: /* cgfrl */
151 case 0x0d: /* crl */
152 case 0x0e: /* clgfrl */
153 case 0x0f: /* clrl */
154 return true;
155 }
156 break;
157 }
158 return false;
159}
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5b0e445bc3f3..034a35a3e9c1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
98} 98}
99EXPORT_SYMBOL(arch_spin_lock_wait_flags); 99EXPORT_SYMBOL(arch_spin_lock_wait_flags);
100 100
101void arch_spin_relax(arch_spinlock_t *lp)
102{
103 unsigned int cpu = lp->lock;
104 if (cpu != 0) {
105 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
106 !smp_vcpu_scheduled(~cpu))
107 smp_yield_cpu(~cpu);
108 }
109}
110EXPORT_SYMBOL(arch_spin_relax);
111
112int arch_spin_trylock_retry(arch_spinlock_t *lp) 101int arch_spin_trylock_retry(arch_spinlock_t *lp)
113{ 102{
114 int count; 103 int count;
@@ -122,15 +111,21 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
122 111
123void _raw_read_lock_wait(arch_rwlock_t *rw) 112void _raw_read_lock_wait(arch_rwlock_t *rw)
124{ 113{
125 unsigned int old; 114 unsigned int owner, old;
126 int count = spin_retry; 115 int count = spin_retry;
127 116
117#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
118 __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
119#endif
120 owner = 0;
128 while (1) { 121 while (1) {
129 if (count-- <= 0) { 122 if (count-- <= 0) {
130 smp_yield(); 123 if (owner && !smp_vcpu_scheduled(~owner))
124 smp_yield_cpu(~owner);
131 count = spin_retry; 125 count = spin_retry;
132 } 126 }
133 old = ACCESS_ONCE(rw->lock); 127 old = ACCESS_ONCE(rw->lock);
128 owner = ACCESS_ONCE(rw->owner);
134 if ((int) old < 0) 129 if ((int) old < 0)
135 continue; 130 continue;
136 if (_raw_compare_and_swap(&rw->lock, old, old + 1)) 131 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -139,28 +134,6 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
139} 134}
140EXPORT_SYMBOL(_raw_read_lock_wait); 135EXPORT_SYMBOL(_raw_read_lock_wait);
141 136
142void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
143{
144 unsigned int old;
145 int count = spin_retry;
146
147 local_irq_restore(flags);
148 while (1) {
149 if (count-- <= 0) {
150 smp_yield();
151 count = spin_retry;
152 }
153 old = ACCESS_ONCE(rw->lock);
154 if ((int) old < 0)
155 continue;
156 local_irq_disable();
157 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
158 return;
159 local_irq_restore(flags);
160 }
161}
162EXPORT_SYMBOL(_raw_read_lock_wait_flags);
163
164int _raw_read_trylock_retry(arch_rwlock_t *rw) 137int _raw_read_trylock_retry(arch_rwlock_t *rw)
165{ 138{
166 unsigned int old; 139 unsigned int old;
@@ -177,46 +150,62 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
177} 150}
178EXPORT_SYMBOL(_raw_read_trylock_retry); 151EXPORT_SYMBOL(_raw_read_trylock_retry);
179 152
180void _raw_write_lock_wait(arch_rwlock_t *rw) 153#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
154
155void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
181{ 156{
182 unsigned int old; 157 unsigned int owner, old;
183 int count = spin_retry; 158 int count = spin_retry;
184 159
160 owner = 0;
185 while (1) { 161 while (1) {
186 if (count-- <= 0) { 162 if (count-- <= 0) {
187 smp_yield(); 163 if (owner && !smp_vcpu_scheduled(~owner))
164 smp_yield_cpu(~owner);
188 count = spin_retry; 165 count = spin_retry;
189 } 166 }
190 old = ACCESS_ONCE(rw->lock); 167 old = ACCESS_ONCE(rw->lock);
191 if (old) 168 owner = ACCESS_ONCE(rw->owner);
192 continue; 169 smp_rmb();
193 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) 170 if ((int) old >= 0) {
194 return; 171 prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
172 old = prev;
173 }
174 if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
175 break;
195 } 176 }
196} 177}
197EXPORT_SYMBOL(_raw_write_lock_wait); 178EXPORT_SYMBOL(_raw_write_lock_wait);
198 179
199void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) 180#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
181
182void _raw_write_lock_wait(arch_rwlock_t *rw)
200{ 183{
201 unsigned int old; 184 unsigned int owner, old, prev;
202 int count = spin_retry; 185 int count = spin_retry;
203 186
204 local_irq_restore(flags); 187 prev = 0x80000000;
188 owner = 0;
205 while (1) { 189 while (1) {
206 if (count-- <= 0) { 190 if (count-- <= 0) {
207 smp_yield(); 191 if (owner && !smp_vcpu_scheduled(~owner))
192 smp_yield_cpu(~owner);
208 count = spin_retry; 193 count = spin_retry;
209 } 194 }
210 old = ACCESS_ONCE(rw->lock); 195 old = ACCESS_ONCE(rw->lock);
211 if (old) 196 owner = ACCESS_ONCE(rw->owner);
212 continue; 197 if ((int) old >= 0 &&
213 local_irq_disable(); 198 _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
214 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) 199 prev = old;
215 return; 200 else
216 local_irq_restore(flags); 201 smp_rmb();
202 if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
203 break;
217 } 204 }
218} 205}
219EXPORT_SYMBOL(_raw_write_lock_wait_flags); 206EXPORT_SYMBOL(_raw_write_lock_wait);
207
208#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
220 209
221int _raw_write_trylock_retry(arch_rwlock_t *rw) 210int _raw_write_trylock_retry(arch_rwlock_t *rw)
222{ 211{
@@ -233,3 +222,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
233 return 0; 222 return 0;
234} 223}
235EXPORT_SYMBOL(_raw_write_trylock_retry); 224EXPORT_SYMBOL(_raw_write_trylock_retry);
225
226void arch_lock_relax(unsigned int cpu)
227{
228 if (!cpu)
229 return;
230 if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
231 return;
232 smp_yield_cpu(~cpu);
233}
234EXPORT_SYMBOL(arch_lock_relax);
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 46d517c3c763..d46cadeda204 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -54,7 +54,6 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
54 return; 54 return;
55 } 55 }
56 seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW "); 56 seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
57 seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
58 seq_putc(m, '\n'); 57 seq_putc(m, '\n');
59} 58}
60 59
@@ -129,7 +128,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
129} 128}
130 129
131#ifdef CONFIG_64BIT 130#ifdef CONFIG_64BIT
132#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO) 131#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
133#else 132#else
134#define _PMD_PROT_MASK 0 133#define _PMD_PROT_MASK 0
135#endif 134#endif
@@ -157,7 +156,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
157} 156}
158 157
159#ifdef CONFIG_64BIT 158#ifdef CONFIG_64BIT
160#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO) 159#define _PUD_PROT_MASK _REGION3_ENTRY_RO
161#else 160#else
162#define _PUD_PROT_MASK 0 161#define _PUD_PROT_MASK 0
163#endif 162#endif
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 389bc17934b7..3c80d2e38f03 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -88,7 +88,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
88 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; 88 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
89 pmd_val(pmd) |= pte_page(pte)[1].index; 89 pmd_val(pmd) |= pte_page(pte)[1].index;
90 } else 90 } else
91 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO; 91 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
92 *(pmd_t *) ptep = pmd; 92 *(pmd_t *) ptep = pmd;
93} 93}
94 94
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 8400f494623f..3fef3b299665 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -6,6 +6,7 @@
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/mm.h> 7#include <linux/mm.h>
8#include <asm/cacheflush.h> 8#include <asm/cacheflush.h>
9#include <asm/facility.h>
9#include <asm/pgtable.h> 10#include <asm/pgtable.h>
10#include <asm/page.h> 11#include <asm/page.h>
11 12
@@ -103,27 +104,50 @@ int set_memory_x(unsigned long addr, int numpages)
103} 104}
104 105
105#ifdef CONFIG_DEBUG_PAGEALLOC 106#ifdef CONFIG_DEBUG_PAGEALLOC
107
108static void ipte_range(pte_t *pte, unsigned long address, int nr)
109{
110 int i;
111
112 if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
113 __ptep_ipte_range(address, nr - 1, pte);
114 return;
115 }
116 for (i = 0; i < nr; i++) {
117 __ptep_ipte(address, pte);
118 address += PAGE_SIZE;
119 pte++;
120 }
121}
122
106void kernel_map_pages(struct page *page, int numpages, int enable) 123void kernel_map_pages(struct page *page, int numpages, int enable)
107{ 124{
108 unsigned long address; 125 unsigned long address;
126 int nr, i, j;
109 pgd_t *pgd; 127 pgd_t *pgd;
110 pud_t *pud; 128 pud_t *pud;
111 pmd_t *pmd; 129 pmd_t *pmd;
112 pte_t *pte; 130 pte_t *pte;
113 int i;
114 131
115 for (i = 0; i < numpages; i++) { 132 for (i = 0; i < numpages;) {
116 address = page_to_phys(page + i); 133 address = page_to_phys(page + i);
117 pgd = pgd_offset_k(address); 134 pgd = pgd_offset_k(address);
118 pud = pud_offset(pgd, address); 135 pud = pud_offset(pgd, address);
119 pmd = pmd_offset(pud, address); 136 pmd = pmd_offset(pud, address);
120 pte = pte_offset_kernel(pmd, address); 137 pte = pte_offset_kernel(pmd, address);
121 if (!enable) { 138 nr = (unsigned long)pte >> ilog2(sizeof(long));
122 __ptep_ipte(address, pte); 139 nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
123 pte_val(*pte) = _PAGE_INVALID; 140 nr = min(numpages - i, nr);
124 continue; 141 if (enable) {
142 for (j = 0; j < nr; j++) {
143 pte_val(*pte) = __pa(address);
144 address += PAGE_SIZE;
145 pte++;
146 }
147 } else {
148 ipte_range(pte, address, nr);
125 } 149 }
126 pte_val(*pte) = __pa(address); 150 i += nr;
127 } 151 }
128} 152}
129 153
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fdbd7888cb07..b1593c2f751a 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -236,8 +236,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
236 if (!new_page) 236 if (!new_page)
237 goto out; 237 goto out;
238 pmd_val(*pm_dir) = __pa(new_page) | 238 pmd_val(*pm_dir) = __pa(new_page) |
239 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | 239 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
240 _SEGMENT_ENTRY_CO;
241 address = (address + PMD_SIZE) & PMD_MASK; 240 address = (address + PMD_SIZE) & PMD_MASK;
242 continue; 241 continue;
243 } 242 }
@@ -253,9 +252,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
253 252
254 pt_dir = pte_offset_kernel(pm_dir, address); 253 pt_dir = pte_offset_kernel(pm_dir, address);
255 if (pte_none(*pt_dir)) { 254 if (pte_none(*pt_dir)) {
256 unsigned long new_page; 255 void *new_page;
257 256
258 new_page =__pa(vmem_alloc_pages(0)); 257 new_page = vmemmap_alloc_block(PAGE_SIZE, node);
259 if (!new_page) 258 if (!new_page)
260 goto out; 259 goto out;
261 pte_val(*pt_dir) = 260 pte_val(*pt_dir) =
@@ -263,7 +262,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
263 } 262 }
264 address += PAGE_SIZE; 263 address += PAGE_SIZE;
265 } 264 }
266 memset((void *)start, 0, end - start);
267 ret = 0; 265 ret = 0;
268out: 266out:
269 return ret; 267 return ret;