author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 21:47:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 21:47:00 -0400
commit	1ee07ef6b5db7235b133ee257a3adf507697e6b3 (patch)
tree	9c7a00cf98462c2a70610da9d09770c835ef8fcd
parent	77654908ff1a58cee4886298968b5262884aff0b (diff)
parent	0cccdda8d1512af4d3f6913044e8c8e58e15ef37 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "This patch set contains the main portion of the changes for 3.18 in regard
  to the s390 architecture. It is a bit bigger than usual, mainly because of
  a new driver and the vector extension patches.

  The interesting bits are:

   - Quite a bit of work on the tracing front. Uprobes is enabled and the
     ftrace code is reworked to get some of the lost performance back if
     CONFIG_FTRACE is enabled.

   - To improve boot time with CONFIG_DEBUG_PAGEALLOC, support for the IPTE
     range facility is added.

   - The rwlock code is re-factored to improve writer fairness and to be
     able to use the interlocked-access instructions.

   - The kernel part for the support of the vector extension is added.

   - The device driver to access the CD/DVD on the HMC is added; this will
     hopefully come in handy to improve the installation process.

   - Add support for control-unit initiated reconfiguration.

   - The crypto device driver is enhanced to enable the additional AP
     domains and to allow the new crypto hardware to be used.

   - Bug fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (39 commits)
  s390/ftrace: simplify enabling/disabling of ftrace_graph_caller
  s390/ftrace: remove 31 bit ftrace support
  s390/kdump: add support for vector extension
  s390/disassembler: add vector instructions
  s390: add support for vector extension
  s390/zcrypt: Toleration of new crypto hardware
  s390/idle: consolidate idle functions and definitions
  s390/nohz: use a per-cpu flag for arch_needs_cpu
  s390/vtime: do not reset idle data on CPU hotplug
  s390/dasd: add support for control unit initiated reconfiguration
  s390/dasd: fix infinite loop during format
  s390/mm: make use of ipte range facility
  s390/setup: correct 4-level kernel page table detection
  s390/topology: call set_sched_topology early
  s390/uprobes: architecture backend for uprobes
  s390/uprobes: common library for kprobes and uprobes
  s390/rwlock: use the interlocked-access facility 1 instructions
  s390/rwlock: improve writer fairness
  s390/rwlock: remove interrupt-enabling rwlock variant.
  s390/mm: remove change bit override support
  ...
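The rwlock rework mentioned above replaces the compare-and-swap based reader path with a single interlocked add on machines that provide the interlocked-access facility 1 (see the asm/spinlock.h and lib/spinlock.c hunks below). A minimal C sketch of that reader fast path, with the GCC builtin __atomic_fetch_add standing in for the s390 "laa" instruction used in the real inline assembly:

/*
 * Condensed sketch, not the kernel code: reader fast path as implemented
 * in the asm/spinlock.h hunk below.  __atomic_fetch_add stands in for the
 * "laa" (load-and-add) interlocked-access instruction.
 */
typedef struct {
        unsigned int lock;   /* bit 31: writer active, bits 0-30: reader count */
        unsigned int owner;  /* lock value of the writer, used by arch_lock_relax() */
} arch_rwlock_sketch_t;

extern void _raw_read_lock_wait(arch_rwlock_sketch_t *rw);   /* contended slow path */

static inline void arch_read_lock_sketch(arch_rwlock_sketch_t *rw)
{
        /* One atomic add announces the reader and returns the old lock word. */
        unsigned int old = __atomic_fetch_add(&rw->lock, 1, __ATOMIC_SEQ_CST);

        if ((int) old < 0)                /* writer bit was set */
                _raw_read_lock_wait(rw);  /* reader count is already accounted for */
}

Because the add both registers the reader and returns the previous lock word in one instruction, the uncontended case needs no retry loop; only when the old value has the writer bit set does the slow path in lib/spinlock.c get involved.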
-rw-r--r--  Documentation/kprobes.txt | 1
-rw-r--r--  arch/s390/Kconfig | 11
-rw-r--r--  arch/s390/Makefile | 17
-rw-r--r--  arch/s390/include/asm/barrier.h | 6
-rw-r--r--  arch/s390/include/asm/cputime.h | 26
-rw-r--r--  arch/s390/include/asm/dis.h | 13
-rw-r--r--  arch/s390/include/asm/elf.h | 3
-rw-r--r--  arch/s390/include/asm/ftrace.h | 9
-rw-r--r--  arch/s390/include/asm/idle.h | 26
-rw-r--r--  arch/s390/include/asm/ipl.h | 4
-rw-r--r--  arch/s390/include/asm/irq.h | 1
-rw-r--r--  arch/s390/include/asm/kprobes.h | 4
-rw-r--r--  arch/s390/include/asm/lowcore.h | 21
-rw-r--r--  arch/s390/include/asm/nmi.h | 2
-rw-r--r--  arch/s390/include/asm/pgtable.h | 25
-rw-r--r--  arch/s390/include/asm/processor.h | 12
-rw-r--r--  arch/s390/include/asm/ptrace.h | 6
-rw-r--r--  arch/s390/include/asm/setup.h | 6
-rw-r--r--  arch/s390/include/asm/sigp.h | 6
-rw-r--r--  arch/s390/include/asm/smp.h | 2
-rw-r--r--  arch/s390/include/asm/spinlock.h | 135
-rw-r--r--  arch/s390/include/asm/spinlock_types.h | 1
-rw-r--r--  arch/s390/include/asm/switch_to.h | 61
-rw-r--r--  arch/s390/include/asm/thread_info.h | 3
-rw-r--r--  arch/s390/include/asm/uprobes.h | 42
-rw-r--r--  arch/s390/include/asm/vdso.h | 18
-rw-r--r--  arch/s390/include/asm/vtimer.h | 2
-rw-r--r--  arch/s390/include/uapi/asm/sigcontext.h | 20
-rw-r--r--  arch/s390/include/uapi/asm/types.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/ucontext.h | 15
-rw-r--r--  arch/s390/kernel/Makefile | 8
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 9
-rw-r--r--  arch/s390/kernel/compat_linux.h | 9
-rw-r--r--  arch/s390/kernel/compat_signal.c | 212
-rw-r--r--  arch/s390/kernel/crash_dump.c | 58
-rw-r--r--  arch/s390/kernel/dis.c | 245
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/entry.h | 6
-rw-r--r--  arch/s390/kernel/entry64.S | 17
-rw-r--r--  arch/s390/kernel/ftrace.c | 139
-rw-r--r--  arch/s390/kernel/head.S | 2
-rw-r--r--  arch/s390/kernel/idle.c | 124
-rw-r--r--  arch/s390/kernel/irq.c | 3
-rw-r--r--  arch/s390/kernel/kprobes.c | 159
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 8
-rw-r--r--  arch/s390/kernel/mcount.S | 86
-rw-r--r--  arch/s390/kernel/mcount64.S | 62
-rw-r--r--  arch/s390/kernel/nmi.c | 16
-rw-r--r--  arch/s390/kernel/pgm_check.S | 2
-rw-r--r--  arch/s390/kernel/process.c | 24
-rw-r--r--  arch/s390/kernel/processor.c | 4
-rw-r--r--  arch/s390/kernel/ptrace.c | 254
-rw-r--r--  arch/s390/kernel/setup.c | 13
-rw-r--r--  arch/s390/kernel/signal.c | 296
-rw-r--r--  arch/s390/kernel/smp.c | 80
-rw-r--r--  arch/s390/kernel/time.c | 13
-rw-r--r--  arch/s390/kernel/topology.c | 18
-rw-r--r--  arch/s390/kernel/traps.c | 115
-rw-r--r--  arch/s390/kernel/uprobes.c | 332
-rw-r--r--  arch/s390/kernel/vdso32/clock_getres.S | 11
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S | 32
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S | 4
-rw-r--r--  arch/s390/kernel/vdso64/clock_getres.S | 8
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 32
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S | 4
-rw-r--r--  arch/s390/kernel/vtime.c | 77
-rw-r--r--  arch/s390/lib/Makefile | 2
-rw-r--r--  arch/s390/lib/delay.c | 4
-rw-r--r--  arch/s390/lib/probes.c | 159
-rw-r--r--  arch/s390/lib/spinlock.c | 105
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 5
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 2
-rw-r--r--  arch/s390/mm/pageattr.c | 38
-rw-r--r--  arch/s390/mm/vmem.c | 8
-rw-r--r--  drivers/s390/block/dasd.c | 33
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 24
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 372
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 63
-rw-r--r--  drivers/s390/block/dasd_int.h | 10
-rw-r--r--  drivers/s390/char/Kconfig | 13
-rw-r--r--  drivers/s390/char/Makefile | 3
-rw-r--r--  drivers/s390/char/diag_ftp.c | 237
-rw-r--r--  drivers/s390/char/diag_ftp.h | 21
-rw-r--r--  drivers/s390/char/hmcdrv_cache.c | 252
-rw-r--r--  drivers/s390/char/hmcdrv_cache.h | 24
-rw-r--r--  drivers/s390/char/hmcdrv_dev.c | 370
-rw-r--r--  drivers/s390/char/hmcdrv_dev.h | 14
-rw-r--r--  drivers/s390/char/hmcdrv_ftp.c | 343
-rw-r--r--  drivers/s390/char/hmcdrv_ftp.h | 63
-rw-r--r--  drivers/s390/char/hmcdrv_mod.c | 64
-rw-r--r--  drivers/s390/char/sclp.h | 2
-rw-r--r--  drivers/s390/char/sclp_diag.h | 89
-rw-r--r--  drivers/s390/char/sclp_early.c | 2
-rw-r--r--  drivers/s390/char/sclp_ftp.c | 275
-rw-r--r--  drivers/s390/char/sclp_ftp.h | 21
-rw-r--r--  drivers/s390/char/sclp_rw.c | 13
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 4
-rw-r--r--  drivers/s390/char/tape_char.c | 4
-rw-r--r--  drivers/s390/char/zcore.c | 18
-rw-r--r--  drivers/s390/cio/airq.c | 2
-rw-r--r--  drivers/s390/cio/cio.c | 2
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 30
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 7
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 7
-rw-r--r--  include/linux/tick.h | 2
-rw-r--r--  include/uapi/linux/elf.h | 2
-rw-r--r--  kernel/time/tick-sched.c | 2
-rw-r--r--  scripts/recordmcount.c | 4
-rwxr-xr-x  scripts/recordmcount.pl | 7
109 files changed, 4738 insertions, 972 deletions
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 4bbeca8483ed..4227ec2e3ab2 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -300,6 +300,7 @@ architectures:
300- arm 300- arm
301- ppc 301- ppc
302- mips 302- mips
303- s390
303 304
3043. Configuring Kprobes 3053. Configuring Kprobes
305 306
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 296391395b95..f2cf1f90295b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -58,6 +58,9 @@ config NO_IOPORT_MAP
58config PCI_QUIRKS 58config PCI_QUIRKS
59 def_bool n 59 def_bool n
60 60
61config ARCH_SUPPORTS_UPROBES
62 def_bool 64BIT
63
61config S390 64config S390
62 def_bool y 65 def_bool y
63 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 66 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -97,6 +100,7 @@ config S390
97 select ARCH_WANT_IPC_PARSE_VERSION 100 select ARCH_WANT_IPC_PARSE_VERSION
98 select BUILDTIME_EXTABLE_SORT 101 select BUILDTIME_EXTABLE_SORT
99 select CLONE_BACKWARDS2 102 select CLONE_BACKWARDS2
103 select DYNAMIC_FTRACE if FUNCTION_TRACER
100 select GENERIC_CLOCKEVENTS 104 select GENERIC_CLOCKEVENTS
101 select GENERIC_CPU_DEVICES if !SMP 105 select GENERIC_CPU_DEVICES if !SMP
102 select GENERIC_FIND_FIRST_BIT 106 select GENERIC_FIND_FIRST_BIT
@@ -113,10 +117,11 @@ config S390
113 select HAVE_CMPXCHG_LOCAL 117 select HAVE_CMPXCHG_LOCAL
114 select HAVE_C_RECORDMCOUNT 118 select HAVE_C_RECORDMCOUNT
115 select HAVE_DEBUG_KMEMLEAK 119 select HAVE_DEBUG_KMEMLEAK
116 select HAVE_DYNAMIC_FTRACE 120 select HAVE_DYNAMIC_FTRACE if 64BIT
121 select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
117 select HAVE_FTRACE_MCOUNT_RECORD 122 select HAVE_FTRACE_MCOUNT_RECORD
118 select HAVE_FUNCTION_GRAPH_TRACER 123 select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
119 select HAVE_FUNCTION_TRACER 124 select HAVE_FUNCTION_TRACER if 64BIT
120 select HAVE_FUTEX_CMPXCHG if FUTEX 125 select HAVE_FUTEX_CMPXCHG if FUTEX
121 select HAVE_KERNEL_BZIP2 126 select HAVE_KERNEL_BZIP2
122 select HAVE_KERNEL_GZIP 127 select HAVE_KERNEL_GZIP
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 874e6d6e9c5f..878e67973151 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -35,13 +35,16 @@ endif
35 35
36export LD_BFD 36export LD_BFD
37 37
38cflags-$(CONFIG_MARCH_G5) += -march=g5 38mflags-$(CONFIG_MARCH_G5) := -march=g5
39cflags-$(CONFIG_MARCH_Z900) += -march=z900 39mflags-$(CONFIG_MARCH_Z900) := -march=z900
40cflags-$(CONFIG_MARCH_Z990) += -march=z990 40mflags-$(CONFIG_MARCH_Z990) := -march=z990
41cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109 41mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
42cflags-$(CONFIG_MARCH_Z10) += -march=z10 42mflags-$(CONFIG_MARCH_Z10) := -march=z10
43cflags-$(CONFIG_MARCH_Z196) += -march=z196 43mflags-$(CONFIG_MARCH_Z196) := -march=z196
44cflags-$(CONFIG_MARCH_ZEC12) += -march=zEC12 44mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
45
46aflags-y += $(mflags-y)
47cflags-y += $(mflags-y)
45 48
46cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5 49cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
47cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 50cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 19ff956b752b..b5dce6544d76 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -15,11 +15,13 @@
15 15
16#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 16#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
17/* Fast-BCR without checkpoint synchronization */ 17/* Fast-BCR without checkpoint synchronization */
18#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0) 18#define __ASM_BARRIER "bcr 14,0\n"
19#else 19#else
20#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0) 20#define __ASM_BARRIER "bcr 15,0\n"
21#endif 21#endif
22 22
23#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
24
23#define rmb() mb() 25#define rmb() mb()
24#define wmb() mb() 26#define wmb() mb()
25#define read_barrier_depends() do { } while(0) 27#define read_barrier_depends() do { } while(0)
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 3001887f94b7..f8c196984853 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -8,8 +8,6 @@
8#define _S390_CPUTIME_H 8#define _S390_CPUTIME_H
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/percpu.h>
12#include <linux/spinlock.h>
13#include <asm/div64.h> 11#include <asm/div64.h>
14 12
15 13
@@ -167,28 +165,8 @@ static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
167 return clock; 165 return clock;
168} 166}
169 167
170struct s390_idle_data { 168cputime64_t arch_cpu_idle_time(int cpu);
171 int nohz_delay;
172 unsigned int sequence;
173 unsigned long long idle_count;
174 unsigned long long idle_time;
175 unsigned long long clock_idle_enter;
176 unsigned long long clock_idle_exit;
177 unsigned long long timer_idle_enter;
178 unsigned long long timer_idle_exit;
179};
180 169
181DECLARE_PER_CPU(struct s390_idle_data, s390_idle); 170#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
182
183cputime64_t s390_get_idle_time(int cpu);
184
185#define arch_idle_time(cpu) s390_get_idle_time(cpu)
186
187static inline int s390_nohz_delay(int cpu)
188{
189 return __get_cpu_var(s390_idle).nohz_delay != 0;
190}
191
192#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
193 171
194#endif /* _S390_CPUTIME_H */ 172#endif /* _S390_CPUTIME_H */
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 04a83f5773cd..60323c21938b 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -13,12 +13,13 @@
13#define OPERAND_FPR 0x2 /* Operand printed as %fx */ 13#define OPERAND_FPR 0x2 /* Operand printed as %fx */
14#define OPERAND_AR 0x4 /* Operand printed as %ax */ 14#define OPERAND_AR 0x4 /* Operand printed as %ax */
15#define OPERAND_CR 0x8 /* Operand printed as %cx */ 15#define OPERAND_CR 0x8 /* Operand printed as %cx */
16#define OPERAND_DISP 0x10 /* Operand printed as displacement */ 16#define OPERAND_VR 0x10 /* Operand printed as %vx */
17#define OPERAND_BASE 0x20 /* Operand printed as base register */ 17#define OPERAND_DISP 0x20 /* Operand printed as displacement */
18#define OPERAND_INDEX 0x40 /* Operand printed as index register */ 18#define OPERAND_BASE 0x40 /* Operand printed as base register */
19#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ 19#define OPERAND_INDEX 0x80 /* Operand printed as index register */
20#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ 20#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
21#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ 21#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
22#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
22 23
23 24
24struct s390_operand { 25struct s390_operand {
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 78f4f8711d58..f6e43d39e3d8 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -102,6 +102,7 @@
102#define HWCAP_S390_ETF3EH 256 102#define HWCAP_S390_ETF3EH 256
103#define HWCAP_S390_HIGH_GPRS 512 103#define HWCAP_S390_HIGH_GPRS 512
104#define HWCAP_S390_TE 1024 104#define HWCAP_S390_TE 1024
105#define HWCAP_S390_VXRS 2048
105 106
106/* 107/*
107 * These are used to set parameters in the core dumps. 108 * These are used to set parameters in the core dumps.
@@ -225,6 +226,6 @@ int arch_setup_additional_pages(struct linux_binprm *, int);
225extern unsigned long arch_randomize_brk(struct mm_struct *mm); 226extern unsigned long arch_randomize_brk(struct mm_struct *mm);
226#define arch_randomize_brk arch_randomize_brk 227#define arch_randomize_brk arch_randomize_brk
227 228
228void *fill_cpu_elf_notes(void *ptr, struct save_area *sa); 229void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
229 230
230#endif 231#endif
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index bf246dae1367..3aef8afec336 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,6 +4,7 @@
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6extern void _mcount(void); 6extern void _mcount(void);
7extern char ftrace_graph_caller_end;
7 8
8struct dyn_arch_ftrace { }; 9struct dyn_arch_ftrace { };
9 10
@@ -17,10 +18,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
17 18
18#endif /* __ASSEMBLY__ */ 19#endif /* __ASSEMBLY__ */
19 20
20#ifdef CONFIG_64BIT 21#define MCOUNT_INSN_SIZE 18
21#define MCOUNT_INSN_SIZE 12 22
22#else 23#define ARCH_SUPPORTS_FTRACE_OPS 1
23#define MCOUNT_INSN_SIZE 22
24#endif
25 24
26#endif /* _ASM_S390_FTRACE_H */ 25#endif /* _ASM_S390_FTRACE_H */
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
new file mode 100644
index 000000000000..6af037f574b8
--- /dev/null
+++ b/arch/s390/include/asm/idle.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright IBM Corp. 2014
3 *
4 * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
5 */
6
7#ifndef _S390_IDLE_H
8#define _S390_IDLE_H
9
10#include <linux/types.h>
11#include <linux/device.h>
12
13struct s390_idle_data {
14 unsigned int sequence;
15 unsigned long long idle_count;
16 unsigned long long idle_time;
17 unsigned long long clock_idle_enter;
18 unsigned long long clock_idle_exit;
19 unsigned long long timer_idle_enter;
20 unsigned long long timer_idle_exit;
21};
22
23extern struct device_attribute dev_attr_idle_count;
24extern struct device_attribute dev_attr_idle_time_us;
25
26#endif /* _S390_IDLE_H */
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index c81661e756a0..ece606c2ee86 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -89,12 +89,12 @@ extern u32 ipl_flags;
89extern u32 dump_prefix_page; 89extern u32 dump_prefix_page;
90 90
91struct dump_save_areas { 91struct dump_save_areas {
92 struct save_area **areas; 92 struct save_area_ext **areas;
93 int count; 93 int count;
94}; 94};
95 95
96extern struct dump_save_areas dump_save_areas; 96extern struct dump_save_areas dump_save_areas;
97struct save_area *dump_save_area_create(int cpu); 97struct save_area_ext *dump_save_area_create(int cpu);
98 98
99extern void do_reipl(void); 99extern void do_reipl(void);
100extern void do_halt(void); 100extern void do_halt(void);
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index c4dd400a2791..e787cc1bff8f 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -51,6 +51,7 @@ enum interruption_class {
51 IRQEXT_CMS, 51 IRQEXT_CMS,
52 IRQEXT_CMC, 52 IRQEXT_CMC,
53 IRQEXT_CMR, 53 IRQEXT_CMR,
54 IRQEXT_FTP,
54 IRQIO_CIO, 55 IRQIO_CIO,
55 IRQIO_QAI, 56 IRQIO_QAI,
56 IRQIO_DAS, 57 IRQIO_DAS,
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 4176dfe0fba1..98629173ce3b 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -84,6 +84,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
84int kprobe_exceptions_notify(struct notifier_block *self, 84int kprobe_exceptions_notify(struct notifier_block *self,
85 unsigned long val, void *data); 85 unsigned long val, void *data);
86 86
87int probe_is_prohibited_opcode(u16 *insn);
88int probe_get_fixup_type(u16 *insn);
89int probe_is_insn_relative_long(u16 *insn);
90
87#define flush_insn_slot(p) do { } while (0) 91#define flush_insn_slot(p) do { } while (0)
88 92
89#endif /* _ASM_S390_KPROBES_H */ 93#endif /* _ASM_S390_KPROBES_H */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 4349197ab9df..6cc51fe84410 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -11,6 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13#include <asm/cpu.h> 13#include <asm/cpu.h>
14#include <asm/types.h>
14 15
15#ifdef CONFIG_32BIT 16#ifdef CONFIG_32BIT
16 17
@@ -31,6 +32,11 @@ struct save_area {
31 u32 ctrl_regs[16]; 32 u32 ctrl_regs[16];
32} __packed; 33} __packed;
33 34
35struct save_area_ext {
36 struct save_area sa;
37 __vector128 vx_regs[32];
38};
39
34struct _lowcore { 40struct _lowcore {
35 psw_t restart_psw; /* 0x0000 */ 41 psw_t restart_psw; /* 0x0000 */
36 psw_t restart_old_psw; /* 0x0008 */ 42 psw_t restart_old_psw; /* 0x0008 */
@@ -183,6 +189,11 @@ struct save_area {
183 u64 ctrl_regs[16]; 189 u64 ctrl_regs[16];
184} __packed; 190} __packed;
185 191
192struct save_area_ext {
193 struct save_area sa;
194 __vector128 vx_regs[32];
195};
196
186struct _lowcore { 197struct _lowcore {
187 __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */ 198 __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
188 __u32 ipl_parmblock_ptr; /* 0x0014 */ 199 __u32 ipl_parmblock_ptr; /* 0x0014 */
@@ -310,7 +321,10 @@ struct _lowcore {
310 321
311 /* Extended facility list */ 322 /* Extended facility list */
312 __u64 stfle_fac_list[32]; /* 0x0f00 */ 323 __u64 stfle_fac_list[32]; /* 0x0f00 */
313 __u8 pad_0x1000[0x11b8-0x1000]; /* 0x1000 */ 324 __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
325
326 /* Pointer to vector register save area */
327 __u64 vector_save_area_addr; /* 0x11b0 */
314 328
315 /* 64 bit extparam used for pfault/diag 250: defined by architecture */ 329 /* 64 bit extparam used for pfault/diag 250: defined by architecture */
316 __u64 ext_params2; /* 0x11B8 */ 330 __u64 ext_params2; /* 0x11B8 */
@@ -334,9 +348,10 @@ struct _lowcore {
334 348
335 /* Transaction abort diagnostic block */ 349 /* Transaction abort diagnostic block */
336 __u8 pgm_tdb[256]; /* 0x1800 */ 350 __u8 pgm_tdb[256]; /* 0x1800 */
351 __u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */
337 352
338 /* align to the top of the prefix area */ 353 /* Software defined save area for vector registers */
339 __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */ 354 __u8 vector_save_area[1024]; /* 0x1c00 */
340} __packed; 355} __packed;
341 356
342#endif /* CONFIG_32BIT */ 357#endif /* CONFIG_32BIT */
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 35f8ec185616..3027a5a72b74 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -38,7 +38,7 @@ struct mci {
38 __u32 pm : 1; /* 22 psw program mask and cc validity */ 38 __u32 pm : 1; /* 22 psw program mask and cc validity */
39 __u32 ia : 1; /* 23 psw instruction address validity */ 39 __u32 ia : 1; /* 23 psw instruction address validity */
40 __u32 fa : 1; /* 24 failing storage address validity */ 40 __u32 fa : 1; /* 24 failing storage address validity */
41 __u32 : 1; /* 25 */ 41 __u32 vr : 1; /* 25 vector register validity */
42 __u32 ec : 1; /* 26 external damage code validity */ 42 __u32 ec : 1; /* 26 external damage code validity */
43 __u32 fp : 1; /* 27 floating point register validity */ 43 __u32 fp : 1; /* 27 floating point register validity */
44 __u32 gr : 1; /* 28 general register validity */ 44 __u32 gr : 1; /* 28 general register validity */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b7054356cc98..57c882761dea 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -217,7 +217,6 @@ extern unsigned long MODULES_END;
217 */ 217 */
218 218
219/* Hardware bits in the page table entry */ 219/* Hardware bits in the page table entry */
220#define _PAGE_CO 0x100 /* HW Change-bit override */
221#define _PAGE_PROTECT 0x200 /* HW read-only bit */ 220#define _PAGE_PROTECT 0x200 /* HW read-only bit */
222#define _PAGE_INVALID 0x400 /* HW invalid bit */ 221#define _PAGE_INVALID 0x400 /* HW invalid bit */
223#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ 222#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
@@ -234,8 +233,8 @@ extern unsigned long MODULES_END;
234#define __HAVE_ARCH_PTE_SPECIAL 233#define __HAVE_ARCH_PTE_SPECIAL
235 234
236/* Set of bits not changed in pte_modify */ 235/* Set of bits not changed in pte_modify */
237#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ 236#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
238 _PAGE_DIRTY | _PAGE_YOUNG) 237 _PAGE_YOUNG)
239 238
240/* 239/*
241 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the 240 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
@@ -354,7 +353,6 @@ extern unsigned long MODULES_END;
354 353
355#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */ 354#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
356#define _REGION3_ENTRY_RO 0x200 /* page protection bit */ 355#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
357#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
358 356
359/* Bits in the segment table entry */ 357/* Bits in the segment table entry */
360#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL 358#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
@@ -371,7 +369,6 @@ extern unsigned long MODULES_END;
371#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */ 369#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
372#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */ 370#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */
373#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ 371#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
374#define _SEGMENT_ENTRY_CO 0x0100 /* change-recording override */
375#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */ 372#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
376#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */ 373#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
377 374
@@ -873,8 +870,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
873 pgste = pgste_set_pte(ptep, pgste, entry); 870 pgste = pgste_set_pte(ptep, pgste, entry);
874 pgste_set_unlock(ptep, pgste); 871 pgste_set_unlock(ptep, pgste);
875 } else { 872 } else {
876 if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
877 pte_val(entry) |= _PAGE_CO;
878 *ptep = entry; 873 *ptep = entry;
879 } 874 }
880} 875}
@@ -1044,6 +1039,22 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
1044 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); 1039 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
1045} 1040}
1046 1041
1042static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
1043{
1044 unsigned long pto = (unsigned long) ptep;
1045
1046#ifndef CONFIG_64BIT
1047 /* pto in ESA mode must point to the start of the segment table */
1048 pto &= 0x7ffffc00;
1049#endif
1050 /* Invalidate a range of ptes + global TLB flush of the ptes */
1051 do {
1052 asm volatile(
1053 " .insn rrf,0xb2210000,%2,%0,%1,0"
1054 : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
1055 } while (nr != 255);
1056}
1057
1047static inline void ptep_flush_direct(struct mm_struct *mm, 1058static inline void ptep_flush_direct(struct mm_struct *mm,
1048 unsigned long address, pte_t *ptep) 1059 unsigned long address, pte_t *ptep)
1049{ 1060{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e568fc8a7250..d559bdb03d18 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -13,9 +13,11 @@
13 13
14#define CIF_MCCK_PENDING 0 /* machine check handling is pending */ 14#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
15#define CIF_ASCE 1 /* user asce needs fixup / uaccess */ 15#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
16#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
16 17
17#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) 18#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
18#define _CIF_ASCE (1<<CIF_ASCE) 19#define _CIF_ASCE (1<<CIF_ASCE)
20#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
19 21
20 22
21#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
@@ -43,6 +45,8 @@ static inline int test_cpu_flag(int flag)
43 return !!(S390_lowcore.cpu_flags & (1U << flag)); 45 return !!(S390_lowcore.cpu_flags & (1U << flag));
44} 46}
45 47
48#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
49
46/* 50/*
47 * Default implementation of macro that returns current 51 * Default implementation of macro that returns current
48 * instruction pointer ("program counter"). 52 * instruction pointer ("program counter").
@@ -113,6 +117,7 @@ struct thread_struct {
113 int ri_signum; 117 int ri_signum;
114#ifdef CONFIG_64BIT 118#ifdef CONFIG_64BIT
115 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 119 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
120 __vector128 *vxrs; /* Vector register save area */
116#endif 121#endif
117}; 122};
118 123
@@ -285,7 +290,12 @@ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
285 return (psw.addr - ilc) & mask; 290 return (psw.addr - ilc) & mask;
286#endif 291#endif
287} 292}
288 293
294/*
295 * Function to stop a processor until the next interrupt occurs
296 */
297void enabled_wait(void);
298
289/* 299/*
290 * Function to drop a processor into disabled wait state 300 * Function to drop a processor into disabled wait state
291 */ 301 */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 55d69dd7473c..be317feff7ac 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -161,6 +161,12 @@ static inline long regs_return_value(struct pt_regs *regs)
161 return regs->gprs[2]; 161 return regs->gprs[2];
162} 162}
163 163
164static inline void instruction_pointer_set(struct pt_regs *regs,
165 unsigned long val)
166{
167 regs->psw.addr = val | PSW_ADDR_AMODE;
168}
169
164int regs_query_register_offset(const char *name); 170int regs_query_register_offset(const char *name);
165const char *regs_query_register_name(unsigned int offset); 171const char *regs_query_register_name(unsigned int offset);
166unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); 172unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 089a49814c50..7736fdd72595 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -55,8 +55,8 @@ extern void detect_memory_memblock(void);
55#define MACHINE_FLAG_LPP (1UL << 13) 55#define MACHINE_FLAG_LPP (1UL << 13)
56#define MACHINE_FLAG_TOPOLOGY (1UL << 14) 56#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
57#define MACHINE_FLAG_TE (1UL << 15) 57#define MACHINE_FLAG_TE (1UL << 15)
58#define MACHINE_FLAG_RRBM (1UL << 16)
59#define MACHINE_FLAG_TLB_LC (1UL << 17) 58#define MACHINE_FLAG_TLB_LC (1UL << 17)
59#define MACHINE_FLAG_VX (1UL << 18)
60 60
61#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 61#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
62#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 62#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -78,8 +78,8 @@ extern void detect_memory_memblock(void);
78#define MACHINE_HAS_LPP (0) 78#define MACHINE_HAS_LPP (0)
79#define MACHINE_HAS_TOPOLOGY (0) 79#define MACHINE_HAS_TOPOLOGY (0)
80#define MACHINE_HAS_TE (0) 80#define MACHINE_HAS_TE (0)
81#define MACHINE_HAS_RRBM (0)
82#define MACHINE_HAS_TLB_LC (0) 81#define MACHINE_HAS_TLB_LC (0)
82#define MACHINE_HAS_VX (0)
83#else /* CONFIG_64BIT */ 83#else /* CONFIG_64BIT */
84#define MACHINE_HAS_IEEE (1) 84#define MACHINE_HAS_IEEE (1)
85#define MACHINE_HAS_CSP (1) 85#define MACHINE_HAS_CSP (1)
@@ -91,8 +91,8 @@ extern void detect_memory_memblock(void);
91#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) 91#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
92#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 92#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
93#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) 93#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
94#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
95#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) 94#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
95#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
96#endif /* CONFIG_64BIT */ 96#endif /* CONFIG_64BIT */
97 97
98/* 98/*
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index bf9c823d4020..49576115dbb7 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -15,6 +15,7 @@
15#define SIGP_SET_ARCHITECTURE 18 15#define SIGP_SET_ARCHITECTURE 18
16#define SIGP_COND_EMERGENCY_SIGNAL 19 16#define SIGP_COND_EMERGENCY_SIGNAL 19
17#define SIGP_SENSE_RUNNING 21 17#define SIGP_SENSE_RUNNING 21
18#define SIGP_STORE_ADDITIONAL_STATUS 23
18 19
19/* SIGP condition codes */ 20/* SIGP condition codes */
20#define SIGP_CC_ORDER_CODE_ACCEPTED 0 21#define SIGP_CC_ORDER_CODE_ACCEPTED 0
@@ -33,9 +34,10 @@
33 34
34#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
35 36
36static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status) 37static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
38 u32 *status)
37{ 39{
38 register unsigned int reg1 asm ("1") = parm; 40 register unsigned long reg1 asm ("1") = parm;
39 int cc; 41 int cc;
40 42
41 asm volatile( 43 asm volatile(
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 4f1307962a95..762d4f88af5a 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
29extern int smp_store_status(int cpu); 29extern int smp_store_status(int cpu);
30extern int smp_vcpu_scheduled(int cpu); 30extern int smp_vcpu_scheduled(int cpu);
31extern void smp_yield_cpu(int cpu); 31extern void smp_yield_cpu(int cpu);
32extern void smp_yield(void);
33extern void smp_cpu_set_polarization(int cpu, int val); 32extern void smp_cpu_set_polarization(int cpu, int val);
34extern int smp_cpu_get_polarization(int cpu); 33extern int smp_cpu_get_polarization(int cpu);
35extern void smp_fill_possible_mask(void); 34extern void smp_fill_possible_mask(void);
@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
50static inline int smp_store_status(int cpu) { return 0; } 49static inline int smp_store_status(int cpu) { return 0; }
51static inline int smp_vcpu_scheduled(int cpu) { return 1; } 50static inline int smp_vcpu_scheduled(int cpu) { return 1; }
52static inline void smp_yield_cpu(int cpu) { } 51static inline void smp_yield_cpu(int cpu) { }
53static inline void smp_yield(void) { }
54static inline void smp_fill_possible_mask(void) { } 52static inline void smp_fill_possible_mask(void) { }
55 53
56#endif /* CONFIG_SMP */ 54#endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 96879f7ad6da..d6bdf906caa5 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
37 * (the type definitions are in asm/spinlock_types.h) 37 * (the type definitions are in asm/spinlock_types.h)
38 */ 38 */
39 39
40void arch_lock_relax(unsigned int cpu);
41
40void arch_spin_lock_wait(arch_spinlock_t *); 42void arch_spin_lock_wait(arch_spinlock_t *);
41int arch_spin_trylock_retry(arch_spinlock_t *); 43int arch_spin_trylock_retry(arch_spinlock_t *);
42void arch_spin_relax(arch_spinlock_t *);
43void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); 44void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
44 45
46static inline void arch_spin_relax(arch_spinlock_t *lock)
47{
48 arch_lock_relax(lock->lock);
49}
50
45static inline u32 arch_spin_lockval(int cpu) 51static inline u32 arch_spin_lockval(int cpu)
46{ 52{
47 return ~cpu; 53 return ~cpu;
@@ -64,11 +70,6 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
64 _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL)); 70 _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
65} 71}
66 72
67static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
68{
69 return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
70}
71
72static inline void arch_spin_lock(arch_spinlock_t *lp) 73static inline void arch_spin_lock(arch_spinlock_t *lp)
73{ 74{
74 if (!arch_spin_trylock_once(lp)) 75 if (!arch_spin_trylock_once(lp))
@@ -91,7 +92,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
91 92
92static inline void arch_spin_unlock(arch_spinlock_t *lp) 93static inline void arch_spin_unlock(arch_spinlock_t *lp)
93{ 94{
94 arch_spin_tryrelease_once(lp); 95 typecheck(unsigned int, lp->lock);
96 asm volatile(
97 __ASM_BARRIER
98 "st %1,%0\n"
99 : "+Q" (lp->lock)
100 : "d" (0)
101 : "cc", "memory");
95} 102}
96 103
97static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 104static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
@@ -123,13 +130,12 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
123 */ 130 */
124#define arch_write_can_lock(x) ((x)->lock == 0) 131#define arch_write_can_lock(x) ((x)->lock == 0)
125 132
126extern void _raw_read_lock_wait(arch_rwlock_t *lp);
127extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
128extern int _raw_read_trylock_retry(arch_rwlock_t *lp); 133extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
129extern void _raw_write_lock_wait(arch_rwlock_t *lp);
130extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
131extern int _raw_write_trylock_retry(arch_rwlock_t *lp); 134extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
132 135
136#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
137#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
138
133static inline int arch_read_trylock_once(arch_rwlock_t *rw) 139static inline int arch_read_trylock_once(arch_rwlock_t *rw)
134{ 140{
135 unsigned int old = ACCESS_ONCE(rw->lock); 141 unsigned int old = ACCESS_ONCE(rw->lock);
@@ -144,16 +150,82 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
144 _raw_compare_and_swap(&rw->lock, 0, 0x80000000)); 150 _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
145} 151}
146 152
153#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
154
155#define __RAW_OP_OR "lao"
156#define __RAW_OP_AND "lan"
157#define __RAW_OP_ADD "laa"
158
159#define __RAW_LOCK(ptr, op_val, op_string) \
160({ \
161 unsigned int old_val; \
162 \
163 typecheck(unsigned int *, ptr); \
164 asm volatile( \
165 op_string " %0,%2,%1\n" \
166 "bcr 14,0\n" \
167 : "=d" (old_val), "+Q" (*ptr) \
168 : "d" (op_val) \
169 : "cc", "memory"); \
170 old_val; \
171})
172
173#define __RAW_UNLOCK(ptr, op_val, op_string) \
174({ \
175 unsigned int old_val; \
176 \
177 typecheck(unsigned int *, ptr); \
178 asm volatile( \
179 "bcr 14,0\n" \
180 op_string " %0,%2,%1\n" \
181 : "=d" (old_val), "+Q" (*ptr) \
182 : "d" (op_val) \
183 : "cc", "memory"); \
184 old_val; \
185})
186
187extern void _raw_read_lock_wait(arch_rwlock_t *lp);
188extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
189
147static inline void arch_read_lock(arch_rwlock_t *rw) 190static inline void arch_read_lock(arch_rwlock_t *rw)
148{ 191{
149 if (!arch_read_trylock_once(rw)) 192 unsigned int old;
193
194 old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
195 if ((int) old < 0)
150 _raw_read_lock_wait(rw); 196 _raw_read_lock_wait(rw);
151} 197}
152 198
153static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) 199static inline void arch_read_unlock(arch_rwlock_t *rw)
200{
201 __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
202}
203
204static inline void arch_write_lock(arch_rwlock_t *rw)
205{
206 unsigned int old;
207
208 old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
209 if (old != 0)
210 _raw_write_lock_wait(rw, old);
211 rw->owner = SPINLOCK_LOCKVAL;
212}
213
214static inline void arch_write_unlock(arch_rwlock_t *rw)
215{
216 rw->owner = 0;
217 __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
218}
219
220#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
221
222extern void _raw_read_lock_wait(arch_rwlock_t *lp);
223extern void _raw_write_lock_wait(arch_rwlock_t *lp);
224
225static inline void arch_read_lock(arch_rwlock_t *rw)
154{ 226{
155 if (!arch_read_trylock_once(rw)) 227 if (!arch_read_trylock_once(rw))
156 _raw_read_lock_wait_flags(rw, flags); 228 _raw_read_lock_wait(rw);
157} 229}
158 230
159static inline void arch_read_unlock(arch_rwlock_t *rw) 231static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -169,19 +241,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
169{ 241{
170 if (!arch_write_trylock_once(rw)) 242 if (!arch_write_trylock_once(rw))
171 _raw_write_lock_wait(rw); 243 _raw_write_lock_wait(rw);
172} 244 rw->owner = SPINLOCK_LOCKVAL;
173
174static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
175{
176 if (!arch_write_trylock_once(rw))
177 _raw_write_lock_wait_flags(rw, flags);
178} 245}
179 246
180static inline void arch_write_unlock(arch_rwlock_t *rw) 247static inline void arch_write_unlock(arch_rwlock_t *rw)
181{ 248{
182 _raw_compare_and_swap(&rw->lock, 0x80000000, 0); 249 typecheck(unsigned int, rw->lock);
250
251 rw->owner = 0;
252 asm volatile(
253 __ASM_BARRIER
254 "st %1,%0\n"
255 : "+Q" (rw->lock)
256 : "d" (0)
257 : "cc", "memory");
183} 258}
184 259
260#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
261
185static inline int arch_read_trylock(arch_rwlock_t *rw) 262static inline int arch_read_trylock(arch_rwlock_t *rw)
186{ 263{
187 if (!arch_read_trylock_once(rw)) 264 if (!arch_read_trylock_once(rw))
@@ -191,12 +268,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
191 268
192static inline int arch_write_trylock(arch_rwlock_t *rw) 269static inline int arch_write_trylock(arch_rwlock_t *rw)
193{ 270{
194 if (!arch_write_trylock_once(rw)) 271 if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
195 return _raw_write_trylock_retry(rw); 272 return 0;
273 rw->owner = SPINLOCK_LOCKVAL;
196 return 1; 274 return 1;
197} 275}
198 276
199#define arch_read_relax(lock) cpu_relax() 277static inline void arch_read_relax(arch_rwlock_t *rw)
200#define arch_write_relax(lock) cpu_relax() 278{
279 arch_lock_relax(rw->owner);
280}
281
282static inline void arch_write_relax(arch_rwlock_t *rw)
283{
284 arch_lock_relax(rw->owner);
285}
201 286
202#endif /* __ASM_SPINLOCK_H */ 287#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b2cd6ff7c2c5..d84b6939237c 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,6 +13,7 @@ typedef struct {
13 13
14typedef struct { 14typedef struct {
15 unsigned int lock; 15 unsigned int lock;
16 unsigned int owner;
16} arch_rwlock_t; 17} arch_rwlock_t;
17 18
18#define __ARCH_RW_LOCK_UNLOCKED { 0 } 19#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 18ea9e3f8142..2542a7e4c8b4 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -103,6 +103,61 @@ static inline void restore_fp_regs(freg_t *fprs)
103 asm volatile("ld 15,%0" : : "Q" (fprs[15])); 103 asm volatile("ld 15,%0" : : "Q" (fprs[15]));
104} 104}
105 105
106static inline void save_vx_regs(__vector128 *vxrs)
107{
108 typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
109
110 asm volatile(
111 " la 1,%0\n"
112 " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
113 " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
114 : "=Q" (*(addrtype *) vxrs) : : "1");
115}
116
117static inline void save_vx_regs_safe(__vector128 *vxrs)
118{
119 unsigned long cr0, flags;
120
121 flags = arch_local_irq_save();
122 __ctl_store(cr0, 0, 0);
123 __ctl_set_bit(0, 17);
124 __ctl_set_bit(0, 18);
125 save_vx_regs(vxrs);
126 __ctl_load(cr0, 0, 0);
127 arch_local_irq_restore(flags);
128}
129
130static inline void restore_vx_regs(__vector128 *vxrs)
131{
132 typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
133
134 asm volatile(
135 " la 1,%0\n"
136 " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
137 " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
138 : : "Q" (*(addrtype *) vxrs) : "1");
139}
140
141static inline void save_fp_vx_regs(struct task_struct *task)
142{
143#ifdef CONFIG_64BIT
144 if (task->thread.vxrs)
145 save_vx_regs(task->thread.vxrs);
146 else
147#endif
148 save_fp_regs(task->thread.fp_regs.fprs);
149}
150
151static inline void restore_fp_vx_regs(struct task_struct *task)
152{
153#ifdef CONFIG_64BIT
154 if (task->thread.vxrs)
155 restore_vx_regs(task->thread.vxrs);
156 else
157#endif
158 restore_fp_regs(task->thread.fp_regs.fprs);
159}
160
106static inline void save_access_regs(unsigned int *acrs) 161static inline void save_access_regs(unsigned int *acrs)
107{ 162{
108 typedef struct { int _[NUM_ACRS]; } acrstype; 163 typedef struct { int _[NUM_ACRS]; } acrstype;
@@ -120,16 +175,16 @@ static inline void restore_access_regs(unsigned int *acrs)
120#define switch_to(prev,next,last) do { \ 175#define switch_to(prev,next,last) do { \
121 if (prev->mm) { \ 176 if (prev->mm) { \
122 save_fp_ctl(&prev->thread.fp_regs.fpc); \ 177 save_fp_ctl(&prev->thread.fp_regs.fpc); \
123 save_fp_regs(prev->thread.fp_regs.fprs); \ 178 save_fp_vx_regs(prev); \
124 save_access_regs(&prev->thread.acrs[0]); \ 179 save_access_regs(&prev->thread.acrs[0]); \
125 save_ri_cb(prev->thread.ri_cb); \ 180 save_ri_cb(prev->thread.ri_cb); \
126 } \ 181 } \
127 if (next->mm) { \ 182 if (next->mm) { \
183 update_cr_regs(next); \
128 restore_fp_ctl(&next->thread.fp_regs.fpc); \ 184 restore_fp_ctl(&next->thread.fp_regs.fpc); \
129 restore_fp_regs(next->thread.fp_regs.fprs); \ 185 restore_fp_vx_regs(next); \
130 restore_access_regs(&next->thread.acrs[0]); \ 186 restore_access_regs(&next->thread.acrs[0]); \
131 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ 187 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
132 update_cr_regs(next); \
133 } \ 188 } \
134 prev = __switch_to(prev,next); \ 189 prev = __switch_to(prev,next); \
135} while (0) 190} while (0)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index b833e9c0bfbf..4d62fd5b56e5 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -84,11 +84,13 @@ static inline struct thread_info *current_thread_info(void)
84#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 84#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
85#define TIF_SECCOMP 5 /* secure computing */ 85#define TIF_SECCOMP 5 /* secure computing */
86#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 86#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
87#define TIF_UPROBE 7 /* breakpointed or single-stepping */
87#define TIF_31BIT 16 /* 32bit process */ 88#define TIF_31BIT 16 /* 32bit process */
88#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 89#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
89#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ 90#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
90#define TIF_SINGLE_STEP 19 /* This task is single stepped */ 91#define TIF_SINGLE_STEP 19 /* This task is single stepped */
91#define TIF_BLOCK_STEP 20 /* This task is block stepped */ 92#define TIF_BLOCK_STEP 20 /* This task is block stepped */
93#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
92 94
93#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 95#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
94#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 96#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -97,6 +99,7 @@ static inline struct thread_info *current_thread_info(void)
97#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 99#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
98#define _TIF_SECCOMP (1<<TIF_SECCOMP) 100#define _TIF_SECCOMP (1<<TIF_SECCOMP)
99#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 101#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
102#define _TIF_UPROBE (1<<TIF_UPROBE)
100#define _TIF_31BIT (1<<TIF_31BIT) 103#define _TIF_31BIT (1<<TIF_31BIT)
101#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 104#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
102 105
diff --git a/arch/s390/include/asm/uprobes.h b/arch/s390/include/asm/uprobes.h
new file mode 100644
index 000000000000..1411dff7fea7
--- /dev/null
+++ b/arch/s390/include/asm/uprobes.h
@@ -0,0 +1,42 @@
1/*
2 * User-space Probes (UProbes) for s390
3 *
4 * Copyright IBM Corp. 2014
5 * Author(s): Jan Willeke,
6 */
7
8#ifndef _ASM_UPROBES_H
9#define _ASM_UPROBES_H
10
11#include <linux/notifier.h>
12
13typedef u16 uprobe_opcode_t;
14
15#define UPROBE_XOL_SLOT_BYTES 256 /* cache aligned */
16
17#define UPROBE_SWBP_INSN 0x0002
18#define UPROBE_SWBP_INSN_SIZE 2
19
20struct arch_uprobe {
21 union{
22 uprobe_opcode_t insn[3];
23 uprobe_opcode_t ixol[3];
24 };
25 unsigned int saved_per : 1;
26 unsigned int saved_int_code;
27};
28
29struct arch_uprobe_task {
30};
31
32int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
33 unsigned long addr);
34int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
35int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
36bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
37int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
38 void *data);
39void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
40unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
41 struct pt_regs *regs);
42#endif /* _ASM_UPROBES_H */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index bc9746a7d47c..a62526d09201 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -22,13 +22,17 @@ struct vdso_data {
22 __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */ 22 __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
23 __u64 xtime_clock_sec; /* Kernel time 0x10 */ 23 __u64 xtime_clock_sec; /* Kernel time 0x10 */
24 __u64 xtime_clock_nsec; /* 0x18 */ 24 __u64 xtime_clock_nsec; /* 0x18 */
25 __u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */ 25 __u64 xtime_coarse_sec; /* Coarse kernel time 0x20 */
26 __u64 wtom_clock_nsec; /* 0x28 */ 26 __u64 xtime_coarse_nsec; /* 0x28 */
27 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ 27 __u64 wtom_clock_sec; /* Wall to monotonic clock 0x30 */
28 __u32 tz_dsttime; /* Type of dst correction 0x34 */ 28 __u64 wtom_clock_nsec; /* 0x38 */
29 __u32 ectg_available; /* ECTG instruction present 0x38 */ 29 __u64 wtom_coarse_sec; /* Coarse wall to monotonic 0x40 */
30 __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */ 30 __u64 wtom_coarse_nsec; /* 0x48 */
31 __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */ 31 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x50 */
32 __u32 tz_dsttime; /* Type of dst correction 0x54 */
33 __u32 ectg_available; /* ECTG instruction present 0x58 */
34 __u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
35 __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
32}; 36};
33 37
34struct vdso_per_cpu_data { 38struct vdso_per_cpu_data {
diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h
index bfe25d513ad2..10a179af62d8 100644
--- a/arch/s390/include/asm/vtimer.h
+++ b/arch/s390/include/asm/vtimer.h
@@ -28,6 +28,4 @@ extern int del_virt_timer(struct vtimer_list *timer);
28extern void init_cpu_vtimer(void); 28extern void init_cpu_vtimer(void);
29extern void vtime_init(void); 29extern void vtime_init(void);
30 30
31extern void vtime_stop_cpu(void);
32
33#endif /* _ASM_S390_TIMER_H */ 31#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h
index b30de9c01bbe..5f0b8d7ddb0b 100644
--- a/arch/s390/include/uapi/asm/sigcontext.h
+++ b/arch/s390/include/uapi/asm/sigcontext.h
@@ -7,10 +7,14 @@
7#define _ASM_S390_SIGCONTEXT_H 7#define _ASM_S390_SIGCONTEXT_H
8 8
9#include <linux/compiler.h> 9#include <linux/compiler.h>
10#include <linux/types.h>
10 11
11#define __NUM_GPRS 16 12#define __NUM_GPRS 16
12#define __NUM_FPRS 16 13#define __NUM_FPRS 16
13#define __NUM_ACRS 16 14#define __NUM_ACRS 16
15#define __NUM_VXRS 32
16#define __NUM_VXRS_LOW 16
17#define __NUM_VXRS_HIGH 16
14 18
15#ifndef __s390x__ 19#ifndef __s390x__
16 20
@@ -59,6 +63,16 @@ typedef struct
59 _s390_fp_regs fpregs; 63 _s390_fp_regs fpregs;
60} _sigregs; 64} _sigregs;
61 65
66typedef struct
67{
68#ifndef __s390x__
69 unsigned long gprs_high[__NUM_GPRS];
70#endif
71 unsigned long long vxrs_low[__NUM_VXRS_LOW];
72 __vector128 vxrs_high[__NUM_VXRS_HIGH];
73 unsigned char __reserved[128];
74} _sigregs_ext;
75
62struct sigcontext 76struct sigcontext
63{ 77{
64 unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS]; 78 unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS];
diff --git a/arch/s390/include/uapi/asm/types.h b/arch/s390/include/uapi/asm/types.h
index 038f2b9178a4..3c3951e3415b 100644
--- a/arch/s390/include/uapi/asm/types.h
+++ b/arch/s390/include/uapi/asm/types.h
@@ -17,6 +17,10 @@
17typedef unsigned long addr_t; 17typedef unsigned long addr_t;
18typedef __signed__ long saddr_t; 18typedef __signed__ long saddr_t;
19 19
20typedef struct {
21 __u32 u[4];
22} __vector128;
23
20#endif /* __ASSEMBLY__ */ 24#endif /* __ASSEMBLY__ */
21 25
22#endif /* _UAPI_S390_TYPES_H */ 26#endif /* _UAPI_S390_TYPES_H */
diff --git a/arch/s390/include/uapi/asm/ucontext.h b/arch/s390/include/uapi/asm/ucontext.h
index 3e077b2a4705..64a69aa5dde0 100644
--- a/arch/s390/include/uapi/asm/ucontext.h
+++ b/arch/s390/include/uapi/asm/ucontext.h
@@ -7,10 +7,15 @@
7#ifndef _ASM_S390_UCONTEXT_H 7#ifndef _ASM_S390_UCONTEXT_H
8#define _ASM_S390_UCONTEXT_H 8#define _ASM_S390_UCONTEXT_H
9 9
10#define UC_EXTENDED 0x00000001 10#define UC_GPRS_HIGH 1 /* uc_mcontext_ext has valid high gprs */
11 11#define UC_VXRS 2 /* uc_mcontext_ext has valid vector regs */
12#ifndef __s390x__
13 12
13/*
14 * The struct ucontext_extended describes how the registers are stored
15 * on a rt signal frame. Please note that the structure is not fixed,
16 * if new CPU registers are added to the user state the size of the
17 * struct ucontext_extended will increase.
18 */
14struct ucontext_extended { 19struct ucontext_extended {
15 unsigned long uc_flags; 20 unsigned long uc_flags;
16 struct ucontext *uc_link; 21 struct ucontext *uc_link;
@@ -19,11 +24,9 @@ struct ucontext_extended {
19 sigset_t uc_sigmask; 24 sigset_t uc_sigmask;
20 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ 25 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
21 unsigned char __unused[128 - sizeof(sigset_t)]; 26 unsigned char __unused[128 - sizeof(sigset_t)];
22 unsigned long uc_gprs_high[16]; 27 _sigregs_ext uc_mcontext_ext;
23}; 28};
24 29
25#endif
26
27struct ucontext { 30struct ucontext {
28 unsigned long uc_flags; 31 unsigned long uc_flags;
29 struct ucontext *uc_link; 32 struct ucontext *uc_link;
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index a95c4ca99617..204c43a4c245 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
28 28
29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
30 30
31obj-y := traps.o time.o process.o base.o early.o setup.o vtime.o 31obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o 33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
@@ -52,11 +52,9 @@ obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y)
52 52
53obj-$(CONFIG_STACKTRACE) += stacktrace.o 53obj-$(CONFIG_STACKTRACE) += stacktrace.o
54obj-$(CONFIG_KPROBES) += kprobes.o 54obj-$(CONFIG_KPROBES) += kprobes.o
55obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) 55obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
56obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
57obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
58obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
59obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 56obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
57obj-$(CONFIG_UPROBES) += uprobes.o
60 58
61ifdef CONFIG_64BIT 59ifdef CONFIG_64BIT
62obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ 60obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index afe1715a4eb7..ef279a136801 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,7 +9,7 @@
9#include <linux/kbuild.h> 9#include <linux/kbuild.h>
10#include <linux/kvm_host.h> 10#include <linux/kvm_host.h>
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <asm/cputime.h> 12#include <asm/idle.h>
13#include <asm/vdso.h> 13#include <asm/vdso.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15 15
@@ -62,8 +62,12 @@ int main(void)
62 DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp)); 62 DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
63 DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec)); 63 DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
64 DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); 64 DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
65 DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
66 DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
65 DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); 67 DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
66 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 68 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
69 DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec));
70 DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec));
67 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 71 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
68 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 72 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
69 DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); 73 DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
@@ -73,8 +77,11 @@ int main(void)
73 /* constants used by the vdso */ 77 /* constants used by the vdso */
74 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); 78 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
75 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); 79 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
80 DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
81 DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
76 DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); 82 DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
77 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 83 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
84 DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
78 BLANK(); 85 BLANK();
79 /* idle data offsets */ 86 /* idle data offsets */
80 DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter)); 87 DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
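
The new __VDSO_*_CRS_* offsets and __CLOCK_*_COARSE / __CLOCK_COARSE_RES constants are groundwork for the vdso to serve the coarse clocks without a system call. A quick userspace check of that interface (nothing here is s390-specific, and the resolution reported depends on the kernel tick length):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec fine, coarse, res;

	clock_gettime(CLOCK_MONOTONIC, &fine);
	clock_gettime(CLOCK_MONOTONIC_COARSE, &coarse);	/* vdso fast path */
	clock_getres(CLOCK_MONOTONIC_COARSE, &res);	/* cf. __CLOCK_COARSE_RES */
	printf("fine   %lld.%09ld\n", (long long) fine.tv_sec, fine.tv_nsec);
	printf("coarse %lld.%09ld\n", (long long) coarse.tv_sec, coarse.tv_nsec);
	printf("coarse resolution %ld ns\n", res.tv_nsec);
	return 0;
}
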
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 70d4b7c4beaa..a0a886c04977 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -50,6 +50,14 @@ typedef struct
50 _s390_fp_regs32 fpregs; 50 _s390_fp_regs32 fpregs;
51} _sigregs32; 51} _sigregs32;
52 52
53typedef struct
54{
55 __u32 gprs_high[__NUM_GPRS];
56 __u64 vxrs_low[__NUM_VXRS_LOW];
57 __vector128 vxrs_high[__NUM_VXRS_HIGH];
58 __u8 __reserved[128];
59} _sigregs_ext32;
60
53#define _SIGCONTEXT_NSIG32 64 61#define _SIGCONTEXT_NSIG32 64
54#define _SIGCONTEXT_NSIG_BPW32 32 62#define _SIGCONTEXT_NSIG_BPW32 32
55#define __SIGNAL_FRAMESIZE32 96 63#define __SIGNAL_FRAMESIZE32 96
@@ -72,6 +80,7 @@ struct ucontext32 {
72 compat_sigset_t uc_sigmask; 80 compat_sigset_t uc_sigmask;
73 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ 81 /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
74 unsigned char __unused[128 - sizeof(compat_sigset_t)]; 82 unsigned char __unused[128 - sizeof(compat_sigset_t)];
83 _sigregs_ext32 uc_mcontext_ext;
75}; 84};
76 85
77struct stat64_emu31; 86struct stat64_emu31;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 598b0b42668b..009f5eb11125 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -36,17 +36,16 @@ typedef struct
36 struct sigcontext32 sc; 36 struct sigcontext32 sc;
37 _sigregs32 sregs; 37 _sigregs32 sregs;
38 int signo; 38 int signo;
39 __u32 gprs_high[NUM_GPRS]; 39 _sigregs_ext32 sregs_ext;
40 __u8 retcode[S390_SYSCALL_SIZE]; 40 __u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
41} sigframe32; 41} sigframe32;
42 42
43typedef struct 43typedef struct
44{ 44{
45 __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; 45 __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
46 __u8 retcode[S390_SYSCALL_SIZE]; 46 __u16 svc_insn;
47 compat_siginfo_t info; 47 compat_siginfo_t info;
48 struct ucontext32 uc; 48 struct ucontext32 uc;
49 __u32 gprs_high[NUM_GPRS];
50} rt_sigframe32; 49} rt_sigframe32;
51 50
52int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) 51int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
@@ -151,6 +150,38 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
151 return err ? -EFAULT : 0; 150 return err ? -EFAULT : 0;
152} 151}
153 152
153/* Store registers needed to create the signal frame */
154static void store_sigregs(void)
155{
156 int i;
157
158 save_access_regs(current->thread.acrs);
159 save_fp_ctl(&current->thread.fp_regs.fpc);
160 if (current->thread.vxrs) {
161 save_vx_regs(current->thread.vxrs);
162 for (i = 0; i < __NUM_FPRS; i++)
163 current->thread.fp_regs.fprs[i] =
164 *(freg_t *)(current->thread.vxrs + i);
165 } else
166 save_fp_regs(current->thread.fp_regs.fprs);
167}
168
169/* Load registers after signal return */
170static void load_sigregs(void)
171{
172 int i;
173
174 restore_access_regs(current->thread.acrs);
175 /* restore_fp_ctl is done in restore_sigregs */
176 if (current->thread.vxrs) {
177 for (i = 0; i < __NUM_FPRS; i++)
178 *(freg_t *)(current->thread.vxrs + i) =
179 current->thread.fp_regs.fprs[i];
180 restore_vx_regs(current->thread.vxrs);
181 } else
182 restore_fp_regs(current->thread.fp_regs.fprs);
183}
184
154static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) 185static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
155{ 186{
156 _sigregs32 user_sregs; 187 _sigregs32 user_sregs;
@@ -163,11 +194,8 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
163 (__u32)(regs->psw.mask & PSW_MASK_BA); 194 (__u32)(regs->psw.mask & PSW_MASK_BA);
164 for (i = 0; i < NUM_GPRS; i++) 195 for (i = 0; i < NUM_GPRS; i++)
165 user_sregs.regs.gprs[i] = (__u32) regs->gprs[i]; 196 user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
166 save_access_regs(current->thread.acrs);
167 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 197 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
168 sizeof(user_sregs.regs.acrs)); 198 sizeof(user_sregs.regs.acrs));
169 save_fp_ctl(&current->thread.fp_regs.fpc);
170 save_fp_regs(current->thread.fp_regs.fprs);
171 memcpy(&user_sregs.fpregs, &current->thread.fp_regs, 199 memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
172 sizeof(user_sregs.fpregs)); 200 sizeof(user_sregs.fpregs));
173 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32))) 201 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
@@ -207,37 +235,67 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
207 regs->gprs[i] = (__u64) user_sregs.regs.gprs[i]; 235 regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
208 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, 236 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
209 sizeof(current->thread.acrs)); 237 sizeof(current->thread.acrs));
210 restore_access_regs(current->thread.acrs);
211 238
212 memcpy(&current->thread.fp_regs, &user_sregs.fpregs, 239 memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
213 sizeof(current->thread.fp_regs)); 240 sizeof(current->thread.fp_regs));
214 241
215 restore_fp_regs(current->thread.fp_regs.fprs);
216 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ 242 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
217 return 0; 243 return 0;
218} 244}
219 245
220static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs) 246static int save_sigregs_ext32(struct pt_regs *regs,
247 _sigregs_ext32 __user *sregs_ext)
221{ 248{
222 __u32 gprs_high[NUM_GPRS]; 249 __u32 gprs_high[NUM_GPRS];
250 __u64 vxrs[__NUM_VXRS_LOW];
223 int i; 251 int i;
224 252
253 /* Save high gprs to signal stack */
225 for (i = 0; i < NUM_GPRS; i++) 254 for (i = 0; i < NUM_GPRS; i++)
226 gprs_high[i] = regs->gprs[i] >> 32; 255 gprs_high[i] = regs->gprs[i] >> 32;
227 if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high))) 256 if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
257 sizeof(sregs_ext->gprs_high)))
228 return -EFAULT; 258 return -EFAULT;
259
260 /* Save vector registers to signal stack */
261 if (current->thread.vxrs) {
262 for (i = 0; i < __NUM_VXRS_LOW; i++)
263 vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
264 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
265 sizeof(sregs_ext->vxrs_low)) ||
266 __copy_to_user(&sregs_ext->vxrs_high,
267 current->thread.vxrs + __NUM_VXRS_LOW,
268 sizeof(sregs_ext->vxrs_high)))
269 return -EFAULT;
270 }
229 return 0; 271 return 0;
230} 272}
231 273
232static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs) 274static int restore_sigregs_ext32(struct pt_regs *regs,
275 _sigregs_ext32 __user *sregs_ext)
233{ 276{
234 __u32 gprs_high[NUM_GPRS]; 277 __u32 gprs_high[NUM_GPRS];
278 __u64 vxrs[__NUM_VXRS_LOW];
235 int i; 279 int i;
236 280
237 if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high))) 281 /* Restore high gprs from signal stack */
282 if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
 283			     sizeof(sregs_ext->gprs_high)))
238 return -EFAULT; 284 return -EFAULT;
239 for (i = 0; i < NUM_GPRS; i++) 285 for (i = 0; i < NUM_GPRS; i++)
240 *(__u32 *)&regs->gprs[i] = gprs_high[i]; 286 *(__u32 *)&regs->gprs[i] = gprs_high[i];
287
288 /* Restore vector registers from signal stack */
289 if (current->thread.vxrs) {
290 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
291 sizeof(sregs_ext->vxrs_low)) ||
292 __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
293 &sregs_ext->vxrs_high,
294 sizeof(sregs_ext->vxrs_high)))
295 return -EFAULT;
296 for (i = 0; i < __NUM_VXRS_LOW; i++)
297 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
298 }
241 return 0; 299 return 0;
242} 300}
243 301
@@ -252,8 +310,9 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
252 set_current_blocked(&set); 310 set_current_blocked(&set);
253 if (restore_sigregs32(regs, &frame->sregs)) 311 if (restore_sigregs32(regs, &frame->sregs))
254 goto badframe; 312 goto badframe;
255 if (restore_sigregs_gprs_high(regs, frame->gprs_high)) 313 if (restore_sigregs_ext32(regs, &frame->sregs_ext))
256 goto badframe; 314 goto badframe;
315 load_sigregs();
257 return regs->gprs[2]; 316 return regs->gprs[2];
258badframe: 317badframe:
259 force_sig(SIGSEGV, current); 318 force_sig(SIGSEGV, current);
@@ -269,12 +328,13 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
269 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 328 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
270 goto badframe; 329 goto badframe;
271 set_current_blocked(&set); 330 set_current_blocked(&set);
331 if (compat_restore_altstack(&frame->uc.uc_stack))
332 goto badframe;
272 if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) 333 if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
273 goto badframe; 334 goto badframe;
274 if (restore_sigregs_gprs_high(regs, frame->gprs_high)) 335 if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
275 goto badframe; 336 goto badframe;
276 if (compat_restore_altstack(&frame->uc.uc_stack)) 337 load_sigregs();
277 goto badframe;
278 return regs->gprs[2]; 338 return regs->gprs[2];
279badframe: 339badframe:
280 force_sig(SIGSEGV, current); 340 force_sig(SIGSEGV, current);
@@ -324,37 +384,64 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
324 struct pt_regs *regs) 384 struct pt_regs *regs)
325{ 385{
326 int sig = ksig->sig; 386 int sig = ksig->sig;
327 sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(sigframe32)); 387 sigframe32 __user *frame;
328 388 struct sigcontext32 sc;
389 unsigned long restorer;
390 size_t frame_size;
391
392 /*
393 * gprs_high are always present for 31-bit compat tasks.
394 * The space for vector registers is only allocated if
 395	 * the machine supports it.
396 */
397 frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
398 if (!MACHINE_HAS_VX)
399 frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
400 sizeof(frame->sregs_ext.vxrs_high);
401 frame = get_sigframe(&ksig->ka, regs, frame_size);
329 if (frame == (void __user *) -1UL) 402 if (frame == (void __user *) -1UL)
330 return -EFAULT; 403 return -EFAULT;
331 404
332 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) 405 /* Set up backchain. */
406 if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
407 return -EFAULT;
408
409 /* Create struct sigcontext32 on the signal stack */
410 memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
411 sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
412 if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
333 return -EFAULT; 413 return -EFAULT;
334 414
415 /* Store registers needed to create the signal frame */
416 store_sigregs();
417
418 /* Create _sigregs32 on the signal stack */
335 if (save_sigregs32(regs, &frame->sregs)) 419 if (save_sigregs32(regs, &frame->sregs))
336 return -EFAULT; 420 return -EFAULT;
337 if (save_sigregs_gprs_high(regs, frame->gprs_high)) 421
422 /* Place signal number on stack to allow backtrace from handler. */
423 if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
338 return -EFAULT; 424 return -EFAULT;
339 if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs)) 425
426 /* Create _sigregs_ext32 on the signal stack */
427 if (save_sigregs_ext32(regs, &frame->sregs_ext))
340 return -EFAULT; 428 return -EFAULT;
341 429
342 /* Set up to return from userspace. If provided, use a stub 430 /* Set up to return from userspace. If provided, use a stub
343 already in userspace. */ 431 already in userspace. */
344 if (ksig->ka.sa.sa_flags & SA_RESTORER) { 432 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
345 regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; 433 restorer = (unsigned long __force)
434 ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
346 } else { 435 } else {
 347		regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;	 436		/* Signal frames without vector registers are short! */
348 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, 437 __u16 __user *svc = (void *) frame + frame_size - 2;
349 (u16 __force __user *)(frame->retcode))) 438 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
350 return -EFAULT; 439 return -EFAULT;
440 restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
351 } 441 }
352 442
353 /* Set up backchain. */
354 if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
355 return -EFAULT;
356
357 /* Set up registers for signal handler */ 443 /* Set up registers for signal handler */
444 regs->gprs[14] = restorer;
358 regs->gprs[15] = (__force __u64) frame; 445 regs->gprs[15] = (__force __u64) frame;
359 /* Force 31 bit amode and default user address space control. */ 446 /* Force 31 bit amode and default user address space control. */
360 regs->psw.mask = PSW_MASK_BA | 447 regs->psw.mask = PSW_MASK_BA |
@@ -375,50 +462,69 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
375 regs->gprs[6] = task_thread_info(current)->last_break; 462 regs->gprs[6] = task_thread_info(current)->last_break;
376 } 463 }
377 464
378 /* Place signal number on stack to allow backtrace from handler. */
379 if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
380 return -EFAULT;
381 return 0; 465 return 0;
382} 466}
383 467
384static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, 468static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
385 struct pt_regs *regs) 469 struct pt_regs *regs)
386{ 470{
387 int err = 0; 471 rt_sigframe32 __user *frame;
388 rt_sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe32)); 472 unsigned long restorer;
389 473 size_t frame_size;
474 u32 uc_flags;
475
476 frame_size = sizeof(*frame) -
477 sizeof(frame->uc.uc_mcontext_ext.__reserved);
478 /*
479 * gprs_high are always present for 31-bit compat tasks.
480 * The space for vector registers is only allocated if
 481	 * the machine supports it.
482 */
483 uc_flags = UC_GPRS_HIGH;
484 if (MACHINE_HAS_VX) {
485 if (current->thread.vxrs)
486 uc_flags |= UC_VXRS;
487 } else
488 frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
489 sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
490 frame = get_sigframe(&ksig->ka, regs, frame_size);
390 if (frame == (void __user *) -1UL) 491 if (frame == (void __user *) -1UL)
391 return -EFAULT; 492 return -EFAULT;
392 493
393 if (copy_siginfo_to_user32(&frame->info, &ksig->info)) 494 /* Set up backchain. */
394 return -EFAULT; 495 if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
395
396 /* Create the ucontext. */
397 err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags);
398 err |= __put_user(0, &frame->uc.uc_link);
399 err |= __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
400 err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
401 err |= save_sigregs_gprs_high(regs, frame->gprs_high);
402 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
403 if (err)
404 return -EFAULT; 496 return -EFAULT;
405 497
406 /* Set up to return from userspace. If provided, use a stub 498 /* Set up to return from userspace. If provided, use a stub
407 already in userspace. */ 499 already in userspace. */
408 if (ksig->ka.sa.sa_flags & SA_RESTORER) { 500 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
409 regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; 501 restorer = (unsigned long __force)
502 ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
410 } else { 503 } else {
411 regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; 504 __u16 __user *svc = &frame->svc_insn;
412 if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, 505 if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
413 (u16 __force __user *)(frame->retcode)))
414 return -EFAULT; 506 return -EFAULT;
507 restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
415 } 508 }
416 509
417 /* Set up backchain. */ 510 /* Create siginfo on the signal stack */
418 if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) 511 if (copy_siginfo_to_user32(&frame->info, &ksig->info))
512 return -EFAULT;
513
514 /* Store registers needed to create the signal frame */
515 store_sigregs();
516
517 /* Create ucontext on the signal stack. */
518 if (__put_user(uc_flags, &frame->uc.uc_flags) ||
519 __put_user(0, &frame->uc.uc_link) ||
520 __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
521 save_sigregs32(regs, &frame->uc.uc_mcontext) ||
522 __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
523 save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
419 return -EFAULT; 524 return -EFAULT;
420 525
421 /* Set up registers for signal handler */ 526 /* Set up registers for signal handler */
527 regs->gprs[14] = restorer;
422 regs->gprs[15] = (__force __u64) frame; 528 regs->gprs[15] = (__force __u64) frame;
423 /* Force 31 bit amode and default user address space control. */ 529 /* Force 31 bit amode and default user address space control. */
424 regs->psw.mask = PSW_MASK_BA | 530 regs->psw.mask = PSW_MASK_BA |
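
store_sigregs() and save_sigregs_ext32() above exploit the register file layout: floating-point registers 0-15 are the leftmost doublewords of vector registers 0-15, so the signal frame only needs the rightmost halves (vxrs_low) plus the full V16-V31 (vxrs_high). A stand-alone sketch of that split; the two-doubleword struct is a local stand-in for the kernel's __vector128 on big-endian s390:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for __vector128: leftmost doubleword stored first. */
typedef struct { uint64_t high; uint64_t low; } vreg_t;

int main(void)
{
	vreg_t vxrs[32];
	uint64_t fprs[16], vxrs_low[16];
	int i;

	for (i = 0; i < 32; i++) {		/* recognizable test pattern */
		vxrs[i].high = 0xaaaa000000000000ULL | i;
		vxrs[i].low  = 0xbbbb000000000000ULL | i;
	}

	for (i = 0; i < 16; i++) {
		/* like "*(freg_t *)(current->thread.vxrs + i)" in store_sigregs() */
		fprs[i] = vxrs[i].high;
		/* like "*((__u64 *)(current->thread.vxrs + i) + 1)" above */
		vxrs_low[i] = vxrs[i].low;
	}

	printf("fpr2 %016llx  vxrs_low[2] %016llx\n",
	       (unsigned long long) fprs[2], (unsigned long long) vxrs_low[2]);
	return 0;
}
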
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index a3b9150e6802..9f73c8059022 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -46,9 +46,9 @@ struct dump_save_areas dump_save_areas;
46/* 46/*
47 * Allocate and add a save area for a CPU 47 * Allocate and add a save area for a CPU
48 */ 48 */
49struct save_area *dump_save_area_create(int cpu) 49struct save_area_ext *dump_save_area_create(int cpu)
50{ 50{
51 struct save_area **save_areas, *save_area; 51 struct save_area_ext **save_areas, *save_area;
52 52
53 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); 53 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
54 if (!save_area) 54 if (!save_area)
@@ -386,9 +386,45 @@ static void *nt_s390_prefix(void *ptr, struct save_area *sa)
386} 386}
387 387
388/* 388/*
389 * Initialize vxrs high note (full 128 bit VX registers 16-31)
390 */
391static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
392{
393 return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
394 16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME);
395}
396
397/*
398 * Initialize vxrs low note (lower halves of VX registers 0-15)
399 */
400static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
401{
402 Elf64_Nhdr *note;
403 u64 len;
404 int i;
405
406 note = (Elf64_Nhdr *)ptr;
407 note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1;
408 note->n_descsz = 16 * 8;
409 note->n_type = NT_S390_VXRS_LOW;
410 len = sizeof(Elf64_Nhdr);
411
412 memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz);
413 len = roundup(len + note->n_namesz, 4);
414
415 ptr += len;
416 /* Copy lower halves of SIMD registers 0-15 */
417 for (i = 0; i < 16; i++) {
418 memcpy(ptr, &vx_regs[i], 8);
419 ptr += 8;
420 }
421 return ptr;
422}
423
424/*
389 * Fill ELF notes for one CPU with save area registers 425 * Fill ELF notes for one CPU with save area registers
390 */ 426 */
391void *fill_cpu_elf_notes(void *ptr, struct save_area *sa) 427void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs)
392{ 428{
393 ptr = nt_prstatus(ptr, sa); 429 ptr = nt_prstatus(ptr, sa);
394 ptr = nt_fpregset(ptr, sa); 430 ptr = nt_fpregset(ptr, sa);
@@ -397,6 +433,10 @@ void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
397 ptr = nt_s390_tod_preg(ptr, sa); 433 ptr = nt_s390_tod_preg(ptr, sa);
398 ptr = nt_s390_ctrs(ptr, sa); 434 ptr = nt_s390_ctrs(ptr, sa);
399 ptr = nt_s390_prefix(ptr, sa); 435 ptr = nt_s390_prefix(ptr, sa);
436 if (MACHINE_HAS_VX && vx_regs) {
437 ptr = nt_s390_vx_low(ptr, vx_regs);
438 ptr = nt_s390_vx_high(ptr, vx_regs);
439 }
400 return ptr; 440 return ptr;
401} 441}
402 442
@@ -484,7 +524,7 @@ static int get_cpu_cnt(void)
484 int i, cpus = 0; 524 int i, cpus = 0;
485 525
486 for (i = 0; i < dump_save_areas.count; i++) { 526 for (i = 0; i < dump_save_areas.count; i++) {
487 if (dump_save_areas.areas[i]->pref_reg == 0) 527 if (dump_save_areas.areas[i]->sa.pref_reg == 0)
488 continue; 528 continue;
489 cpus++; 529 cpus++;
490 } 530 }
@@ -530,17 +570,17 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
530 */ 570 */
531static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) 571static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
532{ 572{
533 struct save_area *sa; 573 struct save_area_ext *sa_ext;
534 void *ptr_start = ptr; 574 void *ptr_start = ptr;
535 int i; 575 int i;
536 576
537 ptr = nt_prpsinfo(ptr); 577 ptr = nt_prpsinfo(ptr);
538 578
539 for (i = 0; i < dump_save_areas.count; i++) { 579 for (i = 0; i < dump_save_areas.count; i++) {
540 sa = dump_save_areas.areas[i]; 580 sa_ext = dump_save_areas.areas[i];
541 if (sa->pref_reg == 0) 581 if (sa_ext->sa.pref_reg == 0)
542 continue; 582 continue;
543 ptr = fill_cpu_elf_notes(ptr, sa); 583 ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
544 } 584 }
545 ptr = nt_vmcoreinfo(ptr); 585 ptr = nt_vmcoreinfo(ptr);
546 memset(phdr, 0, sizeof(*phdr)); 586 memset(phdr, 0, sizeof(*phdr));
@@ -581,7 +621,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
581 621
582 mem_chunk_cnt = get_mem_chunk_cnt(); 622 mem_chunk_cnt = get_mem_chunk_cnt();
583 623
584 alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + 624 alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
585 mem_chunk_cnt * sizeof(Elf64_Phdr); 625 mem_chunk_cnt * sizeof(Elf64_Phdr);
586 hdr = kzalloc_panic(alloc_size); 626 hdr = kzalloc_panic(alloc_size);
587 /* Init elf header */ 627 /* Init elf header */
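
nt_s390_vx_low() above builds an ELF note by hand: an Elf64_Nhdr, the owner name padded to 4 bytes, then the descriptor (16 x 8 bytes of low register halves); the per-CPU note estimate grows from 0x300 to 0x4a0 to leave room for the two new notes. A host-side sketch of the same size arithmetic; "CORE" is the usual owner string and is assumed to be what KEXEC_CORE_NOTE_NAME expands to:

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* One ELF note: header + 4-byte padded name + 4-byte padded descriptor. */
static size_t note_size(const char *name, size_t descsz)
{
	size_t namesz = strlen(name) + 1;

	return sizeof(Elf64_Nhdr) + ((namesz + 3) & ~3UL) + ((descsz + 3) & ~3UL);
}

int main(void)
{
	printf("NT_S390_VXRS_LOW  note: %zu bytes\n", note_size("CORE", 16 * 8));
	printf("NT_S390_VXRS_HIGH note: %zu bytes\n", note_size("CORE", 16 * 16));
	return 0;
}
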
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 993efe6a887c..f3762937dd82 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -60,6 +60,11 @@ enum {
60 A_28, /* Access reg. starting at position 28 */ 60 A_28, /* Access reg. starting at position 28 */
61 C_8, /* Control reg. starting at position 8 */ 61 C_8, /* Control reg. starting at position 8 */
62 C_12, /* Control reg. starting at position 12 */ 62 C_12, /* Control reg. starting at position 12 */
63 V_8, /* Vector reg. starting at position 8, extension bit at 36 */
64 V_12, /* Vector reg. starting at position 12, extension bit at 37 */
65 V_16, /* Vector reg. starting at position 16, extension bit at 38 */
66 V_32, /* Vector reg. starting at position 32, extension bit at 39 */
67 W_12, /* Vector reg. at bit 12, extension at bit 37, used as index */
63 B_16, /* Base register starting at position 16 */ 68 B_16, /* Base register starting at position 16 */
64 B_32, /* Base register starting at position 32 */ 69 B_32, /* Base register starting at position 32 */
65 X_12, /* Index register starting at position 12 */ 70 X_12, /* Index register starting at position 12 */
@@ -82,6 +87,8 @@ enum {
82 U8_24, /* 8 bit unsigned value starting at 24 */ 87 U8_24, /* 8 bit unsigned value starting at 24 */
83 U8_32, /* 8 bit unsigned value starting at 32 */ 88 U8_32, /* 8 bit unsigned value starting at 32 */
84 I8_8, /* 8 bit signed value starting at 8 */ 89 I8_8, /* 8 bit signed value starting at 8 */
90 I8_16, /* 8 bit signed value starting at 16 */
91 I8_24, /* 8 bit signed value starting at 24 */
85 I8_32, /* 8 bit signed value starting at 32 */ 92 I8_32, /* 8 bit signed value starting at 32 */
86 J12_12, /* PC relative offset at 12 */ 93 J12_12, /* PC relative offset at 12 */
87 I16_16, /* 16 bit signed value starting at 16 */ 94 I16_16, /* 16 bit signed value starting at 16 */
@@ -96,6 +103,9 @@ enum {
96 U32_16, /* 32 bit unsigned value starting at 16 */ 103 U32_16, /* 32 bit unsigned value starting at 16 */
97 M_16, /* 4 bit optional mask starting at 16 */ 104 M_16, /* 4 bit optional mask starting at 16 */
98 M_20, /* 4 bit optional mask starting at 20 */ 105 M_20, /* 4 bit optional mask starting at 20 */
106 M_24, /* 4 bit optional mask starting at 24 */
107 M_28, /* 4 bit optional mask starting at 28 */
108 M_32, /* 4 bit optional mask starting at 32 */
99 RO_28, /* optional GPR starting at position 28 */ 109 RO_28, /* optional GPR starting at position 28 */
100}; 110};
101 111
@@ -130,7 +140,7 @@ enum {
130 INSTR_RSY_RDRM, 140 INSTR_RSY_RDRM,
131 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, 141 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
132 INSTR_RS_RURD, 142 INSTR_RS_RURD,
133 INSTR_RXE_FRRD, INSTR_RXE_RRRD, 143 INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
134 INSTR_RXF_FRRDF, 144 INSTR_RXF_FRRDF,
135 INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD, 145 INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
136 INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD, 146 INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
@@ -143,6 +153,17 @@ enum {
143 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, 153 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
144 INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, 154 INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
145 INSTR_S_00, INSTR_S_RD, 155 INSTR_S_00, INSTR_S_RD,
156 INSTR_VRI_V0IM, INSTR_VRI_V0I0, INSTR_VRI_V0IIM, INSTR_VRI_VVIM,
157 INSTR_VRI_VVV0IM, INSTR_VRI_VVV0I0, INSTR_VRI_VVIMM,
158 INSTR_VRR_VV00MMM, INSTR_VRR_VV000MM, INSTR_VRR_VV0000M,
159 INSTR_VRR_VV00000, INSTR_VRR_VVV0M0M, INSTR_VRR_VV00M0M,
160 INSTR_VRR_VVV000M, INSTR_VRR_VVV000V, INSTR_VRR_VVV0000,
161 INSTR_VRR_VVV0MMM, INSTR_VRR_VVV00MM, INSTR_VRR_VVVMM0V,
162 INSTR_VRR_VVVM0MV, INSTR_VRR_VVVM00V, INSTR_VRR_VRR0000,
163 INSTR_VRS_VVRDM, INSTR_VRS_VVRD0, INSTR_VRS_VRRDM, INSTR_VRS_VRRD0,
164 INSTR_VRS_RVRDM,
165 INSTR_VRV_VVRDM, INSTR_VRV_VWRDM,
166 INSTR_VRX_VRRDM, INSTR_VRX_VRRD0,
146}; 167};
147 168
148static const struct s390_operand operands[] = 169static const struct s390_operand operands[] =
@@ -168,6 +189,11 @@ static const struct s390_operand operands[] =
168 [A_28] = { 4, 28, OPERAND_AR }, 189 [A_28] = { 4, 28, OPERAND_AR },
169 [C_8] = { 4, 8, OPERAND_CR }, 190 [C_8] = { 4, 8, OPERAND_CR },
170 [C_12] = { 4, 12, OPERAND_CR }, 191 [C_12] = { 4, 12, OPERAND_CR },
192 [V_8] = { 4, 8, OPERAND_VR },
193 [V_12] = { 4, 12, OPERAND_VR },
194 [V_16] = { 4, 16, OPERAND_VR },
195 [V_32] = { 4, 32, OPERAND_VR },
196 [W_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
171 [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, 197 [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
172 [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, 198 [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
173 [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, 199 [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
@@ -190,6 +216,11 @@ static const struct s390_operand operands[] =
190 [U8_24] = { 8, 24, 0 }, 216 [U8_24] = { 8, 24, 0 },
191 [U8_32] = { 8, 32, 0 }, 217 [U8_32] = { 8, 32, 0 },
192 [J12_12] = { 12, 12, OPERAND_PCREL }, 218 [J12_12] = { 12, 12, OPERAND_PCREL },
219 [I8_8] = { 8, 8, OPERAND_SIGNED },
220 [I8_16] = { 8, 16, OPERAND_SIGNED },
221 [I8_24] = { 8, 24, OPERAND_SIGNED },
222 [I8_32] = { 8, 32, OPERAND_SIGNED },
223 [I16_32] = { 16, 32, OPERAND_SIGNED },
193 [I16_16] = { 16, 16, OPERAND_SIGNED }, 224 [I16_16] = { 16, 16, OPERAND_SIGNED },
194 [U16_16] = { 16, 16, 0 }, 225 [U16_16] = { 16, 16, 0 },
195 [U16_32] = { 16, 32, 0 }, 226 [U16_32] = { 16, 32, 0 },
@@ -202,6 +233,9 @@ static const struct s390_operand operands[] =
202 [U32_16] = { 32, 16, 0 }, 233 [U32_16] = { 32, 16, 0 },
203 [M_16] = { 4, 16, 0 }, 234 [M_16] = { 4, 16, 0 },
204 [M_20] = { 4, 20, 0 }, 235 [M_20] = { 4, 20, 0 },
236 [M_24] = { 4, 24, 0 },
237 [M_28] = { 4, 28, 0 },
238 [M_32] = { 4, 32, 0 },
205 [RO_28] = { 4, 28, OPERAND_GPR } 239 [RO_28] = { 4, 28, OPERAND_GPR }
206}; 240};
207 241
@@ -283,6 +317,7 @@ static const unsigned char formats[][7] = {
283 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, 317 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
284 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, 318 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
285 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, 319 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
320 [INSTR_RXE_RRRDM] = { 0xff, R_8,D_20,X_12,B_16,M_32,0 },
286 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, 321 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
287 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 }, 322 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
288 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 }, 323 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
@@ -307,6 +342,37 @@ static const unsigned char formats[][7] = {
307 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, 342 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
308 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, 343 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
309 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, 344 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
345 [INSTR_VRI_V0IM] = { 0xff, V_8,I16_16,M_32,0,0,0 },
346 [INSTR_VRI_V0I0] = { 0xff, V_8,I16_16,0,0,0,0 },
347 [INSTR_VRI_V0IIM] = { 0xff, V_8,I8_16,I8_24,M_32,0,0 },
348 [INSTR_VRI_VVIM] = { 0xff, V_8,I16_16,V_12,M_32,0,0 },
349 [INSTR_VRI_VVV0IM]= { 0xff, V_8,V_12,V_16,I8_24,M_32,0 },
350 [INSTR_VRI_VVV0I0]= { 0xff, V_8,V_12,V_16,I8_24,0,0 },
351 [INSTR_VRI_VVIMM] = { 0xff, V_8,V_12,I16_16,M_32,M_28,0 },
352 [INSTR_VRR_VV00MMM]={ 0xff, V_8,V_12,M_32,M_28,M_24,0 },
353 [INSTR_VRR_VV000MM]={ 0xff, V_8,V_12,M_32,M_28,0,0 },
354 [INSTR_VRR_VV0000M]={ 0xff, V_8,V_12,M_32,0,0,0 },
355 [INSTR_VRR_VV00000]={ 0xff, V_8,V_12,0,0,0,0 },
356 [INSTR_VRR_VVV0M0M]={ 0xff, V_8,V_12,V_16,M_32,M_24,0 },
357 [INSTR_VRR_VV00M0M]={ 0xff, V_8,V_12,M_32,M_24,0,0 },
358 [INSTR_VRR_VVV000M]={ 0xff, V_8,V_12,V_16,M_32,0,0 },
359 [INSTR_VRR_VVV000V]={ 0xff, V_8,V_12,V_16,V_32,0,0 },
360 [INSTR_VRR_VVV0000]={ 0xff, V_8,V_12,V_16,0,0,0 },
361 [INSTR_VRR_VVV0MMM]={ 0xff, V_8,V_12,V_16,M_32,M_28,M_24 },
362 [INSTR_VRR_VVV00MM]={ 0xff, V_8,V_12,V_16,M_32,M_28,0 },
363 [INSTR_VRR_VVVMM0V]={ 0xff, V_8,V_12,V_16,V_32,M_20,M_24 },
364 [INSTR_VRR_VVVM0MV]={ 0xff, V_8,V_12,V_16,V_32,M_28,M_20 },
365 [INSTR_VRR_VVVM00V]={ 0xff, V_8,V_12,V_16,V_32,M_20,0 },
366 [INSTR_VRR_VRR0000]={ 0xff, V_8,R_12,R_16,0,0,0 },
367 [INSTR_VRS_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
368 [INSTR_VRS_VVRD0] = { 0xff, V_8,V_12,D_20,B_16,0,0 },
369 [INSTR_VRS_VRRDM] = { 0xff, V_8,R_12,D_20,B_16,M_32,0 },
370 [INSTR_VRS_VRRD0] = { 0xff, V_8,R_12,D_20,B_16,0,0 },
371 [INSTR_VRS_RVRDM] = { 0xff, R_8,V_12,D_20,B_16,M_32,0 },
372 [INSTR_VRV_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
373 [INSTR_VRV_VWRDM] = { 0xff, V_8,D_20,W_12,B_16,M_32,0 },
374 [INSTR_VRX_VRRDM] = { 0xff, V_8,D_20,X_12,B_16,M_32,0 },
375 [INSTR_VRX_VRRD0] = { 0xff, V_8,D_20,X_12,B_16,0,0 },
310}; 376};
311 377
312enum { 378enum {
@@ -381,6 +447,11 @@ enum {
381 LONG_INSN_MPCIFC, 447 LONG_INSN_MPCIFC,
382 LONG_INSN_STPCIFC, 448 LONG_INSN_STPCIFC,
383 LONG_INSN_PCISTB, 449 LONG_INSN_PCISTB,
450 LONG_INSN_VPOPCT,
451 LONG_INSN_VERLLV,
452 LONG_INSN_VESRAV,
453 LONG_INSN_VESRLV,
454 LONG_INSN_VSBCBI
384}; 455};
385 456
386static char *long_insn_name[] = { 457static char *long_insn_name[] = {
@@ -455,6 +526,11 @@ static char *long_insn_name[] = {
455 [LONG_INSN_MPCIFC] = "mpcifc", 526 [LONG_INSN_MPCIFC] = "mpcifc",
456 [LONG_INSN_STPCIFC] = "stpcifc", 527 [LONG_INSN_STPCIFC] = "stpcifc",
457 [LONG_INSN_PCISTB] = "pcistb", 528 [LONG_INSN_PCISTB] = "pcistb",
529 [LONG_INSN_VPOPCT] = "vpopct",
530 [LONG_INSN_VERLLV] = "verllv",
531 [LONG_INSN_VESRAV] = "vesrav",
532 [LONG_INSN_VESRLV] = "vesrlv",
533 [LONG_INSN_VSBCBI] = "vsbcbi",
458}; 534};
459 535
460static struct s390_insn opcode[] = { 536static struct s390_insn opcode[] = {
@@ -1369,6 +1445,150 @@ static struct s390_insn opcode_e5[] = {
1369 { "", 0, INSTR_INVALID } 1445 { "", 0, INSTR_INVALID }
1370}; 1446};
1371 1447
1448static struct s390_insn opcode_e7[] = {
1449#ifdef CONFIG_64BIT
1450 { "lcbb", 0x27, INSTR_RXE_RRRDM },
1451 { "vgef", 0x13, INSTR_VRV_VVRDM },
1452 { "vgeg", 0x12, INSTR_VRV_VVRDM },
1453 { "vgbm", 0x44, INSTR_VRI_V0I0 },
1454 { "vgm", 0x46, INSTR_VRI_V0IIM },
1455 { "vl", 0x06, INSTR_VRX_VRRD0 },
1456 { "vlr", 0x56, INSTR_VRR_VV00000 },
1457 { "vlrp", 0x05, INSTR_VRX_VRRDM },
1458 { "vleb", 0x00, INSTR_VRX_VRRDM },
1459 { "vleh", 0x01, INSTR_VRX_VRRDM },
1460 { "vlef", 0x03, INSTR_VRX_VRRDM },
1461 { "vleg", 0x02, INSTR_VRX_VRRDM },
1462 { "vleib", 0x40, INSTR_VRI_V0IM },
1463 { "vleih", 0x41, INSTR_VRI_V0IM },
1464 { "vleif", 0x43, INSTR_VRI_V0IM },
1465 { "vleig", 0x42, INSTR_VRI_V0IM },
1466 { "vlgv", 0x21, INSTR_VRS_RVRDM },
1467 { "vllez", 0x04, INSTR_VRX_VRRDM },
1468 { "vlm", 0x36, INSTR_VRS_VVRD0 },
1469 { "vlbb", 0x07, INSTR_VRX_VRRDM },
1470 { "vlvg", 0x22, INSTR_VRS_VRRDM },
1471 { "vlvgp", 0x62, INSTR_VRR_VRR0000 },
1472 { "vll", 0x37, INSTR_VRS_VRRD0 },
1473 { "vmrh", 0x61, INSTR_VRR_VVV000M },
1474 { "vmrl", 0x60, INSTR_VRR_VVV000M },
1475 { "vpk", 0x94, INSTR_VRR_VVV000M },
1476 { "vpks", 0x97, INSTR_VRR_VVV0M0M },
1477 { "vpkls", 0x95, INSTR_VRR_VVV0M0M },
1478 { "vperm", 0x8c, INSTR_VRR_VVV000V },
1479 { "vpdi", 0x84, INSTR_VRR_VVV000M },
1480 { "vrep", 0x4d, INSTR_VRI_VVIM },
1481 { "vrepi", 0x45, INSTR_VRI_V0IM },
1482 { "vscef", 0x1b, INSTR_VRV_VWRDM },
1483 { "vsceg", 0x1a, INSTR_VRV_VWRDM },
1484 { "vsel", 0x8d, INSTR_VRR_VVV000V },
1485 { "vseg", 0x5f, INSTR_VRR_VV0000M },
1486 { "vst", 0x0e, INSTR_VRX_VRRD0 },
1487 { "vsteb", 0x08, INSTR_VRX_VRRDM },
1488 { "vsteh", 0x09, INSTR_VRX_VRRDM },
1489 { "vstef", 0x0b, INSTR_VRX_VRRDM },
1490 { "vsteg", 0x0a, INSTR_VRX_VRRDM },
1491 { "vstm", 0x3e, INSTR_VRS_VVRD0 },
1492 { "vstl", 0x3f, INSTR_VRS_VRRD0 },
1493 { "vuph", 0xd7, INSTR_VRR_VV0000M },
1494 { "vuplh", 0xd5, INSTR_VRR_VV0000M },
1495 { "vupl", 0xd6, INSTR_VRR_VV0000M },
1496 { "vupll", 0xd4, INSTR_VRR_VV0000M },
1497 { "va", 0xf3, INSTR_VRR_VVV000M },
1498 { "vacc", 0xf1, INSTR_VRR_VVV000M },
1499 { "vac", 0xbb, INSTR_VRR_VVVM00V },
1500 { "vaccc", 0xb9, INSTR_VRR_VVVM00V },
1501 { "vn", 0x68, INSTR_VRR_VVV0000 },
1502 { "vnc", 0x69, INSTR_VRR_VVV0000 },
1503 { "vavg", 0xf2, INSTR_VRR_VVV000M },
1504 { "vavgl", 0xf0, INSTR_VRR_VVV000M },
1505 { "vcksm", 0x66, INSTR_VRR_VVV0000 },
1506 { "vec", 0xdb, INSTR_VRR_VV0000M },
1507 { "vecl", 0xd9, INSTR_VRR_VV0000M },
1508 { "vceq", 0xf8, INSTR_VRR_VVV0M0M },
1509 { "vch", 0xfb, INSTR_VRR_VVV0M0M },
1510 { "vchl", 0xf9, INSTR_VRR_VVV0M0M },
1511 { "vclz", 0x53, INSTR_VRR_VV0000M },
1512 { "vctz", 0x52, INSTR_VRR_VV0000M },
1513 { "vx", 0x6d, INSTR_VRR_VVV0000 },
1514 { "vgfm", 0xb4, INSTR_VRR_VVV000M },
1515 { "vgfma", 0xbc, INSTR_VRR_VVVM00V },
1516 { "vlc", 0xde, INSTR_VRR_VV0000M },
1517 { "vlp", 0xdf, INSTR_VRR_VV0000M },
1518 { "vmx", 0xff, INSTR_VRR_VVV000M },
1519 { "vmxl", 0xfd, INSTR_VRR_VVV000M },
1520 { "vmn", 0xfe, INSTR_VRR_VVV000M },
1521 { "vmnl", 0xfc, INSTR_VRR_VVV000M },
1522 { "vmal", 0xaa, INSTR_VRR_VVVM00V },
1523 { "vmae", 0xae, INSTR_VRR_VVVM00V },
1524 { "vmale", 0xac, INSTR_VRR_VVVM00V },
1525 { "vmah", 0xab, INSTR_VRR_VVVM00V },
1526 { "vmalh", 0xa9, INSTR_VRR_VVVM00V },
1527 { "vmao", 0xaf, INSTR_VRR_VVVM00V },
1528 { "vmalo", 0xad, INSTR_VRR_VVVM00V },
1529 { "vmh", 0xa3, INSTR_VRR_VVV000M },
1530 { "vmlh", 0xa1, INSTR_VRR_VVV000M },
1531 { "vml", 0xa2, INSTR_VRR_VVV000M },
1532 { "vme", 0xa6, INSTR_VRR_VVV000M },
1533 { "vmle", 0xa4, INSTR_VRR_VVV000M },
1534 { "vmo", 0xa7, INSTR_VRR_VVV000M },
1535 { "vmlo", 0xa5, INSTR_VRR_VVV000M },
1536 { "vno", 0x6b, INSTR_VRR_VVV0000 },
1537 { "vo", 0x6a, INSTR_VRR_VVV0000 },
1538 { { 0, LONG_INSN_VPOPCT }, 0x50, INSTR_VRR_VV0000M },
1539 { { 0, LONG_INSN_VERLLV }, 0x73, INSTR_VRR_VVV000M },
1540 { "verll", 0x33, INSTR_VRS_VVRDM },
1541 { "verim", 0x72, INSTR_VRI_VVV0IM },
1542 { "veslv", 0x70, INSTR_VRR_VVV000M },
1543 { "vesl", 0x30, INSTR_VRS_VVRDM },
1544 { { 0, LONG_INSN_VESRAV }, 0x7a, INSTR_VRR_VVV000M },
1545 { "vesra", 0x3a, INSTR_VRS_VVRDM },
1546 { { 0, LONG_INSN_VESRLV }, 0x78, INSTR_VRR_VVV000M },
1547 { "vesrl", 0x38, INSTR_VRS_VVRDM },
1548 { "vsl", 0x74, INSTR_VRR_VVV0000 },
1549 { "vslb", 0x75, INSTR_VRR_VVV0000 },
1550 { "vsldb", 0x77, INSTR_VRI_VVV0I0 },
1551 { "vsra", 0x7e, INSTR_VRR_VVV0000 },
1552 { "vsrab", 0x7f, INSTR_VRR_VVV0000 },
1553 { "vsrl", 0x7c, INSTR_VRR_VVV0000 },
1554 { "vsrlb", 0x7d, INSTR_VRR_VVV0000 },
1555 { "vs", 0xf7, INSTR_VRR_VVV000M },
 1556	{ "vscbi", 0xf5, INSTR_VRR_VVV000M },
 1557	{ "vsbi", 0xbf, INSTR_VRR_VVVM00V },
1558 { { 0, LONG_INSN_VSBCBI }, 0xbd, INSTR_VRR_VVVM00V },
1559 { "vsumg", 0x65, INSTR_VRR_VVV000M },
1560 { "vsumq", 0x67, INSTR_VRR_VVV000M },
1561 { "vsum", 0x64, INSTR_VRR_VVV000M },
1562 { "vtm", 0xd8, INSTR_VRR_VV00000 },
1563 { "vfae", 0x82, INSTR_VRR_VVV0M0M },
1564 { "vfee", 0x80, INSTR_VRR_VVV0M0M },
1565 { "vfene", 0x81, INSTR_VRR_VVV0M0M },
1566 { "vistr", 0x5c, INSTR_VRR_VV00M0M },
1567 { "vstrc", 0x8a, INSTR_VRR_VVVMM0V },
1568 { "vfa", 0xe3, INSTR_VRR_VVV00MM },
1569 { "wfc", 0xcb, INSTR_VRR_VV000MM },
1570 { "wfk", 0xca, INSTR_VRR_VV000MM },
1571 { "vfce", 0xe8, INSTR_VRR_VVV0MMM },
1572 { "vfch", 0xeb, INSTR_VRR_VVV0MMM },
1573 { "vfche", 0xea, INSTR_VRR_VVV0MMM },
1574 { "vcdg", 0xc3, INSTR_VRR_VV00MMM },
1575 { "vcdlg", 0xc1, INSTR_VRR_VV00MMM },
1576 { "vcgd", 0xc2, INSTR_VRR_VV00MMM },
1577 { "vclgd", 0xc0, INSTR_VRR_VV00MMM },
1578 { "vfd", 0xe5, INSTR_VRR_VVV00MM },
1579 { "vfi", 0xc7, INSTR_VRR_VV00MMM },
1580 { "vlde", 0xc4, INSTR_VRR_VV000MM },
1581 { "vled", 0xc5, INSTR_VRR_VV00MMM },
1582 { "vfm", 0xe7, INSTR_VRR_VVV00MM },
1583 { "vfma", 0x8f, INSTR_VRR_VVVM0MV },
1584 { "vfms", 0x8e, INSTR_VRR_VVVM0MV },
1585 { "vfpso", 0xcc, INSTR_VRR_VV00MMM },
1586 { "vfsq", 0xce, INSTR_VRR_VV000MM },
1587 { "vfs", 0xe2, INSTR_VRR_VVV00MM },
1588 { "vftci", 0x4a, INSTR_VRI_VVIMM },
1589#endif
1590};
1591
1372static struct s390_insn opcode_eb[] = { 1592static struct s390_insn opcode_eb[] = {
1373#ifdef CONFIG_64BIT 1593#ifdef CONFIG_64BIT
1374 { "lmg", 0x04, INSTR_RSY_RRRD }, 1594 { "lmg", 0x04, INSTR_RSY_RRRD },
@@ -1552,16 +1772,17 @@ static struct s390_insn opcode_ed[] = {
1552static unsigned int extract_operand(unsigned char *code, 1772static unsigned int extract_operand(unsigned char *code,
1553 const struct s390_operand *operand) 1773 const struct s390_operand *operand)
1554{ 1774{
1775 unsigned char *cp;
1555 unsigned int val; 1776 unsigned int val;
1556 int bits; 1777 int bits;
1557 1778
1558 /* Extract fragments of the operand byte for byte. */ 1779 /* Extract fragments of the operand byte for byte. */
1559 code += operand->shift / 8; 1780 cp = code + operand->shift / 8;
1560 bits = (operand->shift & 7) + operand->bits; 1781 bits = (operand->shift & 7) + operand->bits;
1561 val = 0; 1782 val = 0;
1562 do { 1783 do {
1563 val <<= 8; 1784 val <<= 8;
1564 val |= (unsigned int) *code++; 1785 val |= (unsigned int) *cp++;
1565 bits -= 8; 1786 bits -= 8;
1566 } while (bits > 0); 1787 } while (bits > 0);
1567 val >>= -bits; 1788 val >>= -bits;
@@ -1571,6 +1792,18 @@ static unsigned int extract_operand(unsigned char *code,
1571 if (operand->bits == 20 && operand->shift == 20) 1792 if (operand->bits == 20 && operand->shift == 20)
1572 val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; 1793 val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
1573 1794
1795 /* Check for register extensions bits for vector registers. */
1796 if (operand->flags & OPERAND_VR) {
1797 if (operand->shift == 8)
1798 val |= (code[4] & 8) << 1;
1799 else if (operand->shift == 12)
1800 val |= (code[4] & 4) << 2;
1801 else if (operand->shift == 16)
1802 val |= (code[4] & 2) << 3;
1803 else if (operand->shift == 32)
1804 val |= (code[4] & 1) << 4;
1805 }
1806
1574 /* Sign extend value if the operand is signed or pc relative. */ 1807 /* Sign extend value if the operand is signed or pc relative. */
1575 if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && 1808 if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
1576 (val & (1U << (operand->bits - 1)))) 1809 (val & (1U << (operand->bits - 1))))
@@ -1639,6 +1872,10 @@ struct s390_insn *find_insn(unsigned char *code)
1639 case 0xe5: 1872 case 0xe5:
1640 table = opcode_e5; 1873 table = opcode_e5;
1641 break; 1874 break;
1875 case 0xe7:
1876 table = opcode_e7;
1877 opfrag = code[5];
1878 break;
1642 case 0xeb: 1879 case 0xeb:
1643 table = opcode_eb; 1880 table = opcode_eb;
1644 opfrag = code[5]; 1881 opfrag = code[5];
@@ -1734,6 +1971,8 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1734 ptr += sprintf(ptr, "%%a%i", value); 1971 ptr += sprintf(ptr, "%%a%i", value);
1735 else if (operand->flags & OPERAND_CR) 1972 else if (operand->flags & OPERAND_CR)
1736 ptr += sprintf(ptr, "%%c%i", value); 1973 ptr += sprintf(ptr, "%%c%i", value);
1974 else if (operand->flags & OPERAND_VR)
1975 ptr += sprintf(ptr, "%%v%i", value);
1737 else if (operand->flags & OPERAND_PCREL) 1976 else if (operand->flags & OPERAND_PCREL)
1738 ptr += sprintf(ptr, "%lx", (signed int) value 1977 ptr += sprintf(ptr, "%lx", (signed int) value
1739 + addr); 1978 + addr);
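
The OPERAND_VR branch added to extract_operand() widens each 4-bit vector register field with its RXB extension bit from instruction byte 4 (bits 36-39), so registers 16-31 can be named. A self-contained sketch of that decode for the first operand; the sample bytes encode "vl %v20,0(%r5)" and were assembled by hand for illustration:

#include <stdio.h>

/* Decode the V1 field (bits 8-11) of a 6-byte vector instruction and
 * apply its RXB bit (bit 36, i.e. 0x08 in byte 4), mirroring the new
 * OPERAND_VR handling in extract_operand(). */
static unsigned int decode_v1(const unsigned char *code)
{
	unsigned int val = code[1] >> 4;

	if (code[4] & 8)
		val |= 0x10;	/* extension bit selects %v16..%v31 */
	return val;
}

int main(void)
{
	/* vl %v20,0(%r5): opcode e7..06, V1 field 4, RXB bit for V1 set. */
	unsigned char insn[6] = { 0xe7, 0x40, 0x50, 0x00, 0x08, 0x06 };

	printf("first operand: %%v%u\n", decode_v1(insn));
	return 0;
}
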
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 0dff972a169c..cef2879edff3 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -390,10 +390,10 @@ static __init void detect_machine_facilities(void)
390 S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; 390 S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
391 if (test_facility(50) && test_facility(73)) 391 if (test_facility(50) && test_facility(73))
392 S390_lowcore.machine_flags |= MACHINE_FLAG_TE; 392 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
393 if (test_facility(66))
394 S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
395 if (test_facility(51)) 393 if (test_facility(51))
396 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 394 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
395 if (test_facility(129))
396 S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
397#endif 397#endif
398} 398}
399 399
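
detect_machine_facilities() above turns stfle facility bit 129 into MACHINE_FLAG_VX. Facility bits count from the leftmost bit of the facility list, which is what test_facility() checks; a small sketch of that bit test on a local buffer (the helper name here is made up):

#include <stdint.h>
#include <stdio.h>

/* Facility bits are numbered from the most significant bit of byte 0. */
static int facility_bit_set(const uint8_t *fac_list, unsigned int nr)
{
	return (fac_list[nr >> 3] & (0x80 >> (nr & 7))) != 0;
}

int main(void)
{
	uint8_t fac[32] = { 0 };

	fac[129 >> 3] |= 0x80 >> (129 & 7);	/* pretend the vector facility is installed */
	printf("facility 129 (vector): %d\n", facility_bit_set(fac, 129));
	return 0;
}
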
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 1aad48398d06..0554b9771c9f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -4,7 +4,7 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/signal.h> 5#include <linux/signal.h>
6#include <asm/ptrace.h> 6#include <asm/ptrace.h>
7#include <asm/cputime.h> 7#include <asm/idle.h>
8 8
9extern void *restart_stack; 9extern void *restart_stack;
10extern unsigned long suspend_zero_pages; 10extern unsigned long suspend_zero_pages;
@@ -21,6 +21,8 @@ void psw_idle(struct s390_idle_data *, unsigned long);
21asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 21asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
22asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); 22asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
23 23
24int alloc_vector_registers(struct task_struct *tsk);
25
24void do_protection_exception(struct pt_regs *regs); 26void do_protection_exception(struct pt_regs *regs);
25void do_dat_exception(struct pt_regs *regs); 27void do_dat_exception(struct pt_regs *regs);
26 28
@@ -43,8 +45,10 @@ void special_op_exception(struct pt_regs *regs);
43void specification_exception(struct pt_regs *regs); 45void specification_exception(struct pt_regs *regs);
44void transaction_exception(struct pt_regs *regs); 46void transaction_exception(struct pt_regs *regs);
45void translation_exception(struct pt_regs *regs); 47void translation_exception(struct pt_regs *regs);
48void vector_exception(struct pt_regs *regs);
46 49
47void do_per_trap(struct pt_regs *regs); 50void do_per_trap(struct pt_regs *regs);
51void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
48void syscall_trace(struct pt_regs *regs, int entryexit); 52void syscall_trace(struct pt_regs *regs, int entryexit);
49void kernel_stack_overflow(struct pt_regs * regs); 53void kernel_stack_overflow(struct pt_regs * regs);
50void do_signal(struct pt_regs *regs); 54void do_signal(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f2e674c702e1..7b2e03afd017 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -42,7 +42,8 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
42STACK_SIZE = 1 << STACK_SHIFT 42STACK_SIZE = 1 << STACK_SHIFT
43STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 43STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
44 44
45_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) 45_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
46 _TIF_UPROBE)
46_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 47_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
47 _TIF_SYSCALL_TRACEPOINT) 48 _TIF_SYSCALL_TRACEPOINT)
48_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) 49_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
@@ -265,6 +266,10 @@ sysc_work:
265 jo sysc_mcck_pending 266 jo sysc_mcck_pending
266 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 267 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
267 jo sysc_reschedule 268 jo sysc_reschedule
269#ifdef CONFIG_UPROBES
270 tm __TI_flags+7(%r12),_TIF_UPROBE
271 jo sysc_uprobe_notify
272#endif
268 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP 273 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
269 jo sysc_singlestep 274 jo sysc_singlestep
270 tm __TI_flags+7(%r12),_TIF_SIGPENDING 275 tm __TI_flags+7(%r12),_TIF_SIGPENDING
@@ -323,6 +328,16 @@ sysc_notify_resume:
323 jg do_notify_resume 328 jg do_notify_resume
324 329
325# 330#
331# _TIF_UPROBE is set, call uprobe_notify_resume
332#
333#ifdef CONFIG_UPROBES
334sysc_uprobe_notify:
335 lgr %r2,%r11 # pass pointer to pt_regs
336 larl %r14,sysc_return
337 jg uprobe_notify_resume
338#endif
339
340#
326# _PIF_PER_TRAP is set, call do_per_trap 341# _PIF_PER_TRAP is set, call do_per_trap
327# 342#
328sysc_singlestep: 343sysc_singlestep:
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 54d6493c4a56..51d14fe5eb9a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Dynamic function tracer architecture backend. 2 * Dynamic function tracer architecture backend.
3 * 3 *
4 * Copyright IBM Corp. 2009 4 * Copyright IBM Corp. 2009,2014
5 * 5 *
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -17,100 +17,76 @@
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18#include "entry.h" 18#include "entry.h"
19 19
20#ifdef CONFIG_DYNAMIC_FTRACE 20void mcount_replace_code(void);
21
22void ftrace_disable_code(void); 21void ftrace_disable_code(void);
23void ftrace_enable_insn(void); 22void ftrace_enable_insn(void);
24 23
25#ifdef CONFIG_64BIT
26/* 24/*
27 * The 64-bit mcount code looks like this: 25 * The mcount code looks like this:
28 * stg %r14,8(%r15) # offset 0 26 * stg %r14,8(%r15) # offset 0
29 * > larl %r1,<&counter> # offset 6 27 * larl %r1,<&counter> # offset 6
30 * > brasl %r14,_mcount # offset 12 28 * brasl %r14,_mcount # offset 12
31 * lg %r14,8(%r15) # offset 18 29 * lg %r14,8(%r15) # offset 18
32 * Total length is 24 bytes. The middle two instructions of the mcount 30 * Total length is 24 bytes. The complete mcount block initially gets replaced
33 * block get overwritten by ftrace_make_nop / ftrace_make_call. 31 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
34 * The 64-bit enabled ftrace code block looks like this: 32 * only patch the jg/lg instruction within the block.
35 * stg %r14,8(%r15) # offset 0 33 * Note: we do not patch the first instruction to an unconditional branch,
34 * since that would break kprobes/jprobes. It is easier to leave the larl
35 * instruction in and only modify the second instruction.
36 * The enabled ftrace code block looks like this:
37 * larl %r0,.+24 # offset 0
36 * > lg %r1,__LC_FTRACE_FUNC # offset 6 38 * > lg %r1,__LC_FTRACE_FUNC # offset 6
37 * > lgr %r0,%r0 # offset 12 39 * br %r1 # offset 12
38 * > basr %r14,%r1 # offset 16 40 * brcl 0,0 # offset 14
39 * lg %r14,8(%15) # offset 18 41 * brc 0,0 # offset 20
40 * The return points of the mcount/ftrace function have the same offset 18. 42 * The ftrace function gets called with a non-standard C function call ABI
41 * The 64-bit disable ftrace code block looks like this: 43 * where r0 contains the return address. It is also expected that the called
42 * stg %r14,8(%r15) # offset 0 44 * function only clobbers r0 and r1, but restores r2-r15.
45 * The return point of the ftrace function has offset 24, so execution
 46 * continues after the mcount block.
47 * larl %r0,.+24 # offset 0
43 * > jg .+18 # offset 6 48 * > jg .+18 # offset 6
44 * > lgr %r0,%r0 # offset 12 49 * br %r1 # offset 12
45 * > basr %r14,%r1 # offset 16 50 * brcl 0,0 # offset 14
46 * lg %r14,8(%15) # offset 18 51 * brc 0,0 # offset 20
47 * The jg instruction branches to offset 24 to skip as many instructions 52 * The jg instruction branches to offset 24 to skip as many instructions
48 * as possible. 53 * as possible.
49 */ 54 */
50asm( 55asm(
51 " .align 4\n" 56 " .align 4\n"
57 "mcount_replace_code:\n"
58 " larl %r0,0f\n"
52 "ftrace_disable_code:\n" 59 "ftrace_disable_code:\n"
53 " jg 0f\n" 60 " jg 0f\n"
54 " lgr %r0,%r0\n" 61 " br %r1\n"
55 " basr %r14,%r1\n" 62 " brcl 0,0\n"
63 " brc 0,0\n"
56 "0:\n" 64 "0:\n"
57 " .align 4\n" 65 " .align 4\n"
58 "ftrace_enable_insn:\n" 66 "ftrace_enable_insn:\n"
59 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"); 67 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
60 68
69#define MCOUNT_BLOCK_SIZE 24
70#define MCOUNT_INSN_OFFSET 6
61#define FTRACE_INSN_SIZE 6 71#define FTRACE_INSN_SIZE 6
62 72
63#else /* CONFIG_64BIT */ 73int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
64/* 74 unsigned long addr)
65 * The 31-bit mcount code looks like this: 75{
66 * st %r14,4(%r15) # offset 0 76 return 0;
67 * > bras %r1,0f # offset 4 77}
68 * > .long _mcount # offset 8
69 * > .long <&counter> # offset 12
70 * > 0: l %r14,0(%r1) # offset 16
71 * > l %r1,4(%r1) # offset 20
72 * basr %r14,%r14 # offset 24
73 * l %r14,4(%r15) # offset 26
74 * Total length is 30 bytes. The twenty bytes starting from offset 4
75 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
76 * The 31-bit enabled ftrace code block looks like this:
77 * st %r14,4(%r15) # offset 0
78 * > l %r14,__LC_FTRACE_FUNC # offset 4
79 * > j 0f # offset 8
80 * > .fill 12,1,0x07 # offset 12
81 * 0: basr %r14,%r14 # offset 24
82 * l %r14,4(%r14) # offset 26
83 * The return points of the mcount/ftrace function have the same offset 26.
84 * The 31-bit disabled ftrace code block looks like this:
85 * st %r14,4(%r15) # offset 0
86 * > j .+26 # offset 4
87 * > j 0f # offset 8
88 * > .fill 12,1,0x07 # offset 12
89 * 0: basr %r14,%r14 # offset 24
90 * l %r14,4(%r14) # offset 26
91 * The j instruction branches to offset 30 to skip as many instructions
92 * as possible.
93 */
94asm(
95 " .align 4\n"
96 "ftrace_disable_code:\n"
97 " j 1f\n"
98 " j 0f\n"
99 " .fill 12,1,0x07\n"
100 "0: basr %r14,%r14\n"
101 "1:\n"
102 " .align 4\n"
103 "ftrace_enable_insn:\n"
104 " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
105
106#define FTRACE_INSN_SIZE 4
107
108#endif /* CONFIG_64BIT */
109
110 78
111int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 79int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
112 unsigned long addr) 80 unsigned long addr)
113{ 81{
82 /* Initial replacement of the whole mcount block */
83 if (addr == MCOUNT_ADDR) {
84 if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
85 mcount_replace_code,
86 MCOUNT_BLOCK_SIZE))
87 return -EPERM;
88 return 0;
89 }
114 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, 90 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
115 MCOUNT_INSN_SIZE)) 91 MCOUNT_INSN_SIZE))
116 return -EPERM; 92 return -EPERM;
@@ -135,8 +111,6 @@ int __init ftrace_dyn_arch_init(void)
135 return 0; 111 return 0;
136} 112}
137 113
138#endif /* CONFIG_DYNAMIC_FTRACE */
139
140#ifdef CONFIG_FUNCTION_GRAPH_TRACER 114#ifdef CONFIG_FUNCTION_GRAPH_TRACER
141/* 115/*
142 * Hook the return address and push it in the stack of return addresses 116 * Hook the return address and push it in the stack of return addresses
@@ -162,31 +136,26 @@ out:
162 return parent; 136 return parent;
163} 137}
164 138
165#ifdef CONFIG_DYNAMIC_FTRACE
166/* 139/*
167 * Patch the kernel code at ftrace_graph_caller location. The instruction 140 * Patch the kernel code at ftrace_graph_caller location. The instruction
168 * there is branch relative and save to prepare_ftrace_return. To disable 141 * there is branch relative on condition. To enable the ftrace graph code
169 * the call to prepare_ftrace_return we patch the bras offset to point 142 * block, we simply patch the mask field of the instruction to zero and
170 * directly after the instructions. To enable the call we calculate 143 * turn the instruction into a nop.
171 * the original offset to prepare_ftrace_return and put it back. 144 * To disable the ftrace graph code the mask field will be patched to
145 * all ones, which turns the instruction into an unconditional branch.
172 */ 146 */
173int ftrace_enable_ftrace_graph_caller(void) 147int ftrace_enable_ftrace_graph_caller(void)
174{ 148{
175 unsigned short offset; 149 u8 op = 0x04; /* set mask field to zero */
176 150
177 offset = ((void *) prepare_ftrace_return - 151 return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
178 (void *) ftrace_graph_caller) / 2;
179 return probe_kernel_write((void *) ftrace_graph_caller + 2,
180 &offset, sizeof(offset));
181} 152}
182 153
183int ftrace_disable_ftrace_graph_caller(void) 154int ftrace_disable_ftrace_graph_caller(void)
184{ 155{
185 static unsigned short offset = 0x0002; 156 u8 op = 0xf4; /* set mask field to all ones */
186 157
187 return probe_kernel_write((void *) ftrace_graph_caller + 2, 158 return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
188 &offset, sizeof(offset));
189} 159}
190 160
191#endif /* CONFIG_DYNAMIC_FTRACE */
192#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 161#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
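
ftrace_enable_ftrace_graph_caller() and its disable counterpart above no longer recompute a branch offset; they rewrite a single byte, the condition mask of the branch-relative instruction at ftrace_graph_caller: mask 0 never branches (a nop), mask 15 always branches. A sketch of that patching on a local copy of such an instruction; the 0xa7?4 "brc" encoding is used for illustration, and the kernel writes through probe_kernel_write() rather than a plain store:

#include <stdio.h>

/* brc 0,<target>: 0xa7, mask<<4 | 0x4, then a 16-bit relative offset. */
static unsigned char graph_caller[4] = { 0xa7, 0x04, 0x00, 0x18 };

/* Mirror of the kernel helpers: only the mask nibble in byte 1 changes. */
static void patch_mask_byte(unsigned char *insn, unsigned char op)
{
	insn[1] = op;
}

int main(void)
{
	patch_mask_byte(graph_caller, 0x04);	/* graph tracing enabled: nop */
	printf("enabled : %02x %02x ...\n", graph_caller[0], graph_caller[1]);
	patch_mask_byte(graph_caller, 0xf4);	/* disabled: unconditional branch */
	printf("disabled: %02x %02x ...\n", graph_caller[0], graph_caller[1]);
	return 0;
}
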
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index e88d35d74950..d62eee11f0b5 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -398,7 +398,7 @@ ENTRY(startup_kdump)
398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
399#ifndef CONFIG_MARCH_G5 399#ifndef CONFIG_MARCH_G5
400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
401 .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list 401 .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
403 jz 0f 403 jz 0f
404 la %r0,1 404 la %r0,1
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
new file mode 100644
index 000000000000..c846aee7372f
--- /dev/null
+++ b/arch/s390/kernel/idle.c
@@ -0,0 +1,124 @@
1/*
2 * Idle functions for s390.
3 *
4 * Copyright IBM Corp. 2014
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <linux/kernel.h>
10#include <linux/kernel_stat.h>
11#include <linux/kprobes.h>
12#include <linux/notifier.h>
13#include <linux/init.h>
14#include <linux/cpu.h>
15#include <asm/cputime.h>
16#include <asm/nmi.h>
17#include <asm/smp.h>
18#include "entry.h"
19
20static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
21
22void __kprobes enabled_wait(void)
23{
24 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
25 unsigned long long idle_time;
26 unsigned long psw_mask;
27
28 trace_hardirqs_on();
29
30 /* Wait for external, I/O or machine check interrupt. */
31 psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
32 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
33 clear_cpu_flag(CIF_NOHZ_DELAY);
34
35 /* Call the assembler magic in entry.S */
36 psw_idle(idle, psw_mask);
37
38 /* Account time spent with enabled wait psw loaded as idle time. */
39 idle->sequence++;
40 smp_wmb();
41 idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
42 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
43 idle->idle_time += idle_time;
44 idle->idle_count++;
45 account_idle_time(idle_time);
46 smp_wmb();
47 idle->sequence++;
48}
49
50static ssize_t show_idle_count(struct device *dev,
51 struct device_attribute *attr, char *buf)
52{
53 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
54 unsigned long long idle_count;
55 unsigned int sequence;
56
57 do {
58 sequence = ACCESS_ONCE(idle->sequence);
59 idle_count = ACCESS_ONCE(idle->idle_count);
60 if (ACCESS_ONCE(idle->clock_idle_enter))
61 idle_count++;
62 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
63 return sprintf(buf, "%llu\n", idle_count);
64}
65DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
66
67static ssize_t show_idle_time(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
71 unsigned long long now, idle_time, idle_enter, idle_exit;
72 unsigned int sequence;
73
74 do {
75 now = get_tod_clock();
76 sequence = ACCESS_ONCE(idle->sequence);
77 idle_time = ACCESS_ONCE(idle->idle_time);
78 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
79 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
80 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
81 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
82 return sprintf(buf, "%llu\n", idle_time >> 12);
83}
84DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
85
86cputime64_t arch_cpu_idle_time(int cpu)
87{
88 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
89 unsigned long long now, idle_enter, idle_exit;
90 unsigned int sequence;
91
92 do {
93 now = get_tod_clock();
94 sequence = ACCESS_ONCE(idle->sequence);
95 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
96 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
97 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
98 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
99}
100
101void arch_cpu_idle_enter(void)
102{
103 local_mcck_disable();
104}
105
106void arch_cpu_idle(void)
107{
108 if (!test_cpu_flag(CIF_MCCK_PENDING))
109 /* Halt the cpu and keep track of cpu time accounting. */
110 enabled_wait();
111 local_irq_enable();
112}
113
114void arch_cpu_idle_exit(void)
115{
116 local_mcck_enable();
117 if (test_cpu_flag(CIF_MCCK_PENDING))
118 s390_handle_mcck();
119}
120
121void arch_cpu_idle_dead(void)
122{
123 cpu_die();
124}
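
The sysfs readers in the new idle.c use a hand-rolled sequence counter: enabled_wait() bumps ->sequence to an odd value before updating the idle data and back to an even value afterwards, and readers retry until they observe a stable even sequence. A minimal userspace sketch of that reader/writer protocol, with illustrative names and plain volatile standing in for ACCESS_ONCE()/smp_wmb():

#include <stdio.h>

struct idle_sample {
	volatile unsigned int sequence;
	volatile unsigned long long idle_count;
};

static void writer_update(struct idle_sample *s)
{
	s->sequence++;		/* odd: update in progress */
	s->idle_count++;	/* the kernel also issues smp_wmb() around this */
	s->sequence++;		/* even: update complete */
}

static unsigned long long reader_snapshot(const struct idle_sample *s)
{
	unsigned int seq;
	unsigned long long count;

	do {
		seq = s->sequence;
		count = s->idle_count;
	} while ((seq & 1) || s->sequence != seq);	/* retry on torn read */
	return count;
}

int main(void)
{
	struct idle_sample s = { 0, 0 };

	writer_update(&s);
	printf("idle_count=%llu\n", reader_snapshot(&s));
	return 0;
}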
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8eb82443cfbd..1b8a38ab7861 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -70,6 +70,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, 70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, 71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
72 {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, 72 {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
73 {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
73 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, 74 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
74 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, 75 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
75 {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"}, 76 {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
@@ -258,7 +259,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
258 259
259 ext_code = *(struct ext_code *) &regs->int_code; 260 ext_code = *(struct ext_code *) &regs->int_code;
260 if (ext_code.code != EXT_IRQ_CLK_COMP) 261 if (ext_code.code != EXT_IRQ_CLK_COMP)
261 __get_cpu_var(s390_idle).nohz_delay = 1; 262 set_cpu_flag(CIF_NOHZ_DELAY);
262 263
263 index = ext_hash(ext_code.code); 264 index = ext_hash(ext_code.code);
264 rcu_read_lock(); 265 rcu_read_lock();
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index bc71a7b95af5..27ae5433fe4d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -58,161 +58,13 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
58 .insn_size = MAX_INSN_SIZE, 58 .insn_size = MAX_INSN_SIZE,
59}; 59};
60 60
61static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
62{
63 if (!is_known_insn((unsigned char *)insn))
64 return -EINVAL;
65 switch (insn[0] >> 8) {
66 case 0x0c: /* bassm */
67 case 0x0b: /* bsm */
68 case 0x83: /* diag */
69 case 0x44: /* ex */
70 case 0xac: /* stnsm */
71 case 0xad: /* stosm */
72 return -EINVAL;
73 case 0xc6:
74 switch (insn[0] & 0x0f) {
75 case 0x00: /* exrl */
76 return -EINVAL;
77 }
78 }
79 switch (insn[0]) {
80 case 0x0101: /* pr */
81 case 0xb25a: /* bsa */
82 case 0xb240: /* bakr */
83 case 0xb258: /* bsg */
84 case 0xb218: /* pc */
85 case 0xb228: /* pt */
86 case 0xb98d: /* epsw */
87 return -EINVAL;
88 }
89 return 0;
90}
91
92static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
93{
94 /* default fixup method */
95 int fixup = FIXUP_PSW_NORMAL;
96
97 switch (insn[0] >> 8) {
98 case 0x05: /* balr */
99 case 0x0d: /* basr */
100 fixup = FIXUP_RETURN_REGISTER;
101 /* if r2 = 0, no branch will be taken */
102 if ((insn[0] & 0x0f) == 0)
103 fixup |= FIXUP_BRANCH_NOT_TAKEN;
104 break;
105 case 0x06: /* bctr */
106 case 0x07: /* bcr */
107 fixup = FIXUP_BRANCH_NOT_TAKEN;
108 break;
109 case 0x45: /* bal */
110 case 0x4d: /* bas */
111 fixup = FIXUP_RETURN_REGISTER;
112 break;
113 case 0x47: /* bc */
114 case 0x46: /* bct */
115 case 0x86: /* bxh */
116 case 0x87: /* bxle */
117 fixup = FIXUP_BRANCH_NOT_TAKEN;
118 break;
119 case 0x82: /* lpsw */
120 fixup = FIXUP_NOT_REQUIRED;
121 break;
122 case 0xb2: /* lpswe */
123 if ((insn[0] & 0xff) == 0xb2)
124 fixup = FIXUP_NOT_REQUIRED;
125 break;
126 case 0xa7: /* bras */
127 if ((insn[0] & 0x0f) == 0x05)
128 fixup |= FIXUP_RETURN_REGISTER;
129 break;
130 case 0xc0:
131 if ((insn[0] & 0x0f) == 0x05) /* brasl */
132 fixup |= FIXUP_RETURN_REGISTER;
133 break;
134 case 0xeb:
135 switch (insn[2] & 0xff) {
136 case 0x44: /* bxhg */
137 case 0x45: /* bxleg */
138 fixup = FIXUP_BRANCH_NOT_TAKEN;
139 break;
140 }
141 break;
142 case 0xe3: /* bctg */
143 if ((insn[2] & 0xff) == 0x46)
144 fixup = FIXUP_BRANCH_NOT_TAKEN;
145 break;
146 case 0xec:
147 switch (insn[2] & 0xff) {
148 case 0xe5: /* clgrb */
149 case 0xe6: /* cgrb */
150 case 0xf6: /* crb */
151 case 0xf7: /* clrb */
152 case 0xfc: /* cgib */
153 case 0xfd: /* cglib */
154 case 0xfe: /* cib */
155 case 0xff: /* clib */
156 fixup = FIXUP_BRANCH_NOT_TAKEN;
157 break;
158 }
159 break;
160 }
161 return fixup;
162}
163
164static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
165{
166 /* Check if we have a RIL-b or RIL-c format instruction which
167 * we need to modify in order to avoid instruction emulation. */
168 switch (insn[0] >> 8) {
169 case 0xc0:
170 if ((insn[0] & 0x0f) == 0x00) /* larl */
171 return true;
172 break;
173 case 0xc4:
174 switch (insn[0] & 0x0f) {
175 case 0x02: /* llhrl */
176 case 0x04: /* lghrl */
177 case 0x05: /* lhrl */
178 case 0x06: /* llghrl */
179 case 0x07: /* sthrl */
180 case 0x08: /* lgrl */
181 case 0x0b: /* stgrl */
182 case 0x0c: /* lgfrl */
183 case 0x0d: /* lrl */
184 case 0x0e: /* llgfrl */
185 case 0x0f: /* strl */
186 return true;
187 }
188 break;
189 case 0xc6:
190 switch (insn[0] & 0x0f) {
191 case 0x02: /* pfdrl */
192 case 0x04: /* cghrl */
193 case 0x05: /* chrl */
194 case 0x06: /* clghrl */
195 case 0x07: /* clhrl */
196 case 0x08: /* cgrl */
197 case 0x0a: /* clgrl */
198 case 0x0c: /* cgfrl */
199 case 0x0d: /* crl */
200 case 0x0e: /* clgfrl */
201 case 0x0f: /* clrl */
202 return true;
203 }
204 break;
205 }
206 return false;
207}
208
209static void __kprobes copy_instruction(struct kprobe *p) 61static void __kprobes copy_instruction(struct kprobe *p)
210{ 62{
211 s64 disp, new_disp; 63 s64 disp, new_disp;
212 u64 addr, new_addr; 64 u64 addr, new_addr;
213 65
214 memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8)); 66 memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
215 if (!is_insn_relative_long(p->ainsn.insn)) 67 if (!probe_is_insn_relative_long(p->ainsn.insn))
216 return; 68 return;
217 /* 69 /*
218 * For pc-relative instructions in RIL-b or RIL-c format patch the 70 * For pc-relative instructions in RIL-b or RIL-c format patch the
@@ -276,7 +128,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
276 if ((unsigned long) p->addr & 0x01) 128 if ((unsigned long) p->addr & 0x01)
277 return -EINVAL; 129 return -EINVAL;
278 /* Make sure the probe isn't going on a difficult instruction */ 130 /* Make sure the probe isn't going on a difficult instruction */
279 if (is_prohibited_opcode(p->addr)) 131 if (probe_is_prohibited_opcode(p->addr))
280 return -EINVAL; 132 return -EINVAL;
281 if (s390_get_insn_slot(p)) 133 if (s390_get_insn_slot(p))
282 return -ENOMEM; 134 return -ENOMEM;
@@ -605,7 +457,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
605{ 457{
606 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 458 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
607 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; 459 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
608 int fixup = get_fixup_type(p->ainsn.insn); 460 int fixup = probe_get_fixup_type(p->ainsn.insn);
609 461
610 if (fixup & FIXUP_PSW_NORMAL) 462 if (fixup & FIXUP_PSW_NORMAL)
611 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; 463 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
@@ -789,11 +641,6 @@ void __kprobes jprobe_return(void)
789 asm volatile(".word 0x0002"); 641 asm volatile(".word 0x0002");
790} 642}
791 643
792static void __used __kprobes jprobe_return_end(void)
793{
794 asm volatile("bcr 0,0");
795}
796
797int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 644int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
798{ 645{
799 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 646 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
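
copy_instruction() above still depends on adjusting the displacement of pc-relative RIL-b/RIL-c instructions when the probed instruction is copied into an out-of-line slot. A simplified, hedged sketch of that adjustment (stand-alone C, not the kernel helper; it assumes the host byte order matches the big-endian instruction encoding, as on s390):

#include <stdint.h>
#include <string.h>

/* RIL instructions: 2 bytes of opcode/register fields, then a signed
 * 32-bit displacement counted in halfwords relative to the instruction
 * address. */
static void fixup_ril_displacement(uint8_t insn[6],
				   uint64_t orig_addr, uint64_t slot_addr)
{
	int32_t disp, new_disp;

	memcpy(&disp, insn + 2, sizeof(disp));
	/* keep the same absolute target: orig + 2*disp == slot + 2*new_disp */
	new_disp = disp + (int64_t)(orig_addr - slot_addr) / 2;
	memcpy(insn + 2, &new_disp, sizeof(new_disp));
}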
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 719e27b2cf22..4685337fa7c6 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -25,6 +25,7 @@
25#include <asm/elf.h> 25#include <asm/elf.h>
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/os_info.h> 27#include <asm/os_info.h>
28#include <asm/switch_to.h>
28 29
29typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); 30typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
30 31
@@ -43,7 +44,7 @@ static void add_elf_notes(int cpu)
43 44
44 memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa)); 45 memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
45 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); 46 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
46 ptr = fill_cpu_elf_notes(ptr, sa); 47 ptr = fill_cpu_elf_notes(ptr, sa, NULL);
47 memset(ptr, 0, sizeof(struct elf_note)); 48 memset(ptr, 0, sizeof(struct elf_note));
48} 49}
49 50
@@ -53,8 +54,11 @@ static void add_elf_notes(int cpu)
53static void setup_regs(void) 54static void setup_regs(void)
54{ 55{
55 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; 56 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
57 struct _lowcore *lc;
56 int cpu, this_cpu; 58 int cpu, this_cpu;
57 59
60 /* Get lowcore pointer from store status of this CPU (absolute zero) */
61 lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
58 this_cpu = smp_find_processor_id(stap()); 62 this_cpu = smp_find_processor_id(stap());
59 add_elf_notes(this_cpu); 63 add_elf_notes(this_cpu);
60 for_each_online_cpu(cpu) { 64 for_each_online_cpu(cpu) {
@@ -64,6 +68,8 @@ static void setup_regs(void)
64 continue; 68 continue;
65 add_elf_notes(cpu); 69 add_elf_notes(cpu);
66 } 70 }
71 if (MACHINE_HAS_VX)
72 save_vx_regs_safe((void *) lc->vector_save_area_addr);
67 /* Copy dump CPU store status info to absolute zero */ 73 /* Copy dump CPU store status info to absolute zero */
68 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); 74 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
69} 75}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 433c6dbfa442..4300ea374826 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -8,62 +8,72 @@
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/asm-offsets.h> 9#include <asm/asm-offsets.h>
10#include <asm/ftrace.h> 10#include <asm/ftrace.h>
11#include <asm/ptrace.h>
11 12
12 .section .kprobes.text, "ax" 13 .section .kprobes.text, "ax"
13 14
14ENTRY(ftrace_stub) 15ENTRY(ftrace_stub)
15 br %r14 16 br %r14
16 17
18#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
19#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
20#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
21#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
22
17ENTRY(_mcount) 23ENTRY(_mcount)
18#ifdef CONFIG_DYNAMIC_FTRACE
19 br %r14 24 br %r14
20 25
21ENTRY(ftrace_caller) 26ENTRY(ftrace_caller)
27 .globl ftrace_regs_caller
28 .set ftrace_regs_caller,ftrace_caller
29 lgr %r1,%r15
30 aghi %r15,-STACK_FRAME_SIZE
31 stg %r1,__SF_BACKCHAIN(%r15)
32 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
33 stg %r0,(STACK_PTREGS_PSW+8)(%r15)
34 stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
35#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
36 aghik %r2,%r0,-MCOUNT_INSN_SIZE
37 lgrl %r4,function_trace_op
38 lgrl %r1,ftrace_trace_function
39#else
40 lgr %r2,%r0
41 aghi %r2,-MCOUNT_INSN_SIZE
42 larl %r4,function_trace_op
43 lg %r4,0(%r4)
44 larl %r1,ftrace_trace_function
45 lg %r1,0(%r1)
22#endif 46#endif
23 stm %r2,%r5,16(%r15) 47 lgr %r3,%r14
24 bras %r1,1f 48 la %r5,STACK_PTREGS(%r15)
250: .long ftrace_trace_function 49 basr %r14,%r1
261: st %r14,56(%r15)
27 lr %r0,%r15
28 ahi %r15,-96
29 l %r3,100(%r15)
30 la %r2,0(%r14)
31 st %r0,__SF_BACKCHAIN(%r15)
32 la %r3,0(%r3)
33 ahi %r2,-MCOUNT_INSN_SIZE
34 l %r14,0b-0b(%r1)
35 l %r14,0(%r14)
36 basr %r14,%r14
37#ifdef CONFIG_FUNCTION_GRAPH_TRACER 50#ifdef CONFIG_FUNCTION_GRAPH_TRACER
38 l %r2,100(%r15) 51# The j instruction gets runtime patched to a nop instruction.
39 l %r3,152(%r15) 52# See ftrace_enable_ftrace_graph_caller.
40ENTRY(ftrace_graph_caller) 53ENTRY(ftrace_graph_caller)
41# The bras instruction gets runtime patched to call prepare_ftrace_return. 54 j ftrace_graph_caller_end
42# See ftrace_enable_ftrace_graph_caller. The patched instruction is: 55 lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
43# bras %r14,prepare_ftrace_return 56 lg %r3,(STACK_PTREGS_PSW+8)(%r15)
44 bras %r14,0f 57 brasl %r14,prepare_ftrace_return
450: st %r2,100(%r15) 58 stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
59ftrace_graph_caller_end:
60 .globl ftrace_graph_caller_end
46#endif 61#endif
47 ahi %r15,96 62 lg %r1,(STACK_PTREGS_PSW+8)(%r15)
48 l %r14,56(%r15) 63 lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
49 lm %r2,%r5,16(%r15) 64 br %r1
50 br %r14
51 65
52#ifdef CONFIG_FUNCTION_GRAPH_TRACER 66#ifdef CONFIG_FUNCTION_GRAPH_TRACER
53 67
54ENTRY(return_to_handler) 68ENTRY(return_to_handler)
55 stm %r2,%r5,16(%r15) 69 stmg %r2,%r5,32(%r15)
56 st %r14,56(%r15) 70 lgr %r1,%r15
57 lr %r0,%r15 71 aghi %r15,-STACK_FRAME_OVERHEAD
58 ahi %r15,-96 72 stg %r1,__SF_BACKCHAIN(%r15)
59 st %r0,__SF_BACKCHAIN(%r15) 73 brasl %r14,ftrace_return_to_handler
60 bras %r1,0f 74 aghi %r15,STACK_FRAME_OVERHEAD
61 .long ftrace_return_to_handler 75 lgr %r14,%r2
620: l %r2,0b-0b(%r1) 76 lmg %r2,%r5,32(%r15)
63 basr %r14,%r2
64 lr %r14,%r2
65 ahi %r15,96
66 lm %r2,%r5,16(%r15)
67 br %r14 77 br %r14
68 78
69#endif 79#endif
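
The rewritten ftrace_caller above (aliased to ftrace_regs_caller) now builds a pt_regs snapshot on the stack and passes it to the tracer, which is what callbacks registered with FTRACE_OPS_FL_SAVE_REGS consume. A hedged sketch against the 3.18-era ftrace API; "my_ops"/"my_callback" are illustrative names and registration/error handling is omitted:

#include <linux/ftrace.h>
#include <linux/kernel.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* regs is the snapshot built by ftrace_regs_caller: the saved
	 * gprs and the PSW address of the traced function. */
	if (regs)
		pr_debug("traced %pS, gpr2=%lx\n", (void *)ip, regs->gprs[2]);
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

/* somewhere in module init: register_ftrace_function(&my_ops); */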
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
deleted file mode 100644
index c67a8bf0fd9a..000000000000
--- a/arch/s390/kernel/mcount64.S
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright IBM Corp. 2008, 2009
3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5 *
6 */
7
8#include <linux/linkage.h>
9#include <asm/asm-offsets.h>
10#include <asm/ftrace.h>
11
12 .section .kprobes.text, "ax"
13
14ENTRY(ftrace_stub)
15 br %r14
16
17ENTRY(_mcount)
18#ifdef CONFIG_DYNAMIC_FTRACE
19 br %r14
20
21ENTRY(ftrace_caller)
22#endif
23 stmg %r2,%r5,32(%r15)
24 stg %r14,112(%r15)
25 lgr %r1,%r15
26 aghi %r15,-160
27 stg %r1,__SF_BACKCHAIN(%r15)
28 lgr %r2,%r14
29 lg %r3,168(%r15)
30 aghi %r2,-MCOUNT_INSN_SIZE
31 larl %r14,ftrace_trace_function
32 lg %r14,0(%r14)
33 basr %r14,%r14
34#ifdef CONFIG_FUNCTION_GRAPH_TRACER
35 lg %r2,168(%r15)
36 lg %r3,272(%r15)
37ENTRY(ftrace_graph_caller)
38# The bras instruction gets runtime patched to call prepare_ftrace_return.
39# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
40# bras %r14,prepare_ftrace_return
41 bras %r14,0f
420: stg %r2,168(%r15)
43#endif
44 aghi %r15,160
45 lmg %r2,%r5,32(%r15)
46 lg %r14,112(%r15)
47 br %r14
48
49#ifdef CONFIG_FUNCTION_GRAPH_TRACER
50
51ENTRY(return_to_handler)
52 stmg %r2,%r5,32(%r15)
53 lgr %r1,%r15
54 aghi %r15,-160
55 stg %r1,__SF_BACKCHAIN(%r15)
56 brasl %r14,ftrace_return_to_handler
57 aghi %r15,160
58 lgr %r14,%r2
59 lmg %r2,%r5,32(%r15)
60 br %r14
61
62#endif
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 210e1285f75a..db96b418160a 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -20,6 +20,7 @@
20#include <asm/cputime.h> 20#include <asm/cputime.h>
21#include <asm/nmi.h> 21#include <asm/nmi.h>
22#include <asm/crw.h> 22#include <asm/crw.h>
23#include <asm/switch_to.h>
23 24
24struct mcck_struct { 25struct mcck_struct {
25 int kill_task; 26 int kill_task;
@@ -163,6 +164,21 @@ static int notrace s390_revalidate_registers(struct mci *mci)
163 " ld 15,120(%0)\n" 164 " ld 15,120(%0)\n"
164 : : "a" (fpt_save_area)); 165 : : "a" (fpt_save_area));
165 } 166 }
167
168#ifdef CONFIG_64BIT
169 /* Revalidate vector registers */
170 if (MACHINE_HAS_VX && current->thread.vxrs) {
171 if (!mci->vr) {
172 /*
173 * Vector registers can't be restored and therefore
174 * the process needs to be terminated.
175 */
176 kill_task = 1;
177 }
178 restore_vx_regs((__vector128 *)
179 S390_lowcore.vector_save_area_addr);
180 }
181#endif
166 /* Revalidate access registers */ 182 /* Revalidate access registers */
167 asm volatile( 183 asm volatile(
168 " lam 0,15,0(%0)" 184 " lam 0,15,0(%0)"
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 813ec7260878..f6f8886399f6 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -49,7 +49,7 @@ PGM_CHECK_DEFAULT /* 17 */
49PGM_CHECK_64BIT(transaction_exception) /* 18 */ 49PGM_CHECK_64BIT(transaction_exception) /* 18 */
50PGM_CHECK_DEFAULT /* 19 */ 50PGM_CHECK_DEFAULT /* 19 */
51PGM_CHECK_DEFAULT /* 1a */ 51PGM_CHECK_DEFAULT /* 1a */
52PGM_CHECK_DEFAULT /* 1b */ 52PGM_CHECK_64BIT(vector_exception) /* 1b */
53PGM_CHECK(space_switch_exception) /* 1c */ 53PGM_CHECK(space_switch_exception) /* 1c */
54PGM_CHECK(hfp_sqrt_exception) /* 1d */ 54PGM_CHECK(hfp_sqrt_exception) /* 1d */
55PGM_CHECK_DEFAULT /* 1e */ 55PGM_CHECK_DEFAULT /* 1e */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 93b9ca42e5c0..ed84cc224899 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,30 +61,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
61 return sf->gprs[8]; 61 return sf->gprs[8];
62} 62}
63 63
64void arch_cpu_idle(void)
65{
66 local_mcck_disable();
67 if (test_cpu_flag(CIF_MCCK_PENDING)) {
68 local_mcck_enable();
69 local_irq_enable();
70 return;
71 }
72 /* Halt the cpu and keep track of cpu time accounting. */
73 vtime_stop_cpu();
74 local_irq_enable();
75}
76
77void arch_cpu_idle_exit(void)
78{
79 if (test_cpu_flag(CIF_MCCK_PENDING))
80 s390_handle_mcck();
81}
82
83void arch_cpu_idle_dead(void)
84{
85 cpu_die();
86}
87
88extern void __kprobes kernel_thread_starter(void); 64extern void __kprobes kernel_thread_starter(void);
89 65
90/* 66/*
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 24612029f450..edefead3b43a 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -23,7 +23,6 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
23 */ 23 */
24void cpu_init(void) 24void cpu_init(void)
25{ 25{
26 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
27 struct cpuid *id = &__get_cpu_var(cpu_id); 26 struct cpuid *id = &__get_cpu_var(cpu_id);
28 27
29 get_cpu_id(id); 28 get_cpu_id(id);
@@ -31,7 +30,6 @@ void cpu_init(void)
31 current->active_mm = &init_mm; 30 current->active_mm = &init_mm;
32 BUG_ON(current->mm); 31 BUG_ON(current->mm);
33 enter_lazy_tlb(&init_mm, current); 32 enter_lazy_tlb(&init_mm, current);
34 memset(idle, 0, sizeof(*idle));
35} 33}
36 34
37/* 35/*
@@ -41,7 +39,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
41{ 39{
42 static const char *hwcap_str[] = { 40 static const char *hwcap_str[] = {
43 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 41 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
44 "edat", "etf3eh", "highgprs", "te" 42 "edat", "etf3eh", "highgprs", "te", "vx"
45 }; 43 };
46 unsigned long n = (unsigned long) v - 1; 44 unsigned long n = (unsigned long) v - 1;
47 int i; 45 int i;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index bebacad48305..f537e937a988 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -38,15 +38,6 @@
38#define CREATE_TRACE_POINTS 38#define CREATE_TRACE_POINTS
39#include <trace/events/syscalls.h> 39#include <trace/events/syscalls.h>
40 40
41enum s390_regset {
42 REGSET_GENERAL,
43 REGSET_FP,
44 REGSET_LAST_BREAK,
45 REGSET_TDB,
46 REGSET_SYSTEM_CALL,
47 REGSET_GENERAL_EXTENDED,
48};
49
50void update_cr_regs(struct task_struct *task) 41void update_cr_regs(struct task_struct *task)
51{ 42{
52 struct pt_regs *regs = task_pt_regs(task); 43 struct pt_regs *regs = task_pt_regs(task);
@@ -55,27 +46,39 @@ void update_cr_regs(struct task_struct *task)
55 46
56#ifdef CONFIG_64BIT 47#ifdef CONFIG_64BIT
57 /* Take care of the enable/disable of transactional execution. */ 48 /* Take care of the enable/disable of transactional execution. */
58 if (MACHINE_HAS_TE) { 49 if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
59 unsigned long cr, cr_new; 50 unsigned long cr, cr_new;
60 51
61 __ctl_store(cr, 0, 0); 52 __ctl_store(cr, 0, 0);
62 /* Set or clear transaction execution TXC bit 8. */ 53 cr_new = cr;
63 cr_new = cr | (1UL << 55); 54 if (MACHINE_HAS_TE) {
64 if (task->thread.per_flags & PER_FLAG_NO_TE) 55 /* Set or clear transaction execution TXC bit 8. */
65 cr_new &= ~(1UL << 55); 56 cr_new |= (1UL << 55);
57 if (task->thread.per_flags & PER_FLAG_NO_TE)
58 cr_new &= ~(1UL << 55);
59 }
60 if (MACHINE_HAS_VX) {
61 /* Enable/disable of vector extension */
62 cr_new &= ~(1UL << 17);
63 if (task->thread.vxrs)
64 cr_new |= (1UL << 17);
65 }
66 if (cr_new != cr) 66 if (cr_new != cr)
67 __ctl_load(cr_new, 0, 0); 67 __ctl_load(cr_new, 0, 0);
68 /* Set or clear transaction execution TDC bits 62 and 63. */ 68 if (MACHINE_HAS_TE) {
69 __ctl_store(cr, 2, 2); 69 /* Set/clear transaction execution TDC bits 62/63. */
70 cr_new = cr & ~3UL; 70 __ctl_store(cr, 2, 2);
71 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { 71 cr_new = cr & ~3UL;
72 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) 72 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
73 cr_new |= 1UL; 73 if (task->thread.per_flags &
74 else 74 PER_FLAG_TE_ABORT_RAND_TEND)
75 cr_new |= 2UL; 75 cr_new |= 1UL;
76 else
77 cr_new |= 2UL;
78 }
79 if (cr_new != cr)
80 __ctl_load(cr_new, 2, 2);
76 } 81 }
77 if (cr_new != cr)
78 __ctl_load(cr_new, 2, 2);
79 } 82 }
80#endif 83#endif
81 /* Copy user specified PER registers */ 84 /* Copy user specified PER registers */
@@ -84,7 +87,8 @@ void update_cr_regs(struct task_struct *task)
84 new.end = thread->per_user.end; 87 new.end = thread->per_user.end;
85 88
86 /* merge TIF_SINGLE_STEP into user specified PER registers. */ 89 /* merge TIF_SINGLE_STEP into user specified PER registers. */
87 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { 90 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
91 test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
88 if (test_tsk_thread_flag(task, TIF_BLOCK_STEP)) 92 if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
89 new.control |= PER_EVENT_BRANCH; 93 new.control |= PER_EVENT_BRANCH;
90 else 94 else
@@ -93,6 +97,8 @@ void update_cr_regs(struct task_struct *task)
93 new.control |= PER_CONTROL_SUSPENSION; 97 new.control |= PER_CONTROL_SUSPENSION;
94 new.control |= PER_EVENT_TRANSACTION_END; 98 new.control |= PER_EVENT_TRANSACTION_END;
95#endif 99#endif
100 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
101 new.control |= PER_EVENT_IFETCH;
96 new.start = 0; 102 new.start = 0;
97 new.end = PSW_ADDR_INSN; 103 new.end = PSW_ADDR_INSN;
98 } 104 }
@@ -923,7 +929,15 @@ static int s390_fpregs_get(struct task_struct *target,
923 save_fp_ctl(&target->thread.fp_regs.fpc); 929 save_fp_ctl(&target->thread.fp_regs.fpc);
924 save_fp_regs(target->thread.fp_regs.fprs); 930 save_fp_regs(target->thread.fp_regs.fprs);
925 } 931 }
932#ifdef CONFIG_64BIT
933 else if (target->thread.vxrs) {
934 int i;
926 935
936 for (i = 0; i < __NUM_VXRS_LOW; i++)
937 target->thread.fp_regs.fprs[i] =
938 *(freg_t *)(target->thread.vxrs + i);
939 }
940#endif
927 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 941 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
928 &target->thread.fp_regs, 0, -1); 942 &target->thread.fp_regs, 0, -1);
929} 943}
@@ -957,9 +971,20 @@ static int s390_fpregs_set(struct task_struct *target,
957 target->thread.fp_regs.fprs, 971 target->thread.fp_regs.fprs,
958 offsetof(s390_fp_regs, fprs), -1); 972 offsetof(s390_fp_regs, fprs), -1);
959 973
960 if (rc == 0 && target == current) { 974 if (rc == 0) {
961 restore_fp_ctl(&target->thread.fp_regs.fpc); 975 if (target == current) {
962 restore_fp_regs(target->thread.fp_regs.fprs); 976 restore_fp_ctl(&target->thread.fp_regs.fpc);
977 restore_fp_regs(target->thread.fp_regs.fprs);
978 }
979#ifdef CONFIG_64BIT
980 else if (target->thread.vxrs) {
981 int i;
982
983 for (i = 0; i < __NUM_VXRS_LOW; i++)
984 *(freg_t *)(target->thread.vxrs + i) =
985 target->thread.fp_regs.fprs[i];
986 }
987#endif
963 } 988 }
964 989
965 return rc; 990 return rc;
@@ -1015,6 +1040,95 @@ static int s390_tdb_set(struct task_struct *target,
1015 return 0; 1040 return 0;
1016} 1041}
1017 1042
1043static int s390_vxrs_active(struct task_struct *target,
1044 const struct user_regset *regset)
1045{
1046 return !!target->thread.vxrs;
1047}
1048
1049static int s390_vxrs_low_get(struct task_struct *target,
1050 const struct user_regset *regset,
1051 unsigned int pos, unsigned int count,
1052 void *kbuf, void __user *ubuf)
1053{
1054 __u64 vxrs[__NUM_VXRS_LOW];
1055 int i;
1056
1057 if (target->thread.vxrs) {
1058 if (target == current)
1059 save_vx_regs(target->thread.vxrs);
1060 for (i = 0; i < __NUM_VXRS_LOW; i++)
1061 vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
1062 } else
1063 memset(vxrs, 0, sizeof(vxrs));
1064 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1065}
1066
1067static int s390_vxrs_low_set(struct task_struct *target,
1068 const struct user_regset *regset,
1069 unsigned int pos, unsigned int count,
1070 const void *kbuf, const void __user *ubuf)
1071{
1072 __u64 vxrs[__NUM_VXRS_LOW];
1073 int i, rc;
1074
1075 if (!target->thread.vxrs) {
1076 rc = alloc_vector_registers(target);
1077 if (rc)
1078 return rc;
1079 } else if (target == current)
1080 save_vx_regs(target->thread.vxrs);
1081
1082 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1083 if (rc == 0) {
1084 for (i = 0; i < __NUM_VXRS_LOW; i++)
1085 *((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
1086 if (target == current)
1087 restore_vx_regs(target->thread.vxrs);
1088 }
1089
1090 return rc;
1091}
1092
1093static int s390_vxrs_high_get(struct task_struct *target,
1094 const struct user_regset *regset,
1095 unsigned int pos, unsigned int count,
1096 void *kbuf, void __user *ubuf)
1097{
1098 __vector128 vxrs[__NUM_VXRS_HIGH];
1099
1100 if (target->thread.vxrs) {
1101 if (target == current)
1102 save_vx_regs(target->thread.vxrs);
1103 memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
1104 sizeof(vxrs));
1105 } else
1106 memset(vxrs, 0, sizeof(vxrs));
1107 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1108}
1109
1110static int s390_vxrs_high_set(struct task_struct *target,
1111 const struct user_regset *regset,
1112 unsigned int pos, unsigned int count,
1113 const void *kbuf, const void __user *ubuf)
1114{
1115 int rc;
1116
1117 if (!target->thread.vxrs) {
1118 rc = alloc_vector_registers(target);
1119 if (rc)
1120 return rc;
1121 } else if (target == current)
1122 save_vx_regs(target->thread.vxrs);
1123
1124 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1125 target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
1126 if (rc == 0 && target == current)
1127 restore_vx_regs(target->thread.vxrs);
1128
1129 return rc;
1130}
1131
1018#endif 1132#endif
1019 1133
1020static int s390_system_call_get(struct task_struct *target, 1134static int s390_system_call_get(struct task_struct *target,
@@ -1038,7 +1152,7 @@ static int s390_system_call_set(struct task_struct *target,
1038} 1152}
1039 1153
1040static const struct user_regset s390_regsets[] = { 1154static const struct user_regset s390_regsets[] = {
1041 [REGSET_GENERAL] = { 1155 {
1042 .core_note_type = NT_PRSTATUS, 1156 .core_note_type = NT_PRSTATUS,
1043 .n = sizeof(s390_regs) / sizeof(long), 1157 .n = sizeof(s390_regs) / sizeof(long),
1044 .size = sizeof(long), 1158 .size = sizeof(long),
@@ -1046,7 +1160,7 @@ static const struct user_regset s390_regsets[] = {
1046 .get = s390_regs_get, 1160 .get = s390_regs_get,
1047 .set = s390_regs_set, 1161 .set = s390_regs_set,
1048 }, 1162 },
1049 [REGSET_FP] = { 1163 {
1050 .core_note_type = NT_PRFPREG, 1164 .core_note_type = NT_PRFPREG,
1051 .n = sizeof(s390_fp_regs) / sizeof(long), 1165 .n = sizeof(s390_fp_regs) / sizeof(long),
1052 .size = sizeof(long), 1166 .size = sizeof(long),
@@ -1054,8 +1168,16 @@ static const struct user_regset s390_regsets[] = {
1054 .get = s390_fpregs_get, 1168 .get = s390_fpregs_get,
1055 .set = s390_fpregs_set, 1169 .set = s390_fpregs_set,
1056 }, 1170 },
1171 {
1172 .core_note_type = NT_S390_SYSTEM_CALL,
1173 .n = 1,
1174 .size = sizeof(unsigned int),
1175 .align = sizeof(unsigned int),
1176 .get = s390_system_call_get,
1177 .set = s390_system_call_set,
1178 },
1057#ifdef CONFIG_64BIT 1179#ifdef CONFIG_64BIT
1058 [REGSET_LAST_BREAK] = { 1180 {
1059 .core_note_type = NT_S390_LAST_BREAK, 1181 .core_note_type = NT_S390_LAST_BREAK,
1060 .n = 1, 1182 .n = 1,
1061 .size = sizeof(long), 1183 .size = sizeof(long),
@@ -1063,7 +1185,7 @@ static const struct user_regset s390_regsets[] = {
1063 .get = s390_last_break_get, 1185 .get = s390_last_break_get,
1064 .set = s390_last_break_set, 1186 .set = s390_last_break_set,
1065 }, 1187 },
1066 [REGSET_TDB] = { 1188 {
1067 .core_note_type = NT_S390_TDB, 1189 .core_note_type = NT_S390_TDB,
1068 .n = 1, 1190 .n = 1,
1069 .size = 256, 1191 .size = 256,
@@ -1071,15 +1193,25 @@ static const struct user_regset s390_regsets[] = {
1071 .get = s390_tdb_get, 1193 .get = s390_tdb_get,
1072 .set = s390_tdb_set, 1194 .set = s390_tdb_set,
1073 }, 1195 },
1074#endif 1196 {
1075 [REGSET_SYSTEM_CALL] = { 1197 .core_note_type = NT_S390_VXRS_LOW,
1076 .core_note_type = NT_S390_SYSTEM_CALL, 1198 .n = __NUM_VXRS_LOW,
1077 .n = 1, 1199 .size = sizeof(__u64),
1078 .size = sizeof(unsigned int), 1200 .align = sizeof(__u64),
1079 .align = sizeof(unsigned int), 1201 .active = s390_vxrs_active,
1080 .get = s390_system_call_get, 1202 .get = s390_vxrs_low_get,
1081 .set = s390_system_call_set, 1203 .set = s390_vxrs_low_set,
1082 }, 1204 },
1205 {
1206 .core_note_type = NT_S390_VXRS_HIGH,
1207 .n = __NUM_VXRS_HIGH,
1208 .size = sizeof(__vector128),
1209 .align = sizeof(__vector128),
1210 .active = s390_vxrs_active,
1211 .get = s390_vxrs_high_get,
1212 .set = s390_vxrs_high_set,
1213 },
1214#endif
1083}; 1215};
1084 1216
1085static const struct user_regset_view user_s390_view = { 1217static const struct user_regset_view user_s390_view = {
@@ -1244,7 +1376,7 @@ static int s390_compat_last_break_set(struct task_struct *target,
1244} 1376}
1245 1377
1246static const struct user_regset s390_compat_regsets[] = { 1378static const struct user_regset s390_compat_regsets[] = {
1247 [REGSET_GENERAL] = { 1379 {
1248 .core_note_type = NT_PRSTATUS, 1380 .core_note_type = NT_PRSTATUS,
1249 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t), 1381 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
1250 .size = sizeof(compat_long_t), 1382 .size = sizeof(compat_long_t),
@@ -1252,7 +1384,7 @@ static const struct user_regset s390_compat_regsets[] = {
1252 .get = s390_compat_regs_get, 1384 .get = s390_compat_regs_get,
1253 .set = s390_compat_regs_set, 1385 .set = s390_compat_regs_set,
1254 }, 1386 },
1255 [REGSET_FP] = { 1387 {
1256 .core_note_type = NT_PRFPREG, 1388 .core_note_type = NT_PRFPREG,
1257 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t), 1389 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
1258 .size = sizeof(compat_long_t), 1390 .size = sizeof(compat_long_t),
@@ -1260,7 +1392,15 @@ static const struct user_regset s390_compat_regsets[] = {
1260 .get = s390_fpregs_get, 1392 .get = s390_fpregs_get,
1261 .set = s390_fpregs_set, 1393 .set = s390_fpregs_set,
1262 }, 1394 },
1263 [REGSET_LAST_BREAK] = { 1395 {
1396 .core_note_type = NT_S390_SYSTEM_CALL,
1397 .n = 1,
1398 .size = sizeof(compat_uint_t),
1399 .align = sizeof(compat_uint_t),
1400 .get = s390_system_call_get,
1401 .set = s390_system_call_set,
1402 },
1403 {
1264 .core_note_type = NT_S390_LAST_BREAK, 1404 .core_note_type = NT_S390_LAST_BREAK,
1265 .n = 1, 1405 .n = 1,
1266 .size = sizeof(long), 1406 .size = sizeof(long),
@@ -1268,7 +1408,7 @@ static const struct user_regset s390_compat_regsets[] = {
1268 .get = s390_compat_last_break_get, 1408 .get = s390_compat_last_break_get,
1269 .set = s390_compat_last_break_set, 1409 .set = s390_compat_last_break_set,
1270 }, 1410 },
1271 [REGSET_TDB] = { 1411 {
1272 .core_note_type = NT_S390_TDB, 1412 .core_note_type = NT_S390_TDB,
1273 .n = 1, 1413 .n = 1,
1274 .size = 256, 1414 .size = 256,
@@ -1276,15 +1416,25 @@ static const struct user_regset s390_compat_regsets[] = {
1276 .get = s390_tdb_get, 1416 .get = s390_tdb_get,
1277 .set = s390_tdb_set, 1417 .set = s390_tdb_set,
1278 }, 1418 },
1279 [REGSET_SYSTEM_CALL] = { 1419 {
1280 .core_note_type = NT_S390_SYSTEM_CALL, 1420 .core_note_type = NT_S390_VXRS_LOW,
1281 .n = 1, 1421 .n = __NUM_VXRS_LOW,
1282 .size = sizeof(compat_uint_t), 1422 .size = sizeof(__u64),
1283 .align = sizeof(compat_uint_t), 1423 .align = sizeof(__u64),
1284 .get = s390_system_call_get, 1424 .active = s390_vxrs_active,
1285 .set = s390_system_call_set, 1425 .get = s390_vxrs_low_get,
1426 .set = s390_vxrs_low_set,
1427 },
1428 {
1429 .core_note_type = NT_S390_VXRS_HIGH,
1430 .n = __NUM_VXRS_HIGH,
1431 .size = sizeof(__vector128),
1432 .align = sizeof(__vector128),
1433 .active = s390_vxrs_active,
1434 .get = s390_vxrs_high_get,
1435 .set = s390_vxrs_high_set,
1286 }, 1436 },
1287 [REGSET_GENERAL_EXTENDED] = { 1437 {
1288 .core_note_type = NT_S390_HIGH_GPRS, 1438 .core_note_type = NT_S390_HIGH_GPRS,
1289 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1439 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
1290 .size = sizeof(compat_long_t), 1440 .size = sizeof(compat_long_t),
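
The new regset helpers above lean on the architectural overlay between floating point and vector registers: the leftmost 64 bits of V0-V15 are the FPRs (already exported via NT_PRFPREG), so NT_S390_VXRS_LOW only carries their rightmost halves, and NT_S390_VXRS_HIGH carries V16-V31 in full. A small big-endian sketch of the pointer arithmetic involved, with illustrative types rather than the kernel's __vector128/freg_t:

#include <stdint.h>

typedef struct { uint64_t hi, lo; } vec128;	/* big-endian s390 layout */

/* leftmost 64 bits of V0..V15 == the corresponding FPR */
static uint64_t fpr_view(const vec128 *vxrs, int i)
{
	return *(const uint64_t *)&vxrs[i];		/* bytes 0..7 */
}

/* what s390_vxrs_low_get() copies out for register i */
static uint64_t vxrs_low_view(const vec128 *vxrs, int i)
{
	return *((const uint64_t *)&vxrs[i] + 1);	/* bytes 8..15 */
}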
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 82bc113e8c1d..e80d9ff9a56d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -343,6 +343,9 @@ static void __init setup_lowcore(void)
343 __ctl_set_bit(14, 29); 343 __ctl_set_bit(14, 29);
344 } 344 }
345#else 345#else
346 if (MACHINE_HAS_VX)
347 lc->vector_save_area_addr =
348 (unsigned long) &lc->vector_save_area;
346 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 349 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
347#endif 350#endif
348 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 351 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -452,8 +455,8 @@ static void __init setup_memory_end(void)
452#ifdef CONFIG_64BIT 455#ifdef CONFIG_64BIT
453 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; 456 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
454 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; 457 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
455 tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size; 458 tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
456 if (tmp <= (1UL << 42)) 459 if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
457 vmax = 1UL << 42; /* 3-level kernel page table */ 460 vmax = 1UL << 42; /* 3-level kernel page table */
458 else 461 else
459 vmax = 1UL << 53; /* 4-level kernel page table */ 462 vmax = 1UL << 53; /* 4-level kernel page table */
@@ -765,6 +768,12 @@ static void __init setup_hwcaps(void)
765 */ 768 */
766 if (test_facility(50) && test_facility(73)) 769 if (test_facility(50) && test_facility(73))
767 elf_hwcap |= HWCAP_S390_TE; 770 elf_hwcap |= HWCAP_S390_TE;
771
772 /*
773 * Vector extension HWCAP_S390_VXRS is bit 11.
774 */
775 if (test_facility(129))
776 elf_hwcap |= HWCAP_S390_VXRS;
768#endif 777#endif
769 778
770 get_cpu_id(&cpu_id); 779 get_cpu_id(&cpu_id);
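
The setup_memory_end() hunk above corrects the 3-level vs. 4-level page table decision: everything that has to fit below the 4 TB boundary (the identity mapping, the struct page array, the vmalloc area and the module area) is now summed before the comparison, instead of forgetting MODULES_LEN. A hedged arithmetic sketch of that check; names and parameters are illustrative, not the kernel variables:

/* Returns non-zero if a 3-level (4 TB) kernel address space suffices. */
static int fits_in_three_levels(unsigned long mem_pages,
				unsigned long page_size,
				unsigned long struct_page_size,
				unsigned long vmalloc_size,
				unsigned long modules_len)
{
	unsigned long mapped = mem_pages * (struct_page_size + page_size);

	return mapped + vmalloc_size + modules_len <= (1UL << 42);
}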
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 469c4c6d9182..0c1a0ff0a558 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -31,30 +31,117 @@
31#include <asm/switch_to.h> 31#include <asm/switch_to.h>
32#include "entry.h" 32#include "entry.h"
33 33
34typedef struct 34/*
35 * Layout of an old-style signal-frame:
36 * -----------------------------------------
37 * | save area (_SIGNAL_FRAMESIZE) |
38 * -----------------------------------------
39 * | struct sigcontext |
40 * | oldmask |
41 * | _sigregs * |
42 * -----------------------------------------
43 * | _sigregs with |
44 * | _s390_regs_common |
45 * | _s390_fp_regs |
46 * -----------------------------------------
47 * | int signo |
48 * -----------------------------------------
49 * | _sigregs_ext with |
50 * | gprs_high 64 byte (opt) |
51 * | vxrs_low 128 byte (opt) |
52 * | vxrs_high 256 byte (opt) |
53 * | reserved 128 byte (opt) |
54 * -----------------------------------------
55 * | __u16 svc_insn |
56 * -----------------------------------------
57 * The svc_insn entry with the sigreturn system call opcode does not
58 * have a fixed position and moves if gprs_high or vxrs exist.
59 * Future extensions will be added to _sigregs_ext.
60 */
61struct sigframe
35{ 62{
36 __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; 63 __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
37 struct sigcontext sc; 64 struct sigcontext sc;
38 _sigregs sregs; 65 _sigregs sregs;
39 int signo; 66 int signo;
40 __u8 retcode[S390_SYSCALL_SIZE]; 67 _sigregs_ext sregs_ext;
41} sigframe; 68 __u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
69};
42 70
43typedef struct 71/*
72 * Layout of an rt signal-frame:
73 * -----------------------------------------
74 * | save area (_SIGNAL_FRAMESIZE) |
75 * -----------------------------------------
76 * | svc __NR_rt_sigreturn 2 byte |
77 * -----------------------------------------
78 * | struct siginfo |
79 * -----------------------------------------
80 * | struct ucontext_extended with |
81 * | unsigned long uc_flags |
82 * | struct ucontext *uc_link |
83 * | stack_t uc_stack |
84 * | _sigregs uc_mcontext with |
85 * | _s390_regs_common |
86 * | _s390_fp_regs |
87 * | sigset_t uc_sigmask |
88 * | _sigregs_ext uc_mcontext_ext |
89 * | gprs_high 64 byte (opt) |
90 * | vxrs_low 128 byte (opt) |
91 * | vxrs_high 256 byte (opt)|
92 * | reserved 128 byte (opt) |
93 * -----------------------------------------
94 * Future extensions will be added to _sigregs_ext.
95 */
96struct rt_sigframe
44{ 97{
45 __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; 98 __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
46 __u8 retcode[S390_SYSCALL_SIZE]; 99 __u16 svc_insn;
47 struct siginfo info; 100 struct siginfo info;
48 struct ucontext uc; 101 struct ucontext_extended uc;
49} rt_sigframe; 102};
103
104/* Store registers needed to create the signal frame */
105static void store_sigregs(void)
106{
107 save_access_regs(current->thread.acrs);
108 save_fp_ctl(&current->thread.fp_regs.fpc);
109#ifdef CONFIG_64BIT
110 if (current->thread.vxrs) {
111 int i;
112
113 save_vx_regs(current->thread.vxrs);
114 for (i = 0; i < __NUM_FPRS; i++)
115 current->thread.fp_regs.fprs[i] =
116 *(freg_t *)(current->thread.vxrs + i);
117 } else
118#endif
119 save_fp_regs(current->thread.fp_regs.fprs);
120}
121
122/* Load registers after signal return */
123static void load_sigregs(void)
124{
125 restore_access_regs(current->thread.acrs);
126 /* restore_fp_ctl is done in restore_sigregs */
127#ifdef CONFIG_64BIT
128 if (current->thread.vxrs) {
129 int i;
130
131 for (i = 0; i < __NUM_FPRS; i++)
132 *(freg_t *)(current->thread.vxrs + i) =
133 current->thread.fp_regs.fprs[i];
134 restore_vx_regs(current->thread.vxrs);
135 } else
136#endif
137 restore_fp_regs(current->thread.fp_regs.fprs);
138}
50 139
51/* Returns non-zero on fault. */ 140/* Returns non-zero on fault. */
52static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) 141static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
53{ 142{
54 _sigregs user_sregs; 143 _sigregs user_sregs;
55 144
56 save_access_regs(current->thread.acrs);
57
58 /* Copy a 'clean' PSW mask to the user to avoid leaking 145 /* Copy a 'clean' PSW mask to the user to avoid leaking
59 information about whether PER is currently on. */ 146 information about whether PER is currently on. */
60 user_sregs.regs.psw.mask = PSW_USER_BITS | 147 user_sregs.regs.psw.mask = PSW_USER_BITS |
@@ -63,12 +150,6 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
63 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 150 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
64 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 151 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
65 sizeof(user_sregs.regs.acrs)); 152 sizeof(user_sregs.regs.acrs));
66 /*
67 * We have to store the fp registers to current->thread.fp_regs
68 * to merge them with the emulated registers.
69 */
70 save_fp_ctl(&current->thread.fp_regs.fpc);
71 save_fp_regs(current->thread.fp_regs.fprs);
72 memcpy(&user_sregs.fpregs, &current->thread.fp_regs, 153 memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
73 sizeof(user_sregs.fpregs)); 154 sizeof(user_sregs.fpregs));
74 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs))) 155 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
@@ -107,20 +188,64 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
107 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); 188 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
108 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, 189 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
109 sizeof(current->thread.acrs)); 190 sizeof(current->thread.acrs));
110 restore_access_regs(current->thread.acrs);
111 191
112 memcpy(&current->thread.fp_regs, &user_sregs.fpregs, 192 memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
113 sizeof(current->thread.fp_regs)); 193 sizeof(current->thread.fp_regs));
114 194
115 restore_fp_regs(current->thread.fp_regs.fprs);
116 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ 195 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
117 return 0; 196 return 0;
118} 197}
119 198
199/* Returns non-zero on fault. */
200static int save_sigregs_ext(struct pt_regs *regs,
201 _sigregs_ext __user *sregs_ext)
202{
203#ifdef CONFIG_64BIT
204 __u64 vxrs[__NUM_VXRS_LOW];
205 int i;
206
207 /* Save vector registers to signal stack */
208 if (current->thread.vxrs) {
209 for (i = 0; i < __NUM_VXRS_LOW; i++)
210 vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
211 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
212 sizeof(sregs_ext->vxrs_low)) ||
213 __copy_to_user(&sregs_ext->vxrs_high,
214 current->thread.vxrs + __NUM_VXRS_LOW,
215 sizeof(sregs_ext->vxrs_high)))
216 return -EFAULT;
217 }
218#endif
219 return 0;
220}
221
222static int restore_sigregs_ext(struct pt_regs *regs,
223 _sigregs_ext __user *sregs_ext)
224{
225#ifdef CONFIG_64BIT
226 __u64 vxrs[__NUM_VXRS_LOW];
227 int i;
228
229 /* Restore vector registers from signal stack */
230 if (current->thread.vxrs) {
231 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
232 sizeof(sregs_ext->vxrs_low)) ||
233 __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
234 &sregs_ext->vxrs_high,
235 sizeof(sregs_ext->vxrs_high)))
236 return -EFAULT;
237 for (i = 0; i < __NUM_VXRS_LOW; i++)
238 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
239 }
240#endif
241 return 0;
242}
243
120SYSCALL_DEFINE0(sigreturn) 244SYSCALL_DEFINE0(sigreturn)
121{ 245{
122 struct pt_regs *regs = task_pt_regs(current); 246 struct pt_regs *regs = task_pt_regs(current);
123 sigframe __user *frame = (sigframe __user *)regs->gprs[15]; 247 struct sigframe __user *frame =
248 (struct sigframe __user *) regs->gprs[15];
124 sigset_t set; 249 sigset_t set;
125 250
126 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) 251 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
@@ -128,6 +253,9 @@ SYSCALL_DEFINE0(sigreturn)
128 set_current_blocked(&set); 253 set_current_blocked(&set);
129 if (restore_sigregs(regs, &frame->sregs)) 254 if (restore_sigregs(regs, &frame->sregs))
130 goto badframe; 255 goto badframe;
256 if (restore_sigregs_ext(regs, &frame->sregs_ext))
257 goto badframe;
258 load_sigregs();
131 return regs->gprs[2]; 259 return regs->gprs[2];
132badframe: 260badframe:
133 force_sig(SIGSEGV, current); 261 force_sig(SIGSEGV, current);
@@ -137,16 +265,20 @@ badframe:
137SYSCALL_DEFINE0(rt_sigreturn) 265SYSCALL_DEFINE0(rt_sigreturn)
138{ 266{
139 struct pt_regs *regs = task_pt_regs(current); 267 struct pt_regs *regs = task_pt_regs(current);
140 rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; 268 struct rt_sigframe __user *frame =
269 (struct rt_sigframe __user *)regs->gprs[15];
141 sigset_t set; 270 sigset_t set;
142 271
143 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set))) 272 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
144 goto badframe; 273 goto badframe;
145 set_current_blocked(&set); 274 set_current_blocked(&set);
275 if (restore_altstack(&frame->uc.uc_stack))
276 goto badframe;
146 if (restore_sigregs(regs, &frame->uc.uc_mcontext)) 277 if (restore_sigregs(regs, &frame->uc.uc_mcontext))
147 goto badframe; 278 goto badframe;
148 if (restore_altstack(&frame->uc.uc_stack)) 279 if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
149 goto badframe; 280 goto badframe;
281 load_sigregs();
150 return regs->gprs[2]; 282 return regs->gprs[2];
151badframe: 283badframe:
152 force_sig(SIGSEGV, current); 284 force_sig(SIGSEGV, current);
@@ -154,11 +286,6 @@ badframe:
154} 286}
155 287
156/* 288/*
157 * Set up a signal frame.
158 */
159
160
161/*
162 * Determine which stack to use.. 289 * Determine which stack to use..
163 */ 290 */
164static inline void __user * 291static inline void __user *
@@ -195,39 +322,63 @@ static inline int map_signal(int sig)
195static int setup_frame(int sig, struct k_sigaction *ka, 322static int setup_frame(int sig, struct k_sigaction *ka,
196 sigset_t *set, struct pt_regs * regs) 323 sigset_t *set, struct pt_regs * regs)
197{ 324{
198 sigframe __user *frame; 325 struct sigframe __user *frame;
199 326 struct sigcontext sc;
200 frame = get_sigframe(ka, regs, sizeof(sigframe)); 327 unsigned long restorer;
328 size_t frame_size;
201 329
330 /*
331 * gprs_high are only present for a 31-bit task running on
332 * a 64-bit kernel (see compat_signal.c) but the space for
333 * gprs_high needs to be allocated if vector registers are
334 * included in the signal frame on a 31-bit system.
335 */
336 frame_size = sizeof(*frame) - sizeof(frame->sregs_ext);
337 if (MACHINE_HAS_VX)
338 frame_size += sizeof(frame->sregs_ext);
339 frame = get_sigframe(ka, regs, frame_size);
202 if (frame == (void __user *) -1UL) 340 if (frame == (void __user *) -1UL)
203 return -EFAULT; 341 return -EFAULT;
204 342
205 if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) 343 /* Set up backchain. */
344 if (__put_user(regs->gprs[15], (addr_t __user *) frame))
206 return -EFAULT; 345 return -EFAULT;
207 346
347 /* Create struct sigcontext on the signal stack */
348 memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE);
349 sc.sregs = (_sigregs __user __force *) &frame->sregs;
350 if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
351 return -EFAULT;
352
353 /* Store registers needed to create the signal frame */
354 store_sigregs();
355
356 /* Create _sigregs on the signal stack */
208 if (save_sigregs(regs, &frame->sregs)) 357 if (save_sigregs(regs, &frame->sregs))
209 return -EFAULT; 358 return -EFAULT;
210 if (__put_user(&frame->sregs, &frame->sc.sregs)) 359
360 /* Place signal number on stack to allow backtrace from handler. */
361 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
362 return -EFAULT;
363
364 /* Create _sigregs_ext on the signal stack */
365 if (save_sigregs_ext(regs, &frame->sregs_ext))
211 return -EFAULT; 366 return -EFAULT;
212 367
213 /* Set up to return from userspace. If provided, use a stub 368 /* Set up to return from userspace. If provided, use a stub
214 already in userspace. */ 369 already in userspace. */
215 if (ka->sa.sa_flags & SA_RESTORER) { 370 if (ka->sa.sa_flags & SA_RESTORER) {
216 regs->gprs[14] = (unsigned long) 371 restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
217 ka->sa.sa_restorer | PSW_ADDR_AMODE;
218 } else { 372 } else {
219 regs->gprs[14] = (unsigned long) 373 /* Signal frames without vector registers are short! */
220 frame->retcode | PSW_ADDR_AMODE; 374 __u16 __user *svc = (void *) frame + frame_size - 2;
221 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, 375 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
222 (u16 __user *)(frame->retcode)))
223 return -EFAULT; 376 return -EFAULT;
377 restorer = (unsigned long) svc | PSW_ADDR_AMODE;
224 } 378 }
225 379
226 /* Set up backchain. */
227 if (__put_user(regs->gprs[15], (addr_t __user *) frame))
228 return -EFAULT;
229
230 /* Set up registers for signal handler */ 380 /* Set up registers for signal handler */
381 regs->gprs[14] = restorer;
231 regs->gprs[15] = (unsigned long) frame; 382 regs->gprs[15] = (unsigned long) frame;
232 /* Force default amode and default user address space control. */ 383 /* Force default amode and default user address space control. */
233 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 384 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
@@ -247,54 +398,69 @@ static int setup_frame(int sig, struct k_sigaction *ka,
247 regs->gprs[5] = regs->int_parm_long; 398 regs->gprs[5] = regs->int_parm_long;
248 regs->gprs[6] = task_thread_info(current)->last_break; 399 regs->gprs[6] = task_thread_info(current)->last_break;
249 } 400 }
250
251 /* Place signal number on stack to allow backtrace from handler. */
252 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
253 return -EFAULT;
254 return 0; 401 return 0;
255} 402}
256 403
257static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, 404static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
258 struct pt_regs *regs) 405 struct pt_regs *regs)
259{ 406{
260 int err = 0; 407 struct rt_sigframe __user *frame;
261 rt_sigframe __user *frame; 408 unsigned long uc_flags, restorer;
262 409 size_t frame_size;
263 frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe));
264 410
411 frame_size = sizeof(struct rt_sigframe) - sizeof(_sigregs_ext);
412 /*
413 * gprs_high are only present for a 31-bit task running on
414 * a 64-bit kernel (see compat_signal.c) but the space for
415 * gprs_high needs to be allocated if vector registers are
416 * included in the signal frame on a 31-bit system.
417 */
418 uc_flags = 0;
419#ifdef CONFIG_64BIT
420 if (MACHINE_HAS_VX) {
421 frame_size += sizeof(_sigregs_ext);
422 if (current->thread.vxrs)
423 uc_flags |= UC_VXRS;
424 }
425#endif
426 frame = get_sigframe(&ksig->ka, regs, frame_size);
265 if (frame == (void __user *) -1UL) 427 if (frame == (void __user *) -1UL)
266 return -EFAULT; 428 return -EFAULT;
267 429
268 if (copy_siginfo_to_user(&frame->info, &ksig->info)) 430 /* Set up backchain. */
269 return -EFAULT; 431 if (__put_user(regs->gprs[15], (addr_t __user *) frame))
270
271 /* Create the ucontext. */
272 err |= __put_user(0, &frame->uc.uc_flags);
273 err |= __put_user(NULL, &frame->uc.uc_link);
274 err |= __save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
275 err |= save_sigregs(regs, &frame->uc.uc_mcontext);
276 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
277 if (err)
278 return -EFAULT; 432 return -EFAULT;
279 433
280 /* Set up to return from userspace. If provided, use a stub 434 /* Set up to return from userspace. If provided, use a stub
281 already in userspace. */ 435 already in userspace. */
282 if (ksig->ka.sa.sa_flags & SA_RESTORER) { 436 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
283 regs->gprs[14] = (unsigned long) 437 restorer = (unsigned long)
284 ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE; 438 ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE;
285 } else { 439 } else {
286 regs->gprs[14] = (unsigned long) 440 __u16 __user *svc = &frame->svc_insn;
287 frame->retcode | PSW_ADDR_AMODE; 441 if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
288 if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
289 (u16 __user *)(frame->retcode)))
290 return -EFAULT; 442 return -EFAULT;
443 restorer = (unsigned long) svc | PSW_ADDR_AMODE;
291 } 444 }
292 445
293 /* Set up backchain. */ 446 /* Create siginfo on the signal stack */
294 if (__put_user(regs->gprs[15], (addr_t __user *) frame)) 447 if (copy_siginfo_to_user(&frame->info, &ksig->info))
448 return -EFAULT;
449
450 /* Store registers needed to create the signal frame */
451 store_sigregs();
452
453 /* Create ucontext on the signal stack. */
454 if (__put_user(uc_flags, &frame->uc.uc_flags) ||
455 __put_user(NULL, &frame->uc.uc_link) ||
456 __save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
457 save_sigregs(regs, &frame->uc.uc_mcontext) ||
458 __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
459 save_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
295 return -EFAULT; 460 return -EFAULT;
296 461
297 /* Set up registers for signal handler */ 462 /* Set up registers for signal handler */
463 regs->gprs[14] = restorer;
298 regs->gprs[15] = (unsigned long) frame; 464 regs->gprs[15] = (unsigned long) frame;
299 /* Force default amode and default user address space control. */ 465 /* Force default amode and default user address space control. */
300 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 466 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
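
The signal frame layout comments above make the sigreturn opcode a movable trailer: it always sits in the last halfword of whatever frame size setup_frame() allocates, and the frame only includes _sigregs_ext when the machine has vector registers. A tiny sketch of that placement rule; the 576-byte extension size follows from the layout comment (64 + 128 + 256 + 128), while the fixed-part size is a placeholder, not the real sizeof:

#include <stdio.h>
#include <stddef.h>

#define SIGREGS_EXT_SIZE (64 + 128 + 256 + 128)	/* gprs_high + vxrs_low + vxrs_high + reserved */
#define FIXED_PART_SIZE  600			/* placeholder: save area + sigcontext + _sigregs + signo */

int main(void)
{
	int machine_has_vx = 1;	/* toggle to see the frame shrink */
	size_t frame_size = FIXED_PART_SIZE;

	if (machine_has_vx)
		frame_size += SIGREGS_EXT_SIZE;
	/* like setup_frame(): the sigreturn svc is the last halfword */
	printf("svc opcode at frame offset %zu\n", frame_size - 2);
	return 0;
}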
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 243c7e512600..6fd9e60101f1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -45,6 +45,7 @@
45#include <asm/debug.h> 45#include <asm/debug.h>
46#include <asm/os_info.h> 46#include <asm/os_info.h>
47#include <asm/sigp.h> 47#include <asm/sigp.h>
48#include <asm/idle.h>
48#include "entry.h" 49#include "entry.h"
49 50
50enum { 51enum {
@@ -82,7 +83,8 @@ DEFINE_MUTEX(smp_cpu_state_mutex);
82/* 83/*
83 * Signal processor helper functions. 84 * Signal processor helper functions.
84 */ 85 */
85static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status) 86static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
87 u32 *status)
86{ 88{
87 int cc; 89 int cc;
88 90
@@ -178,6 +180,9 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
178 goto out; 180 goto out;
179 } 181 }
180#else 182#else
183 if (MACHINE_HAS_VX)
184 lc->vector_save_area_addr =
185 (unsigned long) &lc->vector_save_area;
181 if (vdso_alloc_per_cpu(lc)) 186 if (vdso_alloc_per_cpu(lc))
182 goto out; 187 goto out;
183#endif 188#endif
@@ -333,12 +338,6 @@ int smp_vcpu_scheduled(int cpu)
333 return pcpu_running(pcpu_devices + cpu); 338 return pcpu_running(pcpu_devices + cpu);
334} 339}
335 340
336void smp_yield(void)
337{
338 if (MACHINE_HAS_DIAG44)
339 asm volatile("diag 0,0,0x44");
340}
341
342void smp_yield_cpu(int cpu) 341void smp_yield_cpu(int cpu)
343{ 342{
344 if (MACHINE_HAS_DIAG9C) 343 if (MACHINE_HAS_DIAG9C)
@@ -517,35 +516,53 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
517static void __init smp_get_save_area(int cpu, u16 address) 516static void __init smp_get_save_area(int cpu, u16 address)
518{ 517{
519 void *lc = pcpu_devices[0].lowcore; 518 void *lc = pcpu_devices[0].lowcore;
520 struct save_area *save_area; 519 struct save_area_ext *sa_ext;
520 unsigned long vx_sa;
521 521
522 if (is_kdump_kernel()) 522 if (is_kdump_kernel())
523 return; 523 return;
524 if (!OLDMEM_BASE && (address == boot_cpu_address || 524 if (!OLDMEM_BASE && (address == boot_cpu_address ||
525 ipl_info.type != IPL_TYPE_FCP_DUMP)) 525 ipl_info.type != IPL_TYPE_FCP_DUMP))
526 return; 526 return;
527 save_area = dump_save_area_create(cpu); 527 sa_ext = dump_save_area_create(cpu);
528 if (!save_area) 528 if (!sa_ext)
529 panic("could not allocate memory for save area\n"); 529 panic("could not allocate memory for save area\n");
530 if (address == boot_cpu_address) { 530 if (address == boot_cpu_address) {
531 /* Copy the registers of the boot cpu. */ 531 /* Copy the registers of the boot cpu. */
532 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), 532 copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
533 SAVE_AREA_BASE - PAGE_SIZE, 0); 533 SAVE_AREA_BASE - PAGE_SIZE, 0);
534 if (MACHINE_HAS_VX)
535 save_vx_regs_safe(sa_ext->vx_regs);
534 return; 536 return;
535 } 537 }
536 /* Get the registers of a non-boot cpu. */ 538 /* Get the registers of a non-boot cpu. */
537 __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); 539 __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
538 memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); 540 memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
541 if (!MACHINE_HAS_VX)
542 return;
543 /* Get the VX registers */
544 vx_sa = __get_free_page(GFP_KERNEL);
545 if (!vx_sa)
546 panic("could not allocate memory for VX save area\n");
547 __pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
548 memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
549 free_page(vx_sa);
539} 550}
540 551
541int smp_store_status(int cpu) 552int smp_store_status(int cpu)
542{ 553{
554 unsigned long vx_sa;
543 struct pcpu *pcpu; 555 struct pcpu *pcpu;
544 556
545 pcpu = pcpu_devices + cpu; 557 pcpu = pcpu_devices + cpu;
546 if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS, 558 if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
547 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED) 559 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
548 return -EIO; 560 return -EIO;
561 if (!MACHINE_HAS_VX)
562 return 0;
563 vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
564 __pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
565 vx_sa, NULL);
549 return 0; 566 return 0;
550} 567}
551 568
@@ -667,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
667 cpu_init(); 684 cpu_init();
668 preempt_disable(); 685 preempt_disable();
669 init_cpu_timer(); 686 init_cpu_timer();
670 init_cpu_vtimer(); 687 vtime_init();
671 pfault_init(); 688 pfault_init();
672 notify_cpu_starting(smp_processor_id()); 689 notify_cpu_starting(smp_processor_id());
673 set_cpu_online(smp_processor_id(), true); 690 set_cpu_online(smp_processor_id(), true);
@@ -726,6 +743,7 @@ int __cpu_disable(void)
726 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */ 743 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
727 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */ 744 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
728 __ctl_load(cregs, 0, 15); 745 __ctl_load(cregs, 0, 15);
746 clear_cpu_flag(CIF_NOHZ_DELAY);
729 return 0; 747 return 0;
730} 748}
731 749
@@ -898,42 +916,6 @@ static struct attribute_group cpu_common_attr_group = {
898 .attrs = cpu_common_attrs, 916 .attrs = cpu_common_attrs,
899}; 917};
900 918
901static ssize_t show_idle_count(struct device *dev,
902 struct device_attribute *attr, char *buf)
903{
904 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
905 unsigned long long idle_count;
906 unsigned int sequence;
907
908 do {
909 sequence = ACCESS_ONCE(idle->sequence);
910 idle_count = ACCESS_ONCE(idle->idle_count);
911 if (ACCESS_ONCE(idle->clock_idle_enter))
912 idle_count++;
913 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
914 return sprintf(buf, "%llu\n", idle_count);
915}
916static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
917
918static ssize_t show_idle_time(struct device *dev,
919 struct device_attribute *attr, char *buf)
920{
921 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
922 unsigned long long now, idle_time, idle_enter, idle_exit;
923 unsigned int sequence;
924
925 do {
926 now = get_tod_clock();
927 sequence = ACCESS_ONCE(idle->sequence);
928 idle_time = ACCESS_ONCE(idle->idle_time);
929 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
930 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
931 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
932 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
933 return sprintf(buf, "%llu\n", idle_time >> 12);
934}
935static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
936
937static struct attribute *cpu_online_attrs[] = { 919static struct attribute *cpu_online_attrs[] = {
938 &dev_attr_idle_count.attr, 920 &dev_attr_idle_count.attr,
939 &dev_attr_idle_time_us.attr, 921 &dev_attr_idle_time_us.attr,
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 4cef607f3711..69e980de0f62 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -232,6 +232,19 @@ void update_vsyscall(struct timekeeper *tk)
232 vdso_data->wtom_clock_nsec -= nsecps; 232 vdso_data->wtom_clock_nsec -= nsecps;
233 vdso_data->wtom_clock_sec++; 233 vdso_data->wtom_clock_sec++;
234 } 234 }
235
236 vdso_data->xtime_coarse_sec = tk->xtime_sec;
237 vdso_data->xtime_coarse_nsec =
238 (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
239 vdso_data->wtom_coarse_sec =
240 vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
241 vdso_data->wtom_coarse_nsec =
242 vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
243 while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
244 vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
245 vdso_data->wtom_coarse_sec++;
246 }
247
235 vdso_data->tk_mult = tk->tkr.mult; 248 vdso_data->tk_mult = tk->tkr.mult;
236 vdso_data->tk_shift = tk->tkr.shift; 249 vdso_data->tk_shift = tk->tkr.shift;
237 smp_wmb(); 250 smp_wmb();
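
The update_vsyscall() addition above precomputes the coarse clock values and carries nanosecond overflow into the seconds field. A small stand-alone sketch of that carry step, with made-up numbers:

/* Minimal sketch of the coarse-time arithmetic added above:
 * monotonic-coarse = realtime-coarse + wall_to_monotonic, with the
 * nanosecond part normalised into [0, NSEC_PER_SEC). Values are made up. */
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
        long long xt_sec = 1413236820, xt_nsec = 700000000;     /* realtime coarse */
        long long wtm_sec = -1413236000, wtm_nsec = 600000000;  /* wall_to_monotonic */
        long long sec = xt_sec + wtm_sec;
        long long nsec = xt_nsec + wtm_nsec;

        while (nsec >= NSEC_PER_SEC) {          /* same carry loop as the patch */
                nsec -= NSEC_PER_SEC;
                sec++;
        }
        printf("monotonic coarse: %lld.%09lld\n", sec, nsec);   /* 821.300000000 */
        return 0;
}
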
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 355a16c55702..b93bed76ea94 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -464,15 +464,17 @@ static struct sched_domain_topology_level s390_topology[] = {
464 464
465static int __init topology_init(void) 465static int __init topology_init(void)
466{ 466{
467 if (!MACHINE_HAS_TOPOLOGY) { 467 if (MACHINE_HAS_TOPOLOGY)
468 set_topology_timer();
469 else
468 topology_update_polarization_simple(); 470 topology_update_polarization_simple();
469 goto out;
470 }
471 set_topology_timer();
472out:
473
474 set_sched_topology(s390_topology);
475
476 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); 471 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
477} 472}
478device_initcall(topology_init); 473device_initcall(topology_init);
474
475static int __init early_topology_init(void)
476{
477 set_sched_topology(s390_topology);
478 return 0;
479}
480early_initcall(early_topology_init);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c5762324d9ee..9ff5ecba26ab 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -18,6 +18,8 @@
18#include <linux/ptrace.h> 18#include <linux/ptrace.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/slab.h>
22#include <asm/switch_to.h>
21#include "entry.h" 23#include "entry.h"
22 24
23int show_unhandled_signals = 1; 25int show_unhandled_signals = 1;
@@ -58,15 +60,10 @@ int is_valid_bugaddr(unsigned long addr)
58 return 1; 60 return 1;
59} 61}
60 62
61static void __kprobes do_trap(struct pt_regs *regs, 63void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
62 int si_signo, int si_code, char *str)
63{ 64{
64 siginfo_t info; 65 siginfo_t info;
65 66
66 if (notify_die(DIE_TRAP, str, regs, 0,
67 regs->int_code, si_signo) == NOTIFY_STOP)
68 return;
69
70 if (user_mode(regs)) { 67 if (user_mode(regs)) {
71 info.si_signo = si_signo; 68 info.si_signo = si_signo;
72 info.si_errno = 0; 69 info.si_errno = 0;
@@ -90,6 +87,15 @@ static void __kprobes do_trap(struct pt_regs *regs,
90 } 87 }
91} 88}
92 89
90static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code,
91 char *str)
92{
93 if (notify_die(DIE_TRAP, str, regs, 0,
94 regs->int_code, si_signo) == NOTIFY_STOP)
95 return;
96 do_report_trap(regs, si_signo, si_code, str);
97}
98
93void __kprobes do_per_trap(struct pt_regs *regs) 99void __kprobes do_per_trap(struct pt_regs *regs)
94{ 100{
95 siginfo_t info; 101 siginfo_t info;
@@ -178,6 +184,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
178 siginfo_t info; 184 siginfo_t info;
179 __u8 opcode[6]; 185 __u8 opcode[6];
180 __u16 __user *location; 186 __u16 __user *location;
187 int is_uprobe_insn = 0;
181 int signal = 0; 188 int signal = 0;
182 189
183 location = get_trap_ip(regs); 190 location = get_trap_ip(regs);
@@ -194,6 +201,10 @@ void __kprobes illegal_op(struct pt_regs *regs)
194 force_sig_info(SIGTRAP, &info, current); 201 force_sig_info(SIGTRAP, &info, current);
195 } else 202 } else
196 signal = SIGILL; 203 signal = SIGILL;
204#ifdef CONFIG_UPROBES
205 } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
206 is_uprobe_insn = 1;
207#endif
197#ifdef CONFIG_MATHEMU 208#ifdef CONFIG_MATHEMU
198 } else if (opcode[0] == 0xb3) { 209 } else if (opcode[0] == 0xb3) {
199 if (get_user(*((__u16 *) (opcode+2)), location+1)) 210 if (get_user(*((__u16 *) (opcode+2)), location+1))
@@ -219,11 +230,13 @@ void __kprobes illegal_op(struct pt_regs *regs)
219#endif 230#endif
220 } else 231 } else
221 signal = SIGILL; 232 signal = SIGILL;
222 } else { 233 }
223 /* 234 /*
224 * If we get an illegal op in kernel mode, send it through the 235 * We got either an illegal op in kernel mode, or user space trapped
 225 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 236 * on a uprobes illegal instruction. See if kprobes or uprobes picks
226 */ 237 * it up. If not, SIGILL.
238 */
239 if (is_uprobe_insn || !user_mode(regs)) {
227 if (notify_die(DIE_BPT, "bpt", regs, 0, 240 if (notify_die(DIE_BPT, "bpt", regs, 0,
228 3, SIGTRAP) != NOTIFY_STOP) 241 3, SIGTRAP) != NOTIFY_STOP)
229 signal = SIGILL; 242 signal = SIGILL;
@@ -292,6 +305,74 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
292 "specification exception"); 305 "specification exception");
293#endif 306#endif
294 307
308#ifdef CONFIG_64BIT
309int alloc_vector_registers(struct task_struct *tsk)
310{
311 __vector128 *vxrs;
312 int i;
313
314 /* Allocate vector register save area. */
315 vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
316 GFP_KERNEL|__GFP_REPEAT);
317 if (!vxrs)
318 return -ENOMEM;
319 preempt_disable();
320 if (tsk == current)
321 save_fp_regs(tsk->thread.fp_regs.fprs);
322 /* Copy the 16 floating point registers */
323 for (i = 0; i < 16; i++)
324 *(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i];
325 tsk->thread.vxrs = vxrs;
326 if (tsk == current) {
327 __ctl_set_bit(0, 17);
328 restore_vx_regs(vxrs);
329 }
330 preempt_enable();
331 return 0;
332}
333
334void vector_exception(struct pt_regs *regs)
335{
336 int si_code, vic;
337
338 if (!MACHINE_HAS_VX) {
339 do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
340 return;
341 }
342
343 /* get vector interrupt code from fpc */
344 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
345 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
346 switch (vic) {
347 case 1: /* invalid vector operation */
348 si_code = FPE_FLTINV;
349 break;
350 case 2: /* division by zero */
351 si_code = FPE_FLTDIV;
352 break;
353 case 3: /* overflow */
354 si_code = FPE_FLTOVF;
355 break;
356 case 4: /* underflow */
357 si_code = FPE_FLTUND;
358 break;
359 case 5: /* inexact */
360 si_code = FPE_FLTRES;
361 break;
362 default: /* unknown cause */
363 si_code = 0;
364 }
365 do_trap(regs, SIGFPE, si_code, "vector exception");
366}
367
368static int __init disable_vector_extension(char *str)
369{
370 S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
371 return 1;
372}
373__setup("novx", disable_vector_extension);
374#endif
375
295void data_exception(struct pt_regs *regs) 376void data_exception(struct pt_regs *regs)
296{ 377{
297 __u16 __user *location; 378 __u16 __user *location;
@@ -357,6 +438,18 @@ void data_exception(struct pt_regs *regs)
357 } 438 }
358 } 439 }
359#endif 440#endif
441#ifdef CONFIG_64BIT
442 /* Check for vector register enablement */
443 if (MACHINE_HAS_VX && !current->thread.vxrs &&
444 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
445 alloc_vector_registers(current);
446 /* Vector data exception is suppressing, rewind psw. */
447 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
448 clear_pt_regs_flag(regs, PIF_PER_TRAP);
449 return;
450 }
451#endif
452
360 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 453 if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
361 signal = SIGFPE; 454 signal = SIGFPE;
362 else 455 else
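
For reference, the vector-interruption-code decode in vector_exception() above can be exercised in isolation. The FPC value below is an assumed sample; the class-to-cause mapping mirrors the switch statement in the hunk:

/* Stand-alone illustration of the VIC extraction used above. */
#include <stdio.h>

int main(void)
{
        unsigned int fpc = 0x00001200;          /* assumed FPC sample after stfpc */
        unsigned int vic = (fpc & 0xf00) >> 8;  /* same extraction as the patch */
        static const char *cause[] = {
                "unknown", "invalid operation", "division by zero",
                "overflow", "underflow", "inexact"
        };

        printf("vic=%u (%s)\n", vic, vic <= 5 ? cause[vic] : "unknown");
        return 0;                               /* prints: vic=2 (division by zero) */
}
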
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
new file mode 100644
index 000000000000..956f4f7a591c
--- /dev/null
+++ b/arch/s390/kernel/uprobes.c
@@ -0,0 +1,332 @@
1/*
2 * User-space Probes (UProbes) for s390
3 *
4 * Copyright IBM Corp. 2014
5 * Author(s): Jan Willeke,
6 */
7
8#include <linux/kprobes.h>
9#include <linux/uaccess.h>
10#include <linux/uprobes.h>
11#include <linux/compat.h>
12#include <linux/kdebug.h>
13#include <asm/switch_to.h>
14#include <asm/facility.h>
15#include <asm/dis.h>
16#include "entry.h"
17
18#define UPROBE_TRAP_NR UINT_MAX
19
20int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
21 unsigned long addr)
22{
23 return probe_is_prohibited_opcode(auprobe->insn);
24}
25
26int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
27{
28 if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT)
29 return -EINVAL;
30 if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT)
31 return -EINVAL;
32 clear_pt_regs_flag(regs, PIF_PER_TRAP);
33 auprobe->saved_per = psw_bits(regs->psw).r;
34 auprobe->saved_int_code = regs->int_code;
35 regs->int_code = UPROBE_TRAP_NR;
36 regs->psw.addr = current->utask->xol_vaddr;
37 set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
38 update_cr_regs(current);
39 return 0;
40}
41
42bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
43{
44 struct pt_regs *regs = task_pt_regs(tsk);
45
46 if (regs->int_code != UPROBE_TRAP_NR)
47 return true;
48 return false;
49}
50
51int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
52{
53 int fixup = probe_get_fixup_type(auprobe->insn);
54 struct uprobe_task *utask = current->utask;
55
56 clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
57 update_cr_regs(current);
58 psw_bits(regs->psw).r = auprobe->saved_per;
59 regs->int_code = auprobe->saved_int_code;
60
61 if (fixup & FIXUP_PSW_NORMAL)
62 regs->psw.addr += utask->vaddr - utask->xol_vaddr;
63 if (fixup & FIXUP_RETURN_REGISTER) {
64 int reg = (auprobe->insn[0] & 0xf0) >> 4;
65
66 regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
67 }
68 if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
69 int ilen = insn_length(auprobe->insn[0] >> 8);
70
71 if (regs->psw.addr - utask->xol_vaddr == ilen)
72 regs->psw.addr = utask->vaddr + ilen;
73 }
 74 /* If per tracing was active, generate a trap */
75 if (regs->psw.mask & PSW_MASK_PER)
76 do_per_trap(regs);
77 return 0;
78}
79
80int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
81 void *data)
82{
83 struct die_args *args = data;
84 struct pt_regs *regs = args->regs;
85
86 if (!user_mode(regs))
87 return NOTIFY_DONE;
88 if (regs->int_code & 0x200) /* Trap during transaction */
89 return NOTIFY_DONE;
90 switch (val) {
91 case DIE_BPT:
92 if (uprobe_pre_sstep_notifier(regs))
93 return NOTIFY_STOP;
94 break;
95 case DIE_SSTEP:
96 if (uprobe_post_sstep_notifier(regs))
97 return NOTIFY_STOP;
98 default:
99 break;
100 }
101 return NOTIFY_DONE;
102}
103
104void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
105{
106 clear_thread_flag(TIF_UPROBE_SINGLESTEP);
107 regs->int_code = auprobe->saved_int_code;
108 regs->psw.addr = current->utask->vaddr;
109}
110
111unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
112 struct pt_regs *regs)
113{
114 unsigned long orig;
115
116 orig = regs->gprs[14];
117 regs->gprs[14] = trampoline;
118 return orig;
119}
120
121/* Instruction Emulation */
122
123static void adjust_psw_addr(psw_t *psw, unsigned long len)
124{
125 psw->addr = __rewind_psw(*psw, -len);
126}
127
128#define EMU_ILLEGAL_OP 1
129#define EMU_SPECIFICATION 2
130#define EMU_ADDRESSING 3
131
132#define emu_load_ril(ptr, output) \
133({ \
134 unsigned int mask = sizeof(*(ptr)) - 1; \
135 __typeof__(*(ptr)) input; \
136 int __rc = 0; \
137 \
138 if (!test_facility(34)) \
139 __rc = EMU_ILLEGAL_OP; \
140 else if ((u64 __force)ptr & mask) \
141 __rc = EMU_SPECIFICATION; \
142 else if (get_user(input, ptr)) \
143 __rc = EMU_ADDRESSING; \
144 else \
145 *(output) = input; \
146 __rc; \
147})
148
149#define emu_store_ril(ptr, input) \
150({ \
151 unsigned int mask = sizeof(*(ptr)) - 1; \
152 int __rc = 0; \
153 \
154 if (!test_facility(34)) \
155 __rc = EMU_ILLEGAL_OP; \
156 else if ((u64 __force)ptr & mask) \
157 __rc = EMU_SPECIFICATION; \
158 else if (put_user(*(input), ptr)) \
159 __rc = EMU_ADDRESSING; \
160 __rc; \
161})
162
163#define emu_cmp_ril(regs, ptr, cmp) \
164({ \
165 unsigned int mask = sizeof(*(ptr)) - 1; \
166 __typeof__(*(ptr)) input; \
167 int __rc = 0; \
168 \
169 if (!test_facility(34)) \
170 __rc = EMU_ILLEGAL_OP; \
171 else if ((u64 __force)ptr & mask) \
172 __rc = EMU_SPECIFICATION; \
173 else if (get_user(input, ptr)) \
174 __rc = EMU_ADDRESSING; \
175 else if (input > *(cmp)) \
176 psw_bits((regs)->psw).cc = 1; \
177 else if (input < *(cmp)) \
178 psw_bits((regs)->psw).cc = 2; \
179 else \
180 psw_bits((regs)->psw).cc = 0; \
181 __rc; \
182})
183
184struct insn_ril {
185 u8 opc0;
186 u8 reg : 4;
187 u8 opc1 : 4;
188 s32 disp;
189} __packed;
190
191union split_register {
192 u64 u64;
193 u32 u32[2];
194 u16 u16[4];
195 s64 s64;
196 s32 s32[2];
197 s16 s16[4];
198};
199
200/*
201 * pc relative instructions are emulated, since parameters may not be
202 * accessible from the xol area due to range limitations.
203 */
204static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
205{
206 union split_register *rx;
207 struct insn_ril *insn;
208 unsigned int ilen;
209 void *uptr;
210 int rc = 0;
211
212 insn = (struct insn_ril *) &auprobe->insn;
213 rx = (union split_register *) &regs->gprs[insn->reg];
214 uptr = (void *)(regs->psw.addr + (insn->disp * 2));
215 ilen = insn_length(insn->opc0);
216
217 switch (insn->opc0) {
218 case 0xc0:
219 switch (insn->opc1) {
220 case 0x00: /* larl */
221 rx->u64 = (unsigned long)uptr;
222 break;
223 }
224 break;
225 case 0xc4:
226 switch (insn->opc1) {
227 case 0x02: /* llhrl */
228 rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
229 break;
230 case 0x04: /* lghrl */
231 rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
232 break;
233 case 0x05: /* lhrl */
234 rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
235 break;
236 case 0x06: /* llghrl */
237 rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
238 break;
239 case 0x08: /* lgrl */
240 rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
241 break;
242 case 0x0c: /* lgfrl */
243 rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
244 break;
245 case 0x0d: /* lrl */
246 rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
247 break;
248 case 0x0e: /* llgfrl */
249 rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
250 break;
251 case 0x07: /* sthrl */
252 rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
253 break;
254 case 0x0b: /* stgrl */
255 rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
256 break;
257 case 0x0f: /* strl */
258 rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
259 break;
260 }
261 break;
262 case 0xc6:
263 switch (insn->opc1) {
264 case 0x02: /* pfdrl */
265 if (!test_facility(34))
266 rc = EMU_ILLEGAL_OP;
267 break;
268 case 0x04: /* cghrl */
269 rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
270 break;
271 case 0x05: /* chrl */
272 rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
273 break;
274 case 0x06: /* clghrl */
275 rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
276 break;
277 case 0x07: /* clhrl */
278 rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
279 break;
280 case 0x08: /* cgrl */
281 rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
282 break;
283 case 0x0a: /* clgrl */
284 rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
285 break;
286 case 0x0c: /* cgfrl */
287 rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
288 break;
289 case 0x0d: /* crl */
290 rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
291 break;
292 case 0x0e: /* clgfrl */
293 rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
294 break;
295 case 0x0f: /* clrl */
296 rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
297 break;
298 }
299 break;
300 }
301 adjust_psw_addr(&regs->psw, ilen);
302 switch (rc) {
303 case EMU_ILLEGAL_OP:
304 regs->int_code = ilen << 16 | 0x0001;
305 do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
306 break;
307 case EMU_SPECIFICATION:
308 regs->int_code = ilen << 16 | 0x0006;
 309 do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
310 break;
311 case EMU_ADDRESSING:
312 regs->int_code = ilen << 16 | 0x0005;
313 do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
314 break;
315 }
316}
317
318bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
319{
320 if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) ||
321 ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) &&
322 !is_compat_task())) {
323 regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
324 do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
325 return true;
326 }
327 if (probe_is_insn_relative_long(auprobe->insn)) {
328 handle_insn_ril(auprobe, regs);
329 return true;
330 }
331 return false;
332}
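
The emulation above resolves RIL operands as instruction-relative displacements counted in halfwords. A tiny worked example of that address arithmetic (address and displacement are made up for illustration):

/* Worked example of the RIL displacement handling in handle_insn_ril(). */
#include <stdio.h>

int main(void)
{
        unsigned long insn_addr = 0x80001000UL; /* assumed instruction address */
        int disp = -0x100;                      /* signed RIL displacement     */
        unsigned long target = insn_addr + (long) disp * 2;

        printf("larl target: %#lx\n", target);  /* prints 0x80000e00 */
        return 0;
}
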
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
index 36aaa25d05da..eca3f001f081 100644
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -19,14 +19,20 @@
19 .type __kernel_clock_getres,@function 19 .type __kernel_clock_getres,@function
20__kernel_clock_getres: 20__kernel_clock_getres:
21 .cfi_startproc 21 .cfi_startproc
22 basr %r1,0
23 la %r1,4f-.(%r1)
22 chi %r2,__CLOCK_REALTIME 24 chi %r2,__CLOCK_REALTIME
23 je 0f 25 je 0f
24 chi %r2,__CLOCK_MONOTONIC 26 chi %r2,__CLOCK_MONOTONIC
27 je 0f
28 la %r1,5f-4f(%r1)
29 chi %r2,__CLOCK_REALTIME_COARSE
30 je 0f
31 chi %r2,__CLOCK_MONOTONIC_COARSE
25 jne 3f 32 jne 3f
260: ltr %r3,%r3 330: ltr %r3,%r3
27 jz 2f /* res == NULL */ 34 jz 2f /* res == NULL */
28 basr %r1,0 351: l %r0,0(%r1)
291: l %r0,4f-1b(%r1)
30 xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ 36 xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */
31 st %r0,4(%r3) /* store tp->tv_usec */ 37 st %r0,4(%r3) /* store tp->tv_usec */
322: lhi %r2,0 382: lhi %r2,0
@@ -35,5 +41,6 @@ __kernel_clock_getres:
35 svc 0 41 svc 0
36 br %r14 42 br %r14
374: .long __CLOCK_REALTIME_RES 434: .long __CLOCK_REALTIME_RES
445: .long __CLOCK_COARSE_RES
38 .cfi_endproc 45 .cfi_endproc
39 .size __kernel_clock_getres,.-__kernel_clock_getres 46 .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 7cf18f8d4cb4..48c2206a3956 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -21,8 +21,12 @@ __kernel_clock_gettime:
21 .cfi_startproc 21 .cfi_startproc
22 basr %r5,0 22 basr %r5,0
230: al %r5,21f-0b(%r5) /* get &_vdso_data */ 230: al %r5,21f-0b(%r5) /* get &_vdso_data */
24 chi %r2,__CLOCK_REALTIME_COARSE
25 je 10f
24 chi %r2,__CLOCK_REALTIME 26 chi %r2,__CLOCK_REALTIME
25 je 11f 27 je 11f
28 chi %r2,__CLOCK_MONOTONIC_COARSE
29 je 9f
26 chi %r2,__CLOCK_MONOTONIC 30 chi %r2,__CLOCK_MONOTONIC
27 jne 19f 31 jne 19f
28 32
@@ -30,8 +34,8 @@ __kernel_clock_gettime:
301: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 341: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
31 tml %r4,0x0001 /* pending update ? loop */ 35 tml %r4,0x0001 /* pending update ? loop */
32 jnz 1b 36 jnz 1b
33 stck 24(%r15) /* Store TOD clock */ 37 stcke 24(%r15) /* Store TOD clock */
34 lm %r0,%r1,24(%r15) 38 lm %r0,%r1,25(%r15)
35 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 39 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
36 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 40 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
37 brc 3,2f 41 brc 3,2f
@@ -68,12 +72,32 @@ __kernel_clock_gettime:
68 lhi %r2,0 72 lhi %r2,0
69 br %r14 73 br %r14
70 74
75 /* CLOCK_MONOTONIC_COARSE */
769: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
77 tml %r4,0x0001 /* pending update ? loop */
78 jnz 9b
79 l %r2,__VDSO_WTOM_CRS_SEC+4(%r5)
80 l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5)
81 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
82 jne 9b
83 j 8b
84
85 /* CLOCK_REALTIME_COARSE */
8610: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
87 tml %r4,0x0001 /* pending update ? loop */
88 jnz 10b
89 l %r2,__VDSO_XTIME_CRS_SEC+4(%r5)
90 l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5)
91 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
92 jne 10b
93 j 17f
94
71 /* CLOCK_REALTIME */ 95 /* CLOCK_REALTIME */
7211: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 9611: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
73 tml %r4,0x0001 /* pending update ? loop */ 97 tml %r4,0x0001 /* pending update ? loop */
74 jnz 11b 98 jnz 11b
75 stck 24(%r15) /* Store TOD clock */ 99 stcke 24(%r15) /* Store TOD clock */
76 lm %r0,%r1,24(%r15) 100 lm %r0,%r1,25(%r15)
77 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 101 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
78 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 102 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
79 brc 3,12f 103 brc 3,12f
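
The stck to stcke switch above is why the load offsets move from 24/48 to 25/49: stcke stores a 16-byte value whose first byte is the epoch index, so the bits that stck used to deliver start one byte into the buffer. A C view of that layout, offered as an illustration only:

/* Illustrative layout of the stcke store area (all fields are bytes). */
#include <stdio.h>
#include <stddef.h>

struct tod_clock_ext {
        unsigned char epoch_index;      /* byte 0 */
        unsigned char tod[8];           /* bytes 1-8: what stck used to store */
        unsigned char rest[7];          /* remaining clock bits + programmable field */
};

int main(void)
{
        /* the classic 64-bit TOD value starts at offset 1, hence 24+1 and 48+1 */
        printf("tod offset in stcke buffer: %zu\n",
               offsetof(struct tod_clock_ext, tod));
        return 0;
}
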
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index fd621a950f7c..60def5f562db 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -29,8 +29,8 @@ __kernel_gettimeofday:
29 l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 29 l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
30 tml %r4,0x0001 /* pending update ? loop */ 30 tml %r4,0x0001 /* pending update ? loop */
31 jnz 1b 31 jnz 1b
32 stck 24(%r15) /* Store TOD clock */ 32 stcke 24(%r15) /* Store TOD clock */
33 lm %r0,%r1,24(%r15) 33 lm %r0,%r1,25(%r15)
34 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 34 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 35 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
36 brc 3,3f 36 brc 3,3f
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 34deba7c7ed1..c8513deb8c66 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -19,6 +19,12 @@
19 .type __kernel_clock_getres,@function 19 .type __kernel_clock_getres,@function
20__kernel_clock_getres: 20__kernel_clock_getres:
21 .cfi_startproc 21 .cfi_startproc
22 larl %r1,4f
23 cghi %r2,__CLOCK_REALTIME_COARSE
24 je 0f
25 cghi %r2,__CLOCK_MONOTONIC_COARSE
26 je 0f
27 larl %r1,3f
22 cghi %r2,__CLOCK_REALTIME 28 cghi %r2,__CLOCK_REALTIME
23 je 0f 29 je 0f
24 cghi %r2,__CLOCK_MONOTONIC 30 cghi %r2,__CLOCK_MONOTONIC
@@ -32,7 +38,6 @@ __kernel_clock_getres:
32 jz 2f 38 jz 2f
330: ltgr %r3,%r3 390: ltgr %r3,%r3
34 jz 1f /* res == NULL */ 40 jz 1f /* res == NULL */
35 larl %r1,3f
36 lg %r0,0(%r1) 41 lg %r0,0(%r1)
37 xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ 42 xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
38 stg %r0,8(%r3) /* store tp->tv_usec */ 43 stg %r0,8(%r3) /* store tp->tv_usec */
@@ -42,5 +47,6 @@ __kernel_clock_getres:
42 svc 0 47 svc 0
43 br %r14 48 br %r14
443: .quad __CLOCK_REALTIME_RES 493: .quad __CLOCK_REALTIME_RES
504: .quad __CLOCK_COARSE_RES
45 .cfi_endproc 51 .cfi_endproc
46 .size __kernel_clock_getres,.-__kernel_clock_getres 52 .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 3f34e09db5f4..9d9761f8e110 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -20,12 +20,16 @@
20__kernel_clock_gettime: 20__kernel_clock_gettime:
21 .cfi_startproc 21 .cfi_startproc
22 larl %r5,_vdso_data 22 larl %r5,_vdso_data
23 cghi %r2,__CLOCK_REALTIME_COARSE
24 je 4f
23 cghi %r2,__CLOCK_REALTIME 25 cghi %r2,__CLOCK_REALTIME
24 je 5f 26 je 5f
25 cghi %r2,__CLOCK_THREAD_CPUTIME_ID 27 cghi %r2,__CLOCK_THREAD_CPUTIME_ID
26 je 9f 28 je 9f
27 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ 29 cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
28 je 9f 30 je 9f
31 cghi %r2,__CLOCK_MONOTONIC_COARSE
32 je 3f
29 cghi %r2,__CLOCK_MONOTONIC 33 cghi %r2,__CLOCK_MONOTONIC
30 jne 12f 34 jne 12f
31 35
@@ -33,10 +37,10 @@ __kernel_clock_gettime:
330: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 370: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
34 tmll %r4,0x0001 /* pending update ? loop */ 38 tmll %r4,0x0001 /* pending update ? loop */
35 jnz 0b 39 jnz 0b
36 stck 48(%r15) /* Store TOD clock */ 40 stcke 48(%r15) /* Store TOD clock */
37 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 41 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
38 lg %r0,__VDSO_WTOM_SEC(%r5) 42 lg %r0,__VDSO_WTOM_SEC(%r5)
39 lg %r1,48(%r15) 43 lg %r1,49(%r15)
40 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 44 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
41 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 45 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
42 alg %r1,__VDSO_WTOM_NSEC(%r5) 46 alg %r1,__VDSO_WTOM_NSEC(%r5)
@@ -54,13 +58,33 @@ __kernel_clock_gettime:
54 lghi %r2,0 58 lghi %r2,0
55 br %r14 59 br %r14
56 60
61 /* CLOCK_MONOTONIC_COARSE */
623: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
63 tmll %r4,0x0001 /* pending update ? loop */
64 jnz 3b
65 lg %r0,__VDSO_WTOM_CRS_SEC(%r5)
66 lg %r1,__VDSO_WTOM_CRS_NSEC(%r5)
67 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
68 jne 3b
69 j 2b
70
71 /* CLOCK_REALTIME_COARSE */
724: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
73 tmll %r4,0x0001 /* pending update ? loop */
74 jnz 4b
75 lg %r0,__VDSO_XTIME_CRS_SEC(%r5)
76 lg %r1,__VDSO_XTIME_CRS_NSEC(%r5)
77 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
78 jne 4b
79 j 7f
80
57 /* CLOCK_REALTIME */ 81 /* CLOCK_REALTIME */
585: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 825: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
59 tmll %r4,0x0001 /* pending update ? loop */ 83 tmll %r4,0x0001 /* pending update ? loop */
60 jnz 5b 84 jnz 5b
61 stck 48(%r15) /* Store TOD clock */ 85 stcke 48(%r15) /* Store TOD clock */
62 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 86 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
63 lg %r1,48(%r15) 87 lg %r1,49(%r15)
64 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 88 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
65 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 89 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
66 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 90 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index d0860d1d0ccc..7a344995a97f 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -28,8 +28,8 @@ __kernel_gettimeofday:
28 lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 28 lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
29 tmll %r4,0x0001 /* pending update ? loop */ 29 tmll %r4,0x0001 /* pending update ? loop */
30 jnz 0b 30 jnz 0b
31 stck 48(%r15) /* Store TOD clock */ 31 stcke 48(%r15) /* Store TOD clock */
32 lg %r1,48(%r15) 32 lg %r1,49(%r15)
33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
34 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 34 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
35 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 35 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 8c34363d6f1e..416f2a323ba5 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -6,27 +6,18 @@
6 */ 6 */
7 7
8#include <linux/kernel_stat.h> 8#include <linux/kernel_stat.h>
9#include <linux/notifier.h>
10#include <linux/kprobes.h>
11#include <linux/export.h> 9#include <linux/export.h>
12#include <linux/kernel.h> 10#include <linux/kernel.h>
13#include <linux/timex.h> 11#include <linux/timex.h>
14#include <linux/types.h> 12#include <linux/types.h>
15#include <linux/time.h> 13#include <linux/time.h>
16#include <linux/cpu.h>
17#include <linux/smp.h>
18 14
19#include <asm/irq_regs.h>
20#include <asm/cputime.h> 15#include <asm/cputime.h>
21#include <asm/vtimer.h> 16#include <asm/vtimer.h>
22#include <asm/vtime.h> 17#include <asm/vtime.h>
23#include <asm/irq.h>
24#include "entry.h"
25 18
26static void virt_timer_expire(void); 19static void virt_timer_expire(void);
27 20
28DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
29
30static LIST_HEAD(virt_timer_list); 21static LIST_HEAD(virt_timer_list);
31static DEFINE_SPINLOCK(virt_timer_lock); 22static DEFINE_SPINLOCK(virt_timer_lock);
32static atomic64_t virt_timer_current; 23static atomic64_t virt_timer_current;
@@ -152,49 +143,6 @@ void vtime_account_system(struct task_struct *tsk)
152__attribute__((alias("vtime_account_irq_enter"))); 143__attribute__((alias("vtime_account_irq_enter")));
153EXPORT_SYMBOL_GPL(vtime_account_system); 144EXPORT_SYMBOL_GPL(vtime_account_system);
154 145
155void __kprobes vtime_stop_cpu(void)
156{
157 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
158 unsigned long long idle_time;
159 unsigned long psw_mask;
160
161 trace_hardirqs_on();
162
163 /* Wait for external, I/O or machine check interrupt. */
164 psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
165 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
166 idle->nohz_delay = 0;
167
168 /* Call the assembler magic in entry.S */
169 psw_idle(idle, psw_mask);
170
171 /* Account time spent with enabled wait psw loaded as idle time. */
172 idle->sequence++;
173 smp_wmb();
174 idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
175 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
176 idle->idle_time += idle_time;
177 idle->idle_count++;
178 account_idle_time(idle_time);
179 smp_wmb();
180 idle->sequence++;
181}
182
183cputime64_t s390_get_idle_time(int cpu)
184{
185 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
186 unsigned long long now, idle_enter, idle_exit;
187 unsigned int sequence;
188
189 do {
190 now = get_tod_clock();
191 sequence = ACCESS_ONCE(idle->sequence);
192 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
193 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
194 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
195 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
196}
197
198/* 146/*
199 * Sorted add to a list. List is linear searched until first bigger 147 * Sorted add to a list. List is linear searched until first bigger
200 * element is found. 148 * element is found.
@@ -372,31 +320,8 @@ EXPORT_SYMBOL(del_virt_timer);
372/* 320/*
373 * Start the virtual CPU timer on the current CPU. 321 * Start the virtual CPU timer on the current CPU.
374 */ 322 */
375void init_cpu_vtimer(void) 323void vtime_init(void)
376{ 324{
377 /* set initial cpu timer */ 325 /* set initial cpu timer */
378 set_vtimer(VTIMER_MAX_SLICE); 326 set_vtimer(VTIMER_MAX_SLICE);
379} 327}
380
381static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
382 void *hcpu)
383{
384 struct s390_idle_data *idle;
385 long cpu = (long) hcpu;
386
387 idle = &per_cpu(s390_idle, cpu);
388 switch (action & ~CPU_TASKS_FROZEN) {
389 case CPU_DYING:
390 idle->nohz_delay = 0;
391 default:
392 break;
393 }
394 return NOTIFY_OK;
395}
396
397void __init vtime_init(void)
398{
399 /* Enable cpu timer interrupts on the boot cpu. */
400 init_cpu_vtimer();
401 cpu_notifier(s390_nohz_notify, 0);
402}
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index c6d752e8bf28..a01df233856f 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -6,3 +6,5 @@ lib-y += delay.o string.o uaccess.o find.o
6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
7obj-$(CONFIG_64BIT) += mem64.o 7obj-$(CONFIG_64BIT) += mem64.o
8lib-$(CONFIG_SMP) += spinlock.o 8lib-$(CONFIG_SMP) += spinlock.o
9lib-$(CONFIG_KPROBES) += probes.o
10lib-$(CONFIG_UPROBES) += probes.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index a9f3d0042d58..16dc42d83f93 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -43,7 +43,7 @@ static void __udelay_disabled(unsigned long long usecs)
43 lockdep_off(); 43 lockdep_off();
44 do { 44 do {
45 set_clock_comparator(end); 45 set_clock_comparator(end);
46 vtime_stop_cpu(); 46 enabled_wait();
47 } while (get_tod_clock_fast() < end); 47 } while (get_tod_clock_fast() < end);
48 lockdep_on(); 48 lockdep_on();
49 __ctl_load(cr0, 0, 0); 49 __ctl_load(cr0, 0, 0);
@@ -62,7 +62,7 @@ static void __udelay_enabled(unsigned long long usecs)
62 clock_saved = local_tick_disable(); 62 clock_saved = local_tick_disable();
63 set_clock_comparator(end); 63 set_clock_comparator(end);
64 } 64 }
65 vtime_stop_cpu(); 65 enabled_wait();
66 if (clock_saved) 66 if (clock_saved)
67 local_tick_enable(clock_saved); 67 local_tick_enable(clock_saved);
68 } while (get_tod_clock_fast() < end); 68 } while (get_tod_clock_fast() < end);
diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c
new file mode 100644
index 000000000000..c5d64a099719
--- /dev/null
+++ b/arch/s390/lib/probes.c
@@ -0,0 +1,159 @@
1/*
2 * Common helper functions for kprobes and uprobes
3 *
4 * Copyright IBM Corp. 2014
5 */
6
7#include <linux/kprobes.h>
8#include <asm/dis.h>
9
10int probe_is_prohibited_opcode(u16 *insn)
11{
12 if (!is_known_insn((unsigned char *)insn))
13 return -EINVAL;
14 switch (insn[0] >> 8) {
15 case 0x0c: /* bassm */
16 case 0x0b: /* bsm */
17 case 0x83: /* diag */
18 case 0x44: /* ex */
19 case 0xac: /* stnsm */
20 case 0xad: /* stosm */
21 return -EINVAL;
22 case 0xc6:
23 switch (insn[0] & 0x0f) {
24 case 0x00: /* exrl */
25 return -EINVAL;
26 }
27 }
28 switch (insn[0]) {
29 case 0x0101: /* pr */
30 case 0xb25a: /* bsa */
31 case 0xb240: /* bakr */
32 case 0xb258: /* bsg */
33 case 0xb218: /* pc */
34 case 0xb228: /* pt */
35 case 0xb98d: /* epsw */
36 case 0xe560: /* tbegin */
37 case 0xe561: /* tbeginc */
38 case 0xb2f8: /* tend */
39 return -EINVAL;
40 }
41 return 0;
42}
43
44int probe_get_fixup_type(u16 *insn)
45{
46 /* default fixup method */
47 int fixup = FIXUP_PSW_NORMAL;
48
49 switch (insn[0] >> 8) {
50 case 0x05: /* balr */
51 case 0x0d: /* basr */
52 fixup = FIXUP_RETURN_REGISTER;
53 /* if r2 = 0, no branch will be taken */
54 if ((insn[0] & 0x0f) == 0)
55 fixup |= FIXUP_BRANCH_NOT_TAKEN;
56 break;
57 case 0x06: /* bctr */
58 case 0x07: /* bcr */
59 fixup = FIXUP_BRANCH_NOT_TAKEN;
60 break;
61 case 0x45: /* bal */
62 case 0x4d: /* bas */
63 fixup = FIXUP_RETURN_REGISTER;
64 break;
65 case 0x47: /* bc */
66 case 0x46: /* bct */
67 case 0x86: /* bxh */
68 case 0x87: /* bxle */
69 fixup = FIXUP_BRANCH_NOT_TAKEN;
70 break;
71 case 0x82: /* lpsw */
72 fixup = FIXUP_NOT_REQUIRED;
73 break;
74 case 0xb2: /* lpswe */
75 if ((insn[0] & 0xff) == 0xb2)
76 fixup = FIXUP_NOT_REQUIRED;
77 break;
78 case 0xa7: /* bras */
79 if ((insn[0] & 0x0f) == 0x05)
80 fixup |= FIXUP_RETURN_REGISTER;
81 break;
82 case 0xc0:
83 if ((insn[0] & 0x0f) == 0x05) /* brasl */
84 fixup |= FIXUP_RETURN_REGISTER;
85 break;
86 case 0xeb:
87 switch (insn[2] & 0xff) {
88 case 0x44: /* bxhg */
89 case 0x45: /* bxleg */
90 fixup = FIXUP_BRANCH_NOT_TAKEN;
91 break;
92 }
93 break;
94 case 0xe3: /* bctg */
95 if ((insn[2] & 0xff) == 0x46)
96 fixup = FIXUP_BRANCH_NOT_TAKEN;
97 break;
98 case 0xec:
99 switch (insn[2] & 0xff) {
100 case 0xe5: /* clgrb */
101 case 0xe6: /* cgrb */
102 case 0xf6: /* crb */
103 case 0xf7: /* clrb */
104 case 0xfc: /* cgib */
105 case 0xfd: /* cglib */
106 case 0xfe: /* cib */
107 case 0xff: /* clib */
108 fixup = FIXUP_BRANCH_NOT_TAKEN;
109 break;
110 }
111 break;
112 }
113 return fixup;
114}
115
116int probe_is_insn_relative_long(u16 *insn)
117{
118 /* Check if we have a RIL-b or RIL-c format instruction which
119 * we need to modify in order to avoid instruction emulation. */
120 switch (insn[0] >> 8) {
121 case 0xc0:
122 if ((insn[0] & 0x0f) == 0x00) /* larl */
123 return true;
124 break;
125 case 0xc4:
126 switch (insn[0] & 0x0f) {
127 case 0x02: /* llhrl */
128 case 0x04: /* lghrl */
129 case 0x05: /* lhrl */
130 case 0x06: /* llghrl */
131 case 0x07: /* sthrl */
132 case 0x08: /* lgrl */
133 case 0x0b: /* stgrl */
134 case 0x0c: /* lgfrl */
135 case 0x0d: /* lrl */
136 case 0x0e: /* llgfrl */
137 case 0x0f: /* strl */
138 return true;
139 }
140 break;
141 case 0xc6:
142 switch (insn[0] & 0x0f) {
143 case 0x02: /* pfdrl */
144 case 0x04: /* cghrl */
145 case 0x05: /* chrl */
146 case 0x06: /* clghrl */
147 case 0x07: /* clhrl */
148 case 0x08: /* cgrl */
149 case 0x0a: /* clgrl */
150 case 0x0c: /* cgfrl */
151 case 0x0d: /* crl */
152 case 0x0e: /* clgfrl */
153 case 0x0f: /* clrl */
154 return true;
155 }
156 break;
157 }
158 return false;
159}
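
As a usage illustration for probe_get_fixup_type() above, here is how the basr case combines the fixup flags. The FIXUP_* values and the instruction halfwords are assumptions made for this sketch only:

/* Sketch of the basr fixup decision, mirroring the switch above. */
#include <stdio.h>

#define FIXUP_PSW_NORMAL        0x08    /* assumed flag values */
#define FIXUP_RETURN_REGISTER   0x04
#define FIXUP_BRANCH_NOT_TAKEN  0x02

static int fixup_for_basr(unsigned short insn0)
{
        int fixup = FIXUP_RETURN_REGISTER;      /* basr stores a return address */

        if ((insn0 & 0x0f) == 0)                /* r2 == 0: no branch is taken */
                fixup |= FIXUP_BRANCH_NOT_TAKEN;
        return fixup;
}

int main(void)
{
        printf("basr %%r14,%%r1 -> %#x\n", fixup_for_basr(0x0de1));     /* 0x4 */
        printf("basr %%r14,%%r0 -> %#x\n", fixup_for_basr(0x0de0));     /* 0x6 */
        return 0;
}
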
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5b0e445bc3f3..034a35a3e9c1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
98} 98}
99EXPORT_SYMBOL(arch_spin_lock_wait_flags); 99EXPORT_SYMBOL(arch_spin_lock_wait_flags);
100 100
101void arch_spin_relax(arch_spinlock_t *lp)
102{
103 unsigned int cpu = lp->lock;
104 if (cpu != 0) {
105 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
106 !smp_vcpu_scheduled(~cpu))
107 smp_yield_cpu(~cpu);
108 }
109}
110EXPORT_SYMBOL(arch_spin_relax);
111
112int arch_spin_trylock_retry(arch_spinlock_t *lp) 101int arch_spin_trylock_retry(arch_spinlock_t *lp)
113{ 102{
114 int count; 103 int count;
@@ -122,15 +111,21 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
122 111
123void _raw_read_lock_wait(arch_rwlock_t *rw) 112void _raw_read_lock_wait(arch_rwlock_t *rw)
124{ 113{
125 unsigned int old; 114 unsigned int owner, old;
126 int count = spin_retry; 115 int count = spin_retry;
127 116
117#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
118 __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
119#endif
120 owner = 0;
128 while (1) { 121 while (1) {
129 if (count-- <= 0) { 122 if (count-- <= 0) {
130 smp_yield(); 123 if (owner && !smp_vcpu_scheduled(~owner))
124 smp_yield_cpu(~owner);
131 count = spin_retry; 125 count = spin_retry;
132 } 126 }
133 old = ACCESS_ONCE(rw->lock); 127 old = ACCESS_ONCE(rw->lock);
128 owner = ACCESS_ONCE(rw->owner);
134 if ((int) old < 0) 129 if ((int) old < 0)
135 continue; 130 continue;
136 if (_raw_compare_and_swap(&rw->lock, old, old + 1)) 131 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -139,28 +134,6 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
139} 134}
140EXPORT_SYMBOL(_raw_read_lock_wait); 135EXPORT_SYMBOL(_raw_read_lock_wait);
141 136
142void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
143{
144 unsigned int old;
145 int count = spin_retry;
146
147 local_irq_restore(flags);
148 while (1) {
149 if (count-- <= 0) {
150 smp_yield();
151 count = spin_retry;
152 }
153 old = ACCESS_ONCE(rw->lock);
154 if ((int) old < 0)
155 continue;
156 local_irq_disable();
157 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
158 return;
159 local_irq_restore(flags);
160 }
161}
162EXPORT_SYMBOL(_raw_read_lock_wait_flags);
163
164int _raw_read_trylock_retry(arch_rwlock_t *rw) 137int _raw_read_trylock_retry(arch_rwlock_t *rw)
165{ 138{
166 unsigned int old; 139 unsigned int old;
@@ -177,46 +150,62 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
177} 150}
178EXPORT_SYMBOL(_raw_read_trylock_retry); 151EXPORT_SYMBOL(_raw_read_trylock_retry);
179 152
180void _raw_write_lock_wait(arch_rwlock_t *rw) 153#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
154
155void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
181{ 156{
182 unsigned int old; 157 unsigned int owner, old;
183 int count = spin_retry; 158 int count = spin_retry;
184 159
160 owner = 0;
185 while (1) { 161 while (1) {
186 if (count-- <= 0) { 162 if (count-- <= 0) {
187 smp_yield(); 163 if (owner && !smp_vcpu_scheduled(~owner))
164 smp_yield_cpu(~owner);
188 count = spin_retry; 165 count = spin_retry;
189 } 166 }
190 old = ACCESS_ONCE(rw->lock); 167 old = ACCESS_ONCE(rw->lock);
191 if (old) 168 owner = ACCESS_ONCE(rw->owner);
192 continue; 169 smp_rmb();
193 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) 170 if ((int) old >= 0) {
194 return; 171 prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
172 old = prev;
173 }
174 if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
175 break;
195 } 176 }
196} 177}
197EXPORT_SYMBOL(_raw_write_lock_wait); 178EXPORT_SYMBOL(_raw_write_lock_wait);
198 179
199void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) 180#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
181
182void _raw_write_lock_wait(arch_rwlock_t *rw)
200{ 183{
201 unsigned int old; 184 unsigned int owner, old, prev;
202 int count = spin_retry; 185 int count = spin_retry;
203 186
204 local_irq_restore(flags); 187 prev = 0x80000000;
188 owner = 0;
205 while (1) { 189 while (1) {
206 if (count-- <= 0) { 190 if (count-- <= 0) {
207 smp_yield(); 191 if (owner && !smp_vcpu_scheduled(~owner))
192 smp_yield_cpu(~owner);
208 count = spin_retry; 193 count = spin_retry;
209 } 194 }
210 old = ACCESS_ONCE(rw->lock); 195 old = ACCESS_ONCE(rw->lock);
211 if (old) 196 owner = ACCESS_ONCE(rw->owner);
212 continue; 197 if ((int) old >= 0 &&
213 local_irq_disable(); 198 _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
214 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) 199 prev = old;
215 return; 200 else
216 local_irq_restore(flags); 201 smp_rmb();
202 if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
203 break;
217 } 204 }
218} 205}
219EXPORT_SYMBOL(_raw_write_lock_wait_flags); 206EXPORT_SYMBOL(_raw_write_lock_wait);
207
208#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
220 209
221int _raw_write_trylock_retry(arch_rwlock_t *rw) 210int _raw_write_trylock_retry(arch_rwlock_t *rw)
222{ 211{
@@ -233,3 +222,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
233 return 0; 222 return 0;
234} 223}
235EXPORT_SYMBOL(_raw_write_trylock_retry); 224EXPORT_SYMBOL(_raw_write_trylock_retry);
225
226void arch_lock_relax(unsigned int cpu)
227{
228 if (!cpu)
229 return;
230 if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
231 return;
232 smp_yield_cpu(~cpu);
233}
234EXPORT_SYMBOL(arch_lock_relax);
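
The reworked rwlock above keeps the reader count in the low 31 bits, the writer bit at 0x80000000, and records the lock owner so waiters can yield to the right virtual CPU. A toy, non-kernel model of just the bit layout, using GCC/Clang __atomic builtins purely for illustration:

/* Toy rwlock word model: bit 31 = writer, bits 0-30 = reader count. */
#include <stdint.h>
#include <stdbool.h>

#define WRITER_BIT 0x80000000u

static bool try_read_lock(uint32_t *lock)
{
        uint32_t old = __atomic_load_n(lock, __ATOMIC_RELAXED);

        if ((int32_t) old < 0)          /* writer bit set */
                return false;
        return __atomic_compare_exchange_n(lock, &old, old + 1, false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static bool try_write_lock(uint32_t *lock)
{
        uint32_t expected = 0;          /* no readers, no writer */

        return __atomic_compare_exchange_n(lock, &expected, WRITER_BIT, false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
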
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 46d517c3c763..d46cadeda204 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -54,7 +54,6 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
54 return; 54 return;
55 } 55 }
56 seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW "); 56 seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
57 seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
58 seq_putc(m, '\n'); 57 seq_putc(m, '\n');
59} 58}
60 59
@@ -129,7 +128,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
129} 128}
130 129
131#ifdef CONFIG_64BIT 130#ifdef CONFIG_64BIT
132#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO) 131#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
133#else 132#else
134#define _PMD_PROT_MASK 0 133#define _PMD_PROT_MASK 0
135#endif 134#endif
@@ -157,7 +156,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
157} 156}
158 157
159#ifdef CONFIG_64BIT 158#ifdef CONFIG_64BIT
160#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO) 159#define _PUD_PROT_MASK _REGION3_ENTRY_RO
161#else 160#else
162#define _PUD_PROT_MASK 0 161#define _PUD_PROT_MASK 0
163#endif 162#endif
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 389bc17934b7..3c80d2e38f03 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -88,7 +88,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
88 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; 88 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
89 pmd_val(pmd) |= pte_page(pte)[1].index; 89 pmd_val(pmd) |= pte_page(pte)[1].index;
90 } else 90 } else
91 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO; 91 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
92 *(pmd_t *) ptep = pmd; 92 *(pmd_t *) ptep = pmd;
93} 93}
94 94
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 8400f494623f..3fef3b299665 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -6,6 +6,7 @@
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/mm.h> 7#include <linux/mm.h>
8#include <asm/cacheflush.h> 8#include <asm/cacheflush.h>
9#include <asm/facility.h>
9#include <asm/pgtable.h> 10#include <asm/pgtable.h>
10#include <asm/page.h> 11#include <asm/page.h>
11 12
@@ -103,27 +104,50 @@ int set_memory_x(unsigned long addr, int numpages)
103} 104}
104 105
105#ifdef CONFIG_DEBUG_PAGEALLOC 106#ifdef CONFIG_DEBUG_PAGEALLOC
107
108static void ipte_range(pte_t *pte, unsigned long address, int nr)
109{
110 int i;
111
112 if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
113 __ptep_ipte_range(address, nr - 1, pte);
114 return;
115 }
116 for (i = 0; i < nr; i++) {
117 __ptep_ipte(address, pte);
118 address += PAGE_SIZE;
119 pte++;
120 }
121}
122
106void kernel_map_pages(struct page *page, int numpages, int enable) 123void kernel_map_pages(struct page *page, int numpages, int enable)
107{ 124{
108 unsigned long address; 125 unsigned long address;
126 int nr, i, j;
109 pgd_t *pgd; 127 pgd_t *pgd;
110 pud_t *pud; 128 pud_t *pud;
111 pmd_t *pmd; 129 pmd_t *pmd;
112 pte_t *pte; 130 pte_t *pte;
113 int i;
114 131
115 for (i = 0; i < numpages; i++) { 132 for (i = 0; i < numpages;) {
116 address = page_to_phys(page + i); 133 address = page_to_phys(page + i);
117 pgd = pgd_offset_k(address); 134 pgd = pgd_offset_k(address);
118 pud = pud_offset(pgd, address); 135 pud = pud_offset(pgd, address);
119 pmd = pmd_offset(pud, address); 136 pmd = pmd_offset(pud, address);
120 pte = pte_offset_kernel(pmd, address); 137 pte = pte_offset_kernel(pmd, address);
121 if (!enable) { 138 nr = (unsigned long)pte >> ilog2(sizeof(long));
122 __ptep_ipte(address, pte); 139 nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
123 pte_val(*pte) = _PAGE_INVALID; 140 nr = min(numpages - i, nr);
124 continue; 141 if (enable) {
142 for (j = 0; j < nr; j++) {
143 pte_val(*pte) = __pa(address);
144 address += PAGE_SIZE;
145 pte++;
146 }
147 } else {
148 ipte_range(pte, address, nr);
125 } 149 }
126 pte_val(*pte) = __pa(address); 150 i += nr;
127 } 151 }
128} 152}
129 153
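
The kernel_map_pages() rework batches invalidations up to the end of the current page table; the nr computation is the only subtle part. A stand-alone sketch of that arithmetic, with an assumed pte address and PTRS_PER_PTE of 256:

/* Illustration of the batching arithmetic: how many consecutive pte slots
 * remain in the same 256-entry page table, starting at a given pte? */
#include <stdio.h>

#define PTRS_PER_PTE 256UL

int main(void)
{
        unsigned long pte = 0x12345f8UL;                /* assumed pte address */
        unsigned long idx = (pte / sizeof(long)) & (PTRS_PER_PTE - 1);
        unsigned long nr  = PTRS_PER_PTE - idx;         /* slots to table end */

        printf("index %lu, %lu invalidations can be batched\n", idx, nr);
        return 0;                                       /* index 191, 65 batched */
}
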
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fdbd7888cb07..b1593c2f751a 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -236,8 +236,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
236 if (!new_page) 236 if (!new_page)
237 goto out; 237 goto out;
238 pmd_val(*pm_dir) = __pa(new_page) | 238 pmd_val(*pm_dir) = __pa(new_page) |
239 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | 239 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
240 _SEGMENT_ENTRY_CO;
241 address = (address + PMD_SIZE) & PMD_MASK; 240 address = (address + PMD_SIZE) & PMD_MASK;
242 continue; 241 continue;
243 } 242 }
@@ -253,9 +252,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
253 252
254 pt_dir = pte_offset_kernel(pm_dir, address); 253 pt_dir = pte_offset_kernel(pm_dir, address);
255 if (pte_none(*pt_dir)) { 254 if (pte_none(*pt_dir)) {
256 unsigned long new_page; 255 void *new_page;
257 256
258 new_page =__pa(vmem_alloc_pages(0)); 257 new_page = vmemmap_alloc_block(PAGE_SIZE, node);
259 if (!new_page) 258 if (!new_page)
260 goto out; 259 goto out;
261 pte_val(*pt_dir) = 260 pte_val(*pt_dir) =
@@ -263,7 +262,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
263 } 262 }
264 address += PAGE_SIZE; 263 address += PAGE_SIZE;
265 } 264 }
266 memset((void *)start, 0, end - start);
267 ret = 0; 265 ret = 0;
268out: 266out:
269 return ret; 267 return ret;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5df05f26b7d9..329db997ee66 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1660,6 +1660,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1660 device->discipline->check_for_device_change(device, cqr, irb); 1660 device->discipline->check_for_device_change(device, cqr, irb);
1661 dasd_put_device(device); 1661 dasd_put_device(device);
1662 } 1662 }
1663
 1664 /* check for attention message */
1665 if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1666 device = dasd_device_from_cdev_locked(cdev);
1667 device->discipline->check_attention(device, irb->esw.esw1.lpum);
1668 dasd_put_device(device);
1669 }
1670
1663 if (!cqr) 1671 if (!cqr)
1664 return; 1672 return;
1665 1673
@@ -2261,8 +2269,8 @@ static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
2261static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2269static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
2262{ 2270{
2263 struct dasd_device *device; 2271 struct dasd_device *device;
2264 int rc;
2265 struct dasd_ccw_req *cqr, *n; 2272 struct dasd_ccw_req *cqr, *n;
2273 int rc;
2266 2274
2267retry: 2275retry:
2268 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2276 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
@@ -2310,21 +2318,26 @@ retry:
2310 /* 2318 /*
2311 * for alias devices simplify error recovery and 2319 * for alias devices simplify error recovery and
2312 * return to upper layer 2320 * return to upper layer
2321 * do not skip ERP requests
2313 */ 2322 */
2314 if (cqr->startdev != cqr->basedev && 2323 if (cqr->startdev != cqr->basedev && !cqr->refers &&
2315 (cqr->status == DASD_CQR_TERMINATED || 2324 (cqr->status == DASD_CQR_TERMINATED ||
2316 cqr->status == DASD_CQR_NEED_ERP)) 2325 cqr->status == DASD_CQR_NEED_ERP))
2317 return -EAGAIN; 2326 return -EAGAIN;
2318 else { 2327
2319 /* normal recovery for basedev IO */ 2328 /* normal recovery for basedev IO */
2320 if (__dasd_sleep_on_erp(cqr)) { 2329 if (__dasd_sleep_on_erp(cqr)) {
2321 if (!cqr->status == DASD_CQR_TERMINATED && 2330 goto retry;
2322 !cqr->status == DASD_CQR_NEED_ERP) 2331 /* remember that ERP was needed */
2323 break; 2332 rc = 1;
2324 rc = 1; 2333 /* skip processing for active cqr */
2325 } 2334 if (cqr->status != DASD_CQR_TERMINATED &&
2335 cqr->status != DASD_CQR_NEED_ERP)
2336 break;
2326 } 2337 }
2327 } 2338 }
2339
2340 /* start ERP requests in upper loop */
2328 if (rc) 2341 if (rc)
2329 goto retry; 2342 goto retry;
2330 2343
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 14ba80bfa571..8286f742436b 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1432,6 +1432,29 @@ static ssize_t dasd_reservation_state_store(struct device *dev,
1432static DEVICE_ATTR(last_known_reservation_state, 0644, 1432static DEVICE_ATTR(last_known_reservation_state, 0644,
1433 dasd_reservation_state_show, dasd_reservation_state_store); 1433 dasd_reservation_state_show, dasd_reservation_state_store);
1434 1434
1435static ssize_t dasd_pm_show(struct device *dev,
1436 struct device_attribute *attr, char *buf)
1437{
1438 struct dasd_device *device;
1439 u8 opm, nppm, cablepm, cuirpm, hpfpm;
1440
1441 device = dasd_device_from_cdev(to_ccwdev(dev));
1442 if (IS_ERR(device))
1443 return sprintf(buf, "0\n");
1444
1445 opm = device->path_data.opm;
1446 nppm = device->path_data.npm;
1447 cablepm = device->path_data.cablepm;
1448 cuirpm = device->path_data.cuirpm;
1449 hpfpm = device->path_data.hpfpm;
1450 dasd_put_device(device);
1451
1452 return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
1453 cablepm, cuirpm, hpfpm);
1454}
1455
1456static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
1457
1435static struct attribute * dasd_attrs[] = { 1458static struct attribute * dasd_attrs[] = {
1436 &dev_attr_readonly.attr, 1459 &dev_attr_readonly.attr,
1437 &dev_attr_discipline.attr, 1460 &dev_attr_discipline.attr,
@@ -1450,6 +1473,7 @@ static struct attribute * dasd_attrs[] = {
1450 &dev_attr_reservation_policy.attr, 1473 &dev_attr_reservation_policy.attr,
1451 &dev_attr_last_known_reservation_state.attr, 1474 &dev_attr_last_known_reservation_state.attr,
1452 &dev_attr_safe_offline.attr, 1475 &dev_attr_safe_offline.attr,
1476 &dev_attr_path_masks.attr,
1453 NULL, 1477 NULL,
1454}; 1478};
1455 1479
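
The new path_masks attribute is read-only and reports the five per-device path masks (opm, npm, cablepm, cuirpm, hpfpm) as two-digit hex values. A minimal usage sketch; the bus ID 0.0.4711, the sysfs location and the mask values shown are purely illustrative:

    $ cat /sys/bus/ccw/devices/0.0.4711/path_masks
    c0 00 00 00 00
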
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 51dea7baf02c..d47f5b99623a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -29,6 +29,8 @@
29#include <asm/cio.h> 29#include <asm/cio.h>
30#include <asm/ccwdev.h> 30#include <asm/ccwdev.h>
31#include <asm/itcw.h> 31#include <asm/itcw.h>
32#include <asm/schid.h>
33#include <asm/chpid.h>
32 34
33#include "dasd_int.h" 35#include "dasd_int.h"
34#include "dasd_eckd.h" 36#include "dasd_eckd.h"
@@ -112,6 +114,12 @@ struct path_verification_work_data {
112static struct path_verification_work_data *path_verification_worker; 114static struct path_verification_work_data *path_verification_worker;
113static DEFINE_MUTEX(dasd_path_verification_mutex); 115static DEFINE_MUTEX(dasd_path_verification_mutex);
114 116
117struct check_attention_work_data {
118 struct work_struct worker;
119 struct dasd_device *device;
120 __u8 lpum;
121};
122
115/* initial attempt at a probe function. this can be simplified once 123/* initial attempt at a probe function. this can be simplified once
116 * the other detection code is gone */ 124 * the other detection code is gone */
117static int 125static int
@@ -1126,6 +1134,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1126 "device %s instead of %s\n", lpm, 1134 "device %s instead of %s\n", lpm,
1127 print_path_uid, print_device_uid); 1135 print_path_uid, print_device_uid);
1128 path_err = -EINVAL; 1136 path_err = -EINVAL;
1137 path_data->cablepm |= lpm;
1129 continue; 1138 continue;
1130 } 1139 }
1131 1140
@@ -1141,6 +1150,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1141 break; 1150 break;
1142 } 1151 }
1143 path_data->opm |= lpm; 1152 path_data->opm |= lpm;
1153 /*
1154 * if the path is used
1155 * it should not be in one of the negative lists
1156 */
1157 path_data->cablepm &= ~lpm;
1158 path_data->hpfpm &= ~lpm;
1159 path_data->cuirpm &= ~lpm;
1144 1160
1145 if (conf_data != private->conf_data) 1161 if (conf_data != private->conf_data)
1146 kfree(conf_data); 1162 kfree(conf_data);
@@ -1230,7 +1246,7 @@ static void do_path_verification_work(struct work_struct *work)
1230 struct dasd_eckd_private path_private; 1246 struct dasd_eckd_private path_private;
1231 struct dasd_uid *uid; 1247 struct dasd_uid *uid;
1232 __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE]; 1248 __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
1233 __u8 lpm, opm, npm, ppm, epm; 1249 __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
1234 unsigned long flags; 1250 unsigned long flags;
1235 char print_uid[60]; 1251 char print_uid[60];
1236 int rc; 1252 int rc;
@@ -1248,6 +1264,9 @@ static void do_path_verification_work(struct work_struct *work)
1248 npm = 0; 1264 npm = 0;
1249 ppm = 0; 1265 ppm = 0;
1250 epm = 0; 1266 epm = 0;
1267 hpfpm = 0;
1268 cablepm = 0;
1269
1251 for (lpm = 0x80; lpm; lpm >>= 1) { 1270 for (lpm = 0x80; lpm; lpm >>= 1) {
1252 if (!(lpm & data->tbvpm)) 1271 if (!(lpm & data->tbvpm))
1253 continue; 1272 continue;
@@ -1289,6 +1308,7 @@ static void do_path_verification_work(struct work_struct *work)
1289 opm &= ~lpm; 1308 opm &= ~lpm;
1290 npm &= ~lpm; 1309 npm &= ~lpm;
1291 ppm &= ~lpm; 1310 ppm &= ~lpm;
1311 hpfpm |= lpm;
1292 continue; 1312 continue;
1293 } 1313 }
1294 1314
@@ -1350,6 +1370,7 @@ static void do_path_verification_work(struct work_struct *work)
1350 opm &= ~lpm; 1370 opm &= ~lpm;
1351 npm &= ~lpm; 1371 npm &= ~lpm;
1352 ppm &= ~lpm; 1372 ppm &= ~lpm;
1373 cablepm |= lpm;
1353 continue; 1374 continue;
1354 } 1375 }
1355 } 1376 }
@@ -1364,12 +1385,21 @@ static void do_path_verification_work(struct work_struct *work)
1364 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1385 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1365 if (!device->path_data.opm && opm) { 1386 if (!device->path_data.opm && opm) {
1366 device->path_data.opm = opm; 1387 device->path_data.opm = opm;
1388 device->path_data.cablepm &= ~opm;
1389 device->path_data.cuirpm &= ~opm;
1390 device->path_data.hpfpm &= ~opm;
1367 dasd_generic_path_operational(device); 1391 dasd_generic_path_operational(device);
1368 } else 1392 } else {
1369 device->path_data.opm |= opm; 1393 device->path_data.opm |= opm;
1394 device->path_data.cablepm &= ~opm;
1395 device->path_data.cuirpm &= ~opm;
1396 device->path_data.hpfpm &= ~opm;
1397 }
1370 device->path_data.npm |= npm; 1398 device->path_data.npm |= npm;
1371 device->path_data.ppm |= ppm; 1399 device->path_data.ppm |= ppm;
1372 device->path_data.tbvpm |= epm; 1400 device->path_data.tbvpm |= epm;
1401 device->path_data.cablepm |= cablepm;
1402 device->path_data.hpfpm |= hpfpm;
1373 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1403 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1374 } 1404 }
1375 1405
@@ -4475,6 +4505,343 @@ out_err:
4475 return -1; 4505 return -1;
4476} 4506}
4477 4507
4508static int dasd_eckd_read_message_buffer(struct dasd_device *device,
4509 struct dasd_rssd_messages *messages,
4510 __u8 lpum)
4511{
4512 struct dasd_rssd_messages *message_buf;
4513 struct dasd_psf_prssd_data *prssdp;
4514 struct dasd_eckd_private *private;
4515 struct dasd_ccw_req *cqr;
4516 struct ccw1 *ccw;
4517 int rc;
4518
4519 private = (struct dasd_eckd_private *) device->private;
4520 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
4521 (sizeof(struct dasd_psf_prssd_data) +
4522 sizeof(struct dasd_rssd_messages)),
4523 device);
4524 if (IS_ERR(cqr)) {
4525 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
4526 "Could not allocate read message buffer request");
4527 return PTR_ERR(cqr);
4528 }
4529
4530 cqr->startdev = device;
4531 cqr->memdev = device;
4532 cqr->block = NULL;
4533 cqr->retries = 256;
4534 cqr->expires = 10 * HZ;
4535
4536 /* we need to check for messages on exactly this path */
4537 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
4538 cqr->lpm = lpum;
4539
4540 /* Prepare for Read Subsystem Data */
4541 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
4542 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
4543 prssdp->order = PSF_ORDER_PRSSD;
4544 prssdp->suborder = 0x03; /* Message Buffer */
4545 /* all other bytes of prssdp must be zero */
4546
4547 ccw = cqr->cpaddr;
4548 ccw->cmd_code = DASD_ECKD_CCW_PSF;
4549 ccw->count = sizeof(struct dasd_psf_prssd_data);
4550 ccw->flags |= CCW_FLAG_CC;
4551 ccw->flags |= CCW_FLAG_SLI;
4552 ccw->cda = (__u32)(addr_t) prssdp;
4553
4554 /* Read Subsystem Data - message buffer */
4555 message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
4556 memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
4557
4558 ccw++;
4559 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
4560 ccw->count = sizeof(struct dasd_rssd_messages);
4561 ccw->flags |= CCW_FLAG_SLI;
4562 ccw->cda = (__u32)(addr_t) message_buf;
4563
4564 cqr->buildclk = get_tod_clock();
4565 cqr->status = DASD_CQR_FILLED;
4566 rc = dasd_sleep_on_immediatly(cqr);
4567 if (rc == 0) {
4568 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
4569 message_buf = (struct dasd_rssd_messages *)
4570 (prssdp + 1);
4571 memcpy(messages, message_buf,
4572 sizeof(struct dasd_rssd_messages));
4573 } else
4574 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
4575 "Reading messages failed with rc=%d\n"
4576 , rc);
4577 dasd_sfree_request(cqr, cqr->memdev);
4578 return rc;
4579}
4580
4581/*
4582 * Perform Subsystem Function - CUIR response
4583 */
4584static int
4585dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
4586 __u32 message_id,
4587 struct channel_path_desc *desc,
4588 struct subchannel_id sch_id)
4589{
4590 struct dasd_psf_cuir_response *psf_cuir;
4591 struct dasd_ccw_req *cqr;
4592 struct ccw1 *ccw;
4593 int rc;
4594
4595 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
4596 sizeof(struct dasd_psf_cuir_response),
4597 device);
4598
4599 if (IS_ERR(cqr)) {
4600 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
4601 "Could not allocate PSF-CUIR request");
4602 return PTR_ERR(cqr);
4603 }
4604
4605 psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
4606 psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
4607 psf_cuir->cc = response;
4608 if (desc)
4609 psf_cuir->chpid = desc->chpid;
4610 psf_cuir->message_id = message_id;
4611 psf_cuir->cssid = sch_id.cssid;
4612 psf_cuir->ssid = sch_id.ssid;
4613
4614 ccw = cqr->cpaddr;
4615 ccw->cmd_code = DASD_ECKD_CCW_PSF;
4616 ccw->cda = (__u32)(addr_t)psf_cuir;
4617 ccw->count = sizeof(struct dasd_psf_cuir_response);
4618
4619 cqr->startdev = device;
4620 cqr->memdev = device;
4621 cqr->block = NULL;
4622 cqr->retries = 256;
4623 cqr->expires = 10*HZ;
4624 cqr->buildclk = get_tod_clock();
4625 cqr->status = DASD_CQR_FILLED;
4626
4627 rc = dasd_sleep_on(cqr);
4628
4629 dasd_sfree_request(cqr, cqr->memdev);
4630 return rc;
4631}
4632
4633static int dasd_eckd_cuir_change_state(struct dasd_device *device, __u8 lpum)
4634{
4635 unsigned long flags;
4636 __u8 tbcpm;
4637
4638 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
4639 tbcpm = device->path_data.opm & ~lpum;
4640 if (tbcpm) {
4641 device->path_data.opm = tbcpm;
4642 device->path_data.cuirpm |= lpum;
4643 }
4644 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
4645 return tbcpm ? 0 : PSF_CUIR_LAST_PATH;
4646}
4647
4648/*
4649 * walk through all devices and quiesce them
4650 * if it is the last path return error
4651 *
4652 * if only part of the devices are quiesced and an error
4653 * occurs no onlining necessary, the storage server will
4654 * notify the already set offline devices again
4655 */
4656static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
4657 struct channel_path_desc *desc,
4658 struct subchannel_id sch_id)
4659{
4660 struct alias_pav_group *pavgroup, *tempgroup;
4661 struct dasd_eckd_private *private;
4662 struct dasd_device *dev, *n;
4663 int rc;
4664
4665 private = (struct dasd_eckd_private *) device->private;
4666 rc = 0;
4667
4668 /* active devices */
4669 list_for_each_entry_safe(dev, n,
4670 &private->lcu->active_devices,
4671 alias_list) {
4672 rc = dasd_eckd_cuir_change_state(dev, lpum);
4673 if (rc)
4674 goto out;
4675 }
4676
4677 /* inactive devices */
4678 list_for_each_entry_safe(dev, n,
4679 &private->lcu->inactive_devices,
4680 alias_list) {
4681 rc = dasd_eckd_cuir_change_state(dev, lpum);
4682 if (rc)
4683 goto out;
4684 }
4685
4686 /* devices in PAV groups */
4687 list_for_each_entry_safe(pavgroup, tempgroup,
4688 &private->lcu->grouplist, group) {
4689 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
4690 alias_list) {
4691 rc = dasd_eckd_cuir_change_state(dev, lpum);
4692 if (rc)
4693 goto out;
4694 }
4695 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
4696 alias_list) {
4697 rc = dasd_eckd_cuir_change_state(dev, lpum);
4698 if (rc)
4699 goto out;
4700 }
4701 }
4702
4703 pr_warn("Service on the storage server caused path %x.%02x to go offline",
4704 sch_id.cssid, desc ? desc->chpid : 0);
4705 rc = PSF_CUIR_COMPLETED;
4706out:
4707 return rc;
4708}
4709
4710static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
4711 struct channel_path_desc *desc,
4712 struct subchannel_id sch_id)
4713{
4714 struct alias_pav_group *pavgroup, *tempgroup;
4715 struct dasd_eckd_private *private;
4716 struct dasd_device *dev, *n;
4717
4718 pr_info("Path %x.%02x is back online after service on the storage server",
4719 sch_id.cssid, desc ? desc->chpid : 0);
4720 private = (struct dasd_eckd_private *) device->private;
4721
4722 /*
 4723 * the path may have been added through a generic path event before;
4724 * only trigger path verification if the path is not already in use
4725 */
4726
4727 list_for_each_entry_safe(dev, n,
4728 &private->lcu->active_devices,
4729 alias_list) {
4730 if (!(dev->path_data.opm & lpum)) {
4731 dev->path_data.tbvpm |= lpum;
4732 dasd_schedule_device_bh(dev);
4733 }
4734 }
4735
4736 list_for_each_entry_safe(dev, n,
4737 &private->lcu->inactive_devices,
4738 alias_list) {
4739 if (!(dev->path_data.opm & lpum)) {
4740 dev->path_data.tbvpm |= lpum;
4741 dasd_schedule_device_bh(dev);
4742 }
4743 }
4744
4745 /* devices in PAV groups */
4746 list_for_each_entry_safe(pavgroup, tempgroup,
4747 &private->lcu->grouplist,
4748 group) {
4749 list_for_each_entry_safe(dev, n,
4750 &pavgroup->baselist,
4751 alias_list) {
4752 if (!(dev->path_data.opm & lpum)) {
4753 dev->path_data.tbvpm |= lpum;
4754 dasd_schedule_device_bh(dev);
4755 }
4756 }
4757 list_for_each_entry_safe(dev, n,
4758 &pavgroup->aliaslist,
4759 alias_list) {
4760 if (!(dev->path_data.opm & lpum)) {
4761 dev->path_data.tbvpm |= lpum;
4762 dasd_schedule_device_bh(dev);
4763 }
4764 }
4765 }
4766 return PSF_CUIR_COMPLETED;
4767}
4768
4769static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
4770 __u8 lpum)
4771{
4772 struct dasd_cuir_message *cuir = messages;
4773 struct channel_path_desc *desc;
4774 struct subchannel_id sch_id;
4775 int pos, response;
4776 ccw_device_get_schid(device->cdev, &sch_id);
4777
4778 /* get position of path in mask */
4779 pos = 8 - ffs(lpum);
4780 /* get channel path descriptor from this position */
4781 desc = ccw_device_get_chp_desc(device->cdev, pos);
4782
4783 if (cuir->code == CUIR_QUIESCE) {
4784 /* quiesce */
4785 response = dasd_eckd_cuir_quiesce(device, lpum, desc, sch_id);
4786 } else if (cuir->code == CUIR_RESUME) {
4787 /* resume */
4788 response = dasd_eckd_cuir_resume(device, lpum, desc, sch_id);
4789 } else
4790 response = PSF_CUIR_NOT_SUPPORTED;
4791
4792 dasd_eckd_psf_cuir_response(device, response, cuir->message_id,
4793 desc, sch_id);
4794
4795 /* free descriptor copy */
4796 kfree(desc);
4797}
4798
4799static void dasd_eckd_check_attention_work(struct work_struct *work)
4800{
4801 struct check_attention_work_data *data;
4802 struct dasd_rssd_messages *messages;
4803 struct dasd_device *device;
4804 int rc;
4805
4806 data = container_of(work, struct check_attention_work_data, worker);
4807 device = data->device;
4808
4809 messages = kzalloc(sizeof(*messages), GFP_KERNEL);
4810 if (!messages) {
4811 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
4812 "Could not allocate attention message buffer");
4813 goto out;
4814 }
4815
4816 rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
4817 if (rc)
4818 goto out;
4819
4820 if (messages->length == ATTENTION_LENGTH_CUIR &&
4821 messages->format == ATTENTION_FORMAT_CUIR)
4822 dasd_eckd_handle_cuir(device, messages, data->lpum);
4823
4824out:
4825 dasd_put_device(device);
4826 kfree(messages);
4827 kfree(data);
4828}
4829
4830static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
4831{
4832 struct check_attention_work_data *data;
4833
4834 data = kzalloc(sizeof(*data), GFP_ATOMIC);
4835 if (!data)
4836 return -ENOMEM;
4837 INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
4838 dasd_get_device(device);
4839 data->device = device;
4840 data->lpum = lpum;
4841 schedule_work(&data->worker);
4842 return 0;
4843}
4844
4478static struct ccw_driver dasd_eckd_driver = { 4845static struct ccw_driver dasd_eckd_driver = {
4479 .driver = { 4846 .driver = {
4480 .name = "dasd-eckd", 4847 .name = "dasd-eckd",
@@ -4539,6 +4906,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
4539 .reload = dasd_eckd_reload_device, 4906 .reload = dasd_eckd_reload_device,
4540 .get_uid = dasd_eckd_get_uid, 4907 .get_uid = dasd_eckd_get_uid,
4541 .kick_validate = dasd_eckd_kick_validate_server, 4908 .kick_validate = dasd_eckd_kick_validate_server,
4909 .check_attention = dasd_eckd_check_attention,
4542}; 4910};
4543 4911
4544static int __init 4912static int __init
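
The CUIR handler converts between a single-bit logical path mask (lpum) and a channel-path position: bit 0x80 is position 0 and bit 0x01 is position 7, hence pos = 8 - ffs(lpum) in dasd_eckd_handle_cuir() and the 0x80 >> i style loops used during path verification. A small user-space sketch of that arithmetic follows; it is not part of the patch and only illustrates the mask/index mapping:

    /* illustration of the lpum <-> channel-path position mapping */
    #include <stdio.h>
    #include <strings.h>            /* ffs(); the kernel provides its own ffs() */

    int main(void)
    {
            unsigned char lpum = 0x20;        /* example: third path from the left */
            int pos = 8 - ffs(lpum);          /* -> 2, as in dasd_eckd_handle_cuir() */
            unsigned char back = 0x80 >> pos; /* -> 0x20 again, as in the lpm loops */

            printf("lpum=0x%02x pos=%d back=0x%02x\n", lpum, pos, back);
            return 0;
    }
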
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 2555e494591f..ddab7df36e25 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -51,8 +51,35 @@
51/* 51/*
52 * Perform Subsystem Function / Sub-Orders 52 * Perform Subsystem Function / Sub-Orders
53 */ 53 */
54#define PSF_ORDER_PRSSD 0x18 54#define PSF_ORDER_PRSSD 0x18
55#define PSF_ORDER_SSC 0x1D 55#define PSF_ORDER_CUIR_RESPONSE 0x1A
56#define PSF_ORDER_SSC 0x1D
57
58/*
59 * CUIR response condition codes
60 */
61#define PSF_CUIR_INVALID 0x00
62#define PSF_CUIR_COMPLETED 0x01
63#define PSF_CUIR_NOT_SUPPORTED 0x02
64#define PSF_CUIR_ERROR_IN_REQ 0x03
65#define PSF_CUIR_DENIED 0x04
66#define PSF_CUIR_LAST_PATH 0x05
67#define PSF_CUIR_DEVICE_ONLINE 0x06
68#define PSF_CUIR_VARY_FAILURE 0x07
69#define PSF_CUIR_SOFTWARE_FAILURE 0x08
70#define PSF_CUIR_NOT_RECOGNIZED 0x09
71
72/*
73 * CUIR codes
74 */
75#define CUIR_QUIESCE 0x01
76#define CUIR_RESUME 0x02
77
78/*
79 * attention message definitions
80 */
81#define ATTENTION_LENGTH_CUIR 0x0e
82#define ATTENTION_FORMAT_CUIR 0x01
56 83
57/* 84/*
 58 * Size that is reported for large volumes in the old 16-bit no_cyl field 85
@@ -342,6 +369,38 @@ struct dasd_rssd_features {
342 char feature[256]; 369 char feature[256];
343} __attribute__((packed)); 370} __attribute__((packed));
344 371
372struct dasd_rssd_messages {
373 __u16 length;
374 __u8 format;
375 __u8 code;
376 __u32 message_id;
377 __u8 flags;
378 char messages[4087];
379} __packed;
380
381struct dasd_cuir_message {
382 __u16 length;
383 __u8 format;
384 __u8 code;
385 __u32 message_id;
386 __u8 flags;
387 __u8 neq_map[3];
388 __u8 ned_map;
389 __u8 record_selector;
390} __packed;
391
392struct dasd_psf_cuir_response {
393 __u8 order;
394 __u8 flags;
395 __u8 cc;
396 __u8 chpid;
397 __u16 device_nr;
398 __u16 reserved;
399 __u32 message_id;
400 __u64 system_id;
401 __u8 cssid;
402 __u8 ssid;
403} __packed;
345 404
346/* 405/*
347 * Perform Subsystem Function - Prepare for Read Subsystem Data 406 * Perform Subsystem Function - Prepare for Read Subsystem Data
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index c20170166909..8b5d4100abf7 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -357,6 +357,7 @@ struct dasd_discipline {
357 357
358 int (*get_uid) (struct dasd_device *, struct dasd_uid *); 358 int (*get_uid) (struct dasd_device *, struct dasd_uid *);
359 void (*kick_validate) (struct dasd_device *); 359 void (*kick_validate) (struct dasd_device *);
360 int (*check_attention)(struct dasd_device *, __u8);
360}; 361};
361 362
362extern struct dasd_discipline *dasd_diag_discipline_pointer; 363extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -382,6 +383,10 @@ struct dasd_path {
382 __u8 tbvpm; 383 __u8 tbvpm;
383 __u8 ppm; 384 __u8 ppm;
384 __u8 npm; 385 __u8 npm;
386 /* paths that are not used because of a special condition */
 387 __u8 cablepm; /* mis-cabled */
388 __u8 hpfpm; /* the HPF requirements of the other paths are not met */
389 __u8 cuirpm; /* CUIR varied offline */
385}; 390};
386 391
387struct dasd_profile_info { 392struct dasd_profile_info {
@@ -501,7 +506,10 @@ struct dasd_block {
501 struct dasd_profile profile; 506 struct dasd_profile profile;
502}; 507};
503 508
504 509struct dasd_attention_data {
510 struct dasd_device *device;
511 __u8 lpum;
512};
505 513
506/* reasons why device (ccw_device_start) was stopped */ 514/* reasons why device (ccw_device_start) was stopped */
507#define DASD_STOPPED_NOT_ACC 1 /* not accessible */ 515#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
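
The three new masks are kept disjoint from opm: whenever a path becomes operational again, dasd_eckd_read_conf() and do_path_verification_work() clear its bit from cablepm, cuirpm and hpfpm. A minimal sketch of that bookkeeping; the struct and helper names here are invented for illustration, only the field names and semantics come from the patch:

    /* illustration only: keep the positive and negative path masks disjoint */
    struct dasd_path_masks {                  /* mirrors the fields added above */
            unsigned char opm, cablepm, hpfpm, cuirpm;
    };

    static void path_set_operational(struct dasd_path_masks *pd, unsigned char lpm)
    {
            pd->opm     |= lpm;     /* path is usable ...                   */
            pd->cablepm &= ~lpm;    /* ... so it is not mis-cabled,         */
            pd->cuirpm  &= ~lpm;    /* not varied offline by CUIR,          */
            pd->hpfpm   &= ~lpm;    /* and not excluded for HPF reasons     */
    }
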
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 71bf959732fe..dc24ecfac2d1 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -102,6 +102,19 @@ config SCLP_ASYNC
 102 want to inform other people about your kernel panics, 102 want to inform other people about your kernel panics,
103 need this feature and intend to run your kernel in LPAR. 103 need this feature and intend to run your kernel in LPAR.
104 104
105config HMC_DRV
106 def_tristate m
107 prompt "Support for file transfers from HMC drive CD/DVD-ROM"
108 depends on 64BIT
109 select CRC16
110 help
111 This option enables support for file transfers from a Hardware
112 Management Console (HMC) drive CD/DVD-ROM. It is available as a
 113 module, called 'hmcdrv', and also as a kernel built-in. There is one
 114 optional parameter for this module: cachesize=N, which modifies the
 115 transfer cache size from its default value 0.5MB to N bytes. If N
116 is zero, then no caching is performed.
117
105config S390_TAPE 118config S390_TAPE
106 def_tristate m 119 def_tristate m
107 prompt "S/390 tape device support" 120 prompt "S/390 tape device support"
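
As the help text above states, the cache size of the hmcdrv module can be set at load time via the cachesize parameter; two illustrative invocations (the byte counts are examples only):

    modprobe hmcdrv cachesize=1048576    # use a 1 MiB transfer cache
    modprobe hmcdrv cachesize=0          # disable caching entirely
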
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 78b6ace7edcb..6fa9364d1c07 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -33,3 +33,6 @@ obj-$(CONFIG_S390_VMUR) += vmur.o
33 33
34zcore_mod-objs := sclp_sdias.o zcore.o 34zcore_mod-objs := sclp_sdias.o zcore.o
35obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o 35obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o
36
37hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
38obj-$(CONFIG_HMC_DRV) += hmcdrv.o
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
new file mode 100644
index 000000000000..93889632fdf9
--- /dev/null
+++ b/drivers/s390/char/diag_ftp.c
@@ -0,0 +1,237 @@
1/*
 2 * DIAGNOSE X'2C4' instruction based HMC FTP services, usable on z/VM
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 *
7 */
8
9#define KMSG_COMPONENT "hmcdrv"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <linux/irq.h>
15#include <linux/wait.h>
16#include <linux/string.h>
17#include <asm/ctl_reg.h>
18
19#include "hmcdrv_ftp.h"
20#include "diag_ftp.h"
21
22/* DIAGNOSE X'2C4' return codes in Ry */
23#define DIAG_FTP_RET_OK 0 /* HMC FTP started successfully */
24#define DIAG_FTP_RET_EBUSY 4 /* HMC FTP service currently busy */
25#define DIAG_FTP_RET_EIO 8 /* HMC FTP service I/O error */
26/* and an artificial extension */
27#define DIAG_FTP_RET_EPERM 2 /* HMC FTP service privilege error */
28
29/* FTP service status codes (after INTR at guest real location 133) */
30#define DIAG_FTP_STAT_OK 0U /* request completed successfully */
31#define DIAG_FTP_STAT_PGCC 4U /* program check condition */
32#define DIAG_FTP_STAT_PGIOE 8U /* paging I/O error */
33#define DIAG_FTP_STAT_TIMEOUT 12U /* timeout */
34#define DIAG_FTP_STAT_EBASE 16U /* base of error codes from SCLP */
35#define DIAG_FTP_STAT_LDFAIL (DIAG_FTP_STAT_EBASE + 1U) /* failed */
36#define DIAG_FTP_STAT_LDNPERM (DIAG_FTP_STAT_EBASE + 2U) /* not allowed */
37#define DIAG_FTP_STAT_LDRUNS (DIAG_FTP_STAT_EBASE + 3U) /* runs */
38#define DIAG_FTP_STAT_LDNRUNS (DIAG_FTP_STAT_EBASE + 4U) /* not runs */
39
40/**
41 * struct diag_ftp_ldfpl - load file FTP parameter list (LDFPL)
42 * @bufaddr: real buffer address (at 4k boundary)
43 * @buflen: length of buffer
44 * @offset: dir/file offset
45 * @intparm: interruption parameter (unused)
46 * @transferred: bytes transferred
47 * @fsize: file size, filled on GET
48 * @failaddr: failing address
49 * @spare: padding
50 * @fident: file name - ASCII
51 */
52struct diag_ftp_ldfpl {
53 u64 bufaddr;
54 u64 buflen;
55 u64 offset;
56 u64 intparm;
57 u64 transferred;
58 u64 fsize;
59 u64 failaddr;
60 u64 spare;
61 u8 fident[HMCDRV_FTP_FIDENT_MAX];
62} __packed;
63
64static DECLARE_COMPLETION(diag_ftp_rx_complete);
65static int diag_ftp_subcode;
66
67/**
68 * diag_ftp_handler() - FTP services IRQ handler
69 * @extirq: external interrupt (sub-) code
70 * @param32: 32-bit interruption parameter from &struct diag_ftp_ldfpl
71 * @param64: unused (for 64-bit interrupt parameters)
72 */
73static void diag_ftp_handler(struct ext_code extirq,
74 unsigned int param32,
75 unsigned long param64)
76{
77 if ((extirq.subcode >> 8) != 8)
78 return; /* not a FTP services sub-code */
79
80 inc_irq_stat(IRQEXT_FTP);
81 diag_ftp_subcode = extirq.subcode & 0xffU;
82 complete(&diag_ftp_rx_complete);
83}
84
85/**
86 * diag_ftp_2c4() - DIAGNOSE X'2C4' service call
87 * @fpl: pointer to prepared LDFPL
88 * @cmd: FTP command to be executed
89 *
90 * Performs a DIAGNOSE X'2C4' call with (input/output) FTP parameter list
91 * @fpl and FTP function code @cmd. In case of an error the function does
92 * nothing and returns an (negative) error code.
93 *
94 * Notes:
95 * 1. This function only initiates a transfer, so the caller must wait
96 * for completion (asynchronous execution).
97 * 2. The FTP parameter list @fpl must be aligned to a double-word boundary.
98 * 3. fpl->bufaddr must be a real address, 4k aligned
99 */
100static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
101 enum hmcdrv_ftp_cmdid cmd)
102{
103 int rc;
104
105 asm volatile(
106 " diag %[addr],%[cmd],0x2c4\n"
107 "0: j 2f\n"
108 "1: la %[rc],%[err]\n"
109 "2:\n"
110 EX_TABLE(0b, 1b)
111 : [rc] "=d" (rc), "+m" (*fpl)
112 : [cmd] "0" (cmd), [addr] "d" (virt_to_phys(fpl)),
113 [err] "i" (DIAG_FTP_RET_EPERM)
114 : "cc");
115
116 switch (rc) {
117 case DIAG_FTP_RET_OK:
118 return 0;
119 case DIAG_FTP_RET_EBUSY:
120 return -EBUSY;
121 case DIAG_FTP_RET_EPERM:
122 return -EPERM;
123 case DIAG_FTP_RET_EIO:
124 default:
125 return -EIO;
126 }
127}
128
129/**
130 * diag_ftp_cmd() - executes a DIAG X'2C4' FTP command, targeting a HMC
131 * @ftp: pointer to FTP command specification
 132 * @fsize: return of file size (or NULL if not desired)
133 *
134 * Attention: Notice that this function is not reentrant - so the caller
135 * must ensure locking.
136 *
137 * Return: number of bytes read/written or a (negative) error code
138 */
139ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
140{
141 struct diag_ftp_ldfpl *ldfpl;
142 ssize_t len;
143#ifdef DEBUG
144 unsigned long start_jiffies;
145
146 pr_debug("starting DIAG X'2C4' on '%s', requesting %zd bytes\n",
147 ftp->fname, ftp->len);
148 start_jiffies = jiffies;
149#endif
150 init_completion(&diag_ftp_rx_complete);
151
152 ldfpl = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
153 if (!ldfpl) {
154 len = -ENOMEM;
155 goto out;
156 }
157
158 len = strlcpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
159 if (len >= HMCDRV_FTP_FIDENT_MAX) {
160 len = -EINVAL;
161 goto out_free;
162 }
163
164 ldfpl->transferred = 0;
165 ldfpl->fsize = 0;
166 ldfpl->offset = ftp->ofs;
167 ldfpl->buflen = ftp->len;
168 ldfpl->bufaddr = virt_to_phys(ftp->buf);
169
170 len = diag_ftp_2c4(ldfpl, ftp->id);
171 if (len)
172 goto out_free;
173
174 /*
175 * There is no way to cancel the running diag X'2C4', the code
176 * needs to wait unconditionally until the transfer is complete.
177 */
178 wait_for_completion(&diag_ftp_rx_complete);
179
180#ifdef DEBUG
181 pr_debug("completed DIAG X'2C4' after %lu ms\n",
182 (jiffies - start_jiffies) * 1000 / HZ);
183 pr_debug("status of DIAG X'2C4' is %u, with %lld/%lld bytes\n",
184 diag_ftp_subcode, ldfpl->transferred, ldfpl->fsize);
185#endif
186
187 switch (diag_ftp_subcode) {
188 case DIAG_FTP_STAT_OK: /* success */
189 len = ldfpl->transferred;
190 if (fsize)
191 *fsize = ldfpl->fsize;
192 break;
193 case DIAG_FTP_STAT_LDNPERM:
194 len = -EPERM;
195 break;
196 case DIAG_FTP_STAT_LDRUNS:
197 len = -EBUSY;
198 break;
199 case DIAG_FTP_STAT_LDFAIL:
200 len = -ENOENT; /* no such file or media */
201 break;
202 default:
203 len = -EIO;
204 break;
205 }
206
207out_free:
208 free_page((unsigned long) ldfpl);
209out:
210 return len;
211}
212
213/**
214 * diag_ftp_startup() - startup of FTP services, when running on z/VM
215 *
 216 * Return: 0 on success, else a (negative) error code
217 */
218int diag_ftp_startup(void)
219{
220 int rc;
221
222 rc = register_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
223 if (rc)
224 return rc;
225
226 ctl_set_bit(0, 63 - 22);
227 return 0;
228}
229
230/**
231 * diag_ftp_shutdown() - shutdown of FTP services, when running on z/VM
232 */
233void diag_ftp_shutdown(void)
234{
235 ctl_clear_bit(0, 63 - 22);
236 unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
237}
diff --git a/drivers/s390/char/diag_ftp.h b/drivers/s390/char/diag_ftp.h
new file mode 100644
index 000000000000..3abd2614053a
--- /dev/null
+++ b/drivers/s390/char/diag_ftp.h
@@ -0,0 +1,21 @@
1/*
 2 * DIAGNOSE X'2C4' instruction based SE/HMC FTP Services, usable on z/VM
3 *
4 * Notice that all functions exported here are not reentrant.
5 * So usage should be exclusive, ensured by the caller (e.g. using a
6 * mutex).
7 *
8 * Copyright IBM Corp. 2013
9 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
10 */
11
12#ifndef __DIAG_FTP_H__
13#define __DIAG_FTP_H__
14
15#include "hmcdrv_ftp.h"
16
17int diag_ftp_startup(void);
18void diag_ftp_shutdown(void);
19ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
20
21#endif /* __DIAG_FTP_H__ */
diff --git a/drivers/s390/char/hmcdrv_cache.c b/drivers/s390/char/hmcdrv_cache.c
new file mode 100644
index 000000000000..4cda5ada143a
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_cache.c
@@ -0,0 +1,252 @@
1/*
2 * SE/HMC Drive (Read) Cache Functions
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 *
7 */
8
9#define KMSG_COMPONENT "hmcdrv"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <linux/jiffies.h>
15
16#include "hmcdrv_ftp.h"
17#include "hmcdrv_cache.h"
18
19#define HMCDRV_CACHE_TIMEOUT 30 /* aging timeout in seconds */
20
21/**
22 * struct hmcdrv_cache_entry - file cache (only used on read/dir)
23 * @id: FTP command ID
24 * @content: kernel-space buffer, 4k aligned
25 * @len: size of @content cache (0 if caching disabled)
26 * @ofs: start of content within file (-1 if no cached content)
27 * @fname: file name
28 * @fsize: file size
29 * @timeout: cache timeout in jiffies
30 *
31 * Notice that the first three members (id, fname, fsize) are cached on all
32 * read/dir requests. But content is cached only under some preconditions.
33 * Uncached content is signalled by a negative value of @ofs.
34 */
35struct hmcdrv_cache_entry {
36 enum hmcdrv_ftp_cmdid id;
37 char fname[HMCDRV_FTP_FIDENT_MAX];
38 size_t fsize;
39 loff_t ofs;
40 unsigned long timeout;
41 void *content;
42 size_t len;
43};
44
45static int hmcdrv_cache_order; /* cache allocated page order */
46
47static struct hmcdrv_cache_entry hmcdrv_cache_file = {
48 .fsize = SIZE_MAX,
49 .ofs = -1,
50 .len = 0,
51 .fname = {'\0'}
52};
53
54/**
55 * hmcdrv_cache_get() - looks for file data/content in read cache
56 * @ftp: pointer to FTP command specification
57 *
58 * Return: number of bytes read from cache or a negative number if nothing
59 * in content cache (for the file/cmd specified in @ftp)
60 */
61static ssize_t hmcdrv_cache_get(const struct hmcdrv_ftp_cmdspec *ftp)
62{
63 loff_t pos; /* position in cache (signed) */
64 ssize_t len;
65
66 if ((ftp->id != hmcdrv_cache_file.id) ||
67 strcmp(hmcdrv_cache_file.fname, ftp->fname))
68 return -1;
69
70 if (ftp->ofs >= hmcdrv_cache_file.fsize) /* EOF ? */
71 return 0;
72
73 if ((hmcdrv_cache_file.ofs < 0) || /* has content? */
74 time_after(jiffies, hmcdrv_cache_file.timeout))
75 return -1;
76
77 /* there seems to be cached content - calculate the maximum number
78 * of bytes that can be returned (regarding file size and offset)
79 */
80 len = hmcdrv_cache_file.fsize - ftp->ofs;
81
82 if (len > ftp->len)
83 len = ftp->len;
84
85 /* check if the requested chunk falls into our cache (which starts
86 * at offset 'hmcdrv_cache_file.ofs' in the file of interest)
87 */
88 pos = ftp->ofs - hmcdrv_cache_file.ofs;
89
90 if ((pos >= 0) &&
91 ((pos + len) <= hmcdrv_cache_file.len)) {
92
93 memcpy(ftp->buf,
94 hmcdrv_cache_file.content + pos,
95 len);
96 pr_debug("using cached content of '%s', returning %zd/%zd bytes\n",
97 hmcdrv_cache_file.fname, len,
98 hmcdrv_cache_file.fsize);
99
100 return len;
101 }
102
103 return -1;
104}
105
106/**
107 * hmcdrv_cache_do() - do a HMC drive CD/DVD transfer with cache update
108 * @ftp: pointer to FTP command specification
109 * @func: FTP transfer function to be used
110 *
111 * Return: number of bytes read/written or a (negative) error code
112 */
113static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
114 hmcdrv_cache_ftpfunc func)
115{
116 ssize_t len;
117
118 /* only cache content if the read/dir cache really exists
119 * (hmcdrv_cache_file.len > 0), is large enough to handle the
120 * request (hmcdrv_cache_file.len >= ftp->len) and there is a need
121 * to do so (ftp->len > 0)
122 */
123 if ((ftp->len > 0) && (hmcdrv_cache_file.len >= ftp->len)) {
124
125 /* because the cache is not located at ftp->buf, we have to
126 * assemble a new HMC drive FTP cmd specification (pointing
127 * to our cache, and using the increased size)
128 */
129 struct hmcdrv_ftp_cmdspec cftp = *ftp; /* make a copy */
130 cftp.buf = hmcdrv_cache_file.content; /* and update */
131 cftp.len = hmcdrv_cache_file.len; /* buffer data */
132
133 len = func(&cftp, &hmcdrv_cache_file.fsize); /* now do */
134
135 if (len > 0) {
136 pr_debug("caching %zd bytes content for '%s'\n",
137 len, ftp->fname);
138
139 if (len > ftp->len)
140 len = ftp->len;
141
142 hmcdrv_cache_file.ofs = ftp->ofs;
143 hmcdrv_cache_file.timeout = jiffies +
144 HMCDRV_CACHE_TIMEOUT * HZ;
145 memcpy(ftp->buf, hmcdrv_cache_file.content, len);
146 }
147 } else {
148 len = func(ftp, &hmcdrv_cache_file.fsize);
149 hmcdrv_cache_file.ofs = -1; /* invalidate content */
150 }
151
152 if (len > 0) {
153 /* cache some file info (FTP command, file name and file
154 * size) unconditionally
155 */
156 strlcpy(hmcdrv_cache_file.fname, ftp->fname,
157 HMCDRV_FTP_FIDENT_MAX);
158 hmcdrv_cache_file.id = ftp->id;
159 pr_debug("caching cmd %d, file size %zu for '%s'\n",
160 ftp->id, hmcdrv_cache_file.fsize, ftp->fname);
161 }
162
163 return len;
164}
165
166/**
167 * hmcdrv_cache_cmd() - perform a cached HMC drive CD/DVD transfer
168 * @ftp: pointer to FTP command specification
169 * @func: FTP transfer function to be used
170 *
171 * Attention: Notice that this function is not reentrant - so the caller
172 * must ensure exclusive execution.
173 *
174 * Return: number of bytes read/written or a (negative) error code
175 */
176ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
177 hmcdrv_cache_ftpfunc func)
178{
179 ssize_t len;
180
181 if ((ftp->id == HMCDRV_FTP_DIR) || /* read cache */
182 (ftp->id == HMCDRV_FTP_NLIST) ||
183 (ftp->id == HMCDRV_FTP_GET)) {
184
185 len = hmcdrv_cache_get(ftp);
186
187 if (len >= 0) /* got it from cache ? */
188 return len; /* yes */
189
190 len = hmcdrv_cache_do(ftp, func);
191
192 if (len >= 0)
193 return len;
194
195 } else {
196 len = func(ftp, NULL); /* simply do original command */
197 }
198
199 /* invalidate the (read) cache in case there was a write operation
200 * or an error on read/dir
201 */
202 hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
203 hmcdrv_cache_file.fsize = LLONG_MAX;
204 hmcdrv_cache_file.ofs = -1;
205
206 return len;
207}
208
209/**
210 * hmcdrv_cache_startup() - startup of HMC drive cache
211 * @cachesize: cache size
212 *
213 * Return: 0 on success, else a (negative) error code
214 */
215int hmcdrv_cache_startup(size_t cachesize)
216{
217 if (cachesize > 0) { /* perform caching ? */
218 hmcdrv_cache_order = get_order(cachesize);
219 hmcdrv_cache_file.content =
220 (void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
221 hmcdrv_cache_order);
222
223 if (!hmcdrv_cache_file.content) {
224 pr_err("Allocating the requested cache size of %zu bytes failed\n",
225 cachesize);
226 return -ENOMEM;
227 }
228
229 pr_debug("content cache enabled, size is %zu bytes\n",
230 cachesize);
231 }
232
233 hmcdrv_cache_file.len = cachesize;
234 return 0;
235}
236
237/**
238 * hmcdrv_cache_shutdown() - shutdown of HMC drive cache
239 */
240void hmcdrv_cache_shutdown(void)
241{
242 if (hmcdrv_cache_file.content) {
243 free_pages((unsigned long) hmcdrv_cache_file.content,
244 hmcdrv_cache_order);
245 hmcdrv_cache_file.content = NULL;
246 }
247
248 hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
249 hmcdrv_cache_file.fsize = LLONG_MAX;
250 hmcdrv_cache_file.ofs = -1;
251 hmcdrv_cache_file.len = 0; /* no cache */
252}
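
Because hmcdrv_cache_startup() backs the cache with __get_free_pages(), the allocation is rounded up to a power-of-two number of pages, while hmcdrv_cache_file.len keeps the byte count that was requested. A short worked example, assuming 4 KiB pages:

    requested cachesize = 512 KiB -> get_order() yields 7 -> 128 pages (512 KiB) allocated
    requested cachesize = 300 KiB -> get_order() also yields 7 -> 128 pages allocated,
                                     but hmcdrv_cache_file.len stays at 300 KiB
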
diff --git a/drivers/s390/char/hmcdrv_cache.h b/drivers/s390/char/hmcdrv_cache.h
new file mode 100644
index 000000000000..a14b57526781
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_cache.h
@@ -0,0 +1,24 @@
1/*
2 * SE/HMC Drive (Read) Cache Functions
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 */
7
8#ifndef __HMCDRV_CACHE_H__
9#define __HMCDRV_CACHE_H__
10
11#include <linux/mmzone.h>
12#include "hmcdrv_ftp.h"
13
14#define HMCDRV_CACHE_SIZE_DFLT (MAX_ORDER_NR_PAGES * PAGE_SIZE / 2UL)
15
16typedef ssize_t (*hmcdrv_cache_ftpfunc)(const struct hmcdrv_ftp_cmdspec *ftp,
17 size_t *fsize);
18
19ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
20 hmcdrv_cache_ftpfunc func);
21int hmcdrv_cache_startup(size_t cachesize);
22void hmcdrv_cache_shutdown(void);
23
24#endif /* __HMCDRV_CACHE_H__ */
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
new file mode 100644
index 000000000000..0c5176179c17
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -0,0 +1,370 @@
1/*
2 * HMC Drive CD/DVD Device
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 *
7 * This file provides a Linux "misc" character device for access to an
8 * assigned HMC drive CD/DVD-ROM. It works as follows: First create the
9 * device by calling hmcdrv_dev_init(). After open() a lseek(fd, 0,
10 * SEEK_END) indicates that a new FTP command follows (not needed on the
11 * first command after open). Then write() the FTP command ASCII string
12 * to it, e.g. "dir /" or "nls <directory>" or "get <filename>". At the
13 * end read() the response.
14 */
15
16#define KMSG_COMPONENT "hmcdrv"
17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/fs.h>
23#include <linux/cdev.h>
24#include <linux/miscdevice.h>
25#include <linux/device.h>
26#include <linux/capability.h>
27#include <linux/delay.h>
28#include <linux/uaccess.h>
29
30#include "hmcdrv_dev.h"
31#include "hmcdrv_ftp.h"
32
 33/* If the following macro is defined, then the HMC device creates its own
 34 * separate device class (and dynamically assigns a major number). If not
35 * defined then the HMC device is assigned to the "misc" class devices.
36 *
37#define HMCDRV_DEV_CLASS "hmcftp"
38 */
39
40#define HMCDRV_DEV_NAME "hmcdrv"
41#define HMCDRV_DEV_BUSY_DELAY 500 /* delay between -EBUSY trials in ms */
42#define HMCDRV_DEV_BUSY_RETRIES 3 /* number of retries on -EBUSY */
43
44struct hmcdrv_dev_node {
45
46#ifdef HMCDRV_DEV_CLASS
47 struct cdev dev; /* character device structure */
48 umode_t mode; /* mode of device node (unused, zero) */
49#else
50 struct miscdevice dev; /* "misc" device structure */
51#endif
52
53};
54
55static int hmcdrv_dev_open(struct inode *inode, struct file *fp);
56static int hmcdrv_dev_release(struct inode *inode, struct file *fp);
57static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence);
58static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
59 size_t len, loff_t *pos);
60static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
61 size_t len, loff_t *pos);
62static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
63 char __user *buf, size_t len);
64
65/*
66 * device operations
67 */
68static const struct file_operations hmcdrv_dev_fops = {
69 .open = hmcdrv_dev_open,
70 .llseek = hmcdrv_dev_seek,
71 .release = hmcdrv_dev_release,
72 .read = hmcdrv_dev_read,
73 .write = hmcdrv_dev_write,
74};
75
76static struct hmcdrv_dev_node hmcdrv_dev; /* HMC device struct (static) */
77
78#ifdef HMCDRV_DEV_CLASS
79
80static struct class *hmcdrv_dev_class; /* device class pointer */
81static dev_t hmcdrv_dev_no; /* device number (major/minor) */
82
83/**
84 * hmcdrv_dev_name() - provides a naming hint for a device node in /dev
85 * @dev: device for which the naming/mode hint is
86 * @mode: file mode for device node created in /dev
87 *
88 * See: devtmpfs.c, function devtmpfs_create_node()
89 *
90 * Return: recommended device file name in /dev
91 */
92static char *hmcdrv_dev_name(struct device *dev, umode_t *mode)
93{
94 char *nodename = NULL;
95 const char *devname = dev_name(dev); /* kernel device name */
96
97 if (devname)
98 nodename = kasprintf(GFP_KERNEL, "%s", devname);
99
100 /* on device destroy (rmmod) the mode pointer may be NULL
101 */
102 if (mode)
103 *mode = hmcdrv_dev.mode;
104
105 return nodename;
106}
107
108#endif /* HMCDRV_DEV_CLASS */
109
110/*
111 * open()
112 */
113static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
114{
115 int rc;
116
117 /* check for non-blocking access, which is really unsupported
118 */
119 if (fp->f_flags & O_NONBLOCK)
120 return -EINVAL;
121
122 /* Because it makes no sense to open this device read-only (then a
123 * FTP command cannot be emitted), we respond with an error.
124 */
125 if ((fp->f_flags & O_ACCMODE) == O_RDONLY)
126 return -EINVAL;
127
128 /* prevent unloading this module as long as anyone holds the
129 * device file open - so increment the reference count here
130 */
131 if (!try_module_get(THIS_MODULE))
132 return -ENODEV;
133
134 fp->private_data = NULL; /* no command yet */
135 rc = hmcdrv_ftp_startup();
136 if (rc)
137 module_put(THIS_MODULE);
138
139 pr_debug("open file '/dev/%s' with return code %d\n",
140 fp->f_dentry->d_name.name, rc);
141 return rc;
142}
143
144/*
145 * release()
146 */
147static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
148{
149 pr_debug("closing file '/dev/%s'\n", fp->f_dentry->d_name.name);
150 kfree(fp->private_data);
151 fp->private_data = NULL;
152 hmcdrv_ftp_shutdown();
153 module_put(THIS_MODULE);
154 return 0;
155}
156
157/*
158 * lseek()
159 */
160static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence)
161{
162 switch (whence) {
163 case SEEK_CUR: /* relative to current file position */
164 pos += fp->f_pos; /* new position stored in 'pos' */
165 break;
166
167 case SEEK_SET: /* absolute (relative to beginning of file) */
168 break; /* SEEK_SET */
169
170 /* We use SEEK_END as a special indicator for a SEEK_SET
171 * (set absolute position), combined with a FTP command
172 * clear.
173 */
174 case SEEK_END:
175 if (fp->private_data) {
176 kfree(fp->private_data);
177 fp->private_data = NULL;
178 }
179
180 break; /* SEEK_END */
181
182 default: /* SEEK_DATA, SEEK_HOLE: unsupported */
183 return -EINVAL;
184 }
185
186 if (pos < 0)
187 return -EINVAL;
188
189 if (fp->f_pos != pos)
190 ++fp->f_version;
191
192 fp->f_pos = pos;
193 return pos;
194}
195
196/*
197 * transfer (helper function)
198 */
199static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
200 char __user *buf, size_t len)
201{
202 ssize_t retlen;
203 unsigned trials = HMCDRV_DEV_BUSY_RETRIES;
204
205 do {
206 retlen = hmcdrv_ftp_cmd(cmd, offset, buf, len);
207
208 if (retlen != -EBUSY)
209 break;
210
211 msleep(HMCDRV_DEV_BUSY_DELAY);
212
213 } while (--trials > 0);
214
215 return retlen;
216}
217
218/*
219 * read()
220 */
221static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
222 size_t len, loff_t *pos)
223{
224 ssize_t retlen;
225
226 if (((fp->f_flags & O_ACCMODE) == O_WRONLY) ||
227 (fp->private_data == NULL)) { /* no FTP cmd defined ? */
228 return -EBADF;
229 }
230
231 retlen = hmcdrv_dev_transfer((char *) fp->private_data,
232 *pos, ubuf, len);
233
234 pr_debug("read from file '/dev/%s' at %lld returns %zd/%zu\n",
235 fp->f_dentry->d_name.name, (long long) *pos, retlen, len);
236
237 if (retlen > 0)
238 *pos += retlen;
239
240 return retlen;
241}
242
243/*
244 * write()
245 */
246static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
247 size_t len, loff_t *pos)
248{
249 ssize_t retlen;
250
251 pr_debug("writing file '/dev/%s' at pos. %lld with length %zd\n",
252 fp->f_dentry->d_name.name, (long long) *pos, len);
253
254 if (!fp->private_data) { /* first expect a cmd write */
255 fp->private_data = kmalloc(len + 1, GFP_KERNEL);
256
257 if (!fp->private_data)
258 return -ENOMEM;
259
260 if (!copy_from_user(fp->private_data, ubuf, len)) {
261 ((char *)fp->private_data)[len] = '\0';
262 return len;
263 }
264
265 kfree(fp->private_data);
266 fp->private_data = NULL;
267 return -EFAULT;
268 }
269
270 retlen = hmcdrv_dev_transfer((char *) fp->private_data,
271 *pos, (char __user *) ubuf, len);
272 if (retlen > 0)
273 *pos += retlen;
274
275 pr_debug("write to file '/dev/%s' returned %zd\n",
276 fp->f_dentry->d_name.name, retlen);
277
278 return retlen;
279}
280
281/**
282 * hmcdrv_dev_init() - creates a HMC drive CD/DVD device
283 *
284 * This function creates a HMC drive CD/DVD kernel device and an associated
285 * device under /dev, using a dynamically allocated major number.
286 *
287 * Return: 0 on success, else an error code.
288 */
289int hmcdrv_dev_init(void)
290{
291 int rc;
292
293#ifdef HMCDRV_DEV_CLASS
294 struct device *dev;
295
296 rc = alloc_chrdev_region(&hmcdrv_dev_no, 0, 1, HMCDRV_DEV_NAME);
297
298 if (rc)
299 goto out_err;
300
301 cdev_init(&hmcdrv_dev.dev, &hmcdrv_dev_fops);
302 hmcdrv_dev.dev.owner = THIS_MODULE;
303 rc = cdev_add(&hmcdrv_dev.dev, hmcdrv_dev_no, 1);
304
305 if (rc)
306 goto out_unreg;
307
308 /* At this point the character device exists in the kernel (see
309 * /proc/devices), but not under /dev nor /sys/devices/virtual. So
310 * we have to create an associated class (see /sys/class).
311 */
312 hmcdrv_dev_class = class_create(THIS_MODULE, HMCDRV_DEV_CLASS);
313
314 if (IS_ERR(hmcdrv_dev_class)) {
315 rc = PTR_ERR(hmcdrv_dev_class);
316 goto out_devdel;
317 }
318
 319 /* Finally a device node in /dev has to be established (as 'mknod'
320 * does from the command line). Notice that assignment of a device
321 * node name/mode function is optional (only for mode != 0600).
322 */
323 hmcdrv_dev.mode = 0; /* "unset" */
324 hmcdrv_dev_class->devnode = hmcdrv_dev_name;
325
326 dev = device_create(hmcdrv_dev_class, NULL, hmcdrv_dev_no, NULL,
327 "%s", HMCDRV_DEV_NAME);
328 if (!IS_ERR(dev))
329 return 0;
330
331 rc = PTR_ERR(dev);
332 class_destroy(hmcdrv_dev_class);
333 hmcdrv_dev_class = NULL;
334
335out_devdel:
336 cdev_del(&hmcdrv_dev.dev);
337
338out_unreg:
339 unregister_chrdev_region(hmcdrv_dev_no, 1);
340
341out_err:
342
343#else /* !HMCDRV_DEV_CLASS */
344 hmcdrv_dev.dev.minor = MISC_DYNAMIC_MINOR;
345 hmcdrv_dev.dev.name = HMCDRV_DEV_NAME;
346 hmcdrv_dev.dev.fops = &hmcdrv_dev_fops;
347 hmcdrv_dev.dev.mode = 0; /* finally produces 0600 */
348 rc = misc_register(&hmcdrv_dev.dev);
349#endif /* HMCDRV_DEV_CLASS */
350
351 return rc;
352}
353
354/**
355 * hmcdrv_dev_exit() - destroys a HMC drive CD/DVD device
356 */
357void hmcdrv_dev_exit(void)
358{
359#ifdef HMCDRV_DEV_CLASS
360 if (!IS_ERR_OR_NULL(hmcdrv_dev_class)) {
361 device_destroy(hmcdrv_dev_class, hmcdrv_dev_no);
362 class_destroy(hmcdrv_dev_class);
363 }
364
365 cdev_del(&hmcdrv_dev.dev);
366 unregister_chrdev_region(hmcdrv_dev_no, 1);
367#else /* !HMCDRV_DEV_CLASS */
368 misc_deregister(&hmcdrv_dev.dev);
369#endif /* HMCDRV_DEV_CLASS */
370}
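
Following the usage described in the comment at the top of this file, a minimal user-space sketch that lists the root directory of the HMC drive CD/DVD; the device node name matches HMCDRV_DEV_NAME, and error handling is kept to a bare minimum:

    /* sketch: list the root directory of the HMC drive CD/DVD via /dev/hmcdrv */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/dev/hmcdrv", O_RDWR);   /* read-only open is rejected */

            if (fd < 0)
                    return 1;
            /* first command after open(): just write the FTP command string ... */
            if (write(fd, "dir /", 5) != 5)
                    return 1;
            /* ... then read the response */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);
            /* a further command would be preceded by lseek(fd, 0, SEEK_END) */
            close(fd);
            return 0;
    }
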
diff --git a/drivers/s390/char/hmcdrv_dev.h b/drivers/s390/char/hmcdrv_dev.h
new file mode 100644
index 000000000000..cb17f07e02de
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_dev.h
@@ -0,0 +1,14 @@
1/*
2 * SE/HMC Drive FTP Device
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 */
7
8#ifndef __HMCDRV_DEV_H__
9#define __HMCDRV_DEV_H__
10
11int hmcdrv_dev_init(void);
12void hmcdrv_dev_exit(void);
13
14#endif /* __HMCDRV_DEV_H__ */
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
new file mode 100644
index 000000000000..4bd63322fc29
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -0,0 +1,343 @@
1/*
2 * HMC Drive FTP Services
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 */
7
8#define KMSG_COMPONENT "hmcdrv"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/uaccess.h>
14#include <linux/export.h>
15
16#include <linux/ctype.h>
17#include <linux/crc16.h>
18
19#include "hmcdrv_ftp.h"
20#include "hmcdrv_cache.h"
21#include "sclp_ftp.h"
22#include "diag_ftp.h"
23
24/**
25 * struct hmcdrv_ftp_ops - HMC drive FTP operations
26 * @startup: startup function
27 * @shutdown: shutdown function
 28 * @transfer: FTP transfer function
29 */
30struct hmcdrv_ftp_ops {
31 int (*startup)(void);
32 void (*shutdown)(void);
33 ssize_t (*transfer)(const struct hmcdrv_ftp_cmdspec *ftp,
34 size_t *fsize);
35};
36
37static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
38static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);
39
40static struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
41static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
42static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
43
44/**
45 * hmcdrv_ftp_cmd_getid() - determine FTP command ID from a command string
46 * @cmd: FTP command string (NOT zero-terminated)
47 * @len: length of FTP command string in @cmd
48 */
49static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len)
50{
51 /* HMC FTP command descriptor */
52 struct hmcdrv_ftp_cmd_desc {
53 const char *str; /* command string */
54 enum hmcdrv_ftp_cmdid cmd; /* associated command as enum */
55 };
56
57 /* Description of all HMC drive FTP commands
58 *
59 * Notes:
60 * 1. Array size should be a prime number.
61 * 2. Do not change the order of commands in table (because the
62 * index is determined by CRC % ARRAY_SIZE).
63 * 3. Original command 'nlist' was renamed, else the CRC would
64 * collide with 'append' (see point 2).
65 */
66 static const struct hmcdrv_ftp_cmd_desc ftpcmds[7] = {
67
68 {.str = "get", /* [0] get (CRC = 0x68eb) */
69 .cmd = HMCDRV_FTP_GET},
70 {.str = "dir", /* [1] dir (CRC = 0x6a9e) */
71 .cmd = HMCDRV_FTP_DIR},
72 {.str = "delete", /* [2] delete (CRC = 0x53ae) */
73 .cmd = HMCDRV_FTP_DELETE},
74 {.str = "nls", /* [3] nls (CRC = 0xf87c) */
75 .cmd = HMCDRV_FTP_NLIST},
76 {.str = "put", /* [4] put (CRC = 0xac56) */
77 .cmd = HMCDRV_FTP_PUT},
78 {.str = "append", /* [5] append (CRC = 0xf56e) */
79 .cmd = HMCDRV_FTP_APPEND},
80 {.str = NULL} /* [6] unused */
81 };
82
83 const struct hmcdrv_ftp_cmd_desc *pdesc;
84
85 u16 crc = 0xffffU;
86
87 if (len == 0)
 88 return HMCDRV_FTP_NOOP; /* error indicator */
89
90 crc = crc16(crc, cmd, len);
91 pdesc = ftpcmds + (crc % ARRAY_SIZE(ftpcmds));
92 pr_debug("FTP command '%s' has CRC 0x%04x, at table pos. %lu\n",
93 cmd, crc, (crc % ARRAY_SIZE(ftpcmds)));
94
95 if (!pdesc->str || strncmp(pdesc->str, cmd, len))
96 return HMCDRV_FTP_NOOP;
97
98 pr_debug("FTP command '%s' found, with ID %d\n",
99 pdesc->str, pdesc->cmd);
100
101 return pdesc->cmd;
102}
103
104/**
105 * hmcdrv_ftp_parse() - HMC drive FTP command parser
106 * @cmd: FTP command string "<cmd> <filename>"
107 * @ftp: Pointer to FTP command specification buffer (output)
108 *
109 * Return: 0 on success, else a (negative) error code
110 */
111static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
112{
113 char *start;
114 int argc = 0;
115
116 ftp->id = HMCDRV_FTP_NOOP;
117 ftp->fname = NULL;
118
119 while (*cmd != '\0') {
120
121 while (isspace(*cmd))
122 ++cmd;
123
124 if (*cmd == '\0')
125 break;
126
127 start = cmd;
128
129 switch (argc) {
130 case 0: /* 1st argument (FTP command) */
131 while ((*cmd != '\0') && !isspace(*cmd))
132 ++cmd;
133 ftp->id = hmcdrv_ftp_cmd_getid(start, cmd - start);
134 break;
135 case 1: /* 2nd / last argument (rest of line) */
136 while ((*cmd != '\0') && !iscntrl(*cmd))
137 ++cmd;
138 ftp->fname = start;
139 /* fall through */
140 default:
141 *cmd = '\0';
142 break;
143 } /* switch */
144
145 ++argc;
146 } /* while */
147
148 if (!ftp->fname || (ftp->id == HMCDRV_FTP_NOOP))
149 return -EINVAL;
150
151 return 0;
152}
153
154/**
155 * hmcdrv_ftp_do() - perform a HMC drive FTP, with data from kernel-space
156 * @ftp: pointer to FTP command specification
157 *
158 * Return: number of bytes read/written or a negative error code
159 */
160ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp)
161{
162 ssize_t len;
163
164 mutex_lock(&hmcdrv_ftp_mutex);
165
166 if (hmcdrv_ftp_funcs && hmcdrv_ftp_refcnt) {
167 pr_debug("starting transfer, cmd %d for '%s' at %lld with %zd bytes\n",
168 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
169 len = hmcdrv_cache_cmd(ftp, hmcdrv_ftp_funcs->transfer);
170 } else {
171 len = -ENXIO;
172 }
173
174 mutex_unlock(&hmcdrv_ftp_mutex);
175 return len;
176}
177EXPORT_SYMBOL(hmcdrv_ftp_do);
178
179/**
180 * hmcdrv_ftp_probe() - probe for the HMC drive FTP service
181 *
 182 * Return: 0 if service is available, else a (negative) error code
183 */
184int hmcdrv_ftp_probe(void)
185{
186 int rc;
187
188 struct hmcdrv_ftp_cmdspec ftp = {
189 .id = HMCDRV_FTP_NOOP,
190 .ofs = 0,
191 .fname = "",
192 .len = PAGE_SIZE
193 };
194
195 ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
196
197 if (!ftp.buf)
198 return -ENOMEM;
199
200 rc = hmcdrv_ftp_startup();
201
202 if (rc)
203 return rc;
204
205 rc = hmcdrv_ftp_do(&ftp);
206 free_page((unsigned long) ftp.buf);
207 hmcdrv_ftp_shutdown();
208
209 switch (rc) {
210 case -ENOENT: /* no such file/media or currently busy, */
211 case -EBUSY: /* but service seems to be available */
212 rc = 0;
213 break;
214 default: /* leave 'rc' as it is for [0, -EPERM, -E...] */
215 if (rc > 0)
216 rc = 0; /* clear length (success) */
217 break;
218 } /* switch */
219
220 return rc;
221}
222EXPORT_SYMBOL(hmcdrv_ftp_probe);
223
224/**
225 * hmcdrv_ftp_cmd() - Perform a HMC drive FTP, with data from user-space
226 *
227 * @cmd: FTP command string "<cmd> <filename>"
228 * @offset: file position to read/write
229 * @buf: user-space buffer for read/written directory/file
230 * @len: size of @buf (read/dir) or number of bytes to write
231 *
232 * This function must not be called before hmcdrv_ftp_startup() was called.
233 *
234 * Return: number of bytes read/written or a negative error code
235 */
236ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
237 char __user *buf, size_t len)
238{
239 int order;
240
241 struct hmcdrv_ftp_cmdspec ftp = {.len = len, .ofs = offset};
242 ssize_t retlen = hmcdrv_ftp_parse(cmd, &ftp);
243
244 if (retlen)
245 return retlen;
246
247 order = get_order(ftp.len);
248 ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);
249
250 if (!ftp.buf)
251 return -ENOMEM;
252
253 switch (ftp.id) {
254 case HMCDRV_FTP_DIR:
255 case HMCDRV_FTP_NLIST:
256 case HMCDRV_FTP_GET:
257 retlen = hmcdrv_ftp_do(&ftp);
258
259 if ((retlen >= 0) &&
260 copy_to_user(buf, ftp.buf, retlen))
261 retlen = -EFAULT;
262 break;
263
264 case HMCDRV_FTP_PUT:
265 case HMCDRV_FTP_APPEND:
266 if (!copy_from_user(ftp.buf, buf, ftp.len))
267 retlen = hmcdrv_ftp_do(&ftp);
268 else
269 retlen = -EFAULT;
270 break;
271
272 case HMCDRV_FTP_DELETE:
273 retlen = hmcdrv_ftp_do(&ftp);
274 break;
275
276 default:
277 retlen = -EOPNOTSUPP;
278 break;
279 }
280
281 free_pages((unsigned long) ftp.buf, order);
282 return retlen;
283}
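
As a usage sketch for the user-space entry point (not part of the patch): a read-side caller only has to build a writable command line and pass the user buffer through. The character-device code in hmcdrv_dev.c is not shown in this excerpt, so example_read() and the path below are hypothetical, and hmcdrv_ftp_startup() is assumed to have succeeded beforehand.

static ssize_t example_read(char __user *ubuf, loff_t pos, size_t count)
{
	char cmdline[] = "get /media/README";	/* parsed and modified in place */

	return hmcdrv_ftp_cmd(cmdline, pos, ubuf, count);
}
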
284
285/**
286 * hmcdrv_ftp_startup() - startup of HMC drive FTP functionality for a
287 * dedicated (owner) instance
288 *
289 * Return: 0 on success, else a (negative) error code
290 */
291int hmcdrv_ftp_startup(void)
292{
293 static struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
294 .startup = diag_ftp_startup,
295 .shutdown = diag_ftp_shutdown,
296 .transfer = diag_ftp_cmd
297 };
298
299 static struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
300 .startup = sclp_ftp_startup,
301 .shutdown = sclp_ftp_shutdown,
302 .transfer = sclp_ftp_cmd
303 };
304
305 int rc = 0;
306
307 mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */
308
309 if (hmcdrv_ftp_refcnt == 0) {
310 if (MACHINE_IS_VM)
311 hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
312 else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
313 hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
314 else
315 rc = -EOPNOTSUPP;
316
317 if (hmcdrv_ftp_funcs)
318 rc = hmcdrv_ftp_funcs->startup();
319 }
320
321 if (!rc)
322 ++hmcdrv_ftp_refcnt;
323
324 mutex_unlock(&hmcdrv_ftp_mutex);
325 return rc;
326}
327EXPORT_SYMBOL(hmcdrv_ftp_startup);
328
329/**
330 * hmcdrv_ftp_shutdown() - shutdown of HMC drive FTP functionality for a
331 * dedicated (owner) instance
332 */
333void hmcdrv_ftp_shutdown(void)
334{
335 mutex_lock(&hmcdrv_ftp_mutex);
336 --hmcdrv_ftp_refcnt;
337
338 if ((hmcdrv_ftp_refcnt == 0) && hmcdrv_ftp_funcs)
339 hmcdrv_ftp_funcs->shutdown();
340
341 mutex_unlock(&hmcdrv_ftp_mutex);
342}
343EXPORT_SYMBOL(hmcdrv_ftp_shutdown);
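
For completeness, a sketch of the kernel-space calling sequence that the refcounted startup/shutdown pair is built for (illustrative only): take a reference, run transfers through hmcdrv_ftp_do(), then drop the reference. example_fetch(), the file name and the buffer handling are assumptions; as in hmcdrv_ftp_probe() above, the buffer is expected to be a 4k-aligned kernel page from the DMA zone.

static ssize_t example_fetch(void *page, loff_t ofs)
{
	struct hmcdrv_ftp_cmdspec ftp = {
		.id    = HMCDRV_FTP_GET,
		.ofs   = ofs,
		.fname = "/media/README",	/* hypothetical file */
		.buf   = page,			/* kernel buffer, 4k aligned */
		.len   = PAGE_SIZE,
	};
	ssize_t len;
	int rc = hmcdrv_ftp_startup();		/* take a service reference */

	if (rc)
		return rc;

	len = hmcdrv_ftp_do(&ftp);		/* bytes read or negative errno */
	hmcdrv_ftp_shutdown();			/* drop the reference again */
	return len;
}
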
diff --git a/drivers/s390/char/hmcdrv_ftp.h b/drivers/s390/char/hmcdrv_ftp.h
new file mode 100644
index 000000000000..f3643a7b3676
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_ftp.h
@@ -0,0 +1,63 @@
1/*
2 * SE/HMC Drive FTP Services
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 */
7
8#ifndef __HMCDRV_FTP_H__
9#define __HMCDRV_FTP_H__
10
11#include <linux/types.h> /* size_t, loff_t */
12
13/*
14 * HMC drive FTP Service maximum path length (including the terminating NUL)
15 */
16#define HMCDRV_FTP_FIDENT_MAX 192
17
18/**
19 * enum hmcdrv_ftp_cmdid - HMC drive FTP commands
20 * @HMCDRV_FTP_NOOP: do nothing (only for probing)
21 * @HMCDRV_FTP_GET: read a file
22 * @HMCDRV_FTP_PUT: (over-) write a file
23 * @HMCDRV_FTP_APPEND: append to a file
24 * @HMCDRV_FTP_DIR: list directory long (ls -l)
25 * @HMCDRV_FTP_NLIST: list files, no directories (name list)
26 * @HMCDRV_FTP_DELETE: delete a file
27 * @HMCDRV_FTP_CANCEL: cancel operation (SCLP/LPAR only)
28 */
29enum hmcdrv_ftp_cmdid {
30 HMCDRV_FTP_NOOP = 0,
31 HMCDRV_FTP_GET = 1,
32 HMCDRV_FTP_PUT = 2,
33 HMCDRV_FTP_APPEND = 3,
34 HMCDRV_FTP_DIR = 4,
35 HMCDRV_FTP_NLIST = 5,
36 HMCDRV_FTP_DELETE = 6,
37 HMCDRV_FTP_CANCEL = 7
38};
39
40/**
41 * struct hmcdrv_ftp_cmdspec - FTP command specification
42 * @id: FTP command ID
43 * @ofs: offset in file
44 * @fname: filename (ASCII), null-terminated
45 * @buf: kernel-space transfer data buffer, 4k aligned
46 * @len: (max) number of bytes to transfer from/to @buf
47 */
48struct hmcdrv_ftp_cmdspec {
49 enum hmcdrv_ftp_cmdid id;
50 loff_t ofs;
51 const char *fname;
52 void __kernel *buf;
53 size_t len;
54};
55
56int hmcdrv_ftp_startup(void);
57void hmcdrv_ftp_shutdown(void);
58int hmcdrv_ftp_probe(void);
59ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp);
60ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
61 char __user *buf, size_t len);
62
63#endif /* __HMCDRV_FTP_H__ */
diff --git a/drivers/s390/char/hmcdrv_mod.c b/drivers/s390/char/hmcdrv_mod.c
new file mode 100644
index 000000000000..505c6a78ee1a
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_mod.c
@@ -0,0 +1,64 @@
1/*
2 * HMC Drive DVD Module
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 */
7
8#define KMSG_COMPONENT "hmcdrv"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/version.h>
15#include <linux/stat.h>
16
17#include "hmcdrv_ftp.h"
18#include "hmcdrv_dev.h"
19#include "hmcdrv_cache.h"
20
21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Copyright 2013 IBM Corporation");
23MODULE_DESCRIPTION("HMC drive DVD access");
24
25/*
26 * module parameter 'cachesize'
27 */
28static size_t hmcdrv_mod_cachesize = HMCDRV_CACHE_SIZE_DFLT;
29module_param_named(cachesize, hmcdrv_mod_cachesize, ulong, S_IRUGO);
30
31/**
32 * hmcdrv_mod_init() - module init function
33 */
34static int __init hmcdrv_mod_init(void)
35{
36 int rc = hmcdrv_ftp_probe(); /* perform w/o cache */
37
38 if (rc)
39 return rc;
40
41 rc = hmcdrv_cache_startup(hmcdrv_mod_cachesize);
42
43 if (rc)
44 return rc;
45
46 rc = hmcdrv_dev_init();
47
48 if (rc)
49 hmcdrv_cache_shutdown();
50
51 return rc;
52}
53
54/**
55 * hmcdrv_mod_exit() - module exit function
56 */
57static void __exit hmcdrv_mod_exit(void)
58{
59 hmcdrv_dev_exit();
60 hmcdrv_cache_shutdown();
61}
62
63module_init(hmcdrv_mod_init);
64module_exit(hmcdrv_mod_exit);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index a68b5ec7d042..a88069f8c677 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -19,6 +19,7 @@
19 19
20#define EVTYP_OPCMD 0x01 20#define EVTYP_OPCMD 0x01
21#define EVTYP_MSG 0x02 21#define EVTYP_MSG 0x02
22#define EVTYP_DIAG_TEST 0x07
22#define EVTYP_STATECHANGE 0x08 23#define EVTYP_STATECHANGE 0x08
23#define EVTYP_PMSGCMD 0x09 24#define EVTYP_PMSGCMD 0x09
24#define EVTYP_CNTLPROGOPCMD 0x20 25#define EVTYP_CNTLPROGOPCMD 0x20
@@ -32,6 +33,7 @@
32 33
33#define EVTYP_OPCMD_MASK 0x80000000 34#define EVTYP_OPCMD_MASK 0x80000000
34#define EVTYP_MSG_MASK 0x40000000 35#define EVTYP_MSG_MASK 0x40000000
36#define EVTYP_DIAG_TEST_MASK 0x02000000
35#define EVTYP_STATECHANGE_MASK 0x01000000 37#define EVTYP_STATECHANGE_MASK 0x01000000
36#define EVTYP_PMSGCMD_MASK 0x00800000 38#define EVTYP_PMSGCMD_MASK 0x00800000
37#define EVTYP_CTLPROGOPCMD_MASK 0x00000001 39#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
diff --git a/drivers/s390/char/sclp_diag.h b/drivers/s390/char/sclp_diag.h
new file mode 100644
index 000000000000..59c4afa5e670
--- /dev/null
+++ b/drivers/s390/char/sclp_diag.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright IBM Corp. 2013
3 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
4 */
5
6#ifndef _SCLP_DIAG_H
7#define _SCLP_DIAG_H
8
9#include <linux/types.h>
10
11/* return codes for Diagnostic Test FTP Service, as indicated in member
12 * sclp_diag_ftp::ldflg
13 */
14#define SCLP_DIAG_FTP_OK 0x80U /* success */
15#define SCLP_DIAG_FTP_LDFAIL 0x01U /* load failed */
16#define SCLP_DIAG_FTP_LDNPERM 0x02U /* not allowed */
17#define SCLP_DIAG_FTP_LDRUNS 0x03U /* LD runs */
18#define SCLP_DIAG_FTP_LDNRUNS 0x04U /* LD does not run */
19
20#define SCLP_DIAG_FTP_XPCX 0x80 /* PCX communication code */
21#define SCLP_DIAG_FTP_ROUTE 4 /* routing code for new FTP service */
22
23/*
24 * length of Diagnostic Test FTP Service event buffer
25 */
26#define SCLP_DIAG_FTP_EVBUF_LEN \
27 (offsetof(struct sclp_diag_evbuf, mdd) + \
28 sizeof(struct sclp_diag_ftp))
29
30/**
31 * struct sclp_diag_ftp - Diagnostic Test FTP Service model-dependent data
32 * @pcx: code for PCX communication (should be 0x80)
33 * @ldflg: load flag (see defines above)
34 * @cmd: FTP command
35 * @pgsize: page size (0 = 4kB, 1 = large page size)
36 * @srcflg: source flag
37 * @spare: reserved (zeroes)
38 * @offset: file offset
39 * @fsize: file size
40 * @length: buffer size on input, number of bytes transferred on completion
41 * @failaddr: failing address
42 * @bufaddr: buffer address, virtual
43 * @asce: region or segment table designation
44 * @fident: file name (ASCII, zero-terminated)
45 */
46struct sclp_diag_ftp {
47 u8 pcx;
48 u8 ldflg;
49 u8 cmd;
50 u8 pgsize;
51 u8 srcflg;
52 u8 spare;
53 u64 offset;
54 u64 fsize;
55 u64 length;
56 u64 failaddr;
57 u64 bufaddr;
58 u64 asce;
59
60 u8 fident[256];
61} __packed;
62
63/**
64 * struct sclp_diag_evbuf - Diagnostic Test (ET7) Event Buffer
65 * @hdr: event buffer header
66 * @route: diagnostic route
67 * @mdd: model-dependent data (@route dependent)
68 */
69struct sclp_diag_evbuf {
70 struct evbuf_header hdr;
71 u16 route;
72
73 union {
74 struct sclp_diag_ftp ftp;
75 } mdd;
76} __packed;
77
78/**
79 * struct sclp_diag_sccb - Diagnostic Test (ET7) SCCB
80 * @hdr: SCCB header
81 * @evbuf: event buffer
82 */
83struct sclp_diag_sccb {
84
85 struct sccb_header hdr;
86 struct sclp_diag_evbuf evbuf;
87} __packed;
88
89#endif /* _SCLP_DIAG_H */
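
A small compile-time sanity sketch relating this header to hmcdrv_ftp.h (not part of the patch; it assumes both headers plus sclp.h and linux/bug.h are visible): the driver-side path limit has to fit into the architected 256-byte fident field, and the whole SCCB has to stay within the single zeroed page allocated for it.

static inline void sclp_diag_example_checks(void)
{
	/* HMCDRV_FTP_FIDENT_MAX (192) must fit the fident field */
	BUILD_BUG_ON(HMCDRV_FTP_FIDENT_MAX >
		     sizeof(((struct sclp_diag_ftp *) 0)->fident));

	/* the complete SCCB must fit into one 4 KiB page */
	BUILD_BUG_ON(sizeof(struct sclp_diag_sccb) > PAGE_SIZE);
}
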
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 1918d9dff45d..5bd6cb145a87 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -281,7 +281,7 @@ out:
281 281
282static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb) 282static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
283{ 283{
284 if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) 284 if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
285 return 0; 285 return 0;
286 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) 286 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
287 return 0; 287 return 0;
diff --git a/drivers/s390/char/sclp_ftp.c b/drivers/s390/char/sclp_ftp.c
new file mode 100644
index 000000000000..6561cc5b2d5d
--- /dev/null
+++ b/drivers/s390/char/sclp_ftp.c
@@ -0,0 +1,275 @@
1/*
2 * SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
6 *
7 */
8
9#define KMSG_COMPONENT "hmcdrv"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <linux/slab.h>
15#include <linux/io.h>
16#include <linux/wait.h>
17#include <linux/string.h>
18#include <linux/jiffies.h>
19#include <asm/sysinfo.h>
20#include <asm/ebcdic.h>
21
22#include "sclp.h"
23#include "sclp_diag.h"
24#include "sclp_ftp.h"
25
26static DECLARE_COMPLETION(sclp_ftp_rx_complete);
27static u8 sclp_ftp_ldflg;
28static u64 sclp_ftp_fsize;
29static u64 sclp_ftp_length;
30
31/**
32 * sclp_ftp_txcb() - Diagnostic Test FTP services SCLP command callback
33 */
34static void sclp_ftp_txcb(struct sclp_req *req, void *data)
35{
36 struct completion *completion = data;
37
38#ifdef DEBUG
39 pr_debug("SCLP (ET7) TX-IRQ, SCCB @ 0x%p: %*phN\n",
40 req->sccb, 24, req->sccb);
41#endif
42 complete(completion);
43}
44
45/**
46 * sclp_ftp_rxcb() - Diagnostic Test FTP services receiver event callback
47 */
48static void sclp_ftp_rxcb(struct evbuf_header *evbuf)
49{
50 struct sclp_diag_evbuf *diag = (struct sclp_diag_evbuf *) evbuf;
51
52 /*
53 * Check for Diagnostic Test FTP Service
54 */
55 if (evbuf->type != EVTYP_DIAG_TEST ||
56 diag->route != SCLP_DIAG_FTP_ROUTE ||
57 diag->mdd.ftp.pcx != SCLP_DIAG_FTP_XPCX ||
58 evbuf->length < SCLP_DIAG_FTP_EVBUF_LEN)
59 return;
60
61#ifdef DEBUG
62 pr_debug("SCLP (ET7) RX-IRQ, Event @ 0x%p: %*phN\n",
63 evbuf, 24, evbuf);
64#endif
65
66 /*
67 * Because the event buffer is located in a page which is owned
68 * by the SCLP core, all data of interest must be copied. The
69 * error indication is in 'sclp_ftp_ldflg'
70 */
71 sclp_ftp_ldflg = diag->mdd.ftp.ldflg;
72 sclp_ftp_fsize = diag->mdd.ftp.fsize;
73 sclp_ftp_length = diag->mdd.ftp.length;
74
75 complete(&sclp_ftp_rx_complete);
76}
77
78/**
79 * sclp_ftp_et7() - start a Diagnostic Test FTP Service SCLP request
80 * @ftp: pointer to FTP descriptor
81 *
82 * Return: 0 on success, else a (negative) error code
83 */
84static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
85{
86 struct completion completion;
87 struct sclp_diag_sccb *sccb;
88 struct sclp_req *req;
89 size_t len;
90 int rc;
91
92 req = kzalloc(sizeof(*req), GFP_KERNEL);
93 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
94 if (!req || !sccb) {
95 rc = -ENOMEM;
96 goto out_free;
97 }
98
99 sccb->hdr.length = SCLP_DIAG_FTP_EVBUF_LEN +
100 sizeof(struct sccb_header);
101 sccb->evbuf.hdr.type = EVTYP_DIAG_TEST;
102 sccb->evbuf.hdr.length = SCLP_DIAG_FTP_EVBUF_LEN;
103 sccb->evbuf.hdr.flags = 0; /* clear processed-buffer */
104 sccb->evbuf.route = SCLP_DIAG_FTP_ROUTE;
105 sccb->evbuf.mdd.ftp.pcx = SCLP_DIAG_FTP_XPCX;
106 sccb->evbuf.mdd.ftp.srcflg = 0;
107 sccb->evbuf.mdd.ftp.pgsize = 0;
108 sccb->evbuf.mdd.ftp.asce = _ASCE_REAL_SPACE;
109 sccb->evbuf.mdd.ftp.ldflg = SCLP_DIAG_FTP_LDFAIL;
110 sccb->evbuf.mdd.ftp.fsize = 0;
111 sccb->evbuf.mdd.ftp.cmd = ftp->id;
112 sccb->evbuf.mdd.ftp.offset = ftp->ofs;
113 sccb->evbuf.mdd.ftp.length = ftp->len;
114 sccb->evbuf.mdd.ftp.bufaddr = virt_to_phys(ftp->buf);
115
116 len = strlcpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
117 HMCDRV_FTP_FIDENT_MAX);
118 if (len >= HMCDRV_FTP_FIDENT_MAX) {
119 rc = -EINVAL;
120 goto out_free;
121 }
122
123 req->command = SCLP_CMDW_WRITE_EVENT_DATA;
124 req->sccb = sccb;
125 req->status = SCLP_REQ_FILLED;
126 req->callback = sclp_ftp_txcb;
127 req->callback_data = &completion;
128
129 init_completion(&completion);
130
131 rc = sclp_add_request(req);
132 if (rc)
133 goto out_free;
134
135 /* Wait for end of ftp sclp command. */
136 wait_for_completion(&completion);
137
138#ifdef DEBUG
139 pr_debug("status of SCLP (ET7) request is 0x%04x (0x%02x)\n",
140 sccb->hdr.response_code, sccb->evbuf.hdr.flags);
141#endif
142
143 /*
144 * Check if sclp accepted the request. The data transfer runs
145 * asynchronously and the completion is indicated with an
146 * sclp ET7 event.
147 */
148 if (req->status != SCLP_REQ_DONE ||
149 (sccb->evbuf.hdr.flags & 0x80) == 0 || /* processed-buffer */
150 (sccb->hdr.response_code & 0xffU) != 0x20U) {
151 rc = -EIO;
152 }
153
154out_free:
155 free_page((unsigned long) sccb);
156 kfree(req);
157 return rc;
158}
159
160/**
161 * sclp_ftp_cmd() - executes an HMC-related SCLP Diagnose (ET7) FTP command
162 * @ftp: pointer to FTP command specification
163 * @fsize: where to return the file size (or NULL if not needed)
164 *
165 * Note that this function is not reentrant, so the caller must ensure
166 * proper locking.
167 *
168 * Return: number of bytes read/written or a (negative) error code
169 */
170ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
171{
172 ssize_t len;
173#ifdef DEBUG
174 unsigned long start_jiffies;
175
176 pr_debug("starting SCLP (ET7), cmd %d for '%s' at %lld with %zd bytes\n",
177 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
178 start_jiffies = jiffies;
179#endif
180
181 init_completion(&sclp_ftp_rx_complete);
182
183 /* Start ftp sclp command. */
184 len = sclp_ftp_et7(ftp);
185 if (len)
186 goto out_unlock;
187
188 /*
189 * There is no way to cancel the sclp ET7 request, so the code
190 * must wait unconditionally until the transfer is complete.
191 */
192 wait_for_completion(&sclp_ftp_rx_complete);
193
194#ifdef DEBUG
195 pr_debug("completed SCLP (ET7) request after %lu ms (all)\n",
196 (jiffies - start_jiffies) * 1000 / HZ);
197 pr_debug("return code of SCLP (ET7) FTP Service is 0x%02x, with %lld/%lld bytes\n",
198 sclp_ftp_ldflg, sclp_ftp_length, sclp_ftp_fsize);
199#endif
200
201 switch (sclp_ftp_ldflg) {
202 case SCLP_DIAG_FTP_OK:
203 len = sclp_ftp_length;
204 if (fsize)
205 *fsize = sclp_ftp_fsize;
206 break;
207 case SCLP_DIAG_FTP_LDNPERM:
208 len = -EPERM;
209 break;
210 case SCLP_DIAG_FTP_LDRUNS:
211 len = -EBUSY;
212 break;
213 case SCLP_DIAG_FTP_LDFAIL:
214 len = -ENOENT;
215 break;
216 default:
217 len = -EIO;
218 break;
219 }
220
221out_unlock:
222 return len;
223}
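
Because the completion object and the ldflg/fsize/length results are kept in static variables, concurrent calls would corrupt each other. In this patch the serialization is provided by hmcdrv_ftp_do() holding hmcdrv_ftp_mutex around the transfer callback; any other caller would have to do something along these lines (mutex and function names are hypothetical, <linux/mutex.h> assumed):

static DEFINE_MUTEX(example_ftp_mutex);

static ssize_t example_sclp_transfer(const struct hmcdrv_ftp_cmdspec *ftp)
{
	size_t fsize;
	ssize_t len;

	mutex_lock(&example_ftp_mutex);		/* sclp_ftp_cmd() is not reentrant */
	len = sclp_ftp_cmd(ftp, &fsize);	/* bytes moved or negative errno */
	mutex_unlock(&example_ftp_mutex);
	return len;
}
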
224
225/*
226 * ET7 event listener
227 */
228static struct sclp_register sclp_ftp_event = {
229 .send_mask = EVTYP_DIAG_TEST_MASK, /* want tx events */
230 .receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
231 .receiver_fn = sclp_ftp_rxcb, /* async callback (rx) */
232 .state_change_fn = NULL,
233 .pm_event_fn = NULL,
234};
235
236/**
237 * sclp_ftp_startup() - startup of FTP services, when running on LPAR
238 */
239int sclp_ftp_startup(void)
240{
241#ifdef DEBUG
242 unsigned long info;
243#endif
244 int rc;
245
246 rc = sclp_register(&sclp_ftp_event);
247 if (rc)
248 return rc;
249
250#ifdef DEBUG
251 info = get_zeroed_page(GFP_KERNEL);
252
253 if (info != 0) {
254 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
255
256 if (!stsi(info222, 2, 2, 2)) { /* get SYSIB 2.2.2 */
257 info222->name[sizeof(info222->name) - 1] = '\0';
258 EBCASC_500(info222->name, sizeof(info222->name) - 1);
259 pr_debug("SCLP (ET7) FTP Service working on LPAR %u (%s)\n",
260 info222->lpar_number, info222->name);
261 }
262
263 free_page(info);
264 }
265#endif /* DEBUG */
266 return 0;
267}
268
269/**
270 * sclp_ftp_shutdown() - shutdown of FTP services, when running on LPAR
271 */
272void sclp_ftp_shutdown(void)
273{
274 sclp_unregister(&sclp_ftp_event);
275}
diff --git a/drivers/s390/char/sclp_ftp.h b/drivers/s390/char/sclp_ftp.h
new file mode 100644
index 000000000000..98ba3183e7d9
--- /dev/null
+++ b/drivers/s390/char/sclp_ftp.h
@@ -0,0 +1,21 @@
1/*
2 * SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
3 *
4 * None of the functions exported here are reentrant, so usage must be
5 * exclusive and serialized by the caller (e.g. protected by a
6 * mutex).
7 *
8 * Copyright IBM Corp. 2013
9 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
10 */
11
12#ifndef __SCLP_FTP_H__
13#define __SCLP_FTP_H__
14
15#include "hmcdrv_ftp.h"
16
17int sclp_ftp_startup(void);
18void sclp_ftp_shutdown(void);
19ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
20
21#endif /* __SCLP_FTP_H__ */
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 3b13d58fe87b..35a84af875ee 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -33,7 +33,7 @@ static void sclp_rw_pm_event(struct sclp_register *reg,
33 33
34/* Event type structure for write message and write priority message */ 34/* Event type structure for write message and write priority message */
35static struct sclp_register sclp_rw_event = { 35static struct sclp_register sclp_rw_event = {
36 .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK, 36 .send_mask = EVTYP_MSG_MASK,
37 .pm_event_fn = sclp_rw_pm_event, 37 .pm_event_fn = sclp_rw_pm_event,
38}; 38};
39 39
@@ -456,14 +456,9 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
456 return -EIO; 456 return -EIO;
457 457
458 sccb = buffer->sccb; 458 sccb = buffer->sccb;
459 if (sclp_rw_event.sclp_receive_mask & EVTYP_MSG_MASK) 459 /* Use normal write message */
460 /* Use normal write message */ 460 sccb->msg_buf.header.type = EVTYP_MSG;
461 sccb->msg_buf.header.type = EVTYP_MSG; 461
462 else if (sclp_rw_event.sclp_receive_mask & EVTYP_PMSGCMD_MASK)
463 /* Use write priority message */
464 sccb->msg_buf.header.type = EVTYP_PMSGCMD;
465 else
466 return -EOPNOTSUPP;
467 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; 462 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
468 buffer->request.status = SCLP_REQ_FILLED; 463 buffer->request.status = SCLP_REQ_FILLED;
469 buffer->request.callback = sclp_writedata_callback; 464 buffer->request.callback = sclp_writedata_callback;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index b9a9f721716d..ae67386c03d3 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -206,10 +206,6 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
206static int 206static int
207__sclp_vt220_emit(struct sclp_vt220_request *request) 207__sclp_vt220_emit(struct sclp_vt220_request *request)
208{ 208{
209 if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
210 request->sclp_req.status = SCLP_REQ_FAILED;
211 return -EIO;
212 }
213 request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA; 209 request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
214 request->sclp_req.status = SCLP_REQ_FILLED; 210 request->sclp_req.status = SCLP_REQ_FILLED;
215 request->sclp_req.callback = sclp_vt220_callback; 211 request->sclp_req.callback = sclp_vt220_callback;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 6dc60725de92..77f9b9c2f701 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -402,7 +402,9 @@ __tapechar_ioctl(struct tape_device *device,
402 memset(&get, 0, sizeof(get)); 402 memset(&get, 0, sizeof(get));
403 get.mt_type = MT_ISUNKNOWN; 403 get.mt_type = MT_ISUNKNOWN;
404 get.mt_resid = 0 /* device->devstat.rescnt */; 404 get.mt_resid = 0 /* device->devstat.rescnt */;
405 get.mt_dsreg = device->tape_state; 405 get.mt_dsreg =
406 ((device->char_data.block_size << MT_ST_BLKSIZE_SHIFT)
407 & MT_ST_BLKSIZE_MASK);
406 /* FIXME: mt_gstat, mt_erreg, mt_fileno */ 408 /* FIXME: mt_gstat, mt_erreg, mt_fileno */
407 get.mt_gstat = 0; 409 get.mt_gstat = 0;
408 get.mt_erreg = 0; 410 get.mt_erreg = 0;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1884653e4472..efcf48481c5f 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -28,6 +28,7 @@
28#include <asm/processor.h> 28#include <asm/processor.h>
29#include <asm/irqflags.h> 29#include <asm/irqflags.h>
30#include <asm/checksum.h> 30#include <asm/checksum.h>
31#include <asm/switch_to.h>
31#include "sclp.h" 32#include "sclp.h"
32 33
33#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) 34#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
@@ -149,18 +150,21 @@ static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
149 150
150static int __init init_cpu_info(enum arch_id arch) 151static int __init init_cpu_info(enum arch_id arch)
151{ 152{
152 struct save_area *sa; 153 struct save_area_ext *sa_ext;
153 154
154 /* get info for boot cpu from lowcore, stored in the HSA */ 155 /* get info for boot cpu from lowcore, stored in the HSA */
155 156
156 sa = dump_save_area_create(0); 157 sa_ext = dump_save_area_create(0);
157 if (!sa) 158 if (!sa_ext)
158 return -ENOMEM; 159 return -ENOMEM;
159 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { 160 if (memcpy_hsa_kernel(&sa_ext->sa, sys_info.sa_base,
161 sys_info.sa_size) < 0) {
160 TRACE("could not copy from HSA\n"); 162 TRACE("could not copy from HSA\n");
161 kfree(sa); 163 kfree(sa_ext);
162 return -EIO; 164 return -EIO;
163 } 165 }
166 if (MACHINE_HAS_VX)
167 save_vx_regs_safe(sa_ext->vx_regs);
164 return 0; 168 return 0;
165} 169}
166 170
@@ -258,7 +262,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
258 unsigned long sa_start, sa_end; /* save area range */ 262 unsigned long sa_start, sa_end; /* save area range */
259 unsigned long prefix; 263 unsigned long prefix;
260 unsigned long sa_off, len, buf_off; 264 unsigned long sa_off, len, buf_off;
261 struct save_area *save_area = dump_save_areas.areas[i]; 265 struct save_area *save_area = &dump_save_areas.areas[i]->sa;
262 266
263 prefix = save_area->pref_reg; 267 prefix = save_area->pref_reg;
264 sa_start = prefix + sys_info.sa_base; 268 sa_start = prefix + sys_info.sa_base;
@@ -612,7 +616,7 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr,
612 hdr->tod = get_tod_clock(); 616 hdr->tod = get_tod_clock();
613 get_cpu_id(&hdr->cpu_id); 617 get_cpu_id(&hdr->cpu_id);
614 for (i = 0; i < dump_save_areas.count; i++) { 618 for (i = 0; i < dump_save_areas.count; i++) {
615 prefix = dump_save_areas.areas[i]->pref_reg; 619 prefix = dump_save_areas.areas[i]->sa.pref_reg;
616 hdr->real_cpu_cnt++; 620 hdr->real_cpu_cnt++;
617 if (!prefix) 621 if (!prefix)
618 continue; 622 continue;
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 00bfbee0af9e..56eb4ee4deba 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -87,7 +87,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
87 struct airq_struct *airq; 87 struct airq_struct *airq;
88 struct hlist_head *head; 88 struct hlist_head *head;
89 89
90 __this_cpu_write(s390_idle.nohz_delay, 1); 90 set_cpu_flag(CIF_NOHZ_DELAY);
91 tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; 91 tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
92 head = &airq_lists[tpi_info->isc]; 92 head = &airq_lists[tpi_info->isc];
93 rcu_read_lock(); 93 rcu_read_lock();
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 2905d8b0ec95..d5a6f287d2fe 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -561,7 +561,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
561 struct subchannel *sch; 561 struct subchannel *sch;
562 struct irb *irb; 562 struct irb *irb;
563 563
564 __this_cpu_write(s390_idle.nohz_delay, 1); 564 set_cpu_flag(CIF_NOHZ_DELAY);
565 tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; 565 tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
566 irb = &__get_cpu_var(cio_irb); 566 irb = &__get_cpu_var(cio_irb);
567 sch = (struct subchannel *)(unsigned long) tpi_info->intparm; 567 sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 4038437ff033..99485415dcc2 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -664,6 +664,17 @@ static ssize_t ap_hwtype_show(struct device *dev,
664} 664}
665 665
666static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); 666static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
667
668static ssize_t ap_raw_hwtype_show(struct device *dev,
669 struct device_attribute *attr, char *buf)
670{
671 struct ap_device *ap_dev = to_ap_dev(dev);
672
673 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
674}
675
676static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
677
667static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, 678static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
668 char *buf) 679 char *buf)
669{ 680{
@@ -734,6 +745,7 @@ static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
734 745
735static struct attribute *ap_dev_attrs[] = { 746static struct attribute *ap_dev_attrs[] = {
736 &dev_attr_hwtype.attr, 747 &dev_attr_hwtype.attr,
748 &dev_attr_raw_hwtype.attr,
737 &dev_attr_depth.attr, 749 &dev_attr_depth.attr,
738 &dev_attr_request_count.attr, 750 &dev_attr_request_count.attr,
739 &dev_attr_requestq_count.attr, 751 &dev_attr_requestq_count.attr,
@@ -1188,6 +1200,10 @@ static int ap_select_domain(void)
1188 ap_qid_t qid; 1200 ap_qid_t qid;
1189 int rc, i, j; 1201 int rc, i, j;
1190 1202
1203	/* If APXA isn't installed, only 16 domains can be defined */
1204 if (!ap_configuration->ap_extended && (ap_domain_index > 15))
1205 return -EINVAL;
1206
1191 /* 1207 /*
1192 * We want to use a single domain. Either the one specified with 1208 * We want to use a single domain. Either the one specified with
1193 * the "domain=" parameter or the domain with the maximum number 1209 * the "domain=" parameter or the domain with the maximum number
@@ -1413,9 +1429,13 @@ static void ap_scan_bus(struct work_struct *unused)
1413 continue; 1429 continue;
1414 } 1430 }
1415 break; 1431 break;
1432 case 11:
1433 ap_dev->device_type = 10;
1434 break;
1416 default: 1435 default:
1417 ap_dev->device_type = device_type; 1436 ap_dev->device_type = device_type;
1418 } 1437 }
1438 ap_dev->raw_hwtype = device_type;
1419 1439
1420 rc = ap_query_functions(qid, &device_functions); 1440 rc = ap_query_functions(qid, &device_functions);
1421 if (!rc) 1441 if (!rc)
@@ -1900,9 +1920,15 @@ static void ap_reset_all(void)
1900{ 1920{
1901 int i, j; 1921 int i, j;
1902 1922
1903 for (i = 0; i < AP_DOMAINS; i++) 1923 for (i = 0; i < AP_DOMAINS; i++) {
1904 for (j = 0; j < AP_DEVICES; j++) 1924 if (!ap_test_config_domain(i))
1925 continue;
1926 for (j = 0; j < AP_DEVICES; j++) {
1927 if (!ap_test_config_card_id(j))
1928 continue;
1905 ap_reset_queue(AP_MKQID(j, i)); 1929 ap_reset_queue(AP_MKQID(j, i));
1930 }
1931 }
1906} 1932}
1907 1933
1908static struct reset_call ap_reset_call = { 1934static struct reset_call ap_reset_call = {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6405ae24a7a6..055a0f956d17 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -31,7 +31,7 @@
31#include <linux/types.h> 31#include <linux/types.h>
32 32
33#define AP_DEVICES 64 /* Number of AP devices. */ 33#define AP_DEVICES 64 /* Number of AP devices. */
34#define AP_DOMAINS 16 /* Number of AP domains. */ 34#define AP_DOMAINS 256 /* Number of AP domains. */
35#define AP_MAX_RESET 90 /* Maximum number of resets. */ 35#define AP_MAX_RESET 90 /* Maximum number of resets. */
36#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */ 36#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
37#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ 37#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
@@ -45,9 +45,9 @@ extern int ap_domain_index;
45 */ 45 */
46typedef unsigned int ap_qid_t; 46typedef unsigned int ap_qid_t;
47 47
48#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15)) 48#define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255))
49#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63) 49#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
50#define AP_QID_QUEUE(_qid) ((_qid) & 15) 50#define AP_QID_QUEUE(_qid) ((_qid) & 255)
51 51
52/** 52/**
53 * struct ap_queue_status - Holds the AP queue status. 53 * struct ap_queue_status - Holds the AP queue status.
@@ -161,6 +161,7 @@ struct ap_device {
161 ap_qid_t qid; /* AP queue id. */ 161 ap_qid_t qid; /* AP queue id. */
162 int queue_depth; /* AP queue depth.*/ 162 int queue_depth; /* AP queue depth.*/
163 int device_type; /* AP device type. */ 163 int device_type; /* AP device type. */
164 int raw_hwtype; /* AP raw hardware type. */
164 unsigned int functions; /* AP device function bitfield. */ 165 unsigned int functions; /* AP device function bitfield. */
165 int unregistered; /* marks AP device as unregistered */ 166 int unregistered; /* marks AP device as unregistered */
166 struct timer_list timeout; /* Timer for request timeouts. */ 167 struct timer_list timeout; /* Timer for request timeouts. */
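
A tiny illustrative check of what the widened macros now encode (not part of the patch; ap_qid_example() is a hypothetical name and assumes ap_bus.h plus linux/bug.h are included): the queue (domain) index occupies the low eight bits, the card index the six bits above it.

static inline void ap_qid_example(void)
{
	ap_qid_t qid = AP_MKQID(5, 200);	/* card 5, domain 200 (needs APXA) */

	WARN_ON(AP_QID_DEVICE(qid) != 5);	/* bits 8..13 */
	WARN_ON(AP_QID_QUEUE(qid) != 200);	/* low eight bits */
}
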
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 0e18c5dcd91f..08f1830cbfc4 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -343,10 +343,11 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
343 break; 343 break;
344 } 344 }
345 } 345 }
346 if (!found || !try_module_get(zops->owner))
347 zops = NULL;
348
346 spin_unlock_bh(&zcrypt_ops_list_lock); 349 spin_unlock_bh(&zcrypt_ops_list_lock);
347 350
348 if (!found)
349 return NULL;
350 return zops; 351 return zops;
351} 352}
352 353
@@ -359,8 +360,6 @@ struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
359 request_module("%s", name); 360 request_module("%s", name);
360 zops = __ops_lookup(name, variant); 361 zops = __ops_lookup(name, variant);
361 } 362 }
362 if ((!zops) || (!try_module_get(zops->owner)))
363 return NULL;
364 return zops; 363 return zops;
365} 364}
366EXPORT_SYMBOL(zcrypt_msgtype_request); 365EXPORT_SYMBOL(zcrypt_msgtype_request);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 595ee86f5e0d..eda850ca757a 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,7 +108,7 @@ extern struct tick_sched *tick_get_tick_sched(int cpu);
108extern void tick_irq_enter(void); 108extern void tick_irq_enter(void);
109extern int tick_oneshot_mode_active(void); 109extern int tick_oneshot_mode_active(void);
110# ifndef arch_needs_cpu 110# ifndef arch_needs_cpu
111# define arch_needs_cpu(cpu) (0) 111# define arch_needs_cpu() (0)
112# endif 112# endif
113# else 113# else
114static inline void tick_clock_notify(void) { } 114static inline void tick_clock_notify(void) { }
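
The s390 override that replaces this default is not part of the excerpt. Based on the CIF_NOHZ_DELAY flag set in the I/O interrupt handlers earlier in this patch, it is expected to reduce to roughly the following (exact spelling and header placement are assumptions):

/* s390 sketch: keep the tick if an interrupt arrived since the last check */
#define arch_needs_cpu()	test_cpu_flag(CIF_NOHZ_DELAY)
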
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index ef6103bf1f9b..ea9bf2561b9e 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -391,6 +391,8 @@ typedef struct elf64_shdr {
391#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */ 391#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */
392#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */ 392#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
393#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */ 393#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
394#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 upper half */
395#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
394#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ 396#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
395#define NT_ARM_TLS 0x401 /* ARM TLS register */ 397#define NT_ARM_TLS 0x401 /* ARM TLS register */
396#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ 398#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7c1412ea2d29..a73efdf6f696 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -586,7 +586,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
586 } while (read_seqretry(&jiffies_lock, seq)); 586 } while (read_seqretry(&jiffies_lock, seq));
587 587
588 if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || 588 if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
589 arch_needs_cpu(cpu) || irq_work_needs_cpu()) { 589 arch_needs_cpu() || irq_work_needs_cpu()) {
590 next_jiffies = last_jiffies + 1; 590 next_jiffies = last_jiffies + 1;
591 delta_jiffies = 1; 591 delta_jiffies = 1;
592 } else { 592 } else {
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 650ecc83d7d7..001facfa5b74 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -388,10 +388,6 @@ do_file(char const *const fname)
388 "unrecognized ET_REL file: %s\n", fname); 388 "unrecognized ET_REL file: %s\n", fname);
389 fail_file(); 389 fail_file();
390 } 390 }
391 if (w2(ehdr->e_machine) == EM_S390) {
392 reltype = R_390_32;
393 mcount_adjust_32 = -4;
394 }
395 if (w2(ehdr->e_machine) == EM_MIPS) { 391 if (w2(ehdr->e_machine) == EM_MIPS) {
396 reltype = R_MIPS_32; 392 reltype = R_MIPS_32;
397 is_fake_mcount32 = MIPS32_is_fake_mcount; 393 is_fake_mcount32 = MIPS32_is_fake_mcount;
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 397b6b84e8c5..d4b665610d67 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -241,13 +241,6 @@ if ($arch eq "x86_64") {
241 $objcopy .= " -O elf32-i386"; 241 $objcopy .= " -O elf32-i386";
242 $cc .= " -m32"; 242 $cc .= " -m32";
243 243
244} elsif ($arch eq "s390" && $bits == 32) {
245 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$";
246 $mcount_adjust = -4;
247 $alignment = 4;
248 $ld .= " -m elf_s390";
249 $cc .= " -m31";
250
251} elsif ($arch eq "s390" && $bits == 64) { 244} elsif ($arch eq "s390" && $bits == 64) {
252 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; 245 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
253 $mcount_adjust = -8; 246 $mcount_adjust = -8;