path: root/arch/s390/kernel
author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit		8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree		a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/s390/kernel
parent		406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile             |   19
-rw-r--r--  arch/s390/kernel/asm-offsets.c        |   38
-rw-r--r--  arch/s390/kernel/base.S               |   39
-rw-r--r--  arch/s390/kernel/bitmap.c             |    2
-rw-r--r--  arch/s390/kernel/cache.c              |  388
-rw-r--r--  arch/s390/kernel/compat_exec_domain.c |    2
-rw-r--r--  arch/s390/kernel/compat_linux.c       |  121
-rw-r--r--  arch/s390/kernel/compat_linux.h       |   70
-rw-r--r--  arch/s390/kernel/compat_signal.c      |  121
-rw-r--r--  arch/s390/kernel/compat_wrapper.S     |   51
-rw-r--r--  arch/s390/kernel/cpcmd.c              |    5
-rw-r--r--  arch/s390/kernel/crash_dump.c         |  443
-rw-r--r--  arch/s390/kernel/debug.c              |  128
-rw-r--r--  arch/s390/kernel/dis.c                |  658
-rw-r--r--  arch/s390/kernel/early.c              |  106
-rw-r--r--  arch/s390/kernel/ebcdic.c             |    3
-rw-r--r--  arch/s390/kernel/entry.S              | 1304
-rw-r--r--  arch/s390/kernel/entry.h              |   60
-rw-r--r--  arch/s390/kernel/entry64.S            | 1221
-rw-r--r--  arch/s390/kernel/head.S               |  324
-rw-r--r--  arch/s390/kernel/head31.S             |    9
-rw-r--r--  arch/s390/kernel/head64.S             |    9
-rw-r--r--  arch/s390/kernel/head_kdump.S         |  108
-rw-r--r--  arch/s390/kernel/ipl.c                |  137
-rw-r--r--  arch/s390/kernel/irq.c                |  178
-rw-r--r--  arch/s390/kernel/jump_label.c         |   51
-rw-r--r--  arch/s390/kernel/kprobes.c            |    6
-rw-r--r--  arch/s390/kernel/lgr.c                |  186
-rw-r--r--  arch/s390/kernel/machine_kexec.c      |  186
-rw-r--r--  arch/s390/kernel/mcount.S             |    2
-rw-r--r--  arch/s390/kernel/mcount64.S           |    2
-rw-r--r--  arch/s390/kernel/mem_detect.c         |   81
-rw-r--r--  arch/s390/kernel/module.c             |   16
-rw-r--r--  arch/s390/kernel/nmi.c                |    8
-rw-r--r--  arch/s390/kernel/os_info.c            |  167
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c       |  694
-rw-r--r--  arch/s390/kernel/perf_event.c         |  124
-rw-r--r--  arch/s390/kernel/pgm_check.S          |  152
-rw-r--r--  arch/s390/kernel/process.c            |  189
-rw-r--r--  arch/s390/kernel/processor.c          |   15
-rw-r--r--  arch/s390/kernel/ptrace.c             |  182
-rw-r--r--  arch/s390/kernel/reipl.S              |   13
-rw-r--r--  arch/s390/kernel/reipl64.S            |   14
-rw-r--r--  arch/s390/kernel/relocate_kernel.S    |    7
-rw-r--r--  arch/s390/kernel/relocate_kernel64.S  |    9
-rw-r--r--  arch/s390/kernel/runtime_instr.c      |  150
-rw-r--r--  arch/s390/kernel/s390_ksyms.c         |    2
-rw-r--r--  arch/s390/kernel/sclp.S               |   10
-rw-r--r--  arch/s390/kernel/setup.c              |  473
-rw-r--r--  arch/s390/kernel/signal.c             |  210
-rw-r--r--  arch/s390/kernel/smp.c                | 1302
-rw-r--r--  arch/s390/kernel/stacktrace.c         |    4
-rw-r--r--  arch/s390/kernel/suspend.c            |  121
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S       |   27
-rw-r--r--  arch/s390/kernel/sys_s390.c           |   89
-rw-r--r--  arch/s390/kernel/syscalls.S           |    5
-rw-r--r--  arch/s390/kernel/sysinfo.c            |  374
-rw-r--r--  arch/s390/kernel/time.c               |  296
-rw-r--r--  arch/s390/kernel/topology.c           |  364
-rw-r--r--  arch/s390/kernel/traps.c              |  259
-rw-r--r--  arch/s390/kernel/vdso.c               |   47
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S        |   10
-rw-r--r--  arch/s390/kernel/vtime.c              |  541
63 files changed, 4581 insertions, 7351 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2ac311ef5c9..df3732249ba 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,18 +23,18 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 obj-y	:= bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
 	   processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
 	   debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
-	   sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+	   sysinfo.o jump_label.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
-obj-y	+= $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
 
-extra-y	+= head.o vmlinux.lds
+extra-y	+= head.o init_task.o vmlinux.lds
 extra-y	+= $(if $(CONFIG_64BIT),head64.o,head31.o)
 
 obj-$(CONFIG_MODULES)	+= s390_ksyms.o module.o
-obj-$(CONFIG_SMP)	+= smp.o
-obj-$(CONFIG_SCHED_BOOK) += topology.o
+obj-$(CONFIG_SMP)	+= smp.o topology.o
+obj-$(CONFIG_SMP)	+= $(if $(CONFIG_64BIT),switch_cpu64.o, \
+					       switch_cpu.o)
 obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
 obj-$(CONFIG_AUDIT)	+= audit.o
 compat-obj-$(CONFIG_AUDIT) += compat_audit.o
@@ -48,12 +48,11 @@ obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
-obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 
-ifdef CONFIG_64BIT
-obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_cpum_cf.o
-obj-y				+= runtime_instr.o cache.o
-endif
+# Kexec part
+S390_KEXEC_OBJS := machine_kexec.o crash.o
+S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
+obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
 
 # vdso
 obj-$(CONFIG_64BIT)		+= vdso64/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fface87056e..2b45591e158 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -8,8 +8,8 @@
 
 #include <linux/kbuild.h>
 #include <linux/sched.h>
-#include <asm/cputime.h>
 #include <asm/vdso.h>
+#include <asm/sigp.h>
 #include <asm/pgtable.h>
 
 /*
@@ -45,8 +45,8 @@ int main(void)
 	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
 	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
 	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
-	DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
-	DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
+	DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc));
+	DEFINE(__PT_SVCNR, offsetof(struct pt_regs, svcnr));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
 	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -70,14 +70,15 @@ int main(void)
 	DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
 	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 	BLANK();
-	/* idle data offsets */
-	DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
-	DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
-	DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
-	DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
+	/* constants for SIGP */
+	DEFINE(__SIGP_STOP, sigp_stop);
+	DEFINE(__SIGP_RESTART, sigp_restart);
+	DEFINE(__SIGP_SENSE, sigp_sense);
+	DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
+	BLANK();
 	/* lowcore offsets */
 	DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
-	DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
+	DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr));
 	DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
 	DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
 	DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
@@ -94,22 +95,21 @@ int main(void)
 	DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
 	DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
 	DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
+	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
+	BLANK();
+	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
 	DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
 	DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
 	DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
 	DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
 	DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
 	DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
-	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
 	DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
 	DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
 	DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
 	DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
 	DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
-	BLANK();
-	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
-	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
-	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
+	DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
 	DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
 	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
 	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
@@ -127,18 +127,12 @@ int main(void)
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
 	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
-	DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
-	DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
-	DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
-	DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
 	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
 	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
 	DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
-	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
-	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
 	DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
 	DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
@@ -147,6 +141,7 @@ int main(void)
 	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
 	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
+	DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
 #ifdef CONFIG_32BIT
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
@@ -157,8 +152,7 @@ int main(void)
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
-	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
-	DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
+	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
 	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 #endif /* CONFIG_32BIT */
 	return 0;
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 797a823a227..255435663bf 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -1,7 +1,7 @@
 /*
  * arch/s390/kernel/base.S
  *
- * Copyright IBM Corp. 2006, 2007
+ * Copyright IBM Corp. 2006,2007
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  *	      Michael Holzheu <holzheu@de.ibm.com>
  */
@@ -9,7 +9,6 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
-#include <asm/sigp.h>
 
 #ifdef CONFIG_64BIT
 
@@ -34,7 +33,7 @@ s390_base_mcck_handler_fn:
 	.previous
 
 ENTRY(s390_base_ext_handler)
-	stmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
+	stmg	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_ext_handler_fn
@@ -42,7 +41,7 @@ ENTRY(s390_base_ext_handler)
 	ltgr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-1:	lmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
+1:	lmg	%r0,%r15,__LC_SAVE_AREA
 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpswe	__LC_EXT_OLD_PSW
 
@@ -54,7 +53,7 @@ s390_base_ext_handler_fn:
 	.previous
 
 ENTRY(s390_base_pgm_handler)
-	stmg	%r0,%r15,__LC_SAVE_AREA_SYNC
+	stmg	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_pgm_handler_fn
@@ -62,7 +61,7 @@ ENTRY(s390_base_pgm_handler)
 	ltgr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-	lmg	%r0,%r15,__LC_SAVE_AREA_SYNC
+	lmg	%r0,%r15,__LC_SAVE_AREA
 	lpswe	__LC_PGM_OLD_PSW
 1:	lpswe	disabled_wait_psw-0b(%r13)
 
@@ -87,11 +86,6 @@ s390_base_pgm_handler_fn:
 ENTRY(diag308_reset)
 	larl	%r4,.Lctlregs		# Save control registers
 	stctg	%c0,%c15,0(%r4)
-	larl	%r4,.Lfpctl		# Floating point control register
-	stfpc	0(%r4)
-	larl	%r4,.Lcontinue_psw	# Save PSW flags
-	epsw	%r2,%r3
-	stm	%r2,%r3,0(%r4)
 	larl	%r4,.Lrestart_psw	# Setup restart PSW at absolute 0
 	lghi	%r3,0
 	lg	%r4,0(%r4)		# Save PSW
@@ -101,34 +95,21 @@ ENTRY(diag308_reset)
 .Lrestart_part2:
 	lhi	%r0,0			# Load r0 with zero
 	lhi	%r1,2			# Use mode 2 = ESAME (dump)
-	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE	# Switch to ESAME mode
+	sigp	%r1,%r0,0x12		# Switch to ESAME mode
 	sam64				# Switch to 64 bit addressing mode
 	larl	%r4,.Lctlregs		# Restore control registers
 	lctlg	%c0,%c15,0(%r4)
-	larl	%r4,.Lfpctl		# Restore floating point ctl register
-	lfpc	0(%r4)
-	larl	%r4,.Lcontinue_psw	# Restore PSW flags
-	lpswe	0(%r4)
-.Lcontinue:
 	br	%r14
 .align 16
 .Lrestart_psw:
 	.long	0x00080000,0x80000000 + .Lrestart_part2
 
-	.section .data..nosave,"aw",@progbits
-.align 8
-.Lcontinue_psw:
-	.quad	0,.Lcontinue
-	.previous
-
 	.section .bss
 .align 8
 .Lctlregs:
 	.rept	16
 	.quad	0
 	.endr
-.Lfpctl:
-	.long	0
 	.previous
 
 #else /* CONFIG_64BIT */
@@ -155,7 +136,7 @@ s390_base_mcck_handler_fn:
 	.previous
 
 ENTRY(s390_base_ext_handler)
-	stm	%r0,%r15,__LC_SAVE_AREA_ASYNC
+	stm	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
 	l	%r1,2f-0b(%r13)
@@ -163,7 +144,7 @@ ENTRY(s390_base_ext_handler)
 	ltr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-1:	lm	%r0,%r15,__LC_SAVE_AREA_ASYNC
+1:	lm	%r0,%r15,__LC_SAVE_AREA
 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpsw	__LC_EXT_OLD_PSW
 
@@ -177,7 +158,7 @@ s390_base_ext_handler_fn:
 	.previous
 
 ENTRY(s390_base_pgm_handler)
-	stm	%r0,%r15,__LC_SAVE_AREA_SYNC
+	stm	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
 	l	%r1,2f-0b(%r13)
@@ -185,7 +166,7 @@ ENTRY(s390_base_pgm_handler)
 	ltr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-	lm	%r0,%r15,__LC_SAVE_AREA_SYNC
+	lm	%r0,%r15,__LC_SAVE_AREA
 	lpsw	__LC_PGM_OLD_PSW
 
 1:	lpsw	disabled_wait_psw-0b(%r13)
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
index 102da5e2303..3ae4757b006 100644
--- a/arch/s390/kernel/bitmap.c
+++ b/arch/s390/kernel/bitmap.c
@@ -2,7 +2,7 @@
  * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
  * See include/asm/{bitops.h|posix_types.h} for details
  *
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999,2009
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  */
 
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
deleted file mode 100644
index 64b24650e4f..00000000000
--- a/arch/s390/kernel/cache.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Extract CPU cache information and expose them via sysfs.
- *
- * Copyright IBM Corp. 2012
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/notifier.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-#include <asm/facility.h>
-
-struct cache {
-	unsigned long size;
-	unsigned int line_size;
-	unsigned int associativity;
-	unsigned int nr_sets;
-	unsigned int level   : 3;
-	unsigned int type    : 2;
-	unsigned int private : 1;
-	struct list_head list;
-};
-
-struct cache_dir {
-	struct kobject *kobj;
-	struct cache_index_dir *index;
-};
-
-struct cache_index_dir {
-	struct kobject kobj;
-	int cpu;
-	struct cache *cache;
-	struct cache_index_dir *next;
-};
-
-enum {
-	CACHE_SCOPE_NOTEXISTS,
-	CACHE_SCOPE_PRIVATE,
-	CACHE_SCOPE_SHARED,
-	CACHE_SCOPE_RESERVED,
-};
-
-enum {
-	CACHE_TYPE_SEPARATE,
-	CACHE_TYPE_DATA,
-	CACHE_TYPE_INSTRUCTION,
-	CACHE_TYPE_UNIFIED,
-};
-
-enum {
-	EXTRACT_TOPOLOGY,
-	EXTRACT_LINE_SIZE,
-	EXTRACT_SIZE,
-	EXTRACT_ASSOCIATIVITY,
-};
-
-enum {
-	CACHE_TI_UNIFIED = 0,
-	CACHE_TI_DATA = 0,
-	CACHE_TI_INSTRUCTION,
-};
-
-struct cache_info {
-	unsigned char	    : 4;
-	unsigned char scope : 2;
-	unsigned char type  : 2;
-};
-
-#define CACHE_MAX_LEVEL 8
-
-union cache_topology {
-	struct cache_info ci[CACHE_MAX_LEVEL];
-	unsigned long long raw;
-};
-
-static const char * const cache_type_string[] = {
-	"Data",
-	"Instruction",
-	"Unified",
-};
-
-static struct cache_dir *cache_dir_cpu[NR_CPUS];
-static LIST_HEAD(cache_list);
-
-void show_cacheinfo(struct seq_file *m)
-{
-	struct cache *cache;
-	int index = 0;
-
-	list_for_each_entry(cache, &cache_list, list) {
-		seq_printf(m, "cache%-11d: ", index);
-		seq_printf(m, "level=%d ", cache->level);
-		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
-		seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
-		seq_printf(m, "size=%luK ", cache->size >> 10);
-		seq_printf(m, "line_size=%u ", cache->line_size);
-		seq_printf(m, "associativity=%d", cache->associativity);
-		seq_puts(m, "\n");
-		index++;
-	}
-}
-
-static inline unsigned long ecag(int ai, int li, int ti)
-{
-	unsigned long cmd, val;
-
-	cmd = ai << 4 | li << 1 | ti;
-	asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
-		     : "=d" (val) : "a" (cmd));
-	return val;
-}
-
-static int __init cache_add(int level, int private, int type)
-{
-	struct cache *cache;
-	int ti;
-
-	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-	if (!cache)
-		return -ENOMEM;
-	if (type == CACHE_TYPE_INSTRUCTION)
-		ti = CACHE_TI_INSTRUCTION;
-	else
-		ti = CACHE_TI_UNIFIED;
-	cache->size = ecag(EXTRACT_SIZE, level, ti);
-	cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-	cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
-	cache->nr_sets = cache->size / cache->associativity;
-	cache->nr_sets /= cache->line_size;
-	cache->private = private;
-	cache->level = level + 1;
-	cache->type = type - 1;
-	list_add_tail(&cache->list, &cache_list);
-	return 0;
-}
-
-static void __init cache_build_info(void)
-{
-	struct cache *cache, *next;
-	union cache_topology ct;
-	int level, private, rc;
-
-	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
-	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
-		switch (ct.ci[level].scope) {
-		case CACHE_SCOPE_NOTEXISTS:
-		case CACHE_SCOPE_RESERVED:
-			return;
-		case CACHE_SCOPE_SHARED:
-			private = 0;
-			break;
-		case CACHE_SCOPE_PRIVATE:
-			private = 1;
-			break;
-		}
-		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
-			rc = cache_add(level, private, CACHE_TYPE_DATA);
-			rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
-		} else {
-			rc = cache_add(level, private, ct.ci[level].type);
-		}
-		if (rc)
-			goto error;
-	}
-	return;
-error:
-	list_for_each_entry_safe(cache, next, &cache_list, list) {
-		list_del(&cache->list);
-		kfree(cache);
-	}
-}
-
-static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
-{
-	struct cache_dir *cache_dir;
-	struct kobject *kobj = NULL;
-	struct device *dev;
-
-	dev = get_cpu_device(cpu);
-	if (!dev)
-		goto out;
-	kobj = kobject_create_and_add("cache", &dev->kobj);
-	if (!kobj)
-		goto out;
-	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
-	if (!cache_dir)
-		goto out;
-	cache_dir->kobj = kobj;
-	cache_dir_cpu[cpu] = cache_dir;
-	return cache_dir;
-out:
-	kobject_put(kobj);
-	return NULL;
-}
-
-static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
-{
-	return container_of(kobj, struct cache_index_dir, kobj);
-}
-
-static void cache_index_release(struct kobject *kobj)
-{
-	struct cache_index_dir *index;
-
-	index = kobj_to_cache_index_dir(kobj);
-	kfree(index);
-}
-
-static ssize_t cache_index_show(struct kobject *kobj,
-				struct attribute *attr, char *buf)
-{
-	struct kobj_attribute *kobj_attr;
-
-	kobj_attr = container_of(attr, struct kobj_attribute, attr);
-	return kobj_attr->show(kobj, kobj_attr, buf);
-}
-
-#define DEFINE_CACHE_ATTR(_name, _format, _value)			\
-static ssize_t cache_##_name##_show(struct kobject *kobj,		\
-				    struct kobj_attribute *attr,	\
-				    char *buf)				\
-{									\
-	struct cache_index_dir *index;					\
-									\
-	index = kobj_to_cache_index_dir(kobj);				\
-	return sprintf(buf, _format, _value);				\
-}									\
-static struct kobj_attribute cache_##_name##_attr =			\
-	__ATTR(_name, 0444, cache_##_name##_show, NULL);
-
-DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
-DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
-DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
-DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
-DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
-DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
-
-static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
-{
-	struct cache_index_dir *index;
-	int len;
-
-	index = kobj_to_cache_index_dir(kobj);
-	len = type ?
-		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
-		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
-	len += sprintf(&buf[len], "\n");
-	return len;
-}
-
-static ssize_t shared_cpu_map_show(struct kobject *kobj,
-				   struct kobj_attribute *attr, char *buf)
-{
-	return shared_cpu_map_func(kobj, 0, buf);
-}
-static struct kobj_attribute cache_shared_cpu_map_attr =
-	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
-
-static ssize_t shared_cpu_list_show(struct kobject *kobj,
-				    struct kobj_attribute *attr, char *buf)
-{
-	return shared_cpu_map_func(kobj, 1, buf);
-}
-static struct kobj_attribute cache_shared_cpu_list_attr =
-	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
-
-static struct attribute *cache_index_default_attrs[] = {
-	&cache_type_attr.attr,
-	&cache_size_attr.attr,
-	&cache_number_of_sets_attr.attr,
-	&cache_ways_of_associativity_attr.attr,
-	&cache_level_attr.attr,
-	&cache_coherency_line_size_attr.attr,
-	&cache_shared_cpu_map_attr.attr,
-	&cache_shared_cpu_list_attr.attr,
-	NULL,
-};
-
-static const struct sysfs_ops cache_index_ops = {
-	.show = cache_index_show,
-};
-
-static struct kobj_type cache_index_type = {
-	.sysfs_ops = &cache_index_ops,
-	.release = cache_index_release,
-	.default_attrs = cache_index_default_attrs,
-};
-
-static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
-					    struct cache *cache, int index,
-					    int cpu)
-{
-	struct cache_index_dir *index_dir;
-	int rc;
-
-	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
-	if (!index_dir)
-		return -ENOMEM;
-	index_dir->cache = cache;
-	index_dir->cpu = cpu;
-	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
-				  cache_dir->kobj, "index%d", index);
-	if (rc)
-		goto out;
-	index_dir->next = cache_dir->index;
-	cache_dir->index = index_dir;
-	return 0;
-out:
-	kfree(index_dir);
-	return rc;
-}
-
-static int __cpuinit cache_add_cpu(int cpu)
-{
-	struct cache_dir *cache_dir;
-	struct cache *cache;
-	int rc, index = 0;
-
-	if (list_empty(&cache_list))
-		return 0;
-	cache_dir = cache_create_cache_dir(cpu);
-	if (!cache_dir)
-		return -ENOMEM;
-	list_for_each_entry(cache, &cache_list, list) {
-		if (!cache->private)
-			break;
-		rc = cache_create_index_dir(cache_dir, cache, index, cpu);
-		if (rc)
-			return rc;
-		index++;
-	}
-	return 0;
-}
-
-static void __cpuinit cache_remove_cpu(int cpu)
-{
-	struct cache_index_dir *index, *next;
-	struct cache_dir *cache_dir;
-
-	cache_dir = cache_dir_cpu[cpu];
-	if (!cache_dir)
-		return;
-	index = cache_dir->index;
-	while (index) {
-		next = index->next;
-		kobject_put(&index->kobj);
-		index = next;
-	}
-	kobject_put(cache_dir->kobj);
-	kfree(cache_dir);
-	cache_dir_cpu[cpu] = NULL;
-}
-
-static int __cpuinit cache_hotplug(struct notifier_block *nfb,
-				   unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-	int rc = 0;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		rc = cache_add_cpu(cpu);
-		if (rc)
-			cache_remove_cpu(cpu);
-		break;
-	case CPU_DEAD:
-		cache_remove_cpu(cpu);
-		break;
-	}
-	return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static int __init cache_init(void)
-{
-	int cpu;
-
-	if (!test_facility(34))
-		return 0;
-	cache_build_info();
-	for_each_online_cpu(cpu)
-		cache_add_cpu(cpu);
-	hotcpu_notifier(cache_hotplug, 0);
-	return 0;
-}
-device_initcall(cache_init);
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
index 765fabdada9..914d49444f9 100644
--- a/arch/s390/kernel/compat_exec_domain.c
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -1,7 +1,7 @@
 /*
  * Support for 32-bit Linux for S390 personality.
  *
- * Copyright IBM Corp. 2000
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Gerhard Tonn (ton@de.ibm.com)
  *
  *
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 65cca95843e..53acaa86dd9 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -1,6 +1,8 @@
 /*
+ * arch/s390x/kernel/linux32.c
+ *
  *  S390 version
- *  Copyright IBM Corp. 2000
+ *  Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *               Gerhard Tonn (ton@de.ibm.com)
  *               Thomas Spatzier (tspat@de.ibm.com)
@@ -58,9 +60,12 @@
 
 #include "compat_linux.h"
 
-u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
-	PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
-	PSW32_MASK_PSTATE | PSW32_ASC_HOME;
+long psw_user32_bits	= (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+			   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+			   PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
+long psw32_user_bits	= (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
+			   PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
+			   PSW32_MASK_PSTATE);
 
 /* For this source file, we want overflow handling. */
 
@@ -131,19 +136,13 @@ asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
 			low2highuid(suid));
 }
 
-asmlinkage long sys32_getresuid16(u16 __user *ruidp, u16 __user *euidp, u16 __user *suidp)
+asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
 {
-	const struct cred *cred = current_cred();
 	int retval;
-	u16 ruid, euid, suid;
-
-	ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid));
-	euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid));
-	suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid));
 
-	if (!(retval = put_user(ruid, ruidp)) &&
-	    !(retval = put_user(euid, euidp)))
-		retval = put_user(suid, suidp);
+	if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) &&
+	    !(retval = put_user(high2lowuid(current->cred->euid), euid)))
+		retval = put_user(high2lowuid(current->cred->suid), suid);
 
 	return retval;
 }
@@ -154,19 +153,13 @@ asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
 			low2highgid(sgid));
 }
 
-asmlinkage long sys32_getresgid16(u16 __user *rgidp, u16 __user *egidp, u16 __user *sgidp)
+asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
 {
-	const struct cred *cred = current_cred();
 	int retval;
-	u16 rgid, egid, sgid;
-
-	rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid));
-	egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid));
-	sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid));
 
-	if (!(retval = put_user(rgid, rgidp)) &&
-	    !(retval = put_user(egid, egidp)))
-		retval = put_user(sgid, sgidp);
+	if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) &&
+	    !(retval = put_user(high2lowgid(current->cred->egid), egid)))
+		retval = put_user(high2lowgid(current->cred->sgid), sgid);
 
 	return retval;
 }
@@ -183,14 +176,11 @@ asmlinkage long sys32_setfsgid16(u16 gid)
 
 static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
 {
-	struct user_namespace *user_ns = current_user_ns();
 	int i;
 	u16 group;
-	kgid_t kgid;
 
 	for (i = 0; i < group_info->ngroups; i++) {
-		kgid = GROUP_AT(group_info, i);
-		group = (u16)from_kgid_munged(user_ns, kgid);
+		group = (u16)GROUP_AT(group_info, i);
 		if (put_user(group, grouplist+i))
 			return -EFAULT;
 	}
@@ -200,20 +190,13 @@ static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info
 
 static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
 {
-	struct user_namespace *user_ns = current_user_ns();
 	int i;
 	u16 group;
-	kgid_t kgid;
 
 	for (i = 0; i < group_info->ngroups; i++) {
 		if (get_user(group, grouplist+i))
 			return -EFAULT;
-
-		kgid = make_kgid(user_ns, (gid_t)group);
-		if (!gid_valid(kgid))
-			return -EINVAL;
-
-		GROUP_AT(group_info, i) = kgid;
+		GROUP_AT(group_info, i) = (gid_t)group;
 	}
 
 	return 0;
@@ -270,22 +253,22 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
 
 asmlinkage long sys32_getuid16(void)
 {
-	return high2lowuid(from_kuid_munged(current_user_ns(), current_uid()));
+	return high2lowuid(current->cred->uid);
 }
 
 asmlinkage long sys32_geteuid16(void)
 {
-	return high2lowuid(from_kuid_munged(current_user_ns(), current_euid()));
+	return high2lowuid(current->cred->euid);
 }
 
 asmlinkage long sys32_getgid16(void)
 {
-	return high2lowgid(from_kgid_munged(current_user_ns(), current_gid()));
+	return high2lowgid(current->cred->gid);
 }
 
 asmlinkage long sys32_getegid16(void)
 {
-	return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
+	return high2lowgid(current->cred->egid);
 }
 
 /*
@@ -298,6 +281,9 @@ asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
 {
 	if (call >> 16)		/* hack for backward compatibility */
 		return -EINVAL;
+
+	call &= 0xffff;
+
 	switch (call) {
 	case SEMTIMEDOP:
 		return compat_sys_semtimedop(first, compat_ptr(ptr),
@@ -379,7 +365,12 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
 	if (set) {
 		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
 			return -EFAULT;
-		s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+		switch (_NSIG_WORDS) {
+		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
+		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
+		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+		}
 	}
 	set_fs (KERNEL_DS);
 	ret = sys_rt_sigprocmask(how,
@@ -389,8 +380,12 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
 	set_fs (old_fs);
 	if (ret) return ret;
 	if (oset) {
-		s32.sig[1] = (s.sig[0] >> 32);
-		s32.sig[0] = s.sig[0];
+		switch (_NSIG_WORDS) {
+		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+		}
 		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
 			return -EFAULT;
 	}
@@ -409,8 +404,12 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
 	ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
 	set_fs (old_fs);
 	if (!ret) {
-		s32.sig[1] = (s.sig[0] >> 32);
-		s32.sig[0] = s.sig[0];
+		switch (_NSIG_WORDS) {
+		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+		}
 		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
 			return -EFAULT;
 	}
@@ -432,6 +431,32 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
 	return ret;
 }
 
+/*
+ * sys32_execve() executes a new program after the asm stub has set
+ * things up for us.  This should basically do what I want it to.
+ */
+asmlinkage long sys32_execve(const char __user *name, compat_uptr_t __user *argv,
+			     compat_uptr_t __user *envp)
+{
+	struct pt_regs *regs = task_pt_regs(current);
+	char *filename;
+	long rc;
+
+	filename = getname(name);
+	rc = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		return rc;
+	rc = compat_do_execve(filename, argv, envp, regs);
+	if (rc)
+		goto out;
+	current->thread.fp_regs.fpc=0;
+	asm volatile("sfpc %0,0" : : "d" (0));
+	rc = regs->gprs[2];
+out:
+	putname(filename);
+	return rc;
+}
+
 asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
 				size_t count, u32 poshi, u32 poslo)
 {
@@ -531,8 +556,8 @@ static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat)
 	tmp.__st_ino = (u32)stat->ino;
 	tmp.st_mode = stat->mode;
 	tmp.st_nlink = (unsigned int)stat->nlink;
-	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
-	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
+	tmp.st_uid = stat->uid;
+	tmp.st_gid = stat->gid;
 	tmp.st_rdev = huge_encode_dev(stat->rdev);
 	tmp.st_size = stat->size;
 	tmp.st_blksize = (u32)stat->blksize;
@@ -606,6 +631,7 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
 		return -EFAULT;
 	if (a.offset & ~PAGE_MASK)
 		return -EINVAL;
+	a.addr = (unsigned long) compat_ptr(a.addr);
 	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
 			      a.offset >> PAGE_SHIFT);
 }
@@ -616,6 +642,7 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		return -EFAULT;
+	a.addr = (unsigned long) compat_ptr(a.addr);
 	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
 }
 
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index d4d0239970a..9635d759c2b 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -23,6 +23,74 @@ struct old_sigaction32 {
 	__u32	sa_flags;
 	__u32	sa_restorer;	/* Another 32 bit pointer */
 };
+
+typedef struct compat_siginfo {
+	int	si_signo;
+	int	si_errno;
+	int	si_code;
+
+	union {
+		int _pad[((128/sizeof(int)) - 3)];
+
+		/* kill() */
+		struct {
+			pid_t	_pid;	/* sender's pid */
+			uid_t	_uid;	/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			compat_timer_t _tid;		/* timer id */
+			int _overrun;			/* overrun count */
+			compat_sigval_t _sigval;	/* same as below */
+			int _sys_private;	/* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			pid_t			_pid;	/* sender's pid */
+			uid_t			_uid;	/* sender's uid */
+			compat_sigval_t		_sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			pid_t	_pid;	/* which child */
+			uid_t	_uid;	/* sender's uid */
+			int	_status;/* exit code */
+			compat_clock_t _utime;
+			compat_clock_t _stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			__u32	_addr;	/* faulting insn/memory ref. - pointer */
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			int	_band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int	_fd;
+		} _sigpoll;
+	} _sifields;
+} compat_siginfo_t;
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid		_sifields._kill._pid
+#define si_uid		_sifields._kill._uid
+#define si_status	_sifields._sigchld._status
+#define si_utime	_sifields._sigchld._utime
+#define si_stime	_sifields._sigchld._stime
+#define si_value	_sifields._rt._sigval
+#define si_int		_sifields._rt._sigval.sival_int
+#define si_ptr		_sifields._rt._sigval.sival_ptr
+#define si_addr		_sifields._sigfault._addr
+#define si_band		_sifields._sigpoll._band
+#define si_fd		_sifields._sigpoll._fd
+#define si_tid		_sifields._timer._tid
+#define si_overrun	_sifields._timer._overrun
 
 /* asm/sigcontext.h */
 typedef union
@@ -125,6 +193,8 @@ long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
 		       compat_sigset_t __user *oset, size_t sigsetsize);
 long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize);
 long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo);
+long sys32_execve(const char __user *name, compat_uptr_t __user *argv,
+		  compat_uptr_t __user *envp);
 long sys32_init_module(void __user *umod, unsigned long len,
 		       const char __user *uargs);
 long sys32_delete_module(const char __user *name_user, unsigned int flags);
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 593fcc9253f..a9a285b8c4a 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -1,5 +1,7 @@
 /*
- *  Copyright IBM Corp. 2000, 2006
+ *  arch/s390/kernel/compat_signal.c
+ *
+ *    Copyright (C) IBM Corp. 2000,2006
  *  Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  *               Gerhard Tonn (ton@de.ibm.com)
  *
@@ -25,11 +27,12 @@
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/lowcore.h>
-#include <asm/switch_to.h>
 #include "compat_linux.h"
 #include "compat_ptrace.h"
 #include "entry.h"
 
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
 typedef struct
 {
 	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
@@ -138,8 +141,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 		break;
 	case __SI_FAULT >> 16:
 		err |= __get_user(tmp, &from->si_addr);
-		to->si_addr = (void __force __user *)
-			(u64) (tmp & PSW32_ADDR_INSN);
+		to->si_addr = (void __user *)(u64) (tmp & PSW32_ADDR_INSN);
 		break;
 	case __SI_POLL >> 16:
 		err |= __get_user(to->si_band, &from->si_band);
@@ -211,8 +213,16 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
 		ret = get_user(sa_handler, &act->sa_handler);
 		ret |= __copy_from_user(&set32, &act->sa_mask,
 					sizeof(compat_sigset_t));
-		new_ka.sa.sa_mask.sig[0] =
-			set32.sig[0] | (((long)set32.sig[1]) << 32);
+		switch (_NSIG_WORDS) {
+		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
+				| (((long)set32.sig[7]) << 32);
+		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
+				| (((long)set32.sig[5]) << 32);
+		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
+				| (((long)set32.sig[3]) << 32);
+		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
+				| (((long)set32.sig[1]) << 32);
+		}
 		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
 
 		if (ret)
@@ -223,8 +233,20 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 
 	if (!ret && oact) {
-		set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
-		set32.sig[0] = old_ka.sa.sa_mask.sig[0];
+		switch (_NSIG_WORDS) {
+		case 4:
+			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
+			set32.sig[6] = old_ka.sa.sa_mask.sig[3];
+		case 3:
+			set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
+			set32.sig[4] = old_ka.sa.sa_mask.sig[2];
+		case 2:
+			set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
+			set32.sig[2] = old_ka.sa.sa_mask.sig[1];
+		case 1:
+			set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
+			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
+		}
 		ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler);
 		ret |= __copy_to_user(&oact->sa_mask, &set32,
 				      sizeof(compat_sigset_t));
@@ -278,10 +300,9 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 	_s390_regs_common32 regs32;
 	int err, i;
 
-	regs32.psw.mask = psw32_user_bits |
-		((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER);
-	regs32.psw.addr = (__u32) regs->psw.addr |
-		(__u32)(regs->psw.mask & PSW_MASK_BA);
+	regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
+					   (__u32)(regs->psw.mask >> 32));
+	regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
 	for (i = 0; i < NUM_GPRS; i++)
 		regs32.gprs[i] = (__u32) regs->gprs[i];
 	save_access_regs(current->thread.acrs);
@@ -306,13 +327,8 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 	err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
 	if (err)
 		return err;
-	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
-		(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
-		(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
-	/* Check for invalid user address space control. */
-	if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
-		regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
-			(regs->psw.mask & ~PSW_MASK_ASC);
+	regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask,
+					(__u64)regs32.psw.mask << 32);
 	regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
 	for (i = 0; i < NUM_GPRS; i++)
 		regs->gprs[i] = (__u64) regs32.gprs[i];
@@ -326,7 +342,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 		return err;
 
 	restore_fp_regs(&current->thread.fp_regs);
-	clear_thread_flag(TIF_SYSCALL);	/* No longer in a system call */
+	regs->svcnr = 0;	/* disable syscall checks */
 	return 0;
 }
332 348
@@ -364,6 +380,7 @@ asmlinkage long sys32_sigreturn(void)
 		goto badframe;
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
+	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
@@ -389,6 +406,7 @@ asmlinkage long sys32_rt_sigreturn(void)
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 		goto badframe;
+	sigdelsetmask(&set, ~_BLOCKABLE);
 	set_current_blocked(&set);
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
@@ -435,6 +453,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 			sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
+	/* This is the legacy signal stack switching. */
+	else if (!user_mode(regs) &&
+		 !(ka->sa.sa_flags & SA_RESTORER) &&
+		 ka->sa.sa_restorer) {
+		sp = (unsigned long) ka->sa.sa_restorer;
+	}
+
 	return (void __user *)((sp - frame_size) & -8ul);
 }
440 465
@@ -471,11 +496,11 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
 	} else {
-		regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
+		regs->gprs[14] = (__u64) frame->retcode;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
-			       (u16 __force __user *)(frame->retcode)))
+			       (u16 __user *)(frame->retcode)))
 			goto give_sigsegv;
 	}
 
@@ -484,27 +509,19 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
484 goto give_sigsegv; 509 goto give_sigsegv;
485 510
486 /* Set up registers for signal handler */ 511 /* Set up registers for signal handler */
487 regs->gprs[15] = (__force __u64) frame; 512 regs->gprs[15] = (__u64) frame;
488 /* Force 31 bit amode and default user address space control. */ 513 regs->psw.addr = (__u64) ka->sa.sa_handler;
489 regs->psw.mask = PSW_MASK_BA |
490 (psw_user_bits & PSW_MASK_ASC) |
491 (regs->psw.mask & ~PSW_MASK_ASC);
492 regs->psw.addr = (__force __u64) ka->sa.sa_handler;
493 514
494 regs->gprs[2] = map_signal(sig); 515 regs->gprs[2] = map_signal(sig);
495 regs->gprs[3] = (__force __u64) &frame->sc; 516 regs->gprs[3] = (__u64) &frame->sc;
496 517
497 /* We forgot to include these in the sigcontext. 518 /* We forgot to include these in the sigcontext.
498 To avoid breaking binary compatibility, they are passed as args. */ 519 To avoid breaking binary compatibility, they are passed as args. */
499 if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || 520 regs->gprs[4] = current->thread.trap_no;
500 sig == SIGTRAP || sig == SIGFPE) { 521 regs->gprs[5] = current->thread.prot_addr;
501 /* set extra registers only for synchronous signals */
502 regs->gprs[4] = regs->int_code & 127;
503 regs->gprs[5] = regs->int_parm_long;
504 }
505 522
506 /* Place signal number on stack to allow backtrace from handler. */ 523 /* Place signal number on stack to allow backtrace from handler. */
507 if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo)) 524 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
508 goto give_sigsegv; 525 goto give_sigsegv;
509 return 0; 526 return 0;
510 527
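
The pre-patch side of this hunk set the extra registers only for signals the CPU itself raised, while the patched code loads thread.trap_no and thread.prot_addr unconditionally. A model of the synchronous-signal filter in question:

#include <signal.h>
#include <stdio.h>

/* Faults raised by the CPU carry a meaningful trap number and
 * faulting address; asynchronous signals do not. */
static int is_synchronous(int sig)
{
	return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
	       sig == SIGTRAP || sig == SIGFPE;
}

int main(void)
{
	printf("SIGSEGV synchronous? %d\n", is_synchronous(SIGSEGV));
	printf("SIGUSR1 synchronous? %d\n", is_synchronous(SIGUSR1));
	return 0;
}
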
@@ -543,28 +560,24 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
543 /* Set up to return from userspace. If provided, use a stub 560 /* Set up to return from userspace. If provided, use a stub
544 already in userspace. */ 561 already in userspace. */
545 if (ka->sa.sa_flags & SA_RESTORER) { 562 if (ka->sa.sa_flags & SA_RESTORER) {
546 regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE; 563 regs->gprs[14] = (__u64) ka->sa.sa_restorer;
547 } else { 564 } else {
548 regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE; 565 regs->gprs[14] = (__u64) frame->retcode;
549 err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, 566 err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
550 (u16 __force __user *)(frame->retcode)); 567 (u16 __user *)(frame->retcode));
551 } 568 }
552 569
553 /* Set up backchain. */ 570 /* Set up backchain. */
554 if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) 571 if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
555 goto give_sigsegv; 572 goto give_sigsegv;
556 573
557 /* Set up registers for signal handler */ 574 /* Set up registers for signal handler */
558 regs->gprs[15] = (__force __u64) frame; 575 regs->gprs[15] = (__u64) frame;
559 /* Force 31 bit amode and default user address space control. */
560 regs->psw.mask = PSW_MASK_BA |
561 (psw_user_bits & PSW_MASK_ASC) |
562 (regs->psw.mask & ~PSW_MASK_ASC);
563 regs->psw.addr = (__u64) ka->sa.sa_handler; 576 regs->psw.addr = (__u64) ka->sa.sa_handler;
564 577
565 regs->gprs[2] = map_signal(sig); 578 regs->gprs[2] = map_signal(sig);
566 regs->gprs[3] = (__force __u64) &frame->info; 579 regs->gprs[3] = (__u64) &frame->info;
567 regs->gprs[4] = (__force __u64) &frame->uc; 580 regs->gprs[4] = (__u64) &frame->uc;
568 return 0; 581 return 0;
569 582
570give_sigsegv: 583give_sigsegv:
@@ -576,9 +589,10 @@ give_sigsegv:
576 * OK, we're invoking a handler 589 * OK, we're invoking a handler
577 */ 590 */
578 591
579void handle_signal32(unsigned long sig, struct k_sigaction *ka, 592int handle_signal32(unsigned long sig, struct k_sigaction *ka,
580 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) 593 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
581{ 594{
595 sigset_t blocked;
582 int ret; 596 int ret;
583 597
584 /* Set up the stack frame */ 598 /* Set up the stack frame */
@@ -587,8 +601,11 @@ void handle_signal32(unsigned long sig, struct k_sigaction *ka,
587 else 601 else
588 ret = setup_frame32(sig, ka, oldset, regs); 602 ret = setup_frame32(sig, ka, oldset, regs);
589 if (ret) 603 if (ret)
590 return; 604 return ret;
591 signal_delivered(sig, info, ka, regs, 605 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
592 test_thread_flag(TIF_SINGLE_STEP)); 606 if (!(ka->sa.sa_flags & SA_NODEFER))
607 sigaddset(&blocked, sig);
608 set_current_blocked(&blocked);
609 return 0;
593} 610}
594 611
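
With this change handle_signal32() finishes the mask handling itself: the handler's sa_mask is OR-ed into the currently blocked set and, unless SA_NODEFER was requested, the delivered signal is added so it cannot interrupt its own handler. A compilable userspace model using the glibc equivalents (sigorset() standing in for the kernel's sigorsets()):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>

static void block_for_handler(sigset_t *blocked, const sigset_t *cur,
			      const struct sigaction *ka, int sig)
{
	sigorset(blocked, cur, &ka->sa_mask);	/* kernel: sigorsets() */
	if (!(ka->sa_flags & SA_NODEFER))
		sigaddset(blocked, sig);	/* handler blocks itself */
}

int main(void)
{
	struct sigaction ka = { .sa_flags = 0 };
	sigset_t cur, blocked;

	sigemptyset(&cur);
	sigemptyset(&ka.sa_mask);
	sigaddset(&ka.sa_mask, SIGUSR2);
	block_for_handler(&blocked, &cur, &ka, SIGUSR1);
	printf("USR1 blocked: %d, USR2 blocked: %d\n",
	       sigismember(&blocked, SIGUSR1), sigismember(&blocked, SIGUSR2));
	return 0;
}
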
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 9b9a805656b..7526db6bf50 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1,7 +1,8 @@
1/* 1/*
2* arch/s390/kernel/compat_wrapper.S
2* wrapper for 31 bit compatible system calls. 3* wrapper for 31 bit compatible system calls.
3* 4*
4* Copyright IBM Corp. 2000, 2006 5* Copyright (C) IBM Corp. 2000,2006
5* Author(s): Gerhard Tonn (ton@de.ibm.com), 6* Author(s): Gerhard Tonn (ton@de.ibm.com),
6* Thomas Spatzier (tspat@de.ibm.com) 7* Thomas Spatzier (tspat@de.ibm.com)
7*/ 8*/
@@ -28,7 +29,7 @@ ENTRY(sys32_open_wrapper)
28 llgtr %r2,%r2 # const char * 29 llgtr %r2,%r2 # const char *
29 lgfr %r3,%r3 # int 30 lgfr %r3,%r3 # int
30 lgfr %r4,%r4 # int 31 lgfr %r4,%r4 # int
31 jg compat_sys_open # branch to system call 32 jg sys_open # branch to system call
32 33
33ENTRY(sys32_close_wrapper) 34ENTRY(sys32_close_wrapper)
34 llgfr %r2,%r2 # unsigned int 35 llgfr %r2,%r2 # unsigned int
@@ -661,7 +662,7 @@ ENTRY(sys32_getresuid16_wrapper)
661ENTRY(sys32_poll_wrapper) 662ENTRY(sys32_poll_wrapper)
662 llgtr %r2,%r2 # struct pollfd * 663 llgtr %r2,%r2 # struct pollfd *
663 llgfr %r3,%r3 # unsigned int 664 llgfr %r3,%r3 # unsigned int
664 lgfr %r4,%r4 # int 665 lgfr %r4,%r4 # long
665 jg sys_poll # branch to system call 666 jg sys_poll # branch to system call
666 667
667ENTRY(sys32_setresgid16_wrapper) 668ENTRY(sys32_setresgid16_wrapper)
@@ -1576,7 +1577,7 @@ ENTRY(sys32_execve_wrapper)
1576 llgtr %r2,%r2 # char * 1577 llgtr %r2,%r2 # char *
1577 llgtr %r3,%r3 # compat_uptr_t * 1578 llgtr %r3,%r3 # compat_uptr_t *
1578 llgtr %r4,%r4 # compat_uptr_t * 1579 llgtr %r4,%r4 # compat_uptr_t *
1579 jg compat_sys_execve # branch to system call 1580 jg sys32_execve # branch to system call
1580 1581
1581ENTRY(sys_fanotify_init_wrapper) 1582ENTRY(sys_fanotify_init_wrapper)
1582 llgfr %r2,%r2 # unsigned int 1583 llgfr %r2,%r2 # unsigned int
@@ -1622,46 +1623,8 @@ ENTRY(sys_syncfs_wrapper)
1622 lgfr %r2,%r2 # int 1623 lgfr %r2,%r2 # int
1623 jg sys_syncfs 1624 jg sys_syncfs
1624 1625
1625ENTRY(sys_setns_wrapper) 1626 .globl sys_setns_wrapper
1627sys_setns_wrapper:
1626 lgfr %r2,%r2 # int 1628 lgfr %r2,%r2 # int
1627 lgfr %r3,%r3 # int 1629 lgfr %r3,%r3 # int
1628 jg sys_setns 1630 jg sys_setns
1629
1630ENTRY(compat_sys_process_vm_readv_wrapper)
1631 lgfr %r2,%r2 # compat_pid_t
1632 llgtr %r3,%r3 # struct compat_iovec __user *
1633 llgfr %r4,%r4 # unsigned long
1634 llgtr %r5,%r5 # struct compat_iovec __user *
1635 llgfr %r6,%r6 # unsigned long
1636 llgf %r0,164(%r15) # unsigned long
1637 stg %r0,160(%r15)
1638 jg compat_sys_process_vm_readv
1639
1640ENTRY(compat_sys_process_vm_writev_wrapper)
1641 lgfr %r2,%r2 # compat_pid_t
1642 llgtr %r3,%r3 # struct compat_iovec __user *
1643 llgfr %r4,%r4 # unsigned long
1644 llgtr %r5,%r5 # struct compat_iovec __user *
1645 llgfr %r6,%r6 # unsigned long
1646 llgf %r0,164(%r15) # unsigned long
1647 stg %r0,160(%r15)
1648 jg compat_sys_process_vm_writev
1649
1650ENTRY(sys_s390_runtime_instr_wrapper)
1651 lgfr %r2,%r2 # int
1652 lgfr %r3,%r3 # int
1653 jg sys_s390_runtime_instr
1654
1655ENTRY(sys_kcmp_wrapper)
1656 lgfr %r2,%r2 # pid_t
1657 lgfr %r3,%r3 # pid_t
1658 lgfr %r4,%r4 # int
1659 llgfr %r5,%r5 # unsigned long
1660 llgfr %r6,%r6 # unsigned long
1661 jg sys_kcmp
1662
1663ENTRY(sys_finit_module_wrapper)
1664 lgfr %r2,%r2 # int
1665 llgtr %r3,%r3 # const char __user *
1666 lgfr %r4,%r4 # int
1667 jg sys_finit_module
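
All of these wrappers follow one convention before branching to the 64-bit entry point: lgfr sign-extends int arguments, llgfr zero-extends unsigned ones, and llgtr additionally clears the 31-bit address-mode bit from user pointers; wrappers with a seventh argument (the process_vm pair above) also re-store it from the caller's stack slot into the callee's. A C model of the three widening forms (function names follow the mnemonics, not any kernel API):

#include <stdint.h>
#include <stdio.h>

static int64_t lgfr(int32_t v)    { return (int64_t)v; }        /* sign-extend */
static uint64_t llgfr(uint32_t v) { return (uint64_t)v; }       /* zero-extend */
static uint64_t llgtr(uint32_t v) { return v & 0x7fffffffu; }   /* drop AMODE bit */

int main(void)
{
	printf("lgfr(-1)          = %#lx\n", (unsigned long)lgfr(-1));
	printf("llgfr(0xffffffff) = %#lx\n", (unsigned long)llgfr(0xffffffffu));
	printf("llgtr(0x80001000) = %#lx\n", (unsigned long)llgtr(0x80001000u));
	return 0;
}
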
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d7b0c4d2788..3e8b8816f30 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * arch/s390/kernel/cpcmd.c
3 *
2 * S390 version 4 * S390 version
3 * Copyright IBM Corp. 1999, 2007 5 * Copyright IBM Corp. 1999,2007
4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
5 * Christian Borntraeger (cborntra@de.ibm.com), 7 * Christian Borntraeger (cborntra@de.ibm.com),
6 */ 8 */
@@ -16,6 +18,7 @@
16#include <linux/string.h> 18#include <linux/string.h>
17#include <asm/ebcdic.h> 19#include <asm/ebcdic.h>
18#include <asm/cpcmd.h> 20#include <asm/cpcmd.h>
21#include <asm/system.h>
19#include <asm/io.h> 22#include <asm/io.h>
20 23
21static DEFINE_SPINLOCK(cpcmd_lock); 24static DEFINE_SPINLOCK(cpcmd_lock);
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
deleted file mode 100644
index fb8d8781a01..00000000000
--- a/arch/s390/kernel/crash_dump.c
+++ /dev/null
@@ -1,443 +0,0 @@
1/*
2 * S390 kdump implementation
3 *
4 * Copyright IBM Corp. 2011
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#include <linux/crash_dump.h>
9#include <asm/lowcore.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/gfp.h>
13#include <linux/slab.h>
14#include <linux/bootmem.h>
15#include <linux/elf.h>
16#include <asm/os_info.h>
17#include <asm/elf.h>
18#include <asm/ipl.h>
19
20#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
21#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
22#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
23
24/*
25 * Copy one page from "oldmem"
26 *
27 * For the kdump reserved memory this function performs a swap operation:
28 * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
29 * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
30 */
31ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
32 size_t csize, unsigned long offset, int userbuf)
33{
34 unsigned long src;
35
36 if (!csize)
37 return 0;
38
39 src = (pfn << PAGE_SHIFT) + offset;
40 if (src < OLDMEM_SIZE)
41 src += OLDMEM_BASE;
42 else if (src > OLDMEM_BASE &&
43 src < OLDMEM_BASE + OLDMEM_SIZE)
44 src -= OLDMEM_BASE;
45 if (userbuf)
46 copy_to_user_real((void __force __user *) buf, (void *) src,
47 csize);
48 else
49 memcpy_real(buf, (void *) src, csize);
50 return csize;
51}
52
53/*
54 * Copy memory from old kernel
55 */
56int copy_from_oldmem(void *dest, void *src, size_t count)
57{
58 unsigned long copied = 0;
59 int rc;
60
61 if ((unsigned long) src < OLDMEM_SIZE) {
62 copied = min(count, OLDMEM_SIZE - (unsigned long) src);
63 rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
64 if (rc)
65 return rc;
66 }
67 return memcpy_real(dest + copied, src + copied, count - copied);
68}
69
70/*
71 * Alloc memory and panic in case of ENOMEM
72 */
73static void *kzalloc_panic(int len)
74{
75 void *rc;
76
77 rc = kzalloc(len, GFP_KERNEL);
78 if (!rc)
79 panic("s390 kdump kzalloc (%d) failed", len);
80 return rc;
81}
82
83/*
84 * Get memory layout and create hole for oldmem
85 */
86static struct mem_chunk *get_memory_layout(void)
87{
88 struct mem_chunk *chunk_array;
89
90 chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
91 detect_memory_layout(chunk_array);
92 create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK);
93 return chunk_array;
94}
95
96/*
97 * Initialize ELF note
98 */
99static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
100 const char *name)
101{
102 Elf64_Nhdr *note;
103 u64 len;
104
105 note = (Elf64_Nhdr *)buf;
106 note->n_namesz = strlen(name) + 1;
107 note->n_descsz = d_len;
108 note->n_type = type;
109 len = sizeof(Elf64_Nhdr);
110
111 memcpy(buf + len, name, note->n_namesz);
112 len = roundup(len + note->n_namesz, 4);
113
114 memcpy(buf + len, desc, note->n_descsz);
115 len = roundup(len + note->n_descsz, 4);
116
117 return PTR_ADD(buf, len);
118}
119
120/*
121 * Initialize prstatus note
122 */
123static void *nt_prstatus(void *ptr, struct save_area *sa)
124{
125 struct elf_prstatus nt_prstatus;
126 static int cpu_nr = 1;
127
128 memset(&nt_prstatus, 0, sizeof(nt_prstatus));
129 memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
130 memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
131 memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
132 nt_prstatus.pr_pid = cpu_nr;
133 cpu_nr++;
134
135 return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
136 "CORE");
137}
138
139/*
140 * Initialize fpregset (floating point) note
141 */
142static void *nt_fpregset(void *ptr, struct save_area *sa)
143{
144 elf_fpregset_t nt_fpregset;
145
146 memset(&nt_fpregset, 0, sizeof(nt_fpregset));
147 memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
148 memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
149
150 return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
151 "CORE");
152}
153
154/*
155 * Initialize timer note
156 */
157static void *nt_s390_timer(void *ptr, struct save_area *sa)
158{
159 return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
160 KEXEC_CORE_NOTE_NAME);
161}
162
163/*
164 * Initialize TOD clock comparator note
165 */
166static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
167{
168 return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
169 sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
170}
171
172/*
173 * Initialize TOD programmable register note
174 */
175static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
176{
177 return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
178 sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
179}
180
181/*
182 * Initialize control register note
183 */
184static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
185{
186 return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
187 sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
188}
189
190/*
191 * Initialize prefix register note
192 */
193static void *nt_s390_prefix(void *ptr, struct save_area *sa)
194{
195 return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
196 sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
197}
198
199/*
200 * Fill ELF notes for one CPU with save area registers
201 */
202void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
203{
204 ptr = nt_prstatus(ptr, sa);
205 ptr = nt_fpregset(ptr, sa);
206 ptr = nt_s390_timer(ptr, sa);
207 ptr = nt_s390_tod_cmp(ptr, sa);
208 ptr = nt_s390_tod_preg(ptr, sa);
209 ptr = nt_s390_ctrs(ptr, sa);
210 ptr = nt_s390_prefix(ptr, sa);
211 return ptr;
212}
213
214/*
215 * Initialize prpsinfo note (new kernel)
216 */
217static void *nt_prpsinfo(void *ptr)
218{
219 struct elf_prpsinfo prpsinfo;
220
221 memset(&prpsinfo, 0, sizeof(prpsinfo));
222 prpsinfo.pr_sname = 'R';
223 strcpy(prpsinfo.pr_fname, "vmlinux");
224 return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
225 KEXEC_CORE_NOTE_NAME);
226}
227
228/*
229 * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
230 */
231static void *get_vmcoreinfo_old(unsigned long *size)
232{
233 char nt_name[11], *vmcoreinfo;
234 Elf64_Nhdr note;
235 void *addr;
236
237 if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
238 return NULL;
239 memset(nt_name, 0, sizeof(nt_name));
240 if (copy_from_oldmem(&note, addr, sizeof(note)))
241 return NULL;
242 if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
243 return NULL;
244 if (strcmp(nt_name, "VMCOREINFO") != 0)
245 return NULL;
246 vmcoreinfo = kzalloc_panic(note.n_descsz);
247 if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
248 return NULL;
249 *size = note.n_descsz;
250 return vmcoreinfo;
251}
252
253/*
254 * Initialize vmcoreinfo note (new kernel)
255 */
256static void *nt_vmcoreinfo(void *ptr)
257{
258 unsigned long size;
259 void *vmcoreinfo;
260
261 vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
262 if (!vmcoreinfo)
263 vmcoreinfo = get_vmcoreinfo_old(&size);
264 if (!vmcoreinfo)
265 return ptr;
266 return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
267}
268
269/*
270 * Initialize ELF header (new kernel)
271 */
272static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
273{
274 memset(ehdr, 0, sizeof(*ehdr));
275 memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
276 ehdr->e_ident[EI_CLASS] = ELFCLASS64;
277 ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
278 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
279 memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
280 ehdr->e_type = ET_CORE;
281 ehdr->e_machine = EM_S390;
282 ehdr->e_version = EV_CURRENT;
283 ehdr->e_phoff = sizeof(Elf64_Ehdr);
284 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
285 ehdr->e_phentsize = sizeof(Elf64_Phdr);
286 ehdr->e_phnum = mem_chunk_cnt + 1;
287 return ehdr + 1;
288}
289
290/*
291 * Return CPU count for ELF header (new kernel)
292 */
293static int get_cpu_cnt(void)
294{
295 int i, cpus = 0;
296
297 for (i = 0; zfcpdump_save_areas[i]; i++) {
298 if (zfcpdump_save_areas[i]->pref_reg == 0)
299 continue;
300 cpus++;
301 }
302 return cpus;
303}
304
305/*
306 * Return memory chunk count for ELF header (new kernel)
307 */
308static int get_mem_chunk_cnt(void)
309{
310 struct mem_chunk *chunk_array, *mem_chunk;
311 int i, cnt = 0;
312
313 chunk_array = get_memory_layout();
314 for (i = 0; i < MEMORY_CHUNKS; i++) {
315 mem_chunk = &chunk_array[i];
316 if (chunk_array[i].type != CHUNK_READ_WRITE &&
317 chunk_array[i].type != CHUNK_READ_ONLY)
318 continue;
319 if (mem_chunk->size == 0)
320 continue;
321 cnt++;
322 }
323 kfree(chunk_array);
324 return cnt;
325}
326
327/*
328 * Relocate pointer in order to allow vmcore code to access the data
329 */
330static inline unsigned long relocate(unsigned long addr)
331{
332 return OLDMEM_BASE + addr;
333}
334
335/*
336 * Initialize ELF loads (new kernel)
337 */
338static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
339{
340 struct mem_chunk *chunk_array, *mem_chunk;
341 int i;
342
343 chunk_array = get_memory_layout();
344 for (i = 0; i < MEMORY_CHUNKS; i++) {
345 mem_chunk = &chunk_array[i];
346 if (mem_chunk->size == 0)
347 break;
348 if (chunk_array[i].type != CHUNK_READ_WRITE &&
349 chunk_array[i].type != CHUNK_READ_ONLY)
350 continue;
351 else
352 phdr->p_filesz = mem_chunk->size;
353 phdr->p_type = PT_LOAD;
354 phdr->p_offset = mem_chunk->addr;
355 phdr->p_vaddr = mem_chunk->addr;
356 phdr->p_paddr = mem_chunk->addr;
357 phdr->p_memsz = mem_chunk->size;
358 phdr->p_flags = PF_R | PF_W | PF_X;
359 phdr->p_align = PAGE_SIZE;
360 phdr++;
361 }
362 kfree(chunk_array);
363 return i;
364}
365
366/*
367 * Initialize notes (new kernel)
368 */
369static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
370{
371 struct save_area *sa;
372 void *ptr_start = ptr;
373 int i;
374
375 ptr = nt_prpsinfo(ptr);
376
377 for (i = 0; zfcpdump_save_areas[i]; i++) {
378 sa = zfcpdump_save_areas[i];
379 if (sa->pref_reg == 0)
380 continue;
381 ptr = fill_cpu_elf_notes(ptr, sa);
382 }
383 ptr = nt_vmcoreinfo(ptr);
384 memset(phdr, 0, sizeof(*phdr));
385 phdr->p_type = PT_NOTE;
386 phdr->p_offset = relocate(notes_offset);
387 phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
388 phdr->p_memsz = phdr->p_filesz;
389 return ptr;
390}
391
392/*
393 * Create ELF core header (new kernel)
394 */
395static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
396{
397 Elf64_Phdr *phdr_notes, *phdr_loads;
398 int mem_chunk_cnt;
399 void *ptr, *hdr;
400 u32 alloc_size;
401 u64 hdr_off;
402
403 mem_chunk_cnt = get_mem_chunk_cnt();
404
405 alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
406 mem_chunk_cnt * sizeof(Elf64_Phdr);
407 hdr = kzalloc_panic(alloc_size);
408 /* Init elf header */
409 ptr = ehdr_init(hdr, mem_chunk_cnt);
410 /* Init program headers */
411 phdr_notes = ptr;
412 ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
413 phdr_loads = ptr;
414 ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
415 /* Init notes */
416 hdr_off = PTR_DIFF(ptr, hdr);
417 ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
418 /* Init loads */
419 hdr_off = PTR_DIFF(ptr, hdr);
420 loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off);
421 *elfcorebuf_sz = hdr_off;
422 *elfcorebuf = (void *) relocate((unsigned long) hdr);
423 BUG_ON(*elfcorebuf_sz > alloc_size);
424}
425
426/*
427 * Create kdump ELF core header in new kernel, if it has not been passed via
428 * the "elfcorehdr" kernel parameter
429 */
430static int setup_kdump_elfcorehdr(void)
431{
432 size_t elfcorebuf_sz;
433 char *elfcorebuf;
434
435 if (!OLDMEM_BASE || is_kdump_kernel())
436 return -EINVAL;
437 s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
438 elfcorehdr_addr = (unsigned long long) elfcorebuf;
439 elfcorehdr_size = elfcorebuf_sz;
440 return 0;
441}
442
443subsys_initcall(setup_kdump_elfcorehdr);
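
The heart of the deleted file is nt_init(), which lays each ELF note out as a header, a 4-byte-padded name and a 4-byte-padded descriptor, returning a pointer just past the note so the per-CPU helpers can chain. A compilable userspace restatement of that layout logic (buffer size and payload are illustrative):

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void *nt_init(void *buf, Elf64_Word type, const void *desc,
		     int d_len, const char *name)
{
	Elf64_Nhdr *note = buf;
	uint64_t len = sizeof(*note);

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = d_len;
	note->n_type = type;
	memcpy((char *)buf + len, name, note->n_namesz);
	len = (len + note->n_namesz + 3) & ~3ul;	/* roundup(len, 4) */
	memcpy((char *)buf + len, desc, note->n_descsz);
	len = (len + note->n_descsz + 3) & ~3ul;
	return (char *)buf + len;			/* PTR_ADD(buf, len) */
}

int main(void)
{
	char buf[256];
	uint32_t dummy = 42;
	void *end = nt_init(buf, NT_PRSTATUS, &dummy, sizeof(dummy), "CORE");

	printf("note occupies %td bytes\n", (char *)end - buf);
	return 0;
}
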
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 4e8215e0d4b..5ad6bc078bf 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * arch/s390/kernel/debug.c
2 * S/390 debug facility 3 * S/390 debug facility
3 * 4 *
4 * Copyright IBM Corp. 1999, 2012 5 * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
5 * 6 * IBM Corporation
6 * Author(s): Michael Holzheu (holzheu@de.ibm.com), 7 * Author(s): Michael Holzheu (holzheu@de.ibm.com),
7 * Holger Smolinski (Holger.Smolinski@de.ibm.com) 8 * Holger Smolinski (Holger.Smolinski@de.ibm.com)
8 * 9 *
@@ -73,7 +74,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
73static int debug_open(struct inode *inode, struct file *file); 74static int debug_open(struct inode *inode, struct file *file);
74static int debug_close(struct inode *inode, struct file *file); 75static int debug_close(struct inode *inode, struct file *file);
75static debug_info_t *debug_info_create(const char *name, int pages_per_area, 76static debug_info_t *debug_info_create(const char *name, int pages_per_area,
76 int nr_areas, int buf_size, umode_t mode); 77 int nr_areas, int buf_size, mode_t mode);
77static void debug_info_get(debug_info_t *); 78static void debug_info_get(debug_info_t *);
78static void debug_info_put(debug_info_t *); 79static void debug_info_put(debug_info_t *);
79static int debug_prolog_level_fn(debug_info_t * id, 80static int debug_prolog_level_fn(debug_info_t * id,
@@ -110,7 +111,6 @@ struct debug_view debug_raw_view = {
110 NULL, 111 NULL,
111 NULL 112 NULL
112}; 113};
113EXPORT_SYMBOL(debug_raw_view);
114 114
115struct debug_view debug_hex_ascii_view = { 115struct debug_view debug_hex_ascii_view = {
116 "hex_ascii", 116 "hex_ascii",
@@ -120,7 +120,6 @@ struct debug_view debug_hex_ascii_view = {
120 NULL, 120 NULL,
121 NULL 121 NULL
122}; 122};
123EXPORT_SYMBOL(debug_hex_ascii_view);
124 123
125static struct debug_view debug_level_view = { 124static struct debug_view debug_level_view = {
126 "level", 125 "level",
@@ -157,7 +156,6 @@ struct debug_view debug_sprintf_view = {
157 NULL, 156 NULL,
158 NULL 157 NULL
159}; 158};
160EXPORT_SYMBOL(debug_sprintf_view);
161 159
162/* used by dump analysis tools to determine version of debug feature */ 160/* used by dump analysis tools to determine version of debug feature */
163static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION; 161static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
@@ -169,7 +167,6 @@ static debug_info_t *debug_area_last = NULL;
169static DEFINE_MUTEX(debug_mutex); 167static DEFINE_MUTEX(debug_mutex);
170 168
171static int initialized; 169static int initialized;
172static int debug_critical;
173 170
174static const struct file_operations debug_file_ops = { 171static const struct file_operations debug_file_ops = {
175 .owner = THIS_MODULE, 172 .owner = THIS_MODULE,
@@ -333,7 +330,7 @@ debug_info_free(debug_info_t* db_info){
333 330
334static debug_info_t* 331static debug_info_t*
335debug_info_create(const char *name, int pages_per_area, int nr_areas, 332debug_info_create(const char *name, int pages_per_area, int nr_areas,
336 int buf_size, umode_t mode) 333 int buf_size, mode_t mode)
337{ 334{
338 debug_info_t* rc; 335 debug_info_t* rc;
339 336
@@ -691,7 +688,7 @@ debug_close(struct inode *inode, struct file *file)
691 */ 688 */
692 689
693debug_info_t *debug_register_mode(const char *name, int pages_per_area, 690debug_info_t *debug_register_mode(const char *name, int pages_per_area,
694 int nr_areas, int buf_size, umode_t mode, 691 int nr_areas, int buf_size, mode_t mode,
695 uid_t uid, gid_t gid) 692 uid_t uid, gid_t gid)
696{ 693{
697 debug_info_t *rc = NULL; 694 debug_info_t *rc = NULL;
@@ -733,7 +730,6 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
733 return debug_register_mode(name, pages_per_area, nr_areas, buf_size, 730 return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
734 S_IRUSR | S_IWUSR, 0, 0); 731 S_IRUSR | S_IWUSR, 0, 0);
735} 732}
736EXPORT_SYMBOL(debug_register);
737 733
738/* 734/*
739 * debug_unregister: 735 * debug_unregister:
@@ -752,7 +748,6 @@ debug_unregister(debug_info_t * id)
752out: 748out:
753 return; 749 return;
754} 750}
755EXPORT_SYMBOL(debug_unregister);
756 751
757/* 752/*
758 * debug_set_size: 753 * debug_set_size:
@@ -815,7 +810,7 @@ debug_set_level(debug_info_t* id, int new_level)
815 } 810 }
816 spin_unlock_irqrestore(&id->lock,flags); 811 spin_unlock_irqrestore(&id->lock,flags);
817} 812}
818EXPORT_SYMBOL(debug_set_level); 813
819 814
820/* 815/*
821 * proceed_active_entry: 816 * proceed_active_entry:
@@ -935,12 +930,7 @@ debug_stop_all(void)
935 if (debug_stoppable) 930 if (debug_stoppable)
936 debug_active = 0; 931 debug_active = 0;
937} 932}
938EXPORT_SYMBOL(debug_stop_all);
939 933
940void debug_set_critical(void)
941{
942 debug_critical = 1;
943}
944 934
945/* 935/*
946 * debug_event_common: 936 * debug_event_common:
@@ -955,11 +945,7 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len)
955 945
956 if (!debug_active || !id->areas) 946 if (!debug_active || !id->areas)
957 return NULL; 947 return NULL;
958 if (debug_critical) { 948 spin_lock_irqsave(&id->lock, flags);
959 if (!spin_trylock_irqsave(&id->lock, flags))
960 return NULL;
961 } else
962 spin_lock_irqsave(&id->lock, flags);
963 active = get_active_entry(id); 949 active = get_active_entry(id);
964 memset(DEBUG_DATA(active), 0, id->buf_size); 950 memset(DEBUG_DATA(active), 0, id->buf_size);
965 memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size)); 951 memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
@@ -968,7 +954,6 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len)
968 954
969 return active; 955 return active;
970} 956}
971EXPORT_SYMBOL(debug_event_common);
972 957
973/* 958/*
974 * debug_exception_common: 959 * debug_exception_common:
@@ -983,11 +968,7 @@ debug_entry_t
983 968
984 if (!debug_active || !id->areas) 969 if (!debug_active || !id->areas)
985 return NULL; 970 return NULL;
986 if (debug_critical) { 971 spin_lock_irqsave(&id->lock, flags);
987 if (!spin_trylock_irqsave(&id->lock, flags))
988 return NULL;
989 } else
990 spin_lock_irqsave(&id->lock, flags);
991 active = get_active_entry(id); 972 active = get_active_entry(id);
992 memset(DEBUG_DATA(active), 0, id->buf_size); 973 memset(DEBUG_DATA(active), 0, id->buf_size);
993 memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size)); 974 memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
@@ -996,7 +977,6 @@ debug_entry_t
996 977
997 return active; 978 return active;
998} 979}
999EXPORT_SYMBOL(debug_exception_common);
1000 980
1001/* 981/*
1002 * counts arguments in format string for sprintf view 982 * counts arguments in format string for sprintf view
@@ -1033,11 +1013,7 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
1033 return NULL; 1013 return NULL;
1034 numargs=debug_count_numargs(string); 1014 numargs=debug_count_numargs(string);
1035 1015
1036 if (debug_critical) { 1016 spin_lock_irqsave(&id->lock, flags);
1037 if (!spin_trylock_irqsave(&id->lock, flags))
1038 return NULL;
1039 } else
1040 spin_lock_irqsave(&id->lock, flags);
1041 active = get_active_entry(id); 1017 active = get_active_entry(id);
1042 curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active); 1018 curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
1043 va_start(ap,string); 1019 va_start(ap,string);
@@ -1050,7 +1026,6 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
1050 1026
1051 return active; 1027 return active;
1052} 1028}
1053EXPORT_SYMBOL(debug_sprintf_event);
1054 1029
1055/* 1030/*
1056 * debug_sprintf_exception: 1031 * debug_sprintf_exception:
@@ -1072,11 +1047,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
1072 1047
1073 numargs=debug_count_numargs(string); 1048 numargs=debug_count_numargs(string);
1074 1049
1075 if (debug_critical) { 1050 spin_lock_irqsave(&id->lock, flags);
1076 if (!spin_trylock_irqsave(&id->lock, flags))
1077 return NULL;
1078 } else
1079 spin_lock_irqsave(&id->lock, flags);
1080 active = get_active_entry(id); 1051 active = get_active_entry(id);
1081 curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active); 1052 curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
1082 va_start(ap,string); 1053 va_start(ap,string);
@@ -1089,7 +1060,25 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
1089 1060
1090 return active; 1061 return active;
1091} 1062}
1092EXPORT_SYMBOL(debug_sprintf_exception); 1063
1064/*
1065 * debug_init:
1066 * - is called exactly once to initialize the debug feature
1067 */
1068
1069static int
1070__init debug_init(void)
1071{
1072 int rc = 0;
1073
1074 s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
1075 mutex_lock(&debug_mutex);
1076 debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
1077 initialized = 1;
1078 mutex_unlock(&debug_mutex);
1079
1080 return rc;
1081}
1093 1082
1094/* 1083/*
1095 * debug_register_view: 1084 * debug_register_view:
@@ -1101,7 +1090,7 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
1101 int rc = 0; 1090 int rc = 0;
1102 int i; 1091 int i;
1103 unsigned long flags; 1092 unsigned long flags;
1104 umode_t mode; 1093 mode_t mode;
1105 struct dentry *pde; 1094 struct dentry *pde;
1106 1095
1107 if (!id) 1096 if (!id)
@@ -1127,18 +1116,16 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
1127 if (i == DEBUG_MAX_VIEWS) { 1116 if (i == DEBUG_MAX_VIEWS) {
1128 pr_err("Registering view %s/%s would exceed the maximum " 1117 pr_err("Registering view %s/%s would exceed the maximum "
1129 "number of views %i\n", id->name, view->name, i); 1118 "number of views %i\n", id->name, view->name, i);
1119 debugfs_remove(pde);
1130 rc = -1; 1120 rc = -1;
1131 } else { 1121 } else {
1132 id->views[i] = view; 1122 id->views[i] = view;
1133 id->debugfs_entries[i] = pde; 1123 id->debugfs_entries[i] = pde;
1134 } 1124 }
1135 spin_unlock_irqrestore(&id->lock, flags); 1125 spin_unlock_irqrestore(&id->lock, flags);
1136 if (rc)
1137 debugfs_remove(pde);
1138out: 1126out:
1139 return rc; 1127 return rc;
1140} 1128}
1141EXPORT_SYMBOL(debug_register_view);
1142 1129
1143/* 1130/*
1144 * debug_unregister_view: 1131 * debug_unregister_view:
@@ -1147,9 +1134,9 @@ EXPORT_SYMBOL(debug_register_view);
1147int 1134int
1148debug_unregister_view(debug_info_t * id, struct debug_view *view) 1135debug_unregister_view(debug_info_t * id, struct debug_view *view)
1149{ 1136{
1150 struct dentry *dentry = NULL; 1137 int rc = 0;
1138 int i;
1151 unsigned long flags; 1139 unsigned long flags;
1152 int i, rc = 0;
1153 1140
1154 if (!id) 1141 if (!id)
1155 goto out; 1142 goto out;
@@ -1161,16 +1148,13 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
1161 if (i == DEBUG_MAX_VIEWS) 1148 if (i == DEBUG_MAX_VIEWS)
1162 rc = -1; 1149 rc = -1;
1163 else { 1150 else {
1164 dentry = id->debugfs_entries[i]; 1151 debugfs_remove(id->debugfs_entries[i]);
1165 id->views[i] = NULL; 1152 id->views[i] = NULL;
1166 id->debugfs_entries[i] = NULL;
1167 } 1153 }
1168 spin_unlock_irqrestore(&id->lock, flags); 1154 spin_unlock_irqrestore(&id->lock, flags);
1169 debugfs_remove(dentry);
1170out: 1155out:
1171 return rc; 1156 return rc;
1172} 1157}
1173EXPORT_SYMBOL(debug_unregister_view);
1174 1158
1175static inline char * 1159static inline char *
1176debug_get_user_string(const char __user *user_buf, size_t user_len) 1160debug_get_user_string(const char __user *user_buf, size_t user_len)
@@ -1444,10 +1428,10 @@ debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
1444 rc += sprintf(out_buf + rc, "| "); 1428 rc += sprintf(out_buf + rc, "| ");
1445 for (i = 0; i < id->buf_size; i++) { 1429 for (i = 0; i < id->buf_size; i++) {
1446 unsigned char c = in_buf[i]; 1430 unsigned char c = in_buf[i];
1447 if (isascii(c) && isprint(c)) 1431 if (!isprint(c))
1448 rc += sprintf(out_buf + rc, "%c", c);
1449 else
1450 rc += sprintf(out_buf + rc, "."); 1432 rc += sprintf(out_buf + rc, ".");
1433 else
1434 rc += sprintf(out_buf + rc, "%c", c);
1451 } 1435 }
1452 rc += sprintf(out_buf + rc, "\n"); 1436 rc += sprintf(out_buf + rc, "\n");
1453 return rc; 1437 return rc;
@@ -1480,7 +1464,6 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
1480 except_str, entry->id.fields.cpuid, (void *) caller); 1464 except_str, entry->id.fields.cpuid, (void *) caller);
1481 return rc; 1465 return rc;
1482} 1466}
1483EXPORT_SYMBOL(debug_dflt_header_fn);
1484 1467
1485/* 1468/*
1486 * prints debug data sprintf-formatted: 1469
@@ -1529,16 +1512,33 @@ out:
1529} 1512}
1530 1513
1531/* 1514/*
1532 * debug_init: 1515 * clean up module
1533 * - is called exactly once to initialize the debug feature
1534 */ 1516 */
1535static int __init debug_init(void) 1517static void __exit debug_exit(void)
1536{ 1518{
1537 s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); 1519 debugfs_remove(debug_debugfs_root_entry);
1538 mutex_lock(&debug_mutex); 1520 unregister_sysctl_table(s390dbf_sysctl_header);
1539 debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL); 1521 return;
1540 initialized = 1;
1541 mutex_unlock(&debug_mutex);
1542 return 0;
1543} 1522}
1523
1524/*
1525 * module definitions
1526 */
1544postcore_initcall(debug_init); 1527postcore_initcall(debug_init);
1528module_exit(debug_exit);
1529MODULE_LICENSE("GPL");
1530
1531EXPORT_SYMBOL(debug_register);
1532EXPORT_SYMBOL(debug_unregister);
1533EXPORT_SYMBOL(debug_set_level);
1534EXPORT_SYMBOL(debug_stop_all);
1535EXPORT_SYMBOL(debug_register_view);
1536EXPORT_SYMBOL(debug_unregister_view);
1537EXPORT_SYMBOL(debug_event_common);
1538EXPORT_SYMBOL(debug_exception_common);
1539EXPORT_SYMBOL(debug_hex_ascii_view);
1540EXPORT_SYMBOL(debug_raw_view);
1541EXPORT_SYMBOL(debug_dflt_header_fn);
1542EXPORT_SYMBOL(debug_sprintf_view);
1543EXPORT_SYMBOL(debug_sprintf_exception);
1544EXPORT_SYMBOL(debug_sprintf_event);
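
The debug.c hunks above strip out the debug_critical machinery: once a machine check marked the facility critical, writers switched from spin_lock_irqsave() to a trylock and dropped the trace entry rather than deadlock on a lock a dying CPU might hold. A pthread model of the pattern this revert removes (names mirror the kernel's, the locking primitive obviously does not):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int debug_critical;

static int log_event(const char *msg)
{
	if (debug_critical) {
		if (pthread_mutex_trylock(&lock))
			return 0;	/* event lost, but no deadlock */
	} else {
		pthread_mutex_lock(&lock);
	}
	printf("logged: %s\n", msg);
	pthread_mutex_unlock(&lock);
	return 1;
}

int main(void)
{
	log_event("normal path");
	debug_critical = 1;		/* kernel: debug_set_critical() */
	log_event("critical path");
	return 0;
}
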
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index a7f9abd98cf..45df6d456aa 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * arch/s390/kernel/dis.c
3 *
2 * Disassemble s390 instructions. 4 * Disassemble s390 instructions.
3 * 5 *
4 * Copyright IBM Corp. 2007 6 * Copyright IBM Corp. 2007
@@ -22,6 +24,7 @@
22#include <linux/kprobes.h> 24#include <linux/kprobes.h>
23#include <linux/kdebug.h> 25#include <linux/kdebug.h>
24 26
27#include <asm/system.h>
25#include <asm/uaccess.h> 28#include <asm/uaccess.h>
26#include <asm/io.h> 29#include <asm/io.h>
27#include <linux/atomic.h> 30#include <linux/atomic.h>
@@ -83,29 +86,22 @@ enum {
83 U4_12, /* 4 bit unsigned value starting at 12 */ 86 U4_12, /* 4 bit unsigned value starting at 12 */
84 U4_16, /* 4 bit unsigned value starting at 16 */ 87 U4_16, /* 4 bit unsigned value starting at 16 */
85 U4_20, /* 4 bit unsigned value starting at 20 */ 88 U4_20, /* 4 bit unsigned value starting at 20 */
86 U4_24, /* 4 bit unsigned value starting at 24 */
87 U4_28, /* 4 bit unsigned value starting at 28 */
88 U4_32, /* 4 bit unsigned value starting at 32 */ 89 U4_32, /* 4 bit unsigned value starting at 32 */
89 U4_36, /* 4 bit unsigned value starting at 36 */
90 U8_8, /* 8 bit unsigned value starting at 8 */ 90 U8_8, /* 8 bit unsigned value starting at 8 */
91 U8_16, /* 8 bit unsigned value starting at 16 */ 91 U8_16, /* 8 bit unsigned value starting at 16 */
92 U8_24, /* 8 bit unsigned value starting at 24 */ 92 U8_24, /* 8 bit unsigned value starting at 24 */
93 U8_32, /* 8 bit unsigned value starting at 32 */ 93 U8_32, /* 8 bit unsigned value starting at 32 */
94 I8_8, /* 8 bit signed value starting at 8 */ 94 I8_8, /* 8 bit signed value starting at 8 */
95 I8_32, /* 8 bit signed value starting at 32 */ 95 I8_32, /* 8 bit signed value starting at 32 */
96 J12_12, /* PC relative offset at 12 */
97 I16_16, /* 16 bit signed value starting at 16 */ 96 I16_16, /* 16 bit signed value starting at 16 */
98 I16_32, /* 32 bit signed value starting at 16 */ 97 I16_32, /* 32 bit signed value starting at 16 */
99 U16_16, /* 16 bit unsigned value starting at 16 */ 98 U16_16, /* 16 bit unsigned value starting at 16 */
100 U16_32, /* 32 bit unsigned value starting at 16 */ 99 U16_32, /* 32 bit unsigned value starting at 16 */
101 J16_16, /* PC relative jump offset at 16 */ 100 J16_16, /* PC relative jump offset at 16 */
102 J16_32, /* PC relative offset at 16 */
103 I24_24, /* 24 bit signed value starting at 24 */
104 J32_16, /* PC relative long offset at 16 */ 101 J32_16, /* PC relative long offset at 16 */
105 I32_16, /* 32 bit signed value starting at 16 */ 102 I32_16, /* 32 bit signed value starting at 16 */
106 U32_16, /* 32 bit unsigned value starting at 16 */ 103 U32_16, /* 32 bit unsigned value starting at 16 */
107 M_16, /* 4 bit optional mask starting at 16 */ 104 M_16, /* 4 bit optional mask starting at 16 */
108 M_20, /* 4 bit optional mask starting at 20 */
109 RO_28, /* optional GPR starting at position 28 */ 105 RO_28, /* optional GPR starting at position 28 */
110}; 106};
111 107
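
Each operands[] entry above names a bit-field by width, starting bit (counted from the instruction's leftmost bit) and flags such as OPERAND_PCREL or OPERAND_GPR. A simplified extractor for a 6-byte instruction, modeled on (not copied from) the kernel's:

#include <stdint.h>
#include <stdio.h>

struct operand { unsigned char bits, shift, flags; };

/* Pull op.bits bits starting op.shift bits from the left of a 48-bit
 * instruction held right-aligned in a 64-bit word. */
static uint32_t extract(uint64_t insn, struct operand op)
{
	return (insn >> (48 - op.shift - op.bits)) & ((1u << op.bits) - 1);
}

int main(void)
{
	struct operand R_8 = { 4, 8, 0 };	/* GPR field at bits 8..11 */
	uint64_t insn = 0xeb340f000004;		/* illustrative encoding */

	printf("R_8 field = %u\n", extract(insn, R_8));
	return 0;
}
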
@@ -116,8 +112,6 @@ enum {
116enum { 112enum {
117 INSTR_INVALID, 113 INSTR_INVALID,
118 INSTR_E, 114 INSTR_E,
119 INSTR_IE_UU,
120 INSTR_MII_UPI,
121 INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, 115 INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
122 INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0, 116 INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0,
123 INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, 117 INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
@@ -127,15 +121,13 @@ enum {
127 INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, 121 INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
128 INSTR_RRE_RR, INSTR_RRE_RR_OPT, 122 INSTR_RRE_RR, INSTR_RRE_RR_OPT,
129 INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, 123 INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
130 INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_FUFF2, INSTR_RRF_M0RR, 124 INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
131 INSTR_RRF_R0RR, INSTR_RRF_R0RR2, INSTR_RRF_RMRR, INSTR_RRF_RURR, 125 INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
132 INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, INSTR_RRF_UUFF, 126 INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
133 INSTR_RRF_UUFR, INSTR_RRF_UURF,
134 INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
135 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, 127 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
136 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, 128 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
137 INSTR_RSI_RRP, 129 INSTR_RSI_RRP,
138 INSTR_RSL_LRDFU, INSTR_RSL_R0RD, 130 INSTR_RSL_R0RD,
139 INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, 131 INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
140 INSTR_RSY_RDRM, 132 INSTR_RSY_RDRM,
141 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, 133 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
@@ -147,7 +139,6 @@ enum {
147 INSTR_SIL_RDI, INSTR_SIL_RDU, 139 INSTR_SIL_RDI, INSTR_SIL_RDU,
148 INSTR_SIY_IRD, INSTR_SIY_URD, 140 INSTR_SIY_IRD, INSTR_SIY_URD,
149 INSTR_SI_URD, 141 INSTR_SI_URD,
150 INSTR_SMI_U0RDP,
151 INSTR_SSE_RDRD, 142 INSTR_SSE_RDRD,
152 INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2, 143 INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2,
153 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, 144 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
@@ -203,42 +194,31 @@ static const struct operand operands[] =
203 [U4_12] = { 4, 12, 0 }, 194 [U4_12] = { 4, 12, 0 },
204 [U4_16] = { 4, 16, 0 }, 195 [U4_16] = { 4, 16, 0 },
205 [U4_20] = { 4, 20, 0 }, 196 [U4_20] = { 4, 20, 0 },
206 [U4_24] = { 4, 24, 0 },
207 [U4_28] = { 4, 28, 0 },
208 [U4_32] = { 4, 32, 0 }, 197 [U4_32] = { 4, 32, 0 },
209 [U4_36] = { 4, 36, 0 },
210 [U8_8] = { 8, 8, 0 }, 198 [U8_8] = { 8, 8, 0 },
211 [U8_16] = { 8, 16, 0 }, 199 [U8_16] = { 8, 16, 0 },
212 [U8_24] = { 8, 24, 0 }, 200 [U8_24] = { 8, 24, 0 },
213 [U8_32] = { 8, 32, 0 }, 201 [U8_32] = { 8, 32, 0 },
214 [J12_12] = { 12, 12, OPERAND_PCREL },
215 [I16_16] = { 16, 16, OPERAND_SIGNED }, 202 [I16_16] = { 16, 16, OPERAND_SIGNED },
216 [U16_16] = { 16, 16, 0 }, 203 [U16_16] = { 16, 16, 0 },
217 [U16_32] = { 16, 32, 0 }, 204 [U16_32] = { 16, 32, 0 },
218 [J16_16] = { 16, 16, OPERAND_PCREL }, 205 [J16_16] = { 16, 16, OPERAND_PCREL },
219 [J16_32] = { 16, 32, OPERAND_PCREL },
220 [I16_32] = { 16, 32, OPERAND_SIGNED }, 206 [I16_32] = { 16, 32, OPERAND_SIGNED },
221 [I24_24] = { 24, 24, OPERAND_SIGNED },
222 [J32_16] = { 32, 16, OPERAND_PCREL }, 207 [J32_16] = { 32, 16, OPERAND_PCREL },
223 [I32_16] = { 32, 16, OPERAND_SIGNED }, 208 [I32_16] = { 32, 16, OPERAND_SIGNED },
224 [U32_16] = { 32, 16, 0 }, 209 [U32_16] = { 32, 16, 0 },
225 [M_16] = { 4, 16, 0 }, 210 [M_16] = { 4, 16, 0 },
226 [M_20] = { 4, 20, 0 },
227 [RO_28] = { 4, 28, OPERAND_GPR } 211 [RO_28] = { 4, 28, OPERAND_GPR }
228}; 212};
229 213
230static const unsigned char formats[][7] = { 214static const unsigned char formats[][7] = {
231 [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, 215 [INSTR_E] = { 0xff, 0,0,0,0,0,0 },
232 [INSTR_IE_UU] = { 0xff, U4_24,U4_28,0,0,0,0 },
233 [INSTR_MII_UPI] = { 0xff, U4_8,J12_12,I24_24 },
234 [INSTR_RIE_R0IU] = { 0xff, R_8,I16_16,U4_32,0,0,0 },
235 [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 }, 216 [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
236 [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
237 [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 }, 217 [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
238 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, 218 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
239 [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, 219 [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
240 [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, 220 [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
241 [INSTR_RIE_RUPU] = { 0xff, R_8,U8_32,U4_12,J16_16,0,0 }, 221 [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
242 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, 222 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
243 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, 223 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
244 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, 224 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
@@ -268,18 +248,14 @@ static const unsigned char formats[][7] = {
268 [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 }, 248 [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
269 [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 }, 249 [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
270 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, 250 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
271 [INSTR_RRF_FUFF2] = { 0xff, F_24,F_28,F_16,U4_20,0,0 },
272 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, 251 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
273 [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, 252 [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
274 [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 }, 253 [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 },
275 [INSTR_RRF_RMRR] = { 0xff, R_24,R_16,R_28,M_20,0,0 },
276 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, 254 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
277 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, 255 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
278 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, 256 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
279 [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 }, 257 [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
280 [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 }, 258 [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
281 [INSTR_RRF_UUFR] = { 0xff, F_24,U4_16,R_28,U4_20,0,0 },
282 [INSTR_RRF_UURF] = { 0xff, R_24,U4_16,F_28,U4_20,0,0 },
283 [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 }, 259 [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
284 [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 }, 260 [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
285 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, 261 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
@@ -291,13 +267,12 @@ static const unsigned char formats[][7] = {
291 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, 267 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
292 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, 268 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
293 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, 269 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
294 [INSTR_RSL_LRDFU] = { 0xff, F_32,D_20,L4_8,B_16,U4_36,0 },
295 [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 }, 270 [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
296 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, 271 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
297 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, 272 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
298 [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
299 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, 273 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
300 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, 274 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
275 [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
301 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, 276 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
302 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, 277 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
303 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, 278 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
@@ -317,10 +292,9 @@ static const unsigned char formats[][7] = {
317 [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 }, 292 [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
318 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, 293 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
319 [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, 294 [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
320 [INSTR_SMI_U0RDP] = { 0xff, U4_8,J16_32,D_20,B_16,0,0 },
321 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, 295 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
322 [INSTR_SSF_RRDRD] = { 0x0f, D_20,B_16,D_36,B_32,R_8,0 }, 296 [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
323 [INSTR_SSF_RRDRD2]= { 0x0f, R_8,D_20,B_16,D_36,B_32,0 }, 297 [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 },
324 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, 298 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
325 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, 299 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
326 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, 300 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
@@ -333,157 +307,36 @@ static const unsigned char formats[][7] = {
333 307
334enum { 308enum {
335 LONG_INSN_ALGHSIK, 309 LONG_INSN_ALGHSIK,
336 LONG_INSN_ALHHHR,
337 LONG_INSN_ALHHLR,
338 LONG_INSN_ALHSIK, 310 LONG_INSN_ALHSIK,
339 LONG_INSN_ALSIHN,
340 LONG_INSN_CDFBRA,
341 LONG_INSN_CDGBRA,
342 LONG_INSN_CDGTRA,
343 LONG_INSN_CDLFBR,
344 LONG_INSN_CDLFTR,
345 LONG_INSN_CDLGBR,
346 LONG_INSN_CDLGTR,
347 LONG_INSN_CEFBRA,
348 LONG_INSN_CEGBRA,
349 LONG_INSN_CELFBR,
350 LONG_INSN_CELGBR,
351 LONG_INSN_CFDBRA,
352 LONG_INSN_CFEBRA,
353 LONG_INSN_CFXBRA,
354 LONG_INSN_CGDBRA,
355 LONG_INSN_CGDTRA,
356 LONG_INSN_CGEBRA,
357 LONG_INSN_CGXBRA,
358 LONG_INSN_CGXTRA,
359 LONG_INSN_CLFDBR,
360 LONG_INSN_CLFDTR,
361 LONG_INSN_CLFEBR,
362 LONG_INSN_CLFHSI, 311 LONG_INSN_CLFHSI,
363 LONG_INSN_CLFXBR,
364 LONG_INSN_CLFXTR,
365 LONG_INSN_CLGDBR,
366 LONG_INSN_CLGDTR,
367 LONG_INSN_CLGEBR,
368 LONG_INSN_CLGFRL, 312 LONG_INSN_CLGFRL,
369 LONG_INSN_CLGHRL, 313 LONG_INSN_CLGHRL,
370 LONG_INSN_CLGHSI, 314 LONG_INSN_CLGHSI,
371 LONG_INSN_CLGXBR,
372 LONG_INSN_CLGXTR,
373 LONG_INSN_CLHHSI, 315 LONG_INSN_CLHHSI,
374 LONG_INSN_CXFBRA,
375 LONG_INSN_CXGBRA,
376 LONG_INSN_CXGTRA,
377 LONG_INSN_CXLFBR,
378 LONG_INSN_CXLFTR,
379 LONG_INSN_CXLGBR,
380 LONG_INSN_CXLGTR,
381 LONG_INSN_FIDBRA,
382 LONG_INSN_FIEBRA,
383 LONG_INSN_FIXBRA,
384 LONG_INSN_LDXBRA,
385 LONG_INSN_LEDBRA,
386 LONG_INSN_LEXBRA,
387 LONG_INSN_LLGFAT,
388 LONG_INSN_LLGFRL, 316 LONG_INSN_LLGFRL,
389 LONG_INSN_LLGHRL, 317 LONG_INSN_LLGHRL,
390 LONG_INSN_LLGTAT,
391 LONG_INSN_POPCNT, 318 LONG_INSN_POPCNT,
392 LONG_INSN_RIEMIT,
393 LONG_INSN_RINEXT,
394 LONG_INSN_RISBGN,
395 LONG_INSN_RISBHG, 319 LONG_INSN_RISBHG,
396 LONG_INSN_RISBLG, 320 LONG_INSN_RISBLG,
397 LONG_INSN_SLHHHR,
398 LONG_INSN_SLHHLR,
399 LONG_INSN_TABORT,
400 LONG_INSN_TBEGIN,
401 LONG_INSN_TBEGINC,
402 LONG_INSN_PCISTG,
403 LONG_INSN_MPCIFC,
404 LONG_INSN_STPCIFC,
405 LONG_INSN_PCISTB,
406}; 321};
407 322
408static char *long_insn_name[] = { 323static char *long_insn_name[] = {
409 [LONG_INSN_ALGHSIK] = "alghsik", 324 [LONG_INSN_ALGHSIK] = "alghsik",
410 [LONG_INSN_ALHHHR] = "alhhhr",
411 [LONG_INSN_ALHHLR] = "alhhlr",
412 [LONG_INSN_ALHSIK] = "alhsik", 325 [LONG_INSN_ALHSIK] = "alhsik",
413 [LONG_INSN_ALSIHN] = "alsihn",
414 [LONG_INSN_CDFBRA] = "cdfbra",
415 [LONG_INSN_CDGBRA] = "cdgbra",
416 [LONG_INSN_CDGTRA] = "cdgtra",
417 [LONG_INSN_CDLFBR] = "cdlfbr",
418 [LONG_INSN_CDLFTR] = "cdlftr",
419 [LONG_INSN_CDLGBR] = "cdlgbr",
420 [LONG_INSN_CDLGTR] = "cdlgtr",
421 [LONG_INSN_CEFBRA] = "cefbra",
422 [LONG_INSN_CEGBRA] = "cegbra",
423 [LONG_INSN_CELFBR] = "celfbr",
424 [LONG_INSN_CELGBR] = "celgbr",
425 [LONG_INSN_CFDBRA] = "cfdbra",
426 [LONG_INSN_CFEBRA] = "cfebra",
427 [LONG_INSN_CFXBRA] = "cfxbra",
428 [LONG_INSN_CGDBRA] = "cgdbra",
429 [LONG_INSN_CGDTRA] = "cgdtra",
430 [LONG_INSN_CGEBRA] = "cgebra",
431 [LONG_INSN_CGXBRA] = "cgxbra",
432 [LONG_INSN_CGXTRA] = "cgxtra",
433 [LONG_INSN_CLFDBR] = "clfdbr",
434 [LONG_INSN_CLFDTR] = "clfdtr",
435 [LONG_INSN_CLFEBR] = "clfebr",
436 [LONG_INSN_CLFHSI] = "clfhsi", 326 [LONG_INSN_CLFHSI] = "clfhsi",
437 [LONG_INSN_CLFXBR] = "clfxbr",
438 [LONG_INSN_CLFXTR] = "clfxtr",
439 [LONG_INSN_CLGDBR] = "clgdbr",
440 [LONG_INSN_CLGDTR] = "clgdtr",
441 [LONG_INSN_CLGEBR] = "clgebr",
442 [LONG_INSN_CLGFRL] = "clgfrl", 327 [LONG_INSN_CLGFRL] = "clgfrl",
443 [LONG_INSN_CLGHRL] = "clghrl", 328 [LONG_INSN_CLGHRL] = "clghrl",
444 [LONG_INSN_CLGHSI] = "clghsi", 329 [LONG_INSN_CLGHSI] = "clghsi",
445 [LONG_INSN_CLGXBR] = "clgxbr",
446 [LONG_INSN_CLGXTR] = "clgxtr",
447 [LONG_INSN_CLHHSI] = "clhhsi", 330 [LONG_INSN_CLHHSI] = "clhhsi",
448 [LONG_INSN_CXFBRA] = "cxfbra",
449 [LONG_INSN_CXGBRA] = "cxgbra",
450 [LONG_INSN_CXGTRA] = "cxgtra",
451 [LONG_INSN_CXLFBR] = "cxlfbr",
452 [LONG_INSN_CXLFTR] = "cxlftr",
453 [LONG_INSN_CXLGBR] = "cxlgbr",
454 [LONG_INSN_CXLGTR] = "cxlgtr",
455 [LONG_INSN_FIDBRA] = "fidbra",
456 [LONG_INSN_FIEBRA] = "fiebra",
457 [LONG_INSN_FIXBRA] = "fixbra",
458 [LONG_INSN_LDXBRA] = "ldxbra",
459 [LONG_INSN_LEDBRA] = "ledbra",
460 [LONG_INSN_LEXBRA] = "lexbra",
461 [LONG_INSN_LLGFAT] = "llgfat",
462 [LONG_INSN_LLGFRL] = "llgfrl", 331 [LONG_INSN_LLGFRL] = "llgfrl",
463 [LONG_INSN_LLGHRL] = "llghrl", 332 [LONG_INSN_LLGHRL] = "llghrl",
464 [LONG_INSN_LLGTAT] = "llgtat",
465 [LONG_INSN_POPCNT] = "popcnt", 333 [LONG_INSN_POPCNT] = "popcnt",
466 [LONG_INSN_RIEMIT] = "riemit",
467 [LONG_INSN_RINEXT] = "rinext",
468 [LONG_INSN_RISBGN] = "risbgn",
469 [LONG_INSN_RISBHG] = "risbhg", 334 [LONG_INSN_RISBHG] = "risbhg",
470 [LONG_INSN_RISBLG] = "risblg", 335 [LONG_INSN_RISBLG] = "risblk",
471 [LONG_INSN_SLHHHR] = "slhhhr",
472 [LONG_INSN_SLHHLR] = "slhhlr",
473 [LONG_INSN_TABORT] = "tabort",
474 [LONG_INSN_TBEGIN] = "tbegin",
475 [LONG_INSN_TBEGINC] = "tbeginc",
476 [LONG_INSN_PCISTG] = "pcistg",
477 [LONG_INSN_MPCIFC] = "mpcifc",
478 [LONG_INSN_STPCIFC] = "stpcifc",
479 [LONG_INSN_PCISTB] = "pcistb",
480}; 336};
481 337
482static struct insn opcode[] = { 338static struct insn opcode[] = {
483#ifdef CONFIG_64BIT 339#ifdef CONFIG_64BIT
484 { "bprp", 0xc5, INSTR_MII_UPI },
485 { "bpp", 0xc7, INSTR_SMI_U0RDP },
486 { "trtr", 0xd0, INSTR_SS_L0RDRD },
487 { "lmd", 0xef, INSTR_SS_RRRDRD3 }, 340 { "lmd", 0xef, INSTR_SS_RRRDRD3 },
488#endif 341#endif
489 { "spm", 0x04, INSTR_RR_R0 }, 342 { "spm", 0x04, INSTR_RR_R0 },
@@ -518,6 +371,7 @@ static struct insn opcode[] = {
518 { "lcdr", 0x23, INSTR_RR_FF }, 371 { "lcdr", 0x23, INSTR_RR_FF },
519 { "hdr", 0x24, INSTR_RR_FF }, 372 { "hdr", 0x24, INSTR_RR_FF },
520 { "ldxr", 0x25, INSTR_RR_FF }, 373 { "ldxr", 0x25, INSTR_RR_FF },
374 { "lrdr", 0x25, INSTR_RR_FF },
521 { "mxr", 0x26, INSTR_RR_FF }, 375 { "mxr", 0x26, INSTR_RR_FF },
522 { "mxdr", 0x27, INSTR_RR_FF }, 376 { "mxdr", 0x27, INSTR_RR_FF },
523 { "ldr", 0x28, INSTR_RR_FF }, 377 { "ldr", 0x28, INSTR_RR_FF },
@@ -534,6 +388,7 @@ static struct insn opcode[] = {
534 { "lcer", 0x33, INSTR_RR_FF }, 388 { "lcer", 0x33, INSTR_RR_FF },
535 { "her", 0x34, INSTR_RR_FF }, 389 { "her", 0x34, INSTR_RR_FF },
536 { "ledr", 0x35, INSTR_RR_FF }, 390 { "ledr", 0x35, INSTR_RR_FF },
391 { "lrer", 0x35, INSTR_RR_FF },
537 { "axr", 0x36, INSTR_RR_FF }, 392 { "axr", 0x36, INSTR_RR_FF },
538 { "sxr", 0x37, INSTR_RR_FF }, 393 { "sxr", 0x37, INSTR_RR_FF },
539 { "ler", 0x38, INSTR_RR_FF }, 394 { "ler", 0x38, INSTR_RR_FF },
@@ -541,6 +396,7 @@ static struct insn opcode[] = {
541 { "aer", 0x3a, INSTR_RR_FF }, 396 { "aer", 0x3a, INSTR_RR_FF },
542 { "ser", 0x3b, INSTR_RR_FF }, 397 { "ser", 0x3b, INSTR_RR_FF },
543 { "mder", 0x3c, INSTR_RR_FF }, 398 { "mder", 0x3c, INSTR_RR_FF },
399 { "mer", 0x3c, INSTR_RR_FF },
544 { "der", 0x3d, INSTR_RR_FF }, 400 { "der", 0x3d, INSTR_RR_FF },
545 { "aur", 0x3e, INSTR_RR_FF }, 401 { "aur", 0x3e, INSTR_RR_FF },
546 { "sur", 0x3f, INSTR_RR_FF }, 402 { "sur", 0x3f, INSTR_RR_FF },
@@ -591,6 +447,7 @@ static struct insn opcode[] = {
591 { "ae", 0x7a, INSTR_RX_FRRD }, 447 { "ae", 0x7a, INSTR_RX_FRRD },
592 { "se", 0x7b, INSTR_RX_FRRD }, 448 { "se", 0x7b, INSTR_RX_FRRD },
593 { "mde", 0x7c, INSTR_RX_FRRD }, 449 { "mde", 0x7c, INSTR_RX_FRRD },
450 { "me", 0x7c, INSTR_RX_FRRD },
594 { "de", 0x7d, INSTR_RX_FRRD }, 451 { "de", 0x7d, INSTR_RX_FRRD },
595 { "au", 0x7e, INSTR_RX_FRRD }, 452 { "au", 0x7e, INSTR_RX_FRRD },
596 { "su", 0x7f, INSTR_RX_FRRD }, 453 { "su", 0x7f, INSTR_RX_FRRD },
@@ -670,9 +527,9 @@ static struct insn opcode[] = {
670 527
671static struct insn opcode_01[] = { 528static struct insn opcode_01[] = {
672#ifdef CONFIG_64BIT 529#ifdef CONFIG_64BIT
673 { "ptff", 0x04, INSTR_E },
674 { "pfpo", 0x0a, INSTR_E },
675 { "sam64", 0x0e, INSTR_E }, 530 { "sam64", 0x0e, INSTR_E },
531 { "pfpo", 0x0a, INSTR_E },
532 { "ptff", 0x04, INSTR_E },
676#endif 533#endif
677 { "pr", 0x01, INSTR_E }, 534 { "pr", 0x01, INSTR_E },
678 { "upt", 0x02, INSTR_E }, 535 { "upt", 0x02, INSTR_E },
@@ -728,41 +585,18 @@ static struct insn opcode_a7[] = {
728 { "", 0, INSTR_INVALID } 585 { "", 0, INSTR_INVALID }
729}; 586};
730 587
731static struct insn opcode_aa[] = {
732#ifdef CONFIG_64BIT
733 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
734 { "rion", 0x01, INSTR_RI_RI },
735 { "tric", 0x02, INSTR_RI_RI },
736 { "rioff", 0x03, INSTR_RI_RI },
737 { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
738#endif
739 { "", 0, INSTR_INVALID }
740};
741
742static struct insn opcode_b2[] = { 588static struct insn opcode_b2[] = {
743#ifdef CONFIG_64BIT 589#ifdef CONFIG_64BIT
590 { "sske", 0x2b, INSTR_RRF_M0RR },
744 { "stckf", 0x7c, INSTR_S_RD }, 591 { "stckf", 0x7c, INSTR_S_RD },
745 { "lpp", 0x80, INSTR_S_RD }, 592 { "cu21", 0xa6, INSTR_RRF_M0RR },
746 { "lcctl", 0x84, INSTR_S_RD }, 593 { "cuutf", 0xa6, INSTR_RRF_M0RR },
747 { "lpctl", 0x85, INSTR_S_RD }, 594 { "cu12", 0xa7, INSTR_RRF_M0RR },
748 { "qsi", 0x86, INSTR_S_RD }, 595 { "cutfu", 0xa7, INSTR_RRF_M0RR },
749 { "lsctl", 0x87, INSTR_S_RD },
750 { "qctri", 0x8e, INSTR_S_RD },
751 { "stfle", 0xb0, INSTR_S_RD }, 596 { "stfle", 0xb0, INSTR_S_RD },
752 { "lpswe", 0xb2, INSTR_S_RD }, 597 { "lpswe", 0xb2, INSTR_S_RD },
753 { "srnmb", 0xb8, INSTR_S_RD },
754 { "srnmt", 0xb9, INSTR_S_RD }, 598 { "srnmt", 0xb9, INSTR_S_RD },
755 { "lfas", 0xbd, INSTR_S_RD }, 599 { "lfas", 0xbd, INSTR_S_RD },
756 { "scctr", 0xe0, INSTR_RRE_RR },
757 { "spctr", 0xe1, INSTR_RRE_RR },
758 { "ecctr", 0xe4, INSTR_RRE_RR },
759 { "epctr", 0xe5, INSTR_RRE_RR },
760 { "ppa", 0xe8, INSTR_RRF_U0RR },
761 { "etnd", 0xec, INSTR_RRE_R0 },
762 { "ecpga", 0xed, INSTR_RRE_RR },
763 { "tend", 0xf8, INSTR_S_00 },
764 { "niai", 0xfa, INSTR_IE_UU },
765 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
766#endif 600#endif
767 { "stidp", 0x02, INSTR_S_RD }, 601 { "stidp", 0x02, INSTR_S_RD },
768 { "sck", 0x04, INSTR_S_RD }, 602 { "sck", 0x04, INSTR_S_RD },
@@ -781,7 +615,6 @@ static struct insn opcode_b2[] = {
781 { "pc", 0x18, INSTR_S_RD }, 615 { "pc", 0x18, INSTR_S_RD },
782 { "sac", 0x19, INSTR_S_RD }, 616 { "sac", 0x19, INSTR_S_RD },
783 { "cfc", 0x1a, INSTR_S_RD }, 617 { "cfc", 0x1a, INSTR_S_RD },
784 { "servc", 0x20, INSTR_RRE_RR },
785 { "ipte", 0x21, INSTR_RRE_RR }, 618 { "ipte", 0x21, INSTR_RRE_RR },
786 { "ipm", 0x22, INSTR_RRE_R0 }, 619 { "ipm", 0x22, INSTR_RRE_R0 },
787 { "ivsk", 0x23, INSTR_RRE_RR }, 620 { "ivsk", 0x23, INSTR_RRE_RR },
@@ -792,9 +625,9 @@ static struct insn opcode_b2[] = {
792 { "pt", 0x28, INSTR_RRE_RR }, 625 { "pt", 0x28, INSTR_RRE_RR },
793 { "iske", 0x29, INSTR_RRE_RR }, 626 { "iske", 0x29, INSTR_RRE_RR },
794 { "rrbe", 0x2a, INSTR_RRE_RR }, 627 { "rrbe", 0x2a, INSTR_RRE_RR },
795 { "sske", 0x2b, INSTR_RRF_M0RR }, 628 { "sske", 0x2b, INSTR_RRE_RR },
796 { "tb", 0x2c, INSTR_RRE_0R }, 629 { "tb", 0x2c, INSTR_RRE_0R },
797 { "dxr", 0x2d, INSTR_RRE_FF }, 630 { "dxr", 0x2d, INSTR_RRE_F0 },
798 { "pgin", 0x2e, INSTR_RRE_RR }, 631 { "pgin", 0x2e, INSTR_RRE_RR },
799 { "pgout", 0x2f, INSTR_RRE_RR }, 632 { "pgout", 0x2f, INSTR_RRE_RR },
800 { "csch", 0x30, INSTR_S_00 }, 633 { "csch", 0x30, INSTR_S_00 },
@@ -812,8 +645,8 @@ static struct insn opcode_b2[] = {
812 { "schm", 0x3c, INSTR_S_00 }, 645 { "schm", 0x3c, INSTR_S_00 },
813 { "bakr", 0x40, INSTR_RRE_RR }, 646 { "bakr", 0x40, INSTR_RRE_RR },
814 { "cksm", 0x41, INSTR_RRE_RR }, 647 { "cksm", 0x41, INSTR_RRE_RR },
815 { "sqdr", 0x44, INSTR_RRE_FF }, 648 { "sqdr", 0x44, INSTR_RRE_F0 },
816 { "sqer", 0x45, INSTR_RRE_FF }, 649 { "sqer", 0x45, INSTR_RRE_F0 },
817 { "stura", 0x46, INSTR_RRE_RR }, 650 { "stura", 0x46, INSTR_RRE_RR },
818 { "msta", 0x47, INSTR_RRE_R0 }, 651 { "msta", 0x47, INSTR_RRE_R0 },
819 { "palb", 0x48, INSTR_RRE_00 }, 652 { "palb", 0x48, INSTR_RRE_00 },
@@ -839,14 +672,14 @@ static struct insn opcode_b2[] = {
839 { "rp", 0x77, INSTR_S_RD }, 672 { "rp", 0x77, INSTR_S_RD },
840 { "stcke", 0x78, INSTR_S_RD }, 673 { "stcke", 0x78, INSTR_S_RD },
841 { "sacf", 0x79, INSTR_S_RD }, 674 { "sacf", 0x79, INSTR_S_RD },
842 { "stsi", 0x7d, INSTR_S_RD },
843 { "spp", 0x80, INSTR_S_RD }, 675 { "spp", 0x80, INSTR_S_RD },
676 { "stsi", 0x7d, INSTR_S_RD },
844 { "srnm", 0x99, INSTR_S_RD }, 677 { "srnm", 0x99, INSTR_S_RD },
845 { "stfpc", 0x9c, INSTR_S_RD }, 678 { "stfpc", 0x9c, INSTR_S_RD },
846 { "lfpc", 0x9d, INSTR_S_RD }, 679 { "lfpc", 0x9d, INSTR_S_RD },
847 { "tre", 0xa5, INSTR_RRE_RR }, 680 { "tre", 0xa5, INSTR_RRE_RR },
848 { "cuutf", 0xa6, INSTR_RRF_M0RR }, 681 { "cuutf", 0xa6, INSTR_RRE_RR },
849 { "cutfu", 0xa7, INSTR_RRF_M0RR }, 682 { "cutfu", 0xa7, INSTR_RRE_RR },
850 { "stfl", 0xb1, INSTR_S_RD }, 683 { "stfl", 0xb1, INSTR_S_RD },
851 { "trap4", 0xff, INSTR_S_RD }, 684 { "trap4", 0xff, INSTR_S_RD },
852 { "", 0, INSTR_INVALID } 685 { "", 0, INSTR_INVALID }
@@ -860,87 +693,72 @@ static struct insn opcode_b3[] = {
860 { "myr", 0x3b, INSTR_RRF_F0FF }, 693 { "myr", 0x3b, INSTR_RRF_F0FF },
861 { "mayhr", 0x3c, INSTR_RRF_F0FF }, 694 { "mayhr", 0x3c, INSTR_RRF_F0FF },
862 { "myhr", 0x3d, INSTR_RRF_F0FF }, 695 { "myhr", 0x3d, INSTR_RRF_F0FF },
696 { "cegbr", 0xa4, INSTR_RRE_RR },
697 { "cdgbr", 0xa5, INSTR_RRE_RR },
698 { "cxgbr", 0xa6, INSTR_RRE_RR },
699 { "cgebr", 0xa8, INSTR_RRF_U0RF },
700 { "cgdbr", 0xa9, INSTR_RRF_U0RF },
701 { "cgxbr", 0xaa, INSTR_RRF_U0RF },
702 { "cfer", 0xb8, INSTR_RRF_U0RF },
703 { "cfdr", 0xb9, INSTR_RRF_U0RF },
704 { "cfxr", 0xba, INSTR_RRF_U0RF },
705 { "cegr", 0xc4, INSTR_RRE_RR },
706 { "cdgr", 0xc5, INSTR_RRE_RR },
707 { "cxgr", 0xc6, INSTR_RRE_RR },
708 { "cger", 0xc8, INSTR_RRF_U0RF },
709 { "cgdr", 0xc9, INSTR_RRF_U0RF },
710 { "cgxr", 0xca, INSTR_RRF_U0RF },
863 { "lpdfr", 0x70, INSTR_RRE_FF }, 711 { "lpdfr", 0x70, INSTR_RRE_FF },
864 { "lndfr", 0x71, INSTR_RRE_FF }, 712 { "lndfr", 0x71, INSTR_RRE_FF },
865 { "cpsdr", 0x72, INSTR_RRF_F0FF2 }, 713 { "cpsdr", 0x72, INSTR_RRF_F0FF2 },
866 { "lcdfr", 0x73, INSTR_RRE_FF }, 714 { "lcdfr", 0x73, INSTR_RRE_FF },
867 { "sfasr", 0x85, INSTR_RRE_R0 },
868 { { 0, LONG_INSN_CELFBR }, 0x90, INSTR_RRF_UUFR },
869 { { 0, LONG_INSN_CDLFBR }, 0x91, INSTR_RRF_UUFR },
870 { { 0, LONG_INSN_CXLFBR }, 0x92, INSTR_RRF_UURF },
871 { { 0, LONG_INSN_CEFBRA }, 0x94, INSTR_RRF_UUFR },
872 { { 0, LONG_INSN_CDFBRA }, 0x95, INSTR_RRF_UUFR },
873 { { 0, LONG_INSN_CXFBRA }, 0x96, INSTR_RRF_UURF },
874 { { 0, LONG_INSN_CFEBRA }, 0x98, INSTR_RRF_UURF },
875 { { 0, LONG_INSN_CFDBRA }, 0x99, INSTR_RRF_UURF },
876 { { 0, LONG_INSN_CFXBRA }, 0x9a, INSTR_RRF_UUFR },
877 { { 0, LONG_INSN_CLFEBR }, 0x9c, INSTR_RRF_UURF },
878 { { 0, LONG_INSN_CLFDBR }, 0x9d, INSTR_RRF_UURF },
879 { { 0, LONG_INSN_CLFXBR }, 0x9e, INSTR_RRF_UUFR },
880 { { 0, LONG_INSN_CELGBR }, 0xa0, INSTR_RRF_UUFR },
881 { { 0, LONG_INSN_CDLGBR }, 0xa1, INSTR_RRF_UUFR },
882 { { 0, LONG_INSN_CXLGBR }, 0xa2, INSTR_RRF_UURF },
883 { { 0, LONG_INSN_CEGBRA }, 0xa4, INSTR_RRF_UUFR },
884 { { 0, LONG_INSN_CDGBRA }, 0xa5, INSTR_RRF_UUFR },
885 { { 0, LONG_INSN_CXGBRA }, 0xa6, INSTR_RRF_UURF },
886 { { 0, LONG_INSN_CGEBRA }, 0xa8, INSTR_RRF_UURF },
887 { { 0, LONG_INSN_CGDBRA }, 0xa9, INSTR_RRF_UURF },
888 { { 0, LONG_INSN_CGXBRA }, 0xaa, INSTR_RRF_UUFR },
889 { { 0, LONG_INSN_CLGEBR }, 0xac, INSTR_RRF_UURF },
890 { { 0, LONG_INSN_CLGDBR }, 0xad, INSTR_RRF_UURF },
891 { { 0, LONG_INSN_CLGXBR }, 0xae, INSTR_RRF_UUFR },
892 { "ldgr", 0xc1, INSTR_RRE_FR }, 715 { "ldgr", 0xc1, INSTR_RRE_FR },
893 { "cegr", 0xc4, INSTR_RRE_FR },
894 { "cdgr", 0xc5, INSTR_RRE_FR },
895 { "cxgr", 0xc6, INSTR_RRE_FR },
896 { "cger", 0xc8, INSTR_RRF_U0RF },
897 { "cgdr", 0xc9, INSTR_RRF_U0RF },
898 { "cgxr", 0xca, INSTR_RRF_U0RF },
899 { "lgdr", 0xcd, INSTR_RRE_RF }, 716 { "lgdr", 0xcd, INSTR_RRE_RF },
900 { "mdtra", 0xd0, INSTR_RRF_FUFF2 }, 717 { "adtr", 0xd2, INSTR_RRR_F0FF },
901 { "ddtra", 0xd1, INSTR_RRF_FUFF2 }, 718 { "axtr", 0xda, INSTR_RRR_F0FF },
902 { "adtra", 0xd2, INSTR_RRF_FUFF2 },
903 { "sdtra", 0xd3, INSTR_RRF_FUFF2 },
904 { "ldetr", 0xd4, INSTR_RRF_0UFF },
905 { "ledtr", 0xd5, INSTR_RRF_UUFF },
906 { "ltdtr", 0xd6, INSTR_RRE_FF },
907 { "fidtr", 0xd7, INSTR_RRF_UUFF },
908 { "mxtra", 0xd8, INSTR_RRF_FUFF2 },
909 { "dxtra", 0xd9, INSTR_RRF_FUFF2 },
910 { "axtra", 0xda, INSTR_RRF_FUFF2 },
911 { "sxtra", 0xdb, INSTR_RRF_FUFF2 },
912 { "lxdtr", 0xdc, INSTR_RRF_0UFF },
913 { "ldxtr", 0xdd, INSTR_RRF_UUFF },
914 { "ltxtr", 0xde, INSTR_RRE_FF },
915 { "fixtr", 0xdf, INSTR_RRF_UUFF },
916 { "kdtr", 0xe0, INSTR_RRE_FF },
917 { { 0, LONG_INSN_CGDTRA }, 0xe1, INSTR_RRF_UURF },
918 { "cudtr", 0xe2, INSTR_RRE_RF },
919 { "csdtr", 0xe3, INSTR_RRE_RF },
920 { "cdtr", 0xe4, INSTR_RRE_FF }, 719 { "cdtr", 0xe4, INSTR_RRE_FF },
921 { "eedtr", 0xe5, INSTR_RRE_RF }, 720 { "cxtr", 0xec, INSTR_RRE_FF },
922 { "esdtr", 0xe7, INSTR_RRE_RF }, 721 { "kdtr", 0xe0, INSTR_RRE_FF },
923 { "kxtr", 0xe8, INSTR_RRE_FF }, 722 { "kxtr", 0xe8, INSTR_RRE_FF },
924 { { 0, LONG_INSN_CGXTRA }, 0xe9, INSTR_RRF_UUFR }, 723 { "cedtr", 0xf4, INSTR_RRE_FF },
925 { "cuxtr", 0xea, INSTR_RRE_RF }, 724 { "cextr", 0xfc, INSTR_RRE_FF },
725 { "cdgtr", 0xf1, INSTR_RRE_FR },
726 { "cxgtr", 0xf9, INSTR_RRE_FR },
727 { "cdstr", 0xf3, INSTR_RRE_FR },
728 { "cxstr", 0xfb, INSTR_RRE_FR },
729 { "cdutr", 0xf2, INSTR_RRE_FR },
730 { "cxutr", 0xfa, INSTR_RRE_FR },
731 { "cgdtr", 0xe1, INSTR_RRF_U0RF },
732 { "cgxtr", 0xe9, INSTR_RRF_U0RF },
733 { "csdtr", 0xe3, INSTR_RRE_RF },
926 { "csxtr", 0xeb, INSTR_RRE_RF }, 734 { "csxtr", 0xeb, INSTR_RRE_RF },
927 { "cxtr", 0xec, INSTR_RRE_FF }, 735 { "cudtr", 0xe2, INSTR_RRE_RF },
736 { "cuxtr", 0xea, INSTR_RRE_RF },
737 { "ddtr", 0xd1, INSTR_RRR_F0FF },
738 { "dxtr", 0xd9, INSTR_RRR_F0FF },
739 { "eedtr", 0xe5, INSTR_RRE_RF },
928 { "eextr", 0xed, INSTR_RRE_RF }, 740 { "eextr", 0xed, INSTR_RRE_RF },
741 { "esdtr", 0xe7, INSTR_RRE_RF },
929 { "esxtr", 0xef, INSTR_RRE_RF }, 742 { "esxtr", 0xef, INSTR_RRE_RF },
930 { { 0, LONG_INSN_CDGTRA }, 0xf1, INSTR_RRF_UUFR },
931 { "cdutr", 0xf2, INSTR_RRE_FR },
932 { "cdstr", 0xf3, INSTR_RRE_FR },
933 { "cedtr", 0xf4, INSTR_RRE_FF },
934 { "qadtr", 0xf5, INSTR_RRF_FUFF },
935 { "iedtr", 0xf6, INSTR_RRF_F0FR }, 743 { "iedtr", 0xf6, INSTR_RRF_F0FR },
936 { "rrdtr", 0xf7, INSTR_RRF_FFRU },
937 { { 0, LONG_INSN_CXGTRA }, 0xf9, INSTR_RRF_UURF },
938 { "cxutr", 0xfa, INSTR_RRE_FR },
939 { "cxstr", 0xfb, INSTR_RRE_FR },
940 { "cextr", 0xfc, INSTR_RRE_FF },
941 { "qaxtr", 0xfd, INSTR_RRF_FUFF },
942 { "iextr", 0xfe, INSTR_RRF_F0FR }, 744 { "iextr", 0xfe, INSTR_RRF_F0FR },
745 { "ltdtr", 0xd6, INSTR_RRE_FF },
746 { "ltxtr", 0xde, INSTR_RRE_FF },
747 { "fidtr", 0xd7, INSTR_RRF_UUFF },
748 { "fixtr", 0xdf, INSTR_RRF_UUFF },
749 { "ldetr", 0xd4, INSTR_RRF_0UFF },
750 { "lxdtr", 0xdc, INSTR_RRF_0UFF },
751 { "ledtr", 0xd5, INSTR_RRF_UUFF },
752 { "ldxtr", 0xdd, INSTR_RRF_UUFF },
753 { "mdtr", 0xd0, INSTR_RRR_F0FF },
754 { "mxtr", 0xd8, INSTR_RRR_F0FF },
755 { "qadtr", 0xf5, INSTR_RRF_FUFF },
756 { "qaxtr", 0xfd, INSTR_RRF_FUFF },
757 { "rrdtr", 0xf7, INSTR_RRF_FFRU },
943 { "rrxtr", 0xff, INSTR_RRF_FFRU }, 758 { "rrxtr", 0xff, INSTR_RRF_FFRU },
759 { "sfasr", 0x85, INSTR_RRE_R0 },
760 { "sdtr", 0xd3, INSTR_RRR_F0FF },
761 { "sxtr", 0xdb, INSTR_RRR_F0FF },
944#endif 762#endif
945 { "lpebr", 0x00, INSTR_RRE_FF }, 763 { "lpebr", 0x00, INSTR_RRE_FF },
946 { "lnebr", 0x01, INSTR_RRE_FF }, 764 { "lnebr", 0x01, INSTR_RRE_FF },
@@ -987,10 +805,10 @@ static struct insn opcode_b3[] = {
987 { "lnxbr", 0x41, INSTR_RRE_FF }, 805 { "lnxbr", 0x41, INSTR_RRE_FF },
988 { "ltxbr", 0x42, INSTR_RRE_FF }, 806 { "ltxbr", 0x42, INSTR_RRE_FF },
989 { "lcxbr", 0x43, INSTR_RRE_FF }, 807 { "lcxbr", 0x43, INSTR_RRE_FF },
990 { { 0, LONG_INSN_LEDBRA }, 0x44, INSTR_RRF_UUFF }, 808 { "ledbr", 0x44, INSTR_RRE_FF },
991 { { 0, LONG_INSN_LDXBRA }, 0x45, INSTR_RRF_UUFF }, 809 { "ldxbr", 0x45, INSTR_RRE_FF },
992 { { 0, LONG_INSN_LEXBRA }, 0x46, INSTR_RRF_UUFF }, 810 { "lexbr", 0x46, INSTR_RRE_FF },
993 { { 0, LONG_INSN_FIXBRA }, 0x47, INSTR_RRF_UUFF }, 811 { "fixbr", 0x47, INSTR_RRF_U0FF },
994 { "kxbr", 0x48, INSTR_RRE_FF }, 812 { "kxbr", 0x48, INSTR_RRE_FF },
995 { "cxbr", 0x49, INSTR_RRE_FF }, 813 { "cxbr", 0x49, INSTR_RRE_FF },
996 { "axbr", 0x4a, INSTR_RRE_FF }, 814 { "axbr", 0x4a, INSTR_RRE_FF },
@@ -1000,24 +818,24 @@ static struct insn opcode_b3[] = {
1000 { "tbedr", 0x50, INSTR_RRF_U0FF }, 818 { "tbedr", 0x50, INSTR_RRF_U0FF },
1001 { "tbdr", 0x51, INSTR_RRF_U0FF }, 819 { "tbdr", 0x51, INSTR_RRF_U0FF },
1002 { "diebr", 0x53, INSTR_RRF_FUFF }, 820 { "diebr", 0x53, INSTR_RRF_FUFF },
1003 { { 0, LONG_INSN_FIEBRA }, 0x57, INSTR_RRF_UUFF }, 821 { "fiebr", 0x57, INSTR_RRF_U0FF },
1004 { "thder", 0x58, INSTR_RRE_FF }, 822 { "thder", 0x58, INSTR_RRE_RR },
1005 { "thdr", 0x59, INSTR_RRE_FF }, 823 { "thdr", 0x59, INSTR_RRE_RR },
1006 { "didbr", 0x5b, INSTR_RRF_FUFF }, 824 { "didbr", 0x5b, INSTR_RRF_FUFF },
1007 { { 0, LONG_INSN_FIDBRA }, 0x5f, INSTR_RRF_UUFF }, 825 { "fidbr", 0x5f, INSTR_RRF_U0FF },
1008 { "lpxr", 0x60, INSTR_RRE_FF }, 826 { "lpxr", 0x60, INSTR_RRE_FF },
1009 { "lnxr", 0x61, INSTR_RRE_FF }, 827 { "lnxr", 0x61, INSTR_RRE_FF },
1010 { "ltxr", 0x62, INSTR_RRE_FF }, 828 { "ltxr", 0x62, INSTR_RRE_FF },
1011 { "lcxr", 0x63, INSTR_RRE_FF }, 829 { "lcxr", 0x63, INSTR_RRE_FF },
1012 { "lxr", 0x65, INSTR_RRE_FF }, 830 { "lxr", 0x65, INSTR_RRE_RR },
1013 { "lexr", 0x66, INSTR_RRE_FF }, 831 { "lexr", 0x66, INSTR_RRE_FF },
1014 { "fixr", 0x67, INSTR_RRE_FF }, 832 { "fixr", 0x67, INSTR_RRF_U0FF },
1015 { "cxr", 0x69, INSTR_RRE_FF }, 833 { "cxr", 0x69, INSTR_RRE_FF },
1016 { "lzer", 0x74, INSTR_RRE_F0 }, 834 { "lzer", 0x74, INSTR_RRE_R0 },
1017 { "lzdr", 0x75, INSTR_RRE_F0 }, 835 { "lzdr", 0x75, INSTR_RRE_R0 },
1018 { "lzxr", 0x76, INSTR_RRE_F0 }, 836 { "lzxr", 0x76, INSTR_RRE_R0 },
1019 { "fier", 0x77, INSTR_RRE_FF }, 837 { "fier", 0x77, INSTR_RRF_U0FF },
1020 { "fidr", 0x7f, INSTR_RRE_FF }, 838 { "fidr", 0x7f, INSTR_RRF_U0FF },
1021 { "sfpc", 0x84, INSTR_RRE_RR_OPT }, 839 { "sfpc", 0x84, INSTR_RRE_RR_OPT },
1022 { "efpc", 0x8c, INSTR_RRE_RR_OPT }, 840 { "efpc", 0x8c, INSTR_RRE_RR_OPT },
1023 { "cefbr", 0x94, INSTR_RRE_RF }, 841 { "cefbr", 0x94, INSTR_RRE_RF },
@@ -1026,12 +844,9 @@ static struct insn opcode_b3[] = {
1026 { "cfebr", 0x98, INSTR_RRF_U0RF }, 844 { "cfebr", 0x98, INSTR_RRF_U0RF },
1027 { "cfdbr", 0x99, INSTR_RRF_U0RF }, 845 { "cfdbr", 0x99, INSTR_RRF_U0RF },
1028 { "cfxbr", 0x9a, INSTR_RRF_U0RF }, 846 { "cfxbr", 0x9a, INSTR_RRF_U0RF },
1029 { "cefr", 0xb4, INSTR_RRE_FR }, 847 { "cefr", 0xb4, INSTR_RRE_RF },
1030 { "cdfr", 0xb5, INSTR_RRE_FR }, 848 { "cdfr", 0xb5, INSTR_RRE_RF },
1031 { "cxfr", 0xb6, INSTR_RRE_FR }, 849 { "cxfr", 0xb6, INSTR_RRE_RF },
1032 { "cfer", 0xb8, INSTR_RRF_U0RF },
1033 { "cfdr", 0xb9, INSTR_RRF_U0RF },
1034 { "cfxr", 0xba, INSTR_RRF_U0RF },
1035 { "", 0, INSTR_INVALID } 850 { "", 0, INSTR_INVALID }
1036}; 851};
1037 852
@@ -1073,23 +888,7 @@ static struct insn opcode_b9[] = {
1073 { "lhr", 0x27, INSTR_RRE_RR }, 888 { "lhr", 0x27, INSTR_RRE_RR },
1074 { "cgfr", 0x30, INSTR_RRE_RR }, 889 { "cgfr", 0x30, INSTR_RRE_RR },
1075 { "clgfr", 0x31, INSTR_RRE_RR }, 890 { "clgfr", 0x31, INSTR_RRE_RR },
1076 { "cfdtr", 0x41, INSTR_RRF_UURF },
1077 { { 0, LONG_INSN_CLGDTR }, 0x42, INSTR_RRF_UURF },
1078 { { 0, LONG_INSN_CLFDTR }, 0x43, INSTR_RRF_UURF },
1079 { "bctgr", 0x46, INSTR_RRE_RR }, 891 { "bctgr", 0x46, INSTR_RRE_RR },
1080 { "cfxtr", 0x49, INSTR_RRF_UURF },
1081 { { 0, LONG_INSN_CLGXTR }, 0x4a, INSTR_RRF_UUFR },
1082 { { 0, LONG_INSN_CLFXTR }, 0x4b, INSTR_RRF_UUFR },
1083 { "cdftr", 0x51, INSTR_RRF_UUFR },
1084 { { 0, LONG_INSN_CDLGTR }, 0x52, INSTR_RRF_UUFR },
1085 { { 0, LONG_INSN_CDLFTR }, 0x53, INSTR_RRF_UUFR },
1086 { "cxftr", 0x59, INSTR_RRF_UURF },
1087 { { 0, LONG_INSN_CXLGTR }, 0x5a, INSTR_RRF_UURF },
1088 { { 0, LONG_INSN_CXLFTR }, 0x5b, INSTR_RRF_UUFR },
1089 { "cgrt", 0x60, INSTR_RRF_U0RR },
1090 { "clgrt", 0x61, INSTR_RRF_U0RR },
1091 { "crt", 0x72, INSTR_RRF_U0RR },
1092 { "clrt", 0x73, INSTR_RRF_U0RR },
1093 { "ngr", 0x80, INSTR_RRE_RR }, 892 { "ngr", 0x80, INSTR_RRE_RR },
1094 { "ogr", 0x81, INSTR_RRE_RR }, 893 { "ogr", 0x81, INSTR_RRE_RR },
1095 { "xgr", 0x82, INSTR_RRE_RR }, 894 { "xgr", 0x82, INSTR_RRE_RR },
@@ -1102,34 +901,32 @@ static struct insn opcode_b9[] = {
1102 { "slbgr", 0x89, INSTR_RRE_RR }, 901 { "slbgr", 0x89, INSTR_RRE_RR },
1103 { "cspg", 0x8a, INSTR_RRE_RR }, 902 { "cspg", 0x8a, INSTR_RRE_RR },
1104 { "idte", 0x8e, INSTR_RRF_R0RR }, 903 { "idte", 0x8e, INSTR_RRF_R0RR },
1105 { "crdte", 0x8f, INSTR_RRF_RMRR },
1106 { "llcr", 0x94, INSTR_RRE_RR }, 904 { "llcr", 0x94, INSTR_RRE_RR },
1107 { "llhr", 0x95, INSTR_RRE_RR }, 905 { "llhr", 0x95, INSTR_RRE_RR },
1108 { "esea", 0x9d, INSTR_RRE_R0 }, 906 { "esea", 0x9d, INSTR_RRE_R0 },
1109 { "ptf", 0xa2, INSTR_RRE_R0 },
1110 { "lptea", 0xaa, INSTR_RRF_RURR }, 907 { "lptea", 0xaa, INSTR_RRF_RURR },
1111 { "rrbm", 0xae, INSTR_RRE_RR },
1112 { "pfmf", 0xaf, INSTR_RRE_RR },
1113 { "cu14", 0xb0, INSTR_RRF_M0RR }, 908 { "cu14", 0xb0, INSTR_RRF_M0RR },
1114 { "cu24", 0xb1, INSTR_RRF_M0RR }, 909 { "cu24", 0xb1, INSTR_RRF_M0RR },
1115 { "cu41", 0xb2, INSTR_RRE_RR }, 910 { "cu41", 0xb2, INSTR_RRF_M0RR },
1116 { "cu42", 0xb3, INSTR_RRE_RR }, 911 { "cu42", 0xb3, INSTR_RRF_M0RR },
1117 { "trtre", 0xbd, INSTR_RRF_M0RR }, 912 { "crt", 0x72, INSTR_RRF_U0RR },
1118 { "srstu", 0xbe, INSTR_RRE_RR }, 913 { "cgrt", 0x60, INSTR_RRF_U0RR },
914 { "clrt", 0x73, INSTR_RRF_U0RR },
915 { "clgrt", 0x61, INSTR_RRF_U0RR },
916 { "ptf", 0xa2, INSTR_RRE_R0 },
917 { "pfmf", 0xaf, INSTR_RRE_RR },
1119 { "trte", 0xbf, INSTR_RRF_M0RR }, 918 { "trte", 0xbf, INSTR_RRF_M0RR },
919 { "trtre", 0xbd, INSTR_RRF_M0RR },
1120 { "ahhhr", 0xc8, INSTR_RRF_R0RR2 }, 920 { "ahhhr", 0xc8, INSTR_RRF_R0RR2 },
1121 { "shhhr", 0xc9, INSTR_RRF_R0RR2 }, 921 { "shhhr", 0xc9, INSTR_RRF_R0RR2 },
1122 { { 0, LONG_INSN_ALHHHR }, 0xca, INSTR_RRF_R0RR2 }, 922 { "alhhh", 0xca, INSTR_RRF_R0RR2 },
1123 { { 0, LONG_INSN_SLHHHR }, 0xcb, INSTR_RRF_R0RR2 }, 923 { "alhhl", 0xca, INSTR_RRF_R0RR2 },
1124 { "chhr", 0xcd, INSTR_RRE_RR }, 924 { "slhhh", 0xcb, INSTR_RRF_R0RR2 },
925 { "chhr ", 0xcd, INSTR_RRE_RR },
1125 { "clhhr", 0xcf, INSTR_RRE_RR }, 926 { "clhhr", 0xcf, INSTR_RRE_RR },
1126 { { 0, LONG_INSN_PCISTG }, 0xd0, INSTR_RRE_RR },
1127 { "pcilg", 0xd2, INSTR_RRE_RR },
1128 { "rpcit", 0xd3, INSTR_RRE_RR },
1129 { "ahhlr", 0xd8, INSTR_RRF_R0RR2 }, 927 { "ahhlr", 0xd8, INSTR_RRF_R0RR2 },
1130 { "shhlr", 0xd9, INSTR_RRF_R0RR2 }, 928 { "shhlr", 0xd9, INSTR_RRF_R0RR2 },
1131 { { 0, LONG_INSN_ALHHLR }, 0xda, INSTR_RRF_R0RR2 }, 929 { "slhhl", 0xdb, INSTR_RRF_R0RR2 },
1132 { { 0, LONG_INSN_SLHHLR }, 0xdb, INSTR_RRF_R0RR2 },
1133 { "chlr", 0xdd, INSTR_RRE_RR }, 930 { "chlr", 0xdd, INSTR_RRE_RR },
1134 { "clhlr", 0xdf, INSTR_RRE_RR }, 931 { "clhlr", 0xdf, INSTR_RRE_RR },
1135 { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR }, 932 { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR },
@@ -1157,9 +954,13 @@ static struct insn opcode_b9[] = {
1157 { "kimd", 0x3e, INSTR_RRE_RR }, 954 { "kimd", 0x3e, INSTR_RRE_RR },
1158 { "klmd", 0x3f, INSTR_RRE_RR }, 955 { "klmd", 0x3f, INSTR_RRE_RR },
1159 { "epsw", 0x8d, INSTR_RRE_RR }, 956 { "epsw", 0x8d, INSTR_RRE_RR },
957 { "trtt", 0x90, INSTR_RRE_RR },
1160 { "trtt", 0x90, INSTR_RRF_M0RR }, 958 { "trtt", 0x90, INSTR_RRF_M0RR },
959 { "trto", 0x91, INSTR_RRE_RR },
1161 { "trto", 0x91, INSTR_RRF_M0RR }, 960 { "trto", 0x91, INSTR_RRF_M0RR },
961 { "trot", 0x92, INSTR_RRE_RR },
1162 { "trot", 0x92, INSTR_RRF_M0RR }, 962 { "trot", 0x92, INSTR_RRF_M0RR },
963 { "troo", 0x93, INSTR_RRE_RR },
1163 { "troo", 0x93, INSTR_RRF_M0RR }, 964 { "troo", 0x93, INSTR_RRF_M0RR },
1164 { "mlr", 0x96, INSTR_RRE_RR }, 965 { "mlr", 0x96, INSTR_RRE_RR },
1165 { "dlr", 0x97, INSTR_RRE_RR }, 966 { "dlr", 0x97, INSTR_RRE_RR },
@@ -1190,8 +991,6 @@ static struct insn opcode_c0[] = {
1190 991
1191static struct insn opcode_c2[] = { 992static struct insn opcode_c2[] = {
1192#ifdef CONFIG_64BIT 993#ifdef CONFIG_64BIT
1193 { "msgfi", 0x00, INSTR_RIL_RI },
1194 { "msfi", 0x01, INSTR_RIL_RI },
1195 { "slgfi", 0x04, INSTR_RIL_RU }, 994 { "slgfi", 0x04, INSTR_RIL_RU },
1196 { "slfi", 0x05, INSTR_RIL_RU }, 995 { "slfi", 0x05, INSTR_RIL_RU },
1197 { "agfi", 0x08, INSTR_RIL_RI }, 996 { "agfi", 0x08, INSTR_RIL_RI },
@@ -1202,41 +1001,43 @@ static struct insn opcode_c2[] = {
1202 { "cfi", 0x0d, INSTR_RIL_RI }, 1001 { "cfi", 0x0d, INSTR_RIL_RI },
1203 { "clgfi", 0x0e, INSTR_RIL_RU }, 1002 { "clgfi", 0x0e, INSTR_RIL_RU },
1204 { "clfi", 0x0f, INSTR_RIL_RU }, 1003 { "clfi", 0x0f, INSTR_RIL_RU },
1004 { "msfi", 0x01, INSTR_RIL_RI },
1005 { "msgfi", 0x00, INSTR_RIL_RI },
1205#endif 1006#endif
1206 { "", 0, INSTR_INVALID } 1007 { "", 0, INSTR_INVALID }
1207}; 1008};
1208 1009
1209static struct insn opcode_c4[] = { 1010static struct insn opcode_c4[] = {
1210#ifdef CONFIG_64BIT 1011#ifdef CONFIG_64BIT
1211 { "llhrl", 0x02, INSTR_RIL_RP }, 1012 { "lrl", 0x0d, INSTR_RIL_RP },
1212 { "lghrl", 0x04, INSTR_RIL_RP },
1213 { "lhrl", 0x05, INSTR_RIL_RP },
1214 { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
1215 { "sthrl", 0x07, INSTR_RIL_RP },
1216 { "lgrl", 0x08, INSTR_RIL_RP }, 1013 { "lgrl", 0x08, INSTR_RIL_RP },
1217 { "stgrl", 0x0b, INSTR_RIL_RP },
1218 { "lgfrl", 0x0c, INSTR_RIL_RP }, 1014 { "lgfrl", 0x0c, INSTR_RIL_RP },
1219 { "lrl", 0x0d, INSTR_RIL_RP }, 1015 { "lhrl", 0x05, INSTR_RIL_RP },
1016 { "lghrl", 0x04, INSTR_RIL_RP },
1220 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, 1017 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
1018 { "llhrl", 0x02, INSTR_RIL_RP },
1019 { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
1221 { "strl", 0x0f, INSTR_RIL_RP }, 1020 { "strl", 0x0f, INSTR_RIL_RP },
1021 { "stgrl", 0x0b, INSTR_RIL_RP },
1022 { "sthrl", 0x07, INSTR_RIL_RP },
1222#endif 1023#endif
1223 { "", 0, INSTR_INVALID } 1024 { "", 0, INSTR_INVALID }
1224}; 1025};
1225 1026
1226static struct insn opcode_c6[] = { 1027static struct insn opcode_c6[] = {
1227#ifdef CONFIG_64BIT 1028#ifdef CONFIG_64BIT
1228 { "exrl", 0x00, INSTR_RIL_RP }, 1029 { "crl", 0x0d, INSTR_RIL_RP },
1229 { "pfdrl", 0x02, INSTR_RIL_UP },
1230 { "cghrl", 0x04, INSTR_RIL_RP },
1231 { "chrl", 0x05, INSTR_RIL_RP },
1232 { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
1233 { "clhrl", 0x07, INSTR_RIL_RP },
1234 { "cgrl", 0x08, INSTR_RIL_RP }, 1030 { "cgrl", 0x08, INSTR_RIL_RP },
1235 { "clgrl", 0x0a, INSTR_RIL_RP },
1236 { "cgfrl", 0x0c, INSTR_RIL_RP }, 1031 { "cgfrl", 0x0c, INSTR_RIL_RP },
1237 { "crl", 0x0d, INSTR_RIL_RP }, 1032 { "chrl", 0x05, INSTR_RIL_RP },
1238 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, 1033 { "cghrl", 0x04, INSTR_RIL_RP },
1239 { "clrl", 0x0f, INSTR_RIL_RP }, 1034 { "clrl", 0x0f, INSTR_RIL_RP },
1035 { "clgrl", 0x0a, INSTR_RIL_RP },
1036 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
1037 { "clhrl", 0x07, INSTR_RIL_RP },
1038 { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
1039 { "pfdrl", 0x02, INSTR_RIL_UP },
1040 { "exrl", 0x00, INSTR_RIL_RP },
1240#endif 1041#endif
1241 { "", 0, INSTR_INVALID } 1042 { "", 0, INSTR_INVALID }
1242}; 1043};
@@ -1247,7 +1048,7 @@ static struct insn opcode_c8[] = {
1247 { "ectg", 0x01, INSTR_SSF_RRDRD }, 1048 { "ectg", 0x01, INSTR_SSF_RRDRD },
1248 { "csst", 0x02, INSTR_SSF_RRDRD }, 1049 { "csst", 0x02, INSTR_SSF_RRDRD },
1249 { "lpd", 0x04, INSTR_SSF_RRDRD2 }, 1050 { "lpd", 0x04, INSTR_SSF_RRDRD2 },
1250 { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, 1051 { "lpdg ", 0x05, INSTR_SSF_RRDRD2 },
1251#endif 1052#endif
1252 { "", 0, INSTR_INVALID } 1053 { "", 0, INSTR_INVALID }
1253}; 1054};
@@ -1257,9 +1058,9 @@ static struct insn opcode_cc[] = {
1257 { "brcth", 0x06, INSTR_RIL_RP }, 1058 { "brcth", 0x06, INSTR_RIL_RP },
1258 { "aih", 0x08, INSTR_RIL_RI }, 1059 { "aih", 0x08, INSTR_RIL_RI },
1259 { "alsih", 0x0a, INSTR_RIL_RI }, 1060 { "alsih", 0x0a, INSTR_RIL_RI },
1260 { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, 1061 { "alsih", 0x0b, INSTR_RIL_RI },
1261 { "cih", 0x0d, INSTR_RIL_RI }, 1062 { "cih", 0x0d, INSTR_RIL_RI },
1262 { "clih", 0x0f, INSTR_RIL_RI }, 1063 { "clih ", 0x0f, INSTR_RIL_RI },
1263#endif 1064#endif
1264 { "", 0, INSTR_INVALID } 1065 { "", 0, INSTR_INVALID }
1265}; 1066};
@@ -1293,15 +1094,11 @@ static struct insn opcode_e3[] = {
1293 { "cg", 0x20, INSTR_RXY_RRRD }, 1094 { "cg", 0x20, INSTR_RXY_RRRD },
1294 { "clg", 0x21, INSTR_RXY_RRRD }, 1095 { "clg", 0x21, INSTR_RXY_RRRD },
1295 { "stg", 0x24, INSTR_RXY_RRRD }, 1096 { "stg", 0x24, INSTR_RXY_RRRD },
1296 { "ntstg", 0x25, INSTR_RXY_RRRD },
1297 { "cvdy", 0x26, INSTR_RXY_RRRD }, 1097 { "cvdy", 0x26, INSTR_RXY_RRRD },
1298 { "cvdg", 0x2e, INSTR_RXY_RRRD }, 1098 { "cvdg", 0x2e, INSTR_RXY_RRRD },
1299 { "strvg", 0x2f, INSTR_RXY_RRRD }, 1099 { "strvg", 0x2f, INSTR_RXY_RRRD },
1300 { "cgf", 0x30, INSTR_RXY_RRRD }, 1100 { "cgf", 0x30, INSTR_RXY_RRRD },
1301 { "clgf", 0x31, INSTR_RXY_RRRD }, 1101 { "clgf", 0x31, INSTR_RXY_RRRD },
1302 { "ltgf", 0x32, INSTR_RXY_RRRD },
1303 { "cgh", 0x34, INSTR_RXY_RRRD },
1304 { "pfd", 0x36, INSTR_RXY_URRD },
1305 { "strvh", 0x3f, INSTR_RXY_RRRD }, 1102 { "strvh", 0x3f, INSTR_RXY_RRRD },
1306 { "bctg", 0x46, INSTR_RXY_RRRD }, 1103 { "bctg", 0x46, INSTR_RXY_RRRD },
1307 { "sty", 0x50, INSTR_RXY_RRRD }, 1104 { "sty", 0x50, INSTR_RXY_RRRD },
@@ -1314,25 +1111,21 @@ static struct insn opcode_e3[] = {
1314 { "cy", 0x59, INSTR_RXY_RRRD }, 1111 { "cy", 0x59, INSTR_RXY_RRRD },
1315 { "ay", 0x5a, INSTR_RXY_RRRD }, 1112 { "ay", 0x5a, INSTR_RXY_RRRD },
1316 { "sy", 0x5b, INSTR_RXY_RRRD }, 1113 { "sy", 0x5b, INSTR_RXY_RRRD },
1317 { "mfy", 0x5c, INSTR_RXY_RRRD },
1318 { "aly", 0x5e, INSTR_RXY_RRRD }, 1114 { "aly", 0x5e, INSTR_RXY_RRRD },
1319 { "sly", 0x5f, INSTR_RXY_RRRD }, 1115 { "sly", 0x5f, INSTR_RXY_RRRD },
1320 { "sthy", 0x70, INSTR_RXY_RRRD }, 1116 { "sthy", 0x70, INSTR_RXY_RRRD },
1321 { "lay", 0x71, INSTR_RXY_RRRD }, 1117 { "lay", 0x71, INSTR_RXY_RRRD },
1322 { "stcy", 0x72, INSTR_RXY_RRRD }, 1118 { "stcy", 0x72, INSTR_RXY_RRRD },
1323 { "icy", 0x73, INSTR_RXY_RRRD }, 1119 { "icy", 0x73, INSTR_RXY_RRRD },
1324 { "laey", 0x75, INSTR_RXY_RRRD },
1325 { "lb", 0x76, INSTR_RXY_RRRD }, 1120 { "lb", 0x76, INSTR_RXY_RRRD },
1326 { "lgb", 0x77, INSTR_RXY_RRRD }, 1121 { "lgb", 0x77, INSTR_RXY_RRRD },
1327 { "lhy", 0x78, INSTR_RXY_RRRD }, 1122 { "lhy", 0x78, INSTR_RXY_RRRD },
1328 { "chy", 0x79, INSTR_RXY_RRRD }, 1123 { "chy", 0x79, INSTR_RXY_RRRD },
1329 { "ahy", 0x7a, INSTR_RXY_RRRD }, 1124 { "ahy", 0x7a, INSTR_RXY_RRRD },
1330 { "shy", 0x7b, INSTR_RXY_RRRD }, 1125 { "shy", 0x7b, INSTR_RXY_RRRD },
1331 { "mhy", 0x7c, INSTR_RXY_RRRD },
1332 { "ng", 0x80, INSTR_RXY_RRRD }, 1126 { "ng", 0x80, INSTR_RXY_RRRD },
1333 { "og", 0x81, INSTR_RXY_RRRD }, 1127 { "og", 0x81, INSTR_RXY_RRRD },
1334 { "xg", 0x82, INSTR_RXY_RRRD }, 1128 { "xg", 0x82, INSTR_RXY_RRRD },
1335 { "lgat", 0x85, INSTR_RXY_RRRD },
1336 { "mlg", 0x86, INSTR_RXY_RRRD }, 1129 { "mlg", 0x86, INSTR_RXY_RRRD },
1337 { "dlg", 0x87, INSTR_RXY_RRRD }, 1130 { "dlg", 0x87, INSTR_RXY_RRRD },
1338 { "alcg", 0x88, INSTR_RXY_RRRD }, 1131 { "alcg", 0x88, INSTR_RXY_RRRD },
@@ -1343,22 +1136,22 @@ static struct insn opcode_e3[] = {
1343 { "llgh", 0x91, INSTR_RXY_RRRD }, 1136 { "llgh", 0x91, INSTR_RXY_RRRD },
1344 { "llc", 0x94, INSTR_RXY_RRRD }, 1137 { "llc", 0x94, INSTR_RXY_RRRD },
1345 { "llh", 0x95, INSTR_RXY_RRRD }, 1138 { "llh", 0x95, INSTR_RXY_RRRD },
1346 { { 0, LONG_INSN_LLGTAT }, 0x9c, INSTR_RXY_RRRD }, 1139 { "cgh", 0x34, INSTR_RXY_RRRD },
1347 { { 0, LONG_INSN_LLGFAT }, 0x9d, INSTR_RXY_RRRD }, 1140 { "laey", 0x75, INSTR_RXY_RRRD },
1348 { "lat", 0x9f, INSTR_RXY_RRRD }, 1141 { "ltgf", 0x32, INSTR_RXY_RRRD },
1142 { "mfy", 0x5c, INSTR_RXY_RRRD },
1143 { "mhy", 0x7c, INSTR_RXY_RRRD },
1144 { "pfd", 0x36, INSTR_RXY_URRD },
1349 { "lbh", 0xc0, INSTR_RXY_RRRD }, 1145 { "lbh", 0xc0, INSTR_RXY_RRRD },
1350 { "llch", 0xc2, INSTR_RXY_RRRD }, 1146 { "llch", 0xc2, INSTR_RXY_RRRD },
1351 { "stch", 0xc3, INSTR_RXY_RRRD }, 1147 { "stch", 0xc3, INSTR_RXY_RRRD },
1352 { "lhh", 0xc4, INSTR_RXY_RRRD }, 1148 { "lhh", 0xc4, INSTR_RXY_RRRD },
1353 { "llhh", 0xc6, INSTR_RXY_RRRD }, 1149 { "llhh", 0xc6, INSTR_RXY_RRRD },
1354 { "sthh", 0xc7, INSTR_RXY_RRRD }, 1150 { "sthh", 0xc7, INSTR_RXY_RRRD },
1355 { "lfhat", 0xc8, INSTR_RXY_RRRD },
1356 { "lfh", 0xca, INSTR_RXY_RRRD }, 1151 { "lfh", 0xca, INSTR_RXY_RRRD },
1357 { "stfh", 0xcb, INSTR_RXY_RRRD }, 1152 { "stfh", 0xcb, INSTR_RXY_RRRD },
1358 { "chf", 0xcd, INSTR_RXY_RRRD }, 1153 { "chf", 0xcd, INSTR_RXY_RRRD },
1359 { "clhf", 0xcf, INSTR_RXY_RRRD }, 1154 { "clhf", 0xcf, INSTR_RXY_RRRD },
1360 { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
1361 { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
1362#endif 1155#endif
1363 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1156 { "lrv", 0x1e, INSTR_RXY_RRRD },
1364 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1157 { "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -1373,17 +1166,15 @@ static struct insn opcode_e3[] = {
1373static struct insn opcode_e5[] = { 1166static struct insn opcode_e5[] = {
1374#ifdef CONFIG_64BIT 1167#ifdef CONFIG_64BIT
1375 { "strag", 0x02, INSTR_SSE_RDRD }, 1168 { "strag", 0x02, INSTR_SSE_RDRD },
1376 { "mvhhi", 0x44, INSTR_SIL_RDI },
1377 { "mvghi", 0x48, INSTR_SIL_RDI },
1378 { "mvhi", 0x4c, INSTR_SIL_RDI },
1379 { "chhsi", 0x54, INSTR_SIL_RDI }, 1169 { "chhsi", 0x54, INSTR_SIL_RDI },
1380 { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
1381 { "cghsi", 0x58, INSTR_SIL_RDI },
1382 { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
1383 { "chsi", 0x5c, INSTR_SIL_RDI }, 1170 { "chsi", 0x5c, INSTR_SIL_RDI },
1171 { "cghsi", 0x58, INSTR_SIL_RDI },
1172 { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
1384 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, 1173 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
1385 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, 1174 { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
1386 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, 1175 { "mvhhi", 0x44, INSTR_SIL_RDI },
1176 { "mvhi", 0x4c, INSTR_SIL_RDI },
1177 { "mvghi", 0x48, INSTR_SIL_RDI },
1387#endif 1178#endif
1388 { "lasp", 0x00, INSTR_SSE_RDRD }, 1179 { "lasp", 0x00, INSTR_SSE_RDRD },
1389 { "tprot", 0x01, INSTR_SSE_RDRD }, 1180 { "tprot", 0x01, INSTR_SSE_RDRD },
@@ -1404,11 +1195,9 @@ static struct insn opcode_eb[] = {
1404 { "rllg", 0x1c, INSTR_RSY_RRRD }, 1195 { "rllg", 0x1c, INSTR_RSY_RRRD },
1405 { "clmh", 0x20, INSTR_RSY_RURD }, 1196 { "clmh", 0x20, INSTR_RSY_RURD },
1406 { "clmy", 0x21, INSTR_RSY_RURD }, 1197 { "clmy", 0x21, INSTR_RSY_RURD },
1407 { "clt", 0x23, INSTR_RSY_RURD },
1408 { "stmg", 0x24, INSTR_RSY_RRRD }, 1198 { "stmg", 0x24, INSTR_RSY_RRRD },
1409 { "stctg", 0x25, INSTR_RSY_CCRD }, 1199 { "stctg", 0x25, INSTR_RSY_CCRD },
1410 { "stmh", 0x26, INSTR_RSY_RRRD }, 1200 { "stmh", 0x26, INSTR_RSY_RRRD },
1411 { "clgt", 0x2b, INSTR_RSY_RURD },
1412 { "stcmh", 0x2c, INSTR_RSY_RURD }, 1201 { "stcmh", 0x2c, INSTR_RSY_RURD },
1413 { "stcmy", 0x2d, INSTR_RSY_RURD }, 1202 { "stcmy", 0x2d, INSTR_RSY_RURD },
1414 { "lctlg", 0x2f, INSTR_RSY_CCRD }, 1203 { "lctlg", 0x2f, INSTR_RSY_CCRD },
@@ -1417,17 +1206,13 @@ static struct insn opcode_eb[] = {
1417 { "cdsg", 0x3e, INSTR_RSY_RRRD }, 1206 { "cdsg", 0x3e, INSTR_RSY_RRRD },
1418 { "bxhg", 0x44, INSTR_RSY_RRRD }, 1207 { "bxhg", 0x44, INSTR_RSY_RRRD },
1419 { "bxleg", 0x45, INSTR_RSY_RRRD }, 1208 { "bxleg", 0x45, INSTR_RSY_RRRD },
1420 { "ecag", 0x4c, INSTR_RSY_RRRD },
1421 { "tmy", 0x51, INSTR_SIY_URD }, 1209 { "tmy", 0x51, INSTR_SIY_URD },
1422 { "mviy", 0x52, INSTR_SIY_URD }, 1210 { "mviy", 0x52, INSTR_SIY_URD },
1423 { "niy", 0x54, INSTR_SIY_URD }, 1211 { "niy", 0x54, INSTR_SIY_URD },
1424 { "cliy", 0x55, INSTR_SIY_URD }, 1212 { "cliy", 0x55, INSTR_SIY_URD },
1425 { "oiy", 0x56, INSTR_SIY_URD }, 1213 { "oiy", 0x56, INSTR_SIY_URD },
1426 { "xiy", 0x57, INSTR_SIY_URD }, 1214 { "xiy", 0x57, INSTR_SIY_URD },
1427 { "asi", 0x6a, INSTR_SIY_IRD }, 1215 { "icmh", 0x80, INSTR_RSE_RURD },
1428 { "alsi", 0x6e, INSTR_SIY_IRD },
1429 { "agsi", 0x7a, INSTR_SIY_IRD },
1430 { "algsi", 0x7e, INSTR_SIY_IRD },
1431 { "icmh", 0x80, INSTR_RSY_RURD }, 1216 { "icmh", 0x80, INSTR_RSY_RURD },
1432 { "icmy", 0x81, INSTR_RSY_RURD }, 1217 { "icmy", 0x81, INSTR_RSY_RURD },
1433 { "clclu", 0x8f, INSTR_RSY_RRRD }, 1218 { "clclu", 0x8f, INSTR_RSY_RRRD },
@@ -1436,8 +1221,11 @@ static struct insn opcode_eb[] = {
1436 { "lmy", 0x98, INSTR_RSY_RRRD }, 1221 { "lmy", 0x98, INSTR_RSY_RRRD },
1437 { "lamy", 0x9a, INSTR_RSY_AARD }, 1222 { "lamy", 0x9a, INSTR_RSY_AARD },
1438 { "stamy", 0x9b, INSTR_RSY_AARD }, 1223 { "stamy", 0x9b, INSTR_RSY_AARD },
1439 { { 0, LONG_INSN_PCISTB }, 0xd0, INSTR_RSY_RRRD }, 1224 { "asi", 0x6a, INSTR_SIY_IRD },
1440 { "sic", 0xd1, INSTR_RSY_RRRD }, 1225 { "agsi", 0x7a, INSTR_SIY_IRD },
1226 { "alsi", 0x6e, INSTR_SIY_IRD },
1227 { "algsi", 0x7e, INSTR_SIY_IRD },
1228 { "ecag", 0x4c, INSTR_RSY_RRRD },
1441 { "srak", 0xdc, INSTR_RSY_RRRD }, 1229 { "srak", 0xdc, INSTR_RSY_RRRD },
1442 { "slak", 0xdd, INSTR_RSY_RRRD }, 1230 { "slak", 0xdd, INSTR_RSY_RRRD },
1443 { "srlk", 0xde, INSTR_RSY_RRRD }, 1231 { "srlk", 0xde, INSTR_RSY_RRRD },
@@ -1456,9 +1244,6 @@ static struct insn opcode_eb[] = {
1456 { "lax", 0xf7, INSTR_RSY_RRRD }, 1244 { "lax", 0xf7, INSTR_RSY_RRRD },
1457 { "laa", 0xf8, INSTR_RSY_RRRD }, 1245 { "laa", 0xf8, INSTR_RSY_RRRD },
1458 { "laal", 0xfa, INSTR_RSY_RRRD }, 1246 { "laal", 0xfa, INSTR_RSY_RRRD },
1459 { "lric", 0x60, INSTR_RSY_RDRM },
1460 { "stric", 0x61, INSTR_RSY_RDRM },
1461 { "mric", 0x62, INSTR_RSY_RDRM },
1462#endif 1247#endif
1463 { "rll", 0x1d, INSTR_RSY_RRRD }, 1248 { "rll", 0x1d, INSTR_RSY_RRRD },
1464 { "mvclu", 0x8e, INSTR_RSY_RRRD }, 1249 { "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -1470,37 +1255,36 @@ static struct insn opcode_ec[] = {
1470#ifdef CONFIG_64BIT 1255#ifdef CONFIG_64BIT
1471 { "brxhg", 0x44, INSTR_RIE_RRP }, 1256 { "brxhg", 0x44, INSTR_RIE_RRP },
1472 { "brxlg", 0x45, INSTR_RIE_RRP }, 1257 { "brxlg", 0x45, INSTR_RIE_RRP },
1473 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, 1258 { "crb", 0xf6, INSTR_RRS_RRRDU },
1474 { "rnsbg", 0x54, INSTR_RIE_RRUUU }, 1259 { "cgrb", 0xe4, INSTR_RRS_RRRDU },
1475 { "risbg", 0x55, INSTR_RIE_RRUUU }, 1260 { "crj", 0x76, INSTR_RIE_RRPU },
1476 { "rosbg", 0x56, INSTR_RIE_RRUUU },
1477 { "rxsbg", 0x57, INSTR_RIE_RRUUU },
1478 { { 0, LONG_INSN_RISBGN }, 0x59, INSTR_RIE_RRUUU },
1479 { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
1480 { "cgrj", 0x64, INSTR_RIE_RRPU }, 1261 { "cgrj", 0x64, INSTR_RIE_RRPU },
1481 { "clgrj", 0x65, INSTR_RIE_RRPU }, 1262 { "cib", 0xfe, INSTR_RIS_RURDI },
1482 { "cgit", 0x70, INSTR_RIE_R0IU }, 1263 { "cgib", 0xfc, INSTR_RIS_RURDI },
1483 { "clgit", 0x71, INSTR_RIE_R0UU }, 1264 { "cij", 0x7e, INSTR_RIE_RUPI },
1265 { "cgij", 0x7c, INSTR_RIE_RUPI },
1484 { "cit", 0x72, INSTR_RIE_R0IU }, 1266 { "cit", 0x72, INSTR_RIE_R0IU },
1485 { "clfit", 0x73, INSTR_RIE_R0UU }, 1267 { "cgit", 0x70, INSTR_RIE_R0IU },
1486 { "crj", 0x76, INSTR_RIE_RRPU }, 1268 { "clrb", 0xf7, INSTR_RRS_RRRDU },
1269 { "clgrb", 0xe5, INSTR_RRS_RRRDU },
1487 { "clrj", 0x77, INSTR_RIE_RRPU }, 1270 { "clrj", 0x77, INSTR_RIE_RRPU },
1488 { "cgij", 0x7c, INSTR_RIE_RUPI }, 1271 { "clgrj", 0x65, INSTR_RIE_RRPU },
1489 { "clgij", 0x7d, INSTR_RIE_RUPU }, 1272 { "clib", 0xff, INSTR_RIS_RURDU },
1490 { "cij", 0x7e, INSTR_RIE_RUPI }, 1273 { "clgib", 0xfd, INSTR_RIS_RURDU },
1491 { "clij", 0x7f, INSTR_RIE_RUPU }, 1274 { "clij", 0x7f, INSTR_RIE_RUPU },
1275 { "clgij", 0x7d, INSTR_RIE_RUPU },
1276 { "clfit", 0x73, INSTR_RIE_R0UU },
1277 { "clgit", 0x71, INSTR_RIE_R0UU },
1278 { "rnsbg", 0x54, INSTR_RIE_RRUUU },
1279 { "rxsbg", 0x57, INSTR_RIE_RRUUU },
1280 { "rosbg", 0x56, INSTR_RIE_RRUUU },
1281 { "risbg", 0x55, INSTR_RIE_RRUUU },
1282 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
1283 { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
1492 { "ahik", 0xd8, INSTR_RIE_RRI0 }, 1284 { "ahik", 0xd8, INSTR_RIE_RRI0 },
1493 { "aghik", 0xd9, INSTR_RIE_RRI0 }, 1285 { "aghik", 0xd9, INSTR_RIE_RRI0 },
1494 { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 }, 1286 { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 },
1495 { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 }, 1287 { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 },
1496 { "cgrb", 0xe4, INSTR_RRS_RRRDU },
1497 { "clgrb", 0xe5, INSTR_RRS_RRRDU },
1498 { "crb", 0xf6, INSTR_RRS_RRRDU },
1499 { "clrb", 0xf7, INSTR_RRS_RRRDU },
1500 { "cgib", 0xfc, INSTR_RIS_RURDI },
1501 { "clgib", 0xfd, INSTR_RIS_RURDU },
1502 { "cib", 0xfe, INSTR_RIS_RURDI },
1503 { "clib", 0xff, INSTR_RIS_RURDU },
1504#endif 1288#endif
1505 { "", 0, INSTR_INVALID } 1289 { "", 0, INSTR_INVALID }
1506}; 1290};
@@ -1513,24 +1297,20 @@ static struct insn opcode_ed[] = {
1513 { "my", 0x3b, INSTR_RXF_FRRDF }, 1297 { "my", 0x3b, INSTR_RXF_FRRDF },
1514 { "mayh", 0x3c, INSTR_RXF_FRRDF }, 1298 { "mayh", 0x3c, INSTR_RXF_FRRDF },
1515 { "myh", 0x3d, INSTR_RXF_FRRDF }, 1299 { "myh", 0x3d, INSTR_RXF_FRRDF },
1300 { "ley", 0x64, INSTR_RXY_FRRD },
1301 { "ldy", 0x65, INSTR_RXY_FRRD },
1302 { "stey", 0x66, INSTR_RXY_FRRD },
1303 { "stdy", 0x67, INSTR_RXY_FRRD },
1516 { "sldt", 0x40, INSTR_RXF_FRRDF }, 1304 { "sldt", 0x40, INSTR_RXF_FRRDF },
1517 { "srdt", 0x41, INSTR_RXF_FRRDF },
1518 { "slxt", 0x48, INSTR_RXF_FRRDF }, 1305 { "slxt", 0x48, INSTR_RXF_FRRDF },
1306 { "srdt", 0x41, INSTR_RXF_FRRDF },
1519 { "srxt", 0x49, INSTR_RXF_FRRDF }, 1307 { "srxt", 0x49, INSTR_RXF_FRRDF },
1520 { "tdcet", 0x50, INSTR_RXE_FRRD }, 1308 { "tdcet", 0x50, INSTR_RXE_FRRD },
1521 { "tdget", 0x51, INSTR_RXE_FRRD },
1522 { "tdcdt", 0x54, INSTR_RXE_FRRD }, 1309 { "tdcdt", 0x54, INSTR_RXE_FRRD },
1523 { "tdgdt", 0x55, INSTR_RXE_FRRD },
1524 { "tdcxt", 0x58, INSTR_RXE_FRRD }, 1310 { "tdcxt", 0x58, INSTR_RXE_FRRD },
1311 { "tdget", 0x51, INSTR_RXE_FRRD },
1312 { "tdgdt", 0x55, INSTR_RXE_FRRD },
1525 { "tdgxt", 0x59, INSTR_RXE_FRRD }, 1313 { "tdgxt", 0x59, INSTR_RXE_FRRD },
1526 { "ley", 0x64, INSTR_RXY_FRRD },
1527 { "ldy", 0x65, INSTR_RXY_FRRD },
1528 { "stey", 0x66, INSTR_RXY_FRRD },
1529 { "stdy", 0x67, INSTR_RXY_FRRD },
1530 { "czdt", 0xa8, INSTR_RSL_LRDFU },
1531 { "czxt", 0xa9, INSTR_RSL_LRDFU },
1532 { "cdzt", 0xaa, INSTR_RSL_LRDFU },
1533 { "cxzt", 0xab, INSTR_RSL_LRDFU },
1534#endif 1314#endif
1535 { "ldeb", 0x04, INSTR_RXE_FRRD }, 1315 { "ldeb", 0x04, INSTR_RXE_FRRD },
1536 { "lxdb", 0x05, INSTR_RXE_FRRD }, 1316 { "lxdb", 0x05, INSTR_RXE_FRRD },
@@ -1630,9 +1410,6 @@ static struct insn *find_insn(unsigned char *code)
1630 case 0xa7: 1410 case 0xa7:
1631 table = opcode_a7; 1411 table = opcode_a7;
1632 break; 1412 break;
1633 case 0xaa:
1634 table = opcode_aa;
1635 break;
1636 case 0xb2: 1413 case 0xb2:
1637 table = opcode_b2; 1414 table = opcode_b2;
1638 break; 1415 break;
@@ -1693,33 +1470,6 @@ static struct insn *find_insn(unsigned char *code)
1693 return NULL; 1470 return NULL;
1694} 1471}
1695 1472
1696/**
1697 * insn_to_mnemonic - decode an s390 instruction
1698 * @instruction: instruction to decode
1699 * @buf: buffer to fill with mnemonic
1700 *
1701 * Decode the instruction at @instruction and store the corresponding
1702 * mnemonic into @buf.
1703 * @buf is left unchanged if the instruction could not be decoded.
1704 * Returns:
1705 * %0 on success, %-ENOENT if the instruction was not found.
1706 */
1707int insn_to_mnemonic(unsigned char *instruction, char buf[8])
1708{
1709 struct insn *insn;
1710
1711 insn = find_insn(instruction);
1712 if (!insn)
1713 return -ENOENT;
1714 if (insn->name[0] == '\0')
1715 snprintf(buf, sizeof(buf), "%s",
1716 long_insn_name[(int) insn->name[1]]);
1717 else
1718 snprintf(buf, sizeof(buf), "%.5s", insn->name);
1719 return 0;
1720}
1721EXPORT_SYMBOL_GPL(insn_to_mnemonic);
1722
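The helper removed above also documents the tables' name encoding: entries written as { { 0, LONG_INSN_XXX }, ... } store a NUL in the first byte of the name field and an index into long_insn_name[] in the second, because mnemonics longer than the inline field (printed with "%.5s") cannot be stored directly. Note, in passing, that sizeof(buf) on the char buf[8] parameter evaluates to the size of a pointer, not 8, so the snprintf bound in the removed code is tighter than intended. A sketch of the decoding step, with hypothetical names, indices and field width:

static const char * const long_insn_name[] = {
        "alghsik",      /* index 0 -- ordering is hypothetical */
        "popcnt",       /* index 1 */
};

struct insn {
        char name[6];           /* short mnemonic, or '\0' + long-name index */
        unsigned char opfrag;
};

/* Resolve an entry's printable name. */
static const char *insn_name(const struct insn *insn)
{
        if (insn->name[0] == '\0')      /* long-name escape */
                return long_insn_name[(int) insn->name[1]];
        return insn->name;
}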
1723static int print_insn(char *buffer, unsigned char *code, unsigned long addr) 1473static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1724{ 1474{
1725 struct insn *insn; 1475 struct insn *insn;
@@ -1783,7 +1533,7 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1783 1533
1784void show_code(struct pt_regs *regs) 1534void show_code(struct pt_regs *regs)
1785{ 1535{
1786 char *mode = user_mode(regs) ? "User" : "Krnl"; 1536 char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
1787 unsigned char code[64]; 1537 unsigned char code[64];
1788 char buffer[64], *ptr; 1538 char buffer[64], *ptr;
1789 mm_segment_t old_fs; 1539 mm_segment_t old_fs;
@@ -1792,7 +1542,7 @@ void show_code(struct pt_regs *regs)
1792 1542
1793 /* Get a snapshot of the 64 bytes surrounding the fault address. */ 1543 /* Get a snapshot of the 64 bytes surrounding the fault address. */
1794 old_fs = get_fs(); 1544 old_fs = get_fs();
1795 set_fs(user_mode(regs) ? USER_DS : KERNEL_DS); 1545 set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS);
1796 for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { 1546 for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
1797 addr = regs->psw.addr - 34 + start; 1547 addr = regs->psw.addr - 34 + start;
1798 if (__copy_from_user(code + start - 2, 1548 if (__copy_from_user(code + start - 2,
@@ -1828,15 +1578,10 @@ void show_code(struct pt_regs *regs)
1828 ptr += sprintf(ptr, "%s Code:", mode); 1578 ptr += sprintf(ptr, "%s Code:", mode);
1829 hops = 0; 1579 hops = 0;
1830 while (start < end && hops < 8) { 1580 while (start < end && hops < 8) {
1831 opsize = insn_length(code[start]); 1581 *ptr++ = (start == 32) ? '>' : ' ';
1832 if (start + opsize == 32)
1833 *ptr++ = '#';
1834 else if (start == 32)
1835 *ptr++ = '>';
1836 else
1837 *ptr++ = ' ';
1838 addr = regs->psw.addr + start - 32; 1582 addr = regs->psw.addr + start - 32;
1839 ptr += sprintf(ptr, ONELONG, addr); 1583 ptr += sprintf(ptr, ONELONG, addr);
1584 opsize = insn_length(code[start]);
1840 if (start + opsize >= end) 1585 if (start + opsize >= end)
1841 break; 1586 break;
1842 for (i = 0; i < opsize; i++) 1587 for (i = 0; i < opsize; i++)
@@ -1853,26 +1598,3 @@ void show_code(struct pt_regs *regs)
1853 } 1598 }
1854 printk("\n"); 1599 printk("\n");
1855} 1600}
1856
1857void print_fn_code(unsigned char *code, unsigned long len)
1858{
1859 char buffer[64], *ptr;
1860 int opsize, i;
1861
1862 while (len) {
1863 ptr = buffer;
1864 opsize = insn_length(*code);
1865 ptr += sprintf(ptr, "%p: ", code);
1866 for (i = 0; i < opsize; i++)
1867 ptr += sprintf(ptr, "%02x", code[i]);
1868 *ptr++ = '\t';
1869 if (i < 4)
1870 *ptr++ = '\t';
1871 ptr += print_insn(ptr, code, (unsigned long) code);
1872 *ptr++ = '\n';
1873 *ptr++ = 0;
1874 printk(buffer);
1875 code += opsize;
1876 len -= opsize;
1877 }
1878}
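Both the show_code() loop above and the removed print_fn_code() step through the byte stream with insn_length(). On s390 the length is encoded in the two most significant bits of the first opcode byte: 00 means 2 bytes, 01 and 10 mean 4, and 11 means 6. One compact way to compute it — a sketch consistent with that encoding, not necessarily the kernel's exact helper:

/* 0x00-0x3f -> 2, 0x40-0xbf -> 4, 0xc0-0xff -> 6 */
static inline int insn_length(unsigned char code)
{
        return ((((int) code + 64) >> 7) + 1) << 1;
}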
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1f0eee9e7da..f297456dba7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * arch/s390/kernel/early.c
3 *
2 * Copyright IBM Corp. 2007, 2009 4 * Copyright IBM Corp. 2007, 2009
3 * Author(s): Hongjie Yang <hongjie@us.ibm.com>, 5 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
4 * Heiko Carstens <heiko.carstens@de.ibm.com> 6 * Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -27,7 +29,6 @@
27#include <asm/sysinfo.h> 29#include <asm/sysinfo.h>
28#include <asm/cpcmd.h> 30#include <asm/cpcmd.h>
29#include <asm/sclp.h> 31#include <asm/sclp.h>
30#include <asm/facility.h>
31#include "entry.h" 32#include "entry.h"
32 33
33/* 34/*
@@ -215,61 +216,43 @@ static noinline __init void init_kernel_storage_key(void)
215 PAGE_DEFAULT_KEY, 0); 216 PAGE_DEFAULT_KEY, 0);
216} 217}
217 218
218static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE); 219static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
219 220
220static noinline __init void detect_machine_type(void) 221static noinline __init void detect_machine_type(void)
221{ 222{
222 struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
223
224 /* Check current-configuration-level */ 223 /* Check current-configuration-level */
225 if (stsi(NULL, 0, 0, 0) <= 2) { 224 if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) {
226 S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR; 225 S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
227 return; 226 return;
228 } 227 }
229 /* Get virtual-machine cpu information. */ 228 /* Get virtual-machine cpu information. */
230 if (stsi(vmms, 3, 2, 2) || !vmms->count) 229 if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count)
231 return; 230 return;
232 231
233 /* Running under KVM? If not we assume z/VM */ 232 /* Running under KVM? If not we assume z/VM */
234 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) 233 if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
235 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; 234 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
236 else 235 else
237 S390_lowcore.machine_flags |= MACHINE_FLAG_VM; 236 S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
238} 237}
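The comparison against "\xd2\xe5\xd4" works because stsi returns the control-program identifier in EBCDIC, and those three bytes spell "KVM" (K = 0xd2, V = 0xe5, M = 0xd4). The same test, spelled out as a self-contained sketch (the helper name is invented for illustration):

#include <string.h>

static int cpi_is_kvm(const unsigned char *cpi)
{
        /* "KVM" in EBCDIC */
        static const unsigned char kvm_ebcdic[3] = { 0xd2, 0xe5, 0xd4 };

        return memcmp(cpi, kvm_ebcdic, sizeof(kvm_ebcdic)) == 0;
}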
239 238
240static __init void setup_topology(void) 239static __init void early_pgm_check_handler(void)
241{ 240{
242#ifdef CONFIG_64BIT
243 int max_mnest;
244
245 if (!test_facility(11))
246 return;
247 S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
248 for (max_mnest = 6; max_mnest > 1; max_mnest--) {
249 if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
250 break;
251 }
252 topology_max_mnest = max_mnest;
253#endif
254}
255
256static void early_pgm_check_handler(void)
257{
258 const struct exception_table_entry *fixup;
259 unsigned long addr; 241 unsigned long addr;
242 const struct exception_table_entry *fixup;
260 243
261 addr = S390_lowcore.program_old_psw.addr; 244 addr = S390_lowcore.program_old_psw.addr;
262 fixup = search_exception_tables(addr & PSW_ADDR_INSN); 245 fixup = search_exception_tables(addr & PSW_ADDR_INSN);
263 if (!fixup) 246 if (!fixup)
264 disabled_wait(0); 247 disabled_wait(0);
265 S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE; 248 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
266} 249}
267 250
268static noinline __init void setup_lowcore_early(void) 251static noinline __init void setup_lowcore_early(void)
269{ 252{
270 psw_t psw; 253 psw_t psw;
271 254
272 psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; 255 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
273 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; 256 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
274 S390_lowcore.external_new_psw = psw; 257 S390_lowcore.external_new_psw = psw;
275 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; 258 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
@@ -279,8 +262,35 @@ static noinline __init void setup_lowcore_early(void)
279 262
280static noinline __init void setup_facility_list(void) 263static noinline __init void setup_facility_list(void)
281{ 264{
282 stfle(S390_lowcore.stfle_fac_list, 265 unsigned long nr;
283 ARRAY_SIZE(S390_lowcore.stfle_fac_list)); 266
267 S390_lowcore.stfl_fac_list = 0;
268 asm volatile(
269 " .insn s,0xb2b10000,0(0)\n" /* stfl */
270 "0:\n"
271 EX_TABLE(0b,0b) : "=m" (S390_lowcore.stfl_fac_list));
272 memcpy(&S390_lowcore.stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
273 nr = 4; /* # bytes stored by stfl */
274 if (test_facility(7)) {
275 /* More facility bits available with stfle */
276 register unsigned long reg0 asm("0") = MAX_FACILITY_BIT/64 - 1;
277 asm volatile(".insn s,0xb2b00000,%0" /* stfle */
278 : "=m" (S390_lowcore.stfle_fac_list), "+d" (reg0)
279 : : "cc");
280 nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
281 }
282 memset((char *) S390_lowcore.stfle_fac_list + nr, 0,
283 MAX_FACILITY_BIT/8 - nr);
284}
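setup_facility_list() above fills stfle_fac_list with the stfl word and, when facility 7 (the stfle facility) is installed, with the full stfle output; the test_facility() calls elsewhere in this file then probe single bits of that list. Facility bits count from the leftmost bit of the list, so bit N lives in byte N/8 at big-endian bit position N%8. A byte-wise sketch of such a test — illustrative, not the kernel's exact implementation:

static inline int facility_bit_set(const unsigned char *fac_list, int nr)
{
        /* bit 0 is the most significant bit of fac_list[0] */
        return (fac_list[nr >> 3] >> (7 - (nr & 7))) & 1;
}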
285
286static noinline __init void setup_hpage(void)
287{
288#ifndef CONFIG_DEBUG_PAGEALLOC
289 if (!test_facility(2) || !test_facility(8))
290 return;
291 S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
292 __ctl_set_bit(0, 23);
293#endif
284} 294}
285 295
286static __init void detect_mvpg(void) 296static __init void detect_mvpg(void)
@@ -370,22 +380,16 @@ static __init void detect_diag44(void)
370static __init void detect_machine_facilities(void) 380static __init void detect_machine_facilities(void)
371{ 381{
372#ifdef CONFIG_64BIT 382#ifdef CONFIG_64BIT
373 if (test_facility(8)) {
374 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
375 __ctl_set_bit(0, 23);
376 }
377 if (test_facility(78))
378 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
379 if (test_facility(3)) 383 if (test_facility(3))
380 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; 384 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
385 if (test_facility(8))
386 S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
387 if (test_facility(11))
388 S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
381 if (test_facility(27)) 389 if (test_facility(27))
382 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; 390 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
383 if (test_facility(40)) 391 if (test_facility(40))
384 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; 392 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
385 if (test_facility(50) && test_facility(73))
386 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
387 if (test_facility(66))
388 S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
389#endif 393#endif
390} 394}
391 395
@@ -428,22 +432,18 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
428 } 432 }
429} 433}
430 434
431static inline int has_ebcdic_char(const char *str) 435static void __init setup_boot_command_line(void)
432{ 436{
433 int i; 437 int i;
434 438
435 for (i = 0; str[i]; i++) 439 /* convert arch command line to ascii */
436 if (str[i] & 0x80) 440 for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++)
437 return 1; 441 if (COMMAND_LINE[i] & 0x80)
438 return 0; 442 break;
439} 443 if (i < ARCH_COMMAND_LINE_SIZE)
440
441static void __init setup_boot_command_line(void)
442{
443 COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
444 /* convert arch command line to ascii if necessary */
445 if (has_ebcdic_char(COMMAND_LINE))
446 EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); 444 EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
445 COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0;
446
447 /* copy arch command line */ 447 /* copy arch command line */
448 strlcpy(boot_command_line, strstrip(COMMAND_LINE), 448 strlcpy(boot_command_line, strstrip(COMMAND_LINE),
449 ARCH_COMMAND_LINE_SIZE); 449 ARCH_COMMAND_LINE_SIZE);
@@ -455,6 +455,7 @@ static void __init setup_boot_command_line(void)
455 append_to_cmdline(append_ipl_scpdata); 455 append_to_cmdline(append_ipl_scpdata);
456} 456}
457 457
458
458/* 459/*
459 * Save ipl parameters, clear bss memory, initialize storage keys 460 * Save ipl parameters, clear bss memory, initialize storage keys
460 * and create a kernel NSS at startup if the SAVESYS= parm is defined 461 * and create a kernel NSS at startup if the SAVESYS= parm is defined
@@ -468,6 +469,7 @@ void __init startup_init(void)
468 init_kernel_storage_key(); 469 init_kernel_storage_key();
469 lockdep_init(); 470 lockdep_init();
470 lockdep_off(); 471 lockdep_off();
472 sort_main_extable();
471 setup_lowcore_early(); 473 setup_lowcore_early();
472 setup_facility_list(); 474 setup_facility_list();
473 detect_machine_type(); 475 detect_machine_type();
@@ -480,7 +482,7 @@ void __init startup_init(void)
480 detect_diag9c(); 482 detect_diag9c();
481 detect_diag44(); 483 detect_diag44();
482 detect_machine_facilities(); 484 detect_machine_facilities();
483 setup_topology(); 485 setup_hpage();
484 sclp_facilities_detect(); 486 sclp_facilities_detect();
485 detect_memory_layout(memory_chunk); 487 detect_memory_layout(memory_chunk);
486#ifdef CONFIG_DYNAMIC_FTRACE 488#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index b971c6be629..cc0dc609d73 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * arch/s390/kernel/ebcdic.c
2 * EBCDIC -> ASCII, ASCII -> EBCDIC, 3 * EBCDIC -> ASCII, ASCII -> EBCDIC,
3 * upper to lower case (EBCDIC) conversion tables. 4 * upper to lower case (EBCDIC) conversion tables.
4 * 5 *
5 * S390 version 6 * S390 version
6 * Copyright IBM Corp. 1999 7 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * Martin Peschke <peschke@fh-brandenburg.de> 9 * Martin Peschke <peschke@fh-brandenburg.de>
9 */ 10 */
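The conversions this file advertises — used, for example, by the EBCASC() call in early.c above — are plain 256-entry byte maps applied one character at a time. A minimal sketch of such a table-driven translate (table contents elided; a real table defines all 256 entries):

static void translate(unsigned char *buf, unsigned int len,
                      const unsigned char table[256])
{
        while (len--) {
                *buf = table[*buf];
                buf++;
        }
}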
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 55022852326..02ec8fe7d03 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1,7 +1,8 @@
1/* 1/*
2 * arch/s390/kernel/entry.S
2 * S390 low-level entry points. 3 * S390 low-level entry points.
3 * 4 *
4 * Copyright IBM Corp. 1999, 2012 5 * Copyright (C) IBM Corp. 1999,2006
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Hartmut Penner (hp@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com),
7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -17,122 +18,174 @@
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
18#include <asm/unistd.h> 19#include <asm/unistd.h>
19#include <asm/page.h> 20#include <asm/page.h>
20#include <asm/sigp.h> 21
21 22/*
22__PT_R0 = __PT_GPRS 23 * Stack layout for the system_call stack entry.
23__PT_R1 = __PT_GPRS + 4 24 * The first few entries are identical to the user_regs_struct.
24__PT_R2 = __PT_GPRS + 8 25 */
25__PT_R3 = __PT_GPRS + 12 26SP_PTREGS = STACK_FRAME_OVERHEAD
26__PT_R4 = __PT_GPRS + 16 27SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
27__PT_R5 = __PT_GPRS + 20 28SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
28__PT_R6 = __PT_GPRS + 24 29SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
29__PT_R7 = __PT_GPRS + 28 30SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
30__PT_R8 = __PT_GPRS + 32 31SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
31__PT_R9 = __PT_GPRS + 36 32SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
32__PT_R10 = __PT_GPRS + 40 33SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
33__PT_R11 = __PT_GPRS + 44 34SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
34__PT_R12 = __PT_GPRS + 48 35SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
35__PT_R13 = __PT_GPRS + 52 36SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
36__PT_R14 = __PT_GPRS + 56 37SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
37__PT_R15 = __PT_GPRS + 60 38SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
39SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
40SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
41SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
42SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
43SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
44SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
45SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
46SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
47SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
48SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
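Old (__PT_Rn) and new (SP_Rn) layouts alike keep the sixteen saved 32-bit GPRs at a fixed 4-byte stride; the SP_* constants only fold STACK_FRAME_OVERHEAD into the offset. Written out as a sketch, with a hypothetical base value:

enum { PT_GPRS = 8 };                   /* hypothetical __PT_GPRS value */

static inline int pt_gpr_offset(int n)  /* n = 0..15 */
{
        return PT_GPRS + 4 * n;         /* r13 -> PT_GPRS + 52, etc. */
}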
38 49
39_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 50_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
40 _TIF_MCCK_PENDING | _TIF_PER_TRAP ) 51 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
41_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 52_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
42 _TIF_MCCK_PENDING) 53 _TIF_MCCK_PENDING)
43_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 54_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
44 _TIF_SYSCALL_TRACEPOINT) 55 _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
45 56
46STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 57STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
47STACK_SIZE = 1 << STACK_SHIFT 58STACK_SIZE = 1 << STACK_SHIFT
48 59
49#define BASED(name) name-system_call(%r13) 60#define BASED(name) name-system_call(%r13)
50 61
51 .macro TRACE_IRQS_ON
52#ifdef CONFIG_TRACE_IRQFLAGS 62#ifdef CONFIG_TRACE_IRQFLAGS
63 .macro TRACE_IRQS_ON
53 basr %r2,%r0 64 basr %r2,%r0
54 l %r1,BASED(.Lhardirqs_on) 65 l %r1,BASED(.Ltrace_irq_on_caller)
55 basr %r14,%r1 # call trace_hardirqs_on_caller 66 basr %r14,%r1
56#endif
57 .endm 67 .endm
58 68
59 .macro TRACE_IRQS_OFF 69 .macro TRACE_IRQS_OFF
60#ifdef CONFIG_TRACE_IRQFLAGS
61 basr %r2,%r0 70 basr %r2,%r0
62 l %r1,BASED(.Lhardirqs_off) 71 l %r1,BASED(.Ltrace_irq_off_caller)
63 basr %r14,%r1 # call trace_hardirqs_off_caller 72 basr %r14,%r1
64#endif
65 .endm 73 .endm
74#else
75#define TRACE_IRQS_ON
76#define TRACE_IRQS_OFF
77#endif
66 78
67 .macro LOCKDEP_SYS_EXIT
68#ifdef CONFIG_LOCKDEP 79#ifdef CONFIG_LOCKDEP
69 tm __PT_PSW+1(%r11),0x01 # returning to user ? 80 .macro LOCKDEP_SYS_EXIT
70 jz .+10 81 tm SP_PSW+1(%r15),0x01 # returning to user ?
82 jz 0f
71 l %r1,BASED(.Llockdep_sys_exit) 83 l %r1,BASED(.Llockdep_sys_exit)
72 basr %r14,%r1 # call lockdep_sys_exit 84 basr %r14,%r1
850:
86 .endm
87#else
88#define LOCKDEP_SYS_EXIT
73#endif 89#endif
90
91/*
92 * Register usage in interrupt handlers:
93 * R9 - pointer to current task structure
94 * R13 - pointer to literal pool
95 * R14 - return register for function calls
96 * R15 - kernel stack pointer
97 */
98
99 .macro UPDATE_VTIME lc_from,lc_to,lc_sum
100 lm %r10,%r11,\lc_from
101 sl %r10,\lc_to
102 sl %r11,\lc_to+4
103 bc 3,BASED(0f)
104 sl %r10,BASED(.Lc_1)
1050: al %r10,\lc_sum
106 al %r11,\lc_sum+4
107 bc 12,BASED(1f)
108 al %r10,BASED(.Lc_1)
1091: stm %r10,%r11,\lc_sum
110 .endm
111
112 .macro SAVE_ALL_SVC psworg,savearea
113 stm %r12,%r15,\savearea
114 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
115 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
116 s %r15,BASED(.Lc_spsize) # make room for registers & psw
117 .endm
118
119 .macro SAVE_ALL_BASE savearea
120 stm %r12,%r15,\savearea
121 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
74 .endm 122 .endm
75 123
76 .macro CHECK_STACK stacksize,savearea 124 .macro SAVE_ALL_PGM psworg,savearea
125 tm \psworg+1,0x01 # test problem state bit
77#ifdef CONFIG_CHECK_STACK 126#ifdef CONFIG_CHECK_STACK
78 tml %r15,\stacksize - CONFIG_STACK_GUARD 127 bnz BASED(1f)
79 la %r14,\savearea 128 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
80 jz stack_overflow 129 bnz BASED(2f)
130 la %r12,\psworg
131 b BASED(stack_overflow)
132#else
133 bz BASED(2f)
81#endif 134#endif
1351: l %r15,__LC_KERNEL_STACK # problem state -> load ksp
1362: s %r15,BASED(.Lc_spsize) # make room for registers & psw
82 .endm 137 .endm
83 138
84 .macro SWITCH_ASYNC savearea,stack,shift 139 .macro SAVE_ALL_ASYNC psworg,savearea
85 tmh %r8,0x0001 # interrupting from user ? 140 stm %r12,%r15,\savearea
86 jnz 1f 141 l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
87 lr %r14,%r9 142 la %r12,\psworg
88 sl %r14,BASED(.Lcritical_start) 143 tm \psworg+1,0x01 # test problem state bit
89 cl %r14,BASED(.Lcritical_length) 144 bnz BASED(1f) # from user -> load async stack
90 jhe 0f 145 clc \psworg+4(4),BASED(.Lcritical_end)
91 la %r11,\savearea # inside critical section, do cleanup 146 bhe BASED(0f)
92 bras %r14,cleanup_critical 147 clc \psworg+4(4),BASED(.Lcritical_start)
93 tmh %r8,0x0001 # retest problem state after cleanup 148 bl BASED(0f)
94 jnz 1f 149 l %r14,BASED(.Lcleanup_critical)
950: l %r14,\stack # are we already on the target stack? 150 basr %r14,%r14
151 tm 1(%r12),0x01 # retest problem state after cleanup
152 bnz BASED(1f)
1530: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
96 slr %r14,%r15 154 slr %r14,%r15
97 sra %r14,\shift 155 sra %r14,STACK_SHIFT
98 jnz 1f 156#ifdef CONFIG_CHECK_STACK
99 CHECK_STACK 1<<\shift,\savearea 157 bnz BASED(1f)
100 j 2f 158 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
1011: l %r15,\stack # load target stack 159 bnz BASED(2f)
1022: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 160 b BASED(stack_overflow)
103 la %r11,STACK_FRAME_OVERHEAD(%r15) 161#else
104 .endm 162 bz BASED(2f)
105 163#endif
106 .macro ADD64 high,low,timer 1641: l %r15,__LC_ASYNC_STACK
107 al \high,\timer 1652: s %r15,BASED(.Lc_spsize) # make room for registers & psw
108 al \low,4+\timer
109 brc 12,.+8
110 ahi \high,1
111 .endm 166 .endm
112 167
113 .macro SUB64 high,low,timer 168 .macro CREATE_STACK_FRAME savearea
114 sl \high,\timer 169 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
115 sl \low,4+\timer 170 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
116 brc 3,.+8 171 mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
117 ahi \high,-1 172 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
118 .endm 173 .endm
119 174
120 .macro UPDATE_VTIME high,low,enter_timer 175 .macro RESTORE_ALL psworg,sync
121 lm \high,\low,__LC_EXIT_TIMER 176 mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore
122 SUB64 \high,\low,\enter_timer 177 .if !\sync
123 ADD64 \high,\low,__LC_USER_TIMER 178 ni \psworg+1,0xfd # clear wait state bit
124 stm \high,\low,__LC_USER_TIMER 179 .endif
125 lm \high,\low,__LC_LAST_UPDATE_TIMER 180 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
126 SUB64 \high,\low,__LC_EXIT_TIMER 181 stpt __LC_EXIT_TIMER
127 ADD64 \high,\low,__LC_SYSTEM_TIMER 182 lpsw \psworg # back to caller
128 stm \high,\low,__LC_SYSTEM_TIMER
129 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
130 .endm 183 .endm
131 184
132 .macro REENABLE_IRQS 185 .macro REENABLE_IRQS
133 st %r8,__LC_RETURN_PSW 186 mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
134 ni __LC_RETURN_PSW,0xbf 187 ni __SF_EMPTY(%r15),0xbf
135 ssm __LC_RETURN_PSW 188 ssm __SF_EMPTY(%r15)
136 .endm 189 .endm
137 190
138 .section .kprobes.text, "ax" 191 .section .kprobes.text, "ax"
@@ -145,23 +198,23 @@ STACK_SIZE = 1 << STACK_SHIFT
145 * gpr2 = prev 198 * gpr2 = prev
146 */ 199 */
147ENTRY(__switch_to) 200ENTRY(__switch_to)
148 stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 201 basr %r1,0
149 st %r15,__THREAD_ksp(%r2) # store kernel stack of prev 2020: l %r4,__THREAD_info(%r2) # get thread_info of prev
150 l %r4,__THREAD_info(%r2) # get thread_info of prev
151 l %r5,__THREAD_info(%r3) # get thread_info of next 203 l %r5,__THREAD_info(%r3) # get thread_info of next
152 lr %r15,%r5
153 ahi %r15,STACK_SIZE # end of kernel stack of next
154 st %r3,__LC_CURRENT # store task struct of next
155 st %r5,__LC_THREAD_INFO # store thread info of next
156 st %r15,__LC_KERNEL_STACK # store end of kernel stack
157 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
158 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
159 l %r15,__THREAD_ksp(%r3) # load kernel stack of next
160 tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? 204 tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
161 jz 0f 205 bz 1f-0b(%r1)
162 ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 206 ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
163 oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next 207 oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
1640: lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 2081: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
209 st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
210 l %r15,__THREAD_ksp(%r3) # load kernel stack of next
211 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
212 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
213 st %r3,__LC_CURRENT # store task struct of next
214 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
215 st %r5,__LC_THREAD_INFO # store thread info of next
216 ahi %r5,STACK_SIZE # end of kernel stack of next
217 st %r5,__LC_KERNEL_STACK # store end of kernel stack
165 br %r14 218 br %r14
166 219
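The reordered __switch_to above still performs the same bookkeeping; the prev-task register store simply moves after the machine-check-flag handling. A hedged C sketch of that bookkeeping with hypothetical, pared-down types (the real fields live in task_struct, thread_info and the lowcore); the gpr save/restore and the %r15 switch have no C equivalent and stay in assembly:

    #include <stdint.h>

    struct thread_info { uint32_t flags; };        /* pared down */
    struct task {
        struct thread_info *ti;
        uint32_t pid;
    };
    struct lowcore {                               /* illustrative subset */
        struct task *current;
        struct thread_info *thread_info;
        uintptr_t kernel_stack;
        uint32_t current_pid;
    };

    #define STACK_SIZE        (8 * 1024)           /* assumed, 31-bit */
    #define TIF_MCCK_PENDING  (1u << 7)            /* bit value assumed */

    static void switch_to_bookkeeping(struct lowcore *lc,
                                      struct task *prev, struct task *next)
    {
        /* a pending machine check follows the CPU, not the task */
        if (prev->ti->flags & TIF_MCCK_PENDING) {
            prev->ti->flags &= ~TIF_MCCK_PENDING;
            next->ti->flags |= TIF_MCCK_PENDING;
        }
        lc->current = next;
        lc->thread_info = next->ti;
        lc->current_pid = next->pid;
        lc->kernel_stack = (uintptr_t)next->ti + STACK_SIZE;
    }
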
167__critical_start: 220__critical_start:
@@ -172,72 +225,70 @@ __critical_start:
172 225
173ENTRY(system_call) 226ENTRY(system_call)
174 stpt __LC_SYNC_ENTER_TIMER 227 stpt __LC_SYNC_ENTER_TIMER
175sysc_stm: 228sysc_saveall:
176 stm %r8,%r15,__LC_SAVE_AREA_SYNC 229 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
177 l %r12,__LC_THREAD_INFO 230 CREATE_STACK_FRAME __LC_SAVE_AREA
178 l %r13,__LC_SVC_NEW_PSW+4 231 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
179sysc_per: 232 mvc SP_ILC(4,%r15),__LC_SVC_ILC
180 l %r15,__LC_KERNEL_STACK 233 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
181 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
182 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
183sysc_vtime: 234sysc_vtime:
184 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER 235 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
185 stm %r0,%r7,__PT_R0(%r11) 236sysc_stime:
186 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC 237 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
187 mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW 238sysc_update:
188 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 239 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
189sysc_do_svc: 240sysc_do_svc:
190 oi __TI_flags+3(%r12),_TIF_SYSCALL 241 xr %r7,%r7
191 lh %r8,__PT_INT_CODE+2(%r11) 242 icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0
192 sla %r8,2 # shift and test for svc0 243 bnz BASED(sysc_nr_ok) # svc number > 0
193 jnz sysc_nr_ok
194 # svc 0: system call number in %r1 244 # svc 0: system call number in %r1
195 cl %r1,BASED(.Lnr_syscalls) 245 cl %r1,BASED(.Lnr_syscalls)
196 jnl sysc_nr_ok 246 bnl BASED(sysc_nr_ok)
197 sth %r1,__PT_INT_CODE+2(%r11) 247 sth %r1,SP_SVCNR(%r15)
198 lr %r8,%r1 248 lr %r7,%r1 # copy svc number to %r7
199 sla %r8,2
200sysc_nr_ok: 249sysc_nr_ok:
201 l %r10,BASED(.Lsys_call_table) # 31 bit system call table 250 sll %r7,2 # svc number *4
202 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 251 l %r10,BASED(.Lsysc_table)
203 st %r2,__PT_ORIG_GPR2(%r11) 252 tm __TI_flags+2(%r12),_TIF_SYSCALL
204 st %r7,STACK_FRAME_OVERHEAD(%r15) 253 mvc SP_ARGS(4,%r15),SP_R7(%r15)
205 l %r9,0(%r8,%r10) # get system call addr. 254 l %r8,0(%r7,%r10) # get system call addr.
206 tm __TI_flags+2(%r12),_TIF_TRACE >> 8 255 bnz BASED(sysc_tracesys)
207 jnz sysc_tracesys 256 basr %r14,%r8 # call sys_xxxx
208 basr %r14,%r9 # call sys_xxxx 257 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
209 st %r2,__PT_R2(%r11) # store return value
210 258
211sysc_return: 259sysc_return:
212 LOCKDEP_SYS_EXIT 260 LOCKDEP_SYS_EXIT
213sysc_tif: 261sysc_tif:
214 tm __PT_PSW+1(%r11),0x01 # returning to user ?
215 jno sysc_restore
216 tm __TI_flags+3(%r12),_TIF_WORK_SVC 262 tm __TI_flags+3(%r12),_TIF_WORK_SVC
217 jnz sysc_work # check for work 263 bnz BASED(sysc_work) # there is work to do (signals etc.)
218 ni __TI_flags+3(%r12),255-_TIF_SYSCALL
219sysc_restore: 264sysc_restore:
220 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) 265 RESTORE_ALL __LC_RETURN_PSW,1
221 stpt __LC_EXIT_TIMER
222 lm %r0,%r15,__PT_R0(%r11)
223 lpsw __LC_RETURN_PSW
224sysc_done: 266sysc_done:
225 267
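Both sides of this hunk implement the same dispatch rule in sysc_do_svc/sysc_nr_ok: svc 0 carries the real system call number in %r1, and the number is shifted left by two to index the 31-bit table. Roughly, in C — the table type and count are assumptions, and entry 0 is taken to be the usual reject stub:

    typedef long (*syscall_fn)(long, long, long, long, long, long);

    #define NR_SYSCALLS 400       /* placeholder for NR_syscalls */

    static long dispatch_svc(syscall_fn *table, unsigned int svc_nr,
                             unsigned long r1, const long a[6])
    {
        if (svc_nr == 0 && r1 < NR_SYSCALLS)
            svc_nr = (unsigned int)r1;  /* svc 0: number passed in %r1 */
        /* an out-of-range svc 0 stays 0; table entry 0 is assumed to
         * be a stub that returns -ENOSYS */
        return table[svc_nr](a[0], a[1], a[2], a[3], a[4], a[5]);
    }
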
226# 268#
227# One of the work bits is on. Find out which one. 269# There is work to do, but first we need to check if we return to userspace.
228# 270#
229sysc_work: 271sysc_work:
272 tm SP_PSW+1(%r15),0x01 # returning to user ?
273 bno BASED(sysc_restore)
274
275#
276# One of the work bits is on. Find out which one.
277#
278sysc_work_tif:
230 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING 279 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
231 jo sysc_mcck_pending 280 bo BASED(sysc_mcck_pending)
232 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 281 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
233 jo sysc_reschedule 282 bo BASED(sysc_reschedule)
234 tm __TI_flags+3(%r12),_TIF_PER_TRAP
235 jo sysc_singlestep
236 tm __TI_flags+3(%r12),_TIF_SIGPENDING 283 tm __TI_flags+3(%r12),_TIF_SIGPENDING
237 jo sysc_sigpending 284 bo BASED(sysc_sigpending)
238 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME 285 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
239 jo sysc_notify_resume 286 bo BASED(sysc_notify_resume)
240 j sysc_return # beware of critical section cleanup 287 tm __TI_flags+3(%r12),_TIF_RESTART_SVC
288 bo BASED(sysc_restart)
289 tm __TI_flags+3(%r12),_TIF_PER_TRAP
290 bo BASED(sysc_singlestep)
291 b BASED(sysc_return) # beware of critical section cleanup
241 292
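The work-bit chain just above tests the flags in a fixed priority order (the pre-change side additionally checks _TIF_RESTART_SVC and _TIF_PER_TRAP at the tail). As straight-line C, with illustrative bit values and stub handlers standing in for the real ones:

    #include <stdint.h>

    /* bit positions are illustrative; see <asm/thread_info.h> */
    #define TIF_MCCK_PENDING  (1u << 7)
    #define TIF_NEED_RESCHED  (1u << 3)
    #define TIF_SIGPENDING    (1u << 2)
    #define TIF_NOTIFY_RESUME (1u << 1)

    static void handle_mcck(void)   { /* s390_handle_mcck() */ }
    static void resched(void)       { /* schedule() */ }
    static void signals(void)       { /* do_signal(regs) */ }
    static void notify(void)        { /* do_notify_resume(regs) */ }

    /* sysc_work_tif as C: highest-priority bit wins, and after each
     * handler control falls back to sysc_return, which re-tests the
     * whole set until no work bit is left. */
    static void sysc_work_tif(uint32_t flags)
    {
        if (flags & TIF_MCCK_PENDING)
            handle_mcck();
        else if (flags & TIF_NEED_RESCHED)
            resched();
        else if (flags & TIF_SIGPENDING)
            signals();
        else if (flags & TIF_NOTIFY_RESUME)
            notify();
    }
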
242# 293#
243# _TIF_NEED_RESCHED is set, call schedule 294# _TIF_NEED_RESCHED is set, call schedule
@@ -245,13 +296,13 @@ sysc_work:
245sysc_reschedule: 296sysc_reschedule:
246 l %r1,BASED(.Lschedule) 297 l %r1,BASED(.Lschedule)
247 la %r14,BASED(sysc_return) 298 la %r14,BASED(sysc_return)
248 br %r1 # call schedule 299 br %r1 # call scheduler
249 300
250# 301#
251# _TIF_MCCK_PENDING is set, call handler 302# _TIF_MCCK_PENDING is set, call handler
252# 303#
253sysc_mcck_pending: 304sysc_mcck_pending:
254 l %r1,BASED(.Lhandle_mcck) 305 l %r1,BASED(.Ls390_handle_mcck)
255 la %r14,BASED(sysc_return) 306 la %r14,BASED(sysc_return)
256 br %r1 # TIF bit will be cleared by handler 307 br %r1 # TIF bit will be cleared by handler
257 308
@@ -259,156 +310,251 @@ sysc_mcck_pending:
259# _TIF_SIGPENDING is set, call do_signal 310# _TIF_SIGPENDING is set, call do_signal
260# 311#
261sysc_sigpending: 312sysc_sigpending:
262 lr %r2,%r11 # pass pointer to pt_regs 313 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
314 la %r2,SP_PTREGS(%r15) # load pt_regs
263 l %r1,BASED(.Ldo_signal) 315 l %r1,BASED(.Ldo_signal)
264 basr %r14,%r1 # call do_signal 316 basr %r14,%r1 # call do_signal
265 tm __TI_flags+3(%r12),_TIF_SYSCALL 317 tm __TI_flags+3(%r12),_TIF_RESTART_SVC
266 jno sysc_return 318 bo BASED(sysc_restart)
267 lm %r2,%r7,__PT_R2(%r11) # load svc arguments 319 tm __TI_flags+3(%r12),_TIF_PER_TRAP
268 xr %r8,%r8 # svc 0 returns -ENOSYS 320 bo BASED(sysc_singlestep)
269 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) 321 b BASED(sysc_return)
270 jnl sysc_nr_ok # invalid svc number -> do svc 0
271 lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
272 sla %r8,2
273 j sysc_nr_ok # restart svc
274 322
275# 323#
276# _TIF_NOTIFY_RESUME is set, call do_notify_resume 324# _TIF_NOTIFY_RESUME is set, call do_notify_resume
277# 325#
278sysc_notify_resume: 326sysc_notify_resume:
279 lr %r2,%r11 # pass pointer to pt_regs 327 la %r2,SP_PTREGS(%r15) # load pt_regs
280 l %r1,BASED(.Ldo_notify_resume) 328 l %r1,BASED(.Ldo_notify_resume)
281 la %r14,BASED(sysc_return) 329 la %r14,BASED(sysc_return)
282 br %r1 # call do_notify_resume 330 br %r1 # call do_notify_resume
283 331
332
333#
334# _TIF_RESTART_SVC is set, set up registers and restart svc
335#
336sysc_restart:
337 ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
338 l %r7,SP_R2(%r15) # load new svc number
339 mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
340 lm %r2,%r6,SP_R2(%r15) # load svc arguments
341 sth %r7,SP_SVCNR(%r15)
342 b BASED(sysc_nr_ok) # restart svc
343
284# 344#
285# _TIF_PER_TRAP is set, call do_per_trap 345# _TIF_PER_TRAP is set, call do_per_trap
286# 346#
287sysc_singlestep: 347sysc_singlestep:
288 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP 348 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
289 lr %r2,%r11 # pass pointer to pt_regs 349 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
290 l %r1,BASED(.Ldo_per_trap) 350 la %r2,SP_PTREGS(%r15) # address of register-save area
291 la %r14,BASED(sysc_return) 351 l %r1,BASED(.Lhandle_per) # load adr. of per handler
292 br %r1 # call do_per_trap 352 la %r14,BASED(sysc_return) # load adr. of system return
353 br %r1 # branch to do_per_trap
293 354
294# 355#
295# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 356# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
296# and after the system call 357# and after the system call
297# 358#
298sysc_tracesys: 359sysc_tracesys:
299 l %r1,BASED(.Ltrace_enter) 360 l %r1,BASED(.Ltrace_entry)
300 lr %r2,%r11 # pass pointer to pt_regs 361 la %r2,SP_PTREGS(%r15) # load pt_regs
301 la %r3,0 362 la %r3,0
302 xr %r0,%r0 363 xr %r0,%r0
303 icm %r0,3,__PT_INT_CODE+2(%r11) 364 icm %r0,3,SP_SVCNR(%r15)
304 st %r0,__PT_R2(%r11) 365 st %r0,SP_R2(%r15)
305 basr %r14,%r1 # call do_syscall_trace_enter 366 basr %r14,%r1
306 cl %r2,BASED(.Lnr_syscalls) 367 cl %r2,BASED(.Lnr_syscalls)
307 jnl sysc_tracenogo 368 bnl BASED(sysc_tracenogo)
308 lr %r8,%r2 369 lr %r7,%r2
309 sll %r8,2 370 sll %r7,2 # svc number *4
310 l %r9,0(%r8,%r10) 371 l %r8,0(%r7,%r10)
311sysc_tracego: 372sysc_tracego:
312 lm %r3,%r7,__PT_R3(%r11) 373 lm %r3,%r6,SP_R3(%r15)
313 st %r7,STACK_FRAME_OVERHEAD(%r15) 374 mvc SP_ARGS(4,%r15),SP_R7(%r15)
314 l %r2,__PT_ORIG_GPR2(%r11) 375 l %r2,SP_ORIG_R2(%r15)
315 basr %r14,%r9 # call sys_xxx 376 basr %r14,%r8 # call sys_xxx
316 st %r2,__PT_R2(%r11) # store return value 377 st %r2,SP_R2(%r15) # store return value
317sysc_tracenogo: 378sysc_tracenogo:
318 tm __TI_flags+2(%r12),_TIF_TRACE >> 8 379 tm __TI_flags+2(%r12),_TIF_SYSCALL
319 jz sysc_return 380 bz BASED(sysc_return)
320 l %r1,BASED(.Ltrace_exit) 381 l %r1,BASED(.Ltrace_exit)
321 lr %r2,%r11 # pass pointer to pt_regs 382 la %r2,SP_PTREGS(%r15) # load pt_regs
322 la %r14,BASED(sysc_return) 383 la %r14,BASED(sysc_return)
323 br %r1 # call do_syscall_trace_exit 384 br %r1
324 385
325# 386#
326# a new process exits the kernel with ret_from_fork 387# a new process exits the kernel with ret_from_fork
327# 388#
328ENTRY(ret_from_fork) 389ENTRY(ret_from_fork)
329 la %r11,STACK_FRAME_OVERHEAD(%r15)
330 l %r12,__LC_THREAD_INFO
331 l %r13,__LC_SVC_NEW_PSW+4 390 l %r13,__LC_SVC_NEW_PSW+4
332 l %r1,BASED(.Lschedule_tail) 391 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
333 basr %r14,%r1 # call schedule_tail 392 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
393 bo BASED(0f)
394 st %r15,SP_R15(%r15) # store stack pointer for new kthread
3950: l %r1,BASED(.Lschedtail)
396 basr %r14,%r1
334 TRACE_IRQS_ON 397 TRACE_IRQS_ON
335 ssm __LC_SVC_NEW_PSW # reenable interrupts 398 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
336 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 399 b BASED(sysc_tracenogo)
337 jne sysc_tracenogo 400
338 # it's a kernel thread 401#
339 lm %r9,%r10,__PT_R9(%r11) # load gprs 402# kernel_execve function needs to deal with pt_regs that is not
340ENTRY(kernel_thread_starter) 403# at the usual place
341 la %r2,0(%r10) 404#
342 basr %r14,%r9 405ENTRY(kernel_execve)
343 j sysc_tracenogo 406 stm %r12,%r15,48(%r15)
407 lr %r14,%r15
408 l %r13,__LC_SVC_NEW_PSW+4
409 s %r15,BASED(.Lc_spsize)
410 st %r14,__SF_BACKCHAIN(%r15)
411 la %r12,SP_PTREGS(%r15)
412 xc 0(__PT_SIZE,%r12),0(%r12)
413 l %r1,BASED(.Ldo_execve)
414 lr %r5,%r12
415 basr %r14,%r1
416 ltr %r2,%r2
417 be BASED(0f)
418 a %r15,BASED(.Lc_spsize)
419 lm %r12,%r15,48(%r15)
420 br %r14
421 # execve succeeded.
4220: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
423 l %r15,__LC_KERNEL_STACK # load ksp
424 s %r15,BASED(.Lc_spsize) # make room for registers & psw
425 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
426 l %r12,__LC_THREAD_INFO
427 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
428 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
429 l %r1,BASED(.Lexecve_tail)
430 basr %r14,%r1
431 b BASED(sysc_return)
344 432
345/* 433/*
346 * Program check handler routine 434 * Program check handler routine
347 */ 435 */
348 436
349ENTRY(pgm_check_handler) 437ENTRY(pgm_check_handler)
438/*
439 * First we need to check for a special case:
440 * Single stepping an instruction that disables the PER event mask will
441 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
442 * For a single stepped SVC the program check handler gets control after
443 * the SVC new PSW has been loaded. But we want to execute the SVC first and
444 * then handle the PER event. Therefore we update the SVC old PSW to point
445 * to the pgm_check_handler and branch to the SVC handler after we checked
446 * if we have to load the kernel stack register.
447 * For every other possible cause for PER event without the PER mask set
448 * we just ignore the PER event (FIXME: is there anything we have to do
449 * for LPSW?).
450 */
350 stpt __LC_SYNC_ENTER_TIMER 451 stpt __LC_SYNC_ENTER_TIMER
351 stm %r8,%r15,__LC_SAVE_AREA_SYNC 452 SAVE_ALL_BASE __LC_SAVE_AREA
352 l %r12,__LC_THREAD_INFO 453 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
353 l %r13,__LC_SVC_NEW_PSW+4 454 bnz BASED(pgm_per) # got per exception -> special case
354 lm %r8,%r9,__LC_PGM_OLD_PSW 455 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
355 tmh %r8,0x0001 # test problem state bit 456 CREATE_STACK_FRAME __LC_SAVE_AREA
356 jnz 1f # -> fault in user space 457 xc SP_ILC(4,%r15),SP_ILC(%r15)
357 tmh %r8,0x4000 # PER bit set in old PSW ? 458 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
358 jnz 0f # -> enabled, can't be a double fault 459 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
359 tm __LC_PGM_ILC+3,0x80 # check for per exception 460 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
360 jnz pgm_svcper # -> single stepped svc 461 bz BASED(pgm_no_vtime)
3610: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 462 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
362 j 2f 463 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
3631: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 464 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
364 l %r15,__LC_KERNEL_STACK 465pgm_no_vtime:
3652: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 466 l %r3,__LC_PGM_ILC # load program interruption code
366 la %r11,STACK_FRAME_OVERHEAD(%r15) 467 l %r4,__LC_TRANS_EXC_CODE
367 stm %r0,%r7,__PT_R0(%r11) 468 REENABLE_IRQS
368 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC 469 la %r8,0x7f
369 stm %r8,%r9,__PT_PSW(%r11) 470 nr %r8,%r3
370 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC 471 sll %r8,2
371 mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE 472 l %r1,BASED(.Ljump_table)
372 tm __LC_PGM_ILC+3,0x80 # check for per exception 473 l %r1,0(%r8,%r1) # load address of handler routine
373 jz 0f 474 la %r2,SP_PTREGS(%r15) # address of register-save area
475 basr %r14,%r1 # branch to interrupt-handler
476pgm_exit:
477 b BASED(sysc_return)
478
479#
480# handle per exception
481#
482pgm_per:
483 tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
484 bnz BASED(pgm_per_std) # ok, normal per event from user space
 485# ok, it's one of the special cases, now we need to find out which one
486 clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
487 be BASED(pgm_svcper)
488# no interesting special case, ignore PER event
489 lm %r12,%r15,__LC_SAVE_AREA
490 lpsw 0x28
491
492#
493# Normal per exception
494#
495pgm_per_std:
496 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
497 CREATE_STACK_FRAME __LC_SAVE_AREA
498 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
499 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
500 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
501 bz BASED(pgm_no_vtime2)
502 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
503 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
504 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
505pgm_no_vtime2:
374 l %r1,__TI_task(%r12) 506 l %r1,__TI_task(%r12)
375 tmh %r8,0x0001 # kernel per event ? 507 tm SP_PSW+1(%r15),0x01 # kernel per event ?
376 jz pgm_kprobe 508 bz BASED(kernel_per)
377 oi __TI_flags+3(%r12),_TIF_PER_TRAP
378 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
379 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE 509 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
510 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
380 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID 511 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
3810: REENABLE_IRQS 512 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
382 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 513 l %r3,__LC_PGM_ILC # load program interruption code
514 l %r4,__LC_TRANS_EXC_CODE
515 REENABLE_IRQS
516 la %r8,0x7f
517 nr %r8,%r3 # clear per-event-bit and ilc
518 be BASED(pgm_exit2) # only per or per+check ?
519 sll %r8,2
383 l %r1,BASED(.Ljump_table) 520 l %r1,BASED(.Ljump_table)
384 la %r10,0x7f 521 l %r1,0(%r8,%r1) # load address of handler routine
385 n %r10,__PT_INT_CODE(%r11) 522 la %r2,SP_PTREGS(%r15) # address of register-save area
386 je sysc_return
387 sll %r10,2
388 l %r1,0(%r10,%r1) # load address of handler routine
389 lr %r2,%r11 # pass pointer to pt_regs
390 basr %r14,%r1 # branch to interrupt-handler 523 basr %r14,%r1 # branch to interrupt-handler
391 j sysc_return 524pgm_exit2:
525 b BASED(sysc_return)
392 526
393# 527#
394# PER event in supervisor state, must be kprobes 528# it was a single stepped SVC that is causing all the trouble
395# 529#
396pgm_kprobe: 530pgm_svcper:
397 REENABLE_IRQS 531 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
398 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 532 CREATE_STACK_FRAME __LC_SAVE_AREA
399 l %r1,BASED(.Ldo_per_trap) 533 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
400 lr %r2,%r11 # pass pointer to pt_regs 534 mvc SP_ILC(4,%r15),__LC_SVC_ILC
401 basr %r14,%r1 # call do_per_trap 535 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
402 j sysc_return 536 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
537 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
538 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
539 l %r8,__TI_task(%r12)
540 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
541 mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
542 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
543 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
544 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
545 lm %r2,%r6,SP_R2(%r15) # load svc arguments
546 b BASED(sysc_do_svc)
403 547
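The pgm_per triage above follows the long comment at the top of pgm_check_handler: a PER event with the PER mask off in the old PSW is only interesting if it came from a single-stepped SVC, recognizable because the program-check old PSW then equals the SVC new PSW. A compact sketch of that decision, with the PSWs passed as raw 8-byte values:

    #include <stdint.h>

    enum per_action { PER_NORMAL, PER_STEPPED_SVC, PER_IGNORE };

    static enum per_action classify_per(uint64_t pgm_old_psw,
                                        uint64_t svc_new_psw,
                                        int per_mask_on)
    {
        if (per_mask_on)
            return PER_NORMAL;       /* ordinary PER event -> pgm_per_std */
        if (pgm_old_psw == svc_new_psw)
            return PER_STEPPED_SVC;  /* stepped SVC -> pgm_svcper */
        return PER_IGNORE;           /* nothing interesting -> lpsw 0x28 */
    }
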
404# 548#
405# single stepped system call 549# per was called from kernel, must be kprobes
406# 550#
407pgm_svcper: 551kernel_per:
408 oi __TI_flags+3(%r12),_TIF_PER_TRAP 552 REENABLE_IRQS
409 mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW 553 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15)
410 mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per) 554 la %r2,SP_PTREGS(%r15) # address of register-save area
411 lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs 555 l %r1,BASED(.Lhandle_per) # load adr. of per handler
556 basr %r14,%r1 # branch to do_single_step
557 b BASED(pgm_exit)
412 558
413/* 559/*
414 * IO interrupt handler routine 560 * IO interrupt handler routine
@@ -417,34 +563,28 @@ pgm_svcper:
417ENTRY(io_int_handler) 563ENTRY(io_int_handler)
418 stck __LC_INT_CLOCK 564 stck __LC_INT_CLOCK
419 stpt __LC_ASYNC_ENTER_TIMER 565 stpt __LC_ASYNC_ENTER_TIMER
420 stm %r8,%r15,__LC_SAVE_AREA_ASYNC 566 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
421 l %r12,__LC_THREAD_INFO 567 CREATE_STACK_FRAME __LC_SAVE_AREA+16
422 l %r13,__LC_SVC_NEW_PSW+4 568 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
423 lm %r8,%r9,__LC_IO_OLD_PSW 569 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
424 tmh %r8,0x0001 # interrupting from user ? 570 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
425 jz io_skip 571 bz BASED(io_no_vtime)
426 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER 572 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
427io_skip: 573 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
428 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 574 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
429 stm %r0,%r7,__PT_R0(%r11) 575io_no_vtime:
430 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
431 stm %r8,%r9,__PT_PSW(%r11)
432 TRACE_IRQS_OFF 576 TRACE_IRQS_OFF
433 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 577 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
434 l %r1,BASED(.Ldo_IRQ) 578 la %r2,SP_PTREGS(%r15) # address of register-save area
435 lr %r2,%r11 # pass pointer to pt_regs 579 basr %r14,%r1 # branch to standard irq handler
436 basr %r14,%r1 # call do_IRQ
437io_return: 580io_return:
438 LOCKDEP_SYS_EXIT 581 LOCKDEP_SYS_EXIT
439 TRACE_IRQS_ON 582 TRACE_IRQS_ON
440io_tif: 583io_tif:
441 tm __TI_flags+3(%r12),_TIF_WORK_INT 584 tm __TI_flags+3(%r12),_TIF_WORK_INT
442 jnz io_work # there is work to do (signals etc.) 585 bnz BASED(io_work) # there is work to do (signals etc.)
443io_restore: 586io_restore:
444 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) 587 RESTORE_ALL __LC_RETURN_PSW,0
445 stpt __LC_EXIT_TIMER
446 lm %r0,%r15,__PT_R0(%r11)
447 lpsw __LC_RETURN_PSW
448io_done: 588io_done:
449 589
450# 590#
@@ -455,29 +595,28 @@ io_done:
455# Before any work can be done, a switch to the kernel stack is required. 595# Before any work can be done, a switch to the kernel stack is required.
456# 596#
457io_work: 597io_work:
458 tm __PT_PSW+1(%r11),0x01 # returning to user ? 598 tm SP_PSW+1(%r15),0x01 # returning to user ?
459 jo io_work_user # yes -> do resched & signal 599 bo BASED(io_work_user) # yes -> do resched & signal
460#ifdef CONFIG_PREEMPT 600#ifdef CONFIG_PREEMPT
461 # check for preemptive scheduling 601 # check for preemptive scheduling
462 icm %r0,15,__TI_precount(%r12) 602 icm %r0,15,__TI_precount(%r12)
463 jnz io_restore # preemption disabled 603 bnz BASED(io_restore) # preemption disabled
464 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 604 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
465 jno io_restore 605 bno BASED(io_restore)
466 # switch to kernel stack 606 # switch to kernel stack
467 l %r1,__PT_R15(%r11) 607 l %r1,SP_R15(%r15)
468 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 608 s %r1,BASED(.Lc_spsize)
469 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 609 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
470 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 610 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
471 la %r11,STACK_FRAME_OVERHEAD(%r1)
472 lr %r15,%r1 611 lr %r15,%r1
473 # TRACE_IRQS_ON already done at io_return, call 612 # TRACE_IRQS_ON already done at io_return, call
474 # TRACE_IRQS_OFF to keep things symmetrical 613 # TRACE_IRQS_OFF to keep things symmetrical
475 TRACE_IRQS_OFF 614 TRACE_IRQS_OFF
476 l %r1,BASED(.Lpreempt_irq) 615 l %r1,BASED(.Lpreempt_schedule_irq)
477 basr %r14,%r1 # call preempt_schedule_irq 616 basr %r14,%r1 # call preempt_schedule_irq
478 j io_return 617 b BASED(io_return)
479#else 618#else
480 j io_restore 619 b BASED(io_restore)
481#endif 620#endif
482 621
483# 622#
@@ -485,10 +624,9 @@ io_work:
485# 624#
486io_work_user: 625io_work_user:
487 l %r1,__LC_KERNEL_STACK 626 l %r1,__LC_KERNEL_STACK
488 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 627 s %r1,BASED(.Lc_spsize)
489 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 628 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
490 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 629 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
491 la %r11,STACK_FRAME_OVERHEAD(%r1)
492 lr %r15,%r1 630 lr %r15,%r1
493 631
494# 632#
@@ -498,24 +636,24 @@ io_work_user:
498# 636#
499io_work_tif: 637io_work_tif:
500 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING 638 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
501 jo io_mcck_pending 639 bo BASED(io_mcck_pending)
502 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 640 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
503 jo io_reschedule 641 bo BASED(io_reschedule)
504 tm __TI_flags+3(%r12),_TIF_SIGPENDING 642 tm __TI_flags+3(%r12),_TIF_SIGPENDING
505 jo io_sigpending 643 bo BASED(io_sigpending)
506 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME 644 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
507 jo io_notify_resume 645 bo BASED(io_notify_resume)
508 j io_return # beware of critical section cleanup 646 b BASED(io_return) # beware of critical section cleanup
509 647
510# 648#
511# _TIF_MCCK_PENDING is set, call handler 649# _TIF_MCCK_PENDING is set, call handler
512# 650#
513io_mcck_pending: 651io_mcck_pending:
514 # TRACE_IRQS_ON already done at io_return 652 # TRACE_IRQS_ON already done at io_return
515 l %r1,BASED(.Lhandle_mcck) 653 l %r1,BASED(.Ls390_handle_mcck)
516 basr %r14,%r1 # TIF bit will be cleared by handler 654 basr %r14,%r1 # TIF bit will be cleared by handler
517 TRACE_IRQS_OFF 655 TRACE_IRQS_OFF
518 j io_return 656 b BASED(io_return)
519 657
520# 658#
521# _TIF_NEED_RESCHED is set, call schedule 659# _TIF_NEED_RESCHED is set, call schedule
@@ -523,37 +661,37 @@ io_mcck_pending:
523io_reschedule: 661io_reschedule:
524 # TRACE_IRQS_ON already done at io_return 662 # TRACE_IRQS_ON already done at io_return
525 l %r1,BASED(.Lschedule) 663 l %r1,BASED(.Lschedule)
526 ssm __LC_SVC_NEW_PSW # reenable interrupts 664 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
527 basr %r14,%r1 # call scheduler 665 basr %r14,%r1 # call scheduler
528 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 666 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
529 TRACE_IRQS_OFF 667 TRACE_IRQS_OFF
530 j io_return 668 b BASED(io_return)
531 669
532# 670#
533# _TIF_SIGPENDING is set, call do_signal 671# _TIF_SIGPENDING is set, call do_signal
534# 672#
535io_sigpending: 673io_sigpending:
536 # TRACE_IRQS_ON already done at io_return 674 # TRACE_IRQS_ON already done at io_return
675 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
676 la %r2,SP_PTREGS(%r15) # load pt_regs
537 l %r1,BASED(.Ldo_signal) 677 l %r1,BASED(.Ldo_signal)
538 ssm __LC_SVC_NEW_PSW # reenable interrupts
539 lr %r2,%r11 # pass pointer to pt_regs
540 basr %r14,%r1 # call do_signal 678 basr %r14,%r1 # call do_signal
541 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 679 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
542 TRACE_IRQS_OFF 680 TRACE_IRQS_OFF
543 j io_return 681 b BASED(io_return)
544 682
545# 683#
546# _TIF_NOTIFY_RESUME is set, call do_notify_resume 684# _TIF_NOTIFY_RESUME is set, call do_notify_resume
547# 685#
548io_notify_resume: 686io_notify_resume:
549 # TRACE_IRQS_ON already done at io_return 687 # TRACE_IRQS_ON already done at io_return
688 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
689 la %r2,SP_PTREGS(%r15) # load pt_regs
550 l %r1,BASED(.Ldo_notify_resume) 690 l %r1,BASED(.Ldo_notify_resume)
551 ssm __LC_SVC_NEW_PSW # reenable interrupts 691 basr %r14,%r1 # call do_notify_resume
552 lr %r2,%r11 # pass pointer to pt_regs 692 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
553 basr %r14,%r1 # call do_notify_resume
554 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
555 TRACE_IRQS_OFF 693 TRACE_IRQS_OFF
556 j io_return 694 b BASED(io_return)
557 695
558/* 696/*
559 * External interrupt handler routine 697 * External interrupt handler routine
@@ -562,41 +700,23 @@ io_notify_resume:
562ENTRY(ext_int_handler) 700ENTRY(ext_int_handler)
563 stck __LC_INT_CLOCK 701 stck __LC_INT_CLOCK
564 stpt __LC_ASYNC_ENTER_TIMER 702 stpt __LC_ASYNC_ENTER_TIMER
565 stm %r8,%r15,__LC_SAVE_AREA_ASYNC 703 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
566 l %r12,__LC_THREAD_INFO 704 CREATE_STACK_FRAME __LC_SAVE_AREA+16
567 l %r13,__LC_SVC_NEW_PSW+4 705 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
568 lm %r8,%r9,__LC_EXT_OLD_PSW 706 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
569 tmh %r8,0x0001 # interrupting from user ? 707 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
570 jz ext_skip 708 bz BASED(ext_no_vtime)
571 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER 709 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
572ext_skip: 710 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
573 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 711 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
574 stm %r0,%r7,__PT_R0(%r11) 712ext_no_vtime:
575 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
576 stm %r8,%r9,__PT_PSW(%r11)
577 TRACE_IRQS_OFF 713 TRACE_IRQS_OFF
578 lr %r2,%r11 # pass pointer to pt_regs 714 la %r2,SP_PTREGS(%r15) # address of register-save area
579 l %r3,__LC_EXT_CPU_ADDR # get cpu address + interruption code 715 l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
580 l %r4,__LC_EXT_PARAMS # get external parameters 716 l %r4,__LC_EXT_PARAMS # get external parameters
581 l %r1,BASED(.Ldo_extint) 717 l %r1,BASED(.Ldo_extint)
582 basr %r14,%r1 # call do_extint 718 basr %r14,%r1
583 j io_return 719 b BASED(io_return)
584
585/*
586 * Load idle PSW. The second "half" of this function is in cleanup_idle.
587 */
588ENTRY(psw_idle)
589 st %r3,__SF_EMPTY(%r15)
590 basr %r1,0
591 la %r1,psw_idle_lpsw+4-.(%r1)
592 st %r1,__SF_EMPTY+4(%r15)
593 oi __SF_EMPTY+4(%r15),0x80
594 stck __CLOCK_IDLE_ENTER(%r2)
595 stpt __TIMER_IDLE_ENTER(%r2)
596psw_idle_lpsw:
597 lpsw __SF_EMPTY(%r15)
598 br %r14
599psw_idle_end:
600 720
601__critical_end: 721__critical_end:
602 722
@@ -608,100 +728,154 @@ ENTRY(mcck_int_handler)
608 stck __LC_MCCK_CLOCK 728 stck __LC_MCCK_CLOCK
609 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer 729 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
610 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs 730 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
611 l %r12,__LC_THREAD_INFO 731 SAVE_ALL_BASE __LC_SAVE_AREA+32
612 l %r13,__LC_SVC_NEW_PSW+4 732 la %r12,__LC_MCK_OLD_PSW
613 lm %r8,%r9,__LC_MCK_OLD_PSW
614 tm __LC_MCCK_CODE,0x80 # system damage? 733 tm __LC_MCCK_CODE,0x80 # system damage?
615 jo mcck_panic # yes -> rest of mcck code invalid 734 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
616 la %r14,__LC_CPU_TIMER_SAVE_AREA 735 mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
617 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
618 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 736 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
619 jo 3f 737 bo BASED(1f)
620 la %r14,__LC_SYNC_ENTER_TIMER 738 la %r14,__LC_SYNC_ENTER_TIMER
621 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER 739 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
622 jl 0f 740 bl BASED(0f)
623 la %r14,__LC_ASYNC_ENTER_TIMER 741 la %r14,__LC_ASYNC_ENTER_TIMER
6240: clc 0(8,%r14),__LC_EXIT_TIMER 7420: clc 0(8,%r14),__LC_EXIT_TIMER
625 jl 1f 743 bl BASED(0f)
626 la %r14,__LC_EXIT_TIMER 744 la %r14,__LC_EXIT_TIMER
6271: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER 7450: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
628 jl 2f 746 bl BASED(0f)
629 la %r14,__LC_LAST_UPDATE_TIMER 747 la %r14,__LC_LAST_UPDATE_TIMER
6302: spt 0(%r14) 7480: spt 0(%r14)
631 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 749 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
6323: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7501: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
633 jno mcck_panic # no -> skip cleanup critical 751 bno BASED(mcck_int_main) # no -> skip cleanup critical
634 tm %r8,0x0001 # interrupting from user ? 752 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
635 jz mcck_skip 753 bnz BASED(mcck_int_main) # from user -> load async stack
636 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER 754 clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
637mcck_skip: 755 bhe BASED(mcck_int_main)
638 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT 756 clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
639 mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA 757 bl BASED(mcck_int_main)
640 stm %r8,%r9,__PT_PSW(%r11) 758 l %r14,BASED(.Lcleanup_critical)
641 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 759 basr %r14,%r14
642 l %r1,BASED(.Ldo_machine_check) 760mcck_int_main:
643 lr %r2,%r11 # pass pointer to pt_regs 761 l %r14,__LC_PANIC_STACK # are we already on the panic stack?
644 basr %r14,%r1 # call s390_do_machine_check 762 slr %r14,%r15
645 tm __PT_PSW+1(%r11),0x01 # returning to user ? 763 sra %r14,PAGE_SHIFT
646 jno mcck_return 764 be BASED(0f)
765 l %r15,__LC_PANIC_STACK # load panic stack
7660: s %r15,BASED(.Lc_spsize) # make room for registers & psw
767 CREATE_STACK_FRAME __LC_SAVE_AREA+32
768 mvc SP_PSW(8,%r15),0(%r12)
769 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
770 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
 771 bno BASED(mcck_no_vtime) # no -> skip vtime update
772 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
773 bz BASED(mcck_no_vtime)
774 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
775 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
776 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
777mcck_no_vtime:
778 la %r2,SP_PTREGS(%r15) # load pt_regs
779 l %r1,BASED(.Ls390_mcck)
780 basr %r14,%r1 # call machine check handler
781 tm SP_PSW+1(%r15),0x01 # returning to user ?
782 bno BASED(mcck_return)
647 l %r1,__LC_KERNEL_STACK # switch to kernel stack 783 l %r1,__LC_KERNEL_STACK # switch to kernel stack
648 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 784 s %r1,BASED(.Lc_spsize)
649 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 785 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
650 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 786 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
651 la %r11,STACK_FRAME_OVERHEAD(%r15)
652 lr %r15,%r1 787 lr %r15,%r1
653 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 788 stosm __SF_EMPTY(%r15),0x04 # turn dat on
654 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING 789 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
655 jno mcck_return 790 bno BASED(mcck_return)
656 TRACE_IRQS_OFF 791 TRACE_IRQS_OFF
657 l %r1,BASED(.Lhandle_mcck) 792 l %r1,BASED(.Ls390_handle_mcck)
658 basr %r14,%r1 # call s390_handle_mcck 793 basr %r14,%r1 # call machine check handler
659 TRACE_IRQS_ON 794 TRACE_IRQS_ON
660mcck_return: 795mcck_return:
661 mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW 796 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
797 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
662 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 798 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
663 jno 0f 799 bno BASED(0f)
664 lm %r0,%r15,__PT_R0(%r11) 800 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
665 stpt __LC_EXIT_TIMER 801 stpt __LC_EXIT_TIMER
666 lpsw __LC_RETURN_MCCK_PSW 802 lpsw __LC_RETURN_MCCK_PSW # back to caller
6670: lm %r0,%r15,__PT_R0(%r11) 8030: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
668 lpsw __LC_RETURN_MCCK_PSW 804 lpsw __LC_RETURN_MCCK_PSW # back to caller
669 805
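Worth spelling out: the timer-revalidation chain near the top of mcck_int_handler walks __LC_SYNC_ENTER_TIMER, __LC_ASYNC_ENTER_TIMER, __LC_EXIT_TIMER and __LC_LAST_UPDATE_TIMER, keeping the smaller value at each comparison. Since the CPU timer counts down, that selects the most recently stored sample, which spt then reloads. In C:

    #include <stdint.h>

    /* pick the freshest saved CPU-timer sample: smallest value wins
     * because the timer decrements */
    static uint64_t most_recent_timer(uint64_t sync_enter,
                                      uint64_t async_enter,
                                      uint64_t exit, uint64_t last_update)
    {
        uint64_t t = sync_enter;
        if (async_enter < t) t = async_enter;
        if (exit < t)        t = exit;
        if (last_update < t) t = last_update;
        return t;
    }
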
670mcck_panic: 806 RESTORE_ALL __LC_RETURN_MCCK_PSW,0
671 l %r14,__LC_PANIC_STACK 807
672 slr %r14,%r15 808/*
673 sra %r14,PAGE_SHIFT 809 * Restart interruption handler, kick starter for additional CPUs
674 jz 0f 810 */
675 l %r15,__LC_PANIC_STACK 811#ifdef CONFIG_SMP
6760: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 812 __CPUINIT
677 j mcck_skip 813ENTRY(restart_int_handler)
814 basr %r1,0
815restart_base:
816 spt restart_vtime-restart_base(%r1)
817 stck __LC_LAST_UPDATE_CLOCK
818 mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
819 mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
820 l %r15,__LC_SAVE_AREA+60 # load ksp
821 lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
822 lam %a0,%a15,__LC_AREGS_SAVE_AREA
823 lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone
824 l %r1,__LC_THREAD_INFO
825 mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
826 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
827 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
828 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
829 basr %r14,0
830 l %r14,restart_addr-.(%r14)
831 basr %r14,%r14 # branch to start_secondary
832restart_addr:
833 .long start_secondary
834 .align 8
835restart_vtime:
836 .long 0x7fffffff,0xffffffff
837 .previous
838#else
839/*
840 * If we do not run with SMP enabled, let the new CPU crash ...
841 */
842ENTRY(restart_int_handler)
843 basr %r1,0
844restart_base:
845 lpsw restart_crash-restart_base(%r1)
846 .align 8
847restart_crash:
848 .long 0x000a0000,0x00000000
849restart_go:
850#endif
678 851
679# 852#
680# PSW restart interrupt handler 853# PSW restart interrupt handler
681# 854#
682ENTRY(restart_int_handler) 855ENTRY(psw_restart_int_handler)
683 st %r15,__LC_SAVE_AREA_RESTART 856 st %r15,__LC_SAVE_AREA_64(%r0) # save r15
684 l %r15,__LC_RESTART_STACK 857 basr %r15,0
685 ahi %r15,-__PT_SIZE # create pt_regs on stack 8580: l %r15,.Lrestart_stack-0b(%r15) # load restart stack
686 xc 0(__PT_SIZE,%r15),0(%r15) 859 l %r15,0(%r15)
687 stm %r0,%r14,__PT_R0(%r15) 860 ahi %r15,-SP_SIZE # make room for pt_regs
688 mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART 861 stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
689 mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw 862 mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
690 ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack 863 mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
691 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 864 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
692 l %r1,__LC_RESTART_FN # load fn, parm & source cpu 865 basr %r14,0
693 l %r2,__LC_RESTART_DATA 8661: l %r14,.Ldo_restart-1b(%r14)
694 l %r3,__LC_RESTART_SOURCE 867 basr %r14,%r14
695 ltr %r3,%r3 # test source cpu address 868
696 jm 1f # negative -> skip source stop 869 basr %r14,0 # load disabled wait PSW if
6970: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu 8702: lpsw restart_psw_crash-2b(%r14) # do_restart returns
698 brc 10,0b # wait for status stored 871 .align 4
6991: basr %r14,%r1 # call function 872.Ldo_restart:
700 stap __SF_EMPTY(%r15) # store cpu address 873 .long do_restart
701 lh %r3,__SF_EMPTY(%r15) 874.Lrestart_stack:
7022: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu 875 .long restart_stack
703 brc 2,2b 876 .align 8
7043: j 3b 877restart_psw_crash:
878 .long 0x000a0000,0x00000000 + restart_psw_crash
705 879
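The `0x000a0000,0x00000000` words here encode a 31-bit disabled-wait PSW: all interrupt masks zero with, assuming the usual ESA/390 PSW layout, bit 12 (the ESA/390 mode bit) and bit 14 (the wait-state bit) set. restart_psw_crash additionally adds its own address into the second word, so the stopped PSW records where the CPU died. A sketch of building such a PSW value:

    #include <stdint.h>

    /* disabled wait: no interrupts enabled, wait bit on, address as
     * given (0, or the PSW's own location as a breadcrumb) */
    static uint64_t disabled_wait_psw(uint32_t addr)
    {
        return ((uint64_t)0x000a0000 << 32) | addr;
    }
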
706 .section .kprobes.text, "ax" 880 .section .kprobes.text, "ax"
707 881
@@ -713,213 +887,213 @@ ENTRY(restart_int_handler)
713 */ 887 */
714stack_overflow: 888stack_overflow:
715 l %r15,__LC_PANIC_STACK # change to panic stack 889 l %r15,__LC_PANIC_STACK # change to panic stack
716 ahi %r15,-__PT_SIZE # create pt_regs 890 sl %r15,BASED(.Lc_spsize)
717 stm %r0,%r7,__PT_R0(%r15) 891 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
718 stm %r8,%r9,__PT_PSW(%r15) 892 stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
719 mvc __PT_R8(32,%r11),0(%r14) 893 la %r1,__LC_SAVE_AREA
720 lr %r15,%r11 894 ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ?
721 ahi %r15,-STACK_FRAME_OVERHEAD 895 be BASED(0f)
722 l %r1,BASED(1f) 896 ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ?
723 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 897 be BASED(0f)
724 lr %r2,%r11 # pass pointer to pt_regs 898 la %r1,__LC_SAVE_AREA+16
725 br %r1 # branch to kernel_stack_overflow 8990: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack
900 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
901 l %r1,BASED(1f) # branch to kernel_stack_overflow
902 la %r2,SP_PTREGS(%r15) # load pt_regs
903 br %r1
7261: .long kernel_stack_overflow 9041: .long kernel_stack_overflow
727#endif 905#endif
728 906
729cleanup_table: 907cleanup_table_system_call:
730 .long system_call + 0x80000000 908 .long system_call + 0x80000000, sysc_do_svc + 0x80000000
731 .long sysc_do_svc + 0x80000000 909cleanup_table_sysc_tif:
732 .long sysc_tif + 0x80000000 910 .long sysc_tif + 0x80000000, sysc_restore + 0x80000000
733 .long sysc_restore + 0x80000000 911cleanup_table_sysc_restore:
734 .long sysc_done + 0x80000000 912 .long sysc_restore + 0x80000000, sysc_done + 0x80000000
735 .long io_tif + 0x80000000 913cleanup_table_io_tif:
736 .long io_restore + 0x80000000 914 .long io_tif + 0x80000000, io_restore + 0x80000000
737 .long io_done + 0x80000000 915cleanup_table_io_restore:
738 .long psw_idle + 0x80000000 916 .long io_restore + 0x80000000, io_done + 0x80000000
739 .long psw_idle_end + 0x80000000
740 917
741cleanup_critical: 918cleanup_critical:
742 cl %r9,BASED(cleanup_table) # system_call 919 clc 4(4,%r12),BASED(cleanup_table_system_call)
743 jl 0f 920 bl BASED(0f)
744 cl %r9,BASED(cleanup_table+4) # sysc_do_svc 921 clc 4(4,%r12),BASED(cleanup_table_system_call+4)
745 jl cleanup_system_call 922 bl BASED(cleanup_system_call)
746 cl %r9,BASED(cleanup_table+8) # sysc_tif 9230:
747 jl 0f 924 clc 4(4,%r12),BASED(cleanup_table_sysc_tif)
748 cl %r9,BASED(cleanup_table+12) # sysc_restore 925 bl BASED(0f)
749 jl cleanup_sysc_tif 926 clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4)
750 cl %r9,BASED(cleanup_table+16) # sysc_done 927 bl BASED(cleanup_sysc_tif)
751 jl cleanup_sysc_restore 9280:
752 cl %r9,BASED(cleanup_table+20) # io_tif 929 clc 4(4,%r12),BASED(cleanup_table_sysc_restore)
753 jl 0f 930 bl BASED(0f)
754 cl %r9,BASED(cleanup_table+24) # io_restore 931 clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4)
755 jl cleanup_io_tif 932 bl BASED(cleanup_sysc_restore)
756 cl %r9,BASED(cleanup_table+28) # io_done 9330:
757 jl cleanup_io_restore 934 clc 4(4,%r12),BASED(cleanup_table_io_tif)
758 cl %r9,BASED(cleanup_table+32) # psw_idle 935 bl BASED(0f)
759 jl 0f 936 clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
760 cl %r9,BASED(cleanup_table+36) # psw_idle_end 937 bl BASED(cleanup_io_tif)
761 jl cleanup_idle 9380:
7620: br %r14 939 clc 4(4,%r12),BASED(cleanup_table_io_restore)
940 bl BASED(0f)
941 clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
942 bl BASED(cleanup_io_restore)
9430:
944 br %r14
763 945
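The restructured cleanup_table above pairs each critical-section start with its end, and cleanup_critical classifies the interrupted address against those [start, end) ranges before running the matching fixup. In outline — types and table contents are illustrative, not the real layout:

    struct cleanup_range {
        unsigned long start, end;   /* e.g. system_call..sysc_do_svc */
        void (*fixup)(void);        /* e.g. cleanup_system_call */
    };

    static void cleanup_critical(unsigned long addr,
                                 const struct cleanup_range *tab,
                                 unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++) {
            if (addr >= tab[i].start && addr < tab[i].end) {
                tab[i].fixup();
                break;
            }
        }
    }
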
764cleanup_system_call: 946cleanup_system_call:
765 # check if stpt has been executed 947 mvc __LC_RETURN_PSW(8),0(%r12)
766 cl %r9,BASED(cleanup_system_call_insn) 948 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
767 jh 0f 949 bh BASED(0f)
768 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
769 chi %r11,__LC_SAVE_AREA_ASYNC
770 je 0f
771 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 950 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
7720: # check if stm has been executed 951 c %r12,BASED(.Lmck_old_psw)
773 cl %r9,BASED(cleanup_system_call_insn+4) 952 be BASED(0f)
774 jh 0f 953 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
775 mvc __LC_SAVE_AREA_SYNC(32),0(%r11) 9540: c %r12,BASED(.Lmck_old_psw)
7760: # set up saved registers r12, and r13 955 la %r12,__LC_SAVE_AREA+32
777 st %r12,16(%r11) # r12 thread-info pointer 956 be BASED(0f)
778 st %r13,20(%r11) # r13 literal-pool pointer 957 la %r12,__LC_SAVE_AREA+16
779 # check if the user time calculation has been done 9580: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
780 cl %r9,BASED(cleanup_system_call_insn+8) 959 bhe BASED(cleanup_vtime)
781 jh 0f 960 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
782 l %r10,__LC_EXIT_TIMER 961 bh BASED(0f)
783 l %r15,__LC_EXIT_TIMER+4 962 mvc __LC_SAVE_AREA(16),0(%r12)
784 SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER 9630: st %r13,4(%r12)
785 ADD64 %r10,%r15,__LC_USER_TIMER 964 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
786 st %r10,__LC_USER_TIMER 965 s %r15,BASED(.Lc_spsize) # make room for registers & psw
787 st %r15,__LC_USER_TIMER+4 966 st %r15,12(%r12)
7880: # check if the system time calculation has been done 967 CREATE_STACK_FRAME __LC_SAVE_AREA
789 cl %r9,BASED(cleanup_system_call_insn+12) 968 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
790 jh 0f 969 mvc SP_ILC(4,%r15),__LC_SVC_ILC
791 l %r10,__LC_LAST_UPDATE_TIMER 970 mvc 0(4,%r12),__LC_THREAD_INFO
792 l %r15,__LC_LAST_UPDATE_TIMER+4 971cleanup_vtime:
793 SUB64 %r10,%r15,__LC_EXIT_TIMER 972 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
794 ADD64 %r10,%r15,__LC_SYSTEM_TIMER 973 bhe BASED(cleanup_stime)
795 st %r10,__LC_SYSTEM_TIMER 974 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
796 st %r15,__LC_SYSTEM_TIMER+4 975cleanup_stime:
7970: # update accounting time stamp 976 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
977 bh BASED(cleanup_update)
978 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
979cleanup_update:
798 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 980 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
799 # set up saved register 11 981 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
800 l %r15,__LC_KERNEL_STACK 982 la %r12,__LC_RETURN_PSW
801 ahi %r15,-__PT_SIZE
802 st %r15,12(%r11) # r11 pt_regs pointer
803 # fill pt_regs
804 mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
805 stm %r0,%r7,__PT_R0(%r15)
806 mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
807 mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
808 # setup saved register 15
809 ahi %r15,-STACK_FRAME_OVERHEAD
810 st %r15,28(%r11) # r15 stack pointer
811 # set new psw address and exit
812 l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
813 br %r14 983 br %r14
814cleanup_system_call_insn: 984cleanup_system_call_insn:
985 .long sysc_saveall + 0x80000000
815 .long system_call + 0x80000000 986 .long system_call + 0x80000000
816 .long sysc_stm + 0x80000000 987 .long sysc_vtime + 0x80000000
817 .long sysc_vtime + 0x80000000 + 36 988 .long sysc_stime + 0x80000000
818 .long sysc_vtime + 0x80000000 + 76 989 .long sysc_update + 0x80000000
819 990
820cleanup_sysc_tif: 991cleanup_sysc_tif:
821 l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000 992 mvc __LC_RETURN_PSW(4),0(%r12)
993 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
994 la %r12,__LC_RETURN_PSW
822 br %r14 995 br %r14
823 996
824cleanup_sysc_restore: 997cleanup_sysc_restore:
825 cl %r9,BASED(cleanup_sysc_restore_insn) 998 clc 4(4,%r12),BASED(cleanup_sysc_restore_insn)
826 jhe 0f 999 be BASED(2f)
827 l %r9,12(%r11) # get saved pointer to pt_regs 1000 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
828 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 1001 c %r12,BASED(.Lmck_old_psw)
829 mvc 0(32,%r11),__PT_R8(%r9) 1002 be BASED(0f)
830 lm %r0,%r7,__PT_R0(%r9) 1003 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
8310: lm %r8,%r9,__LC_RETURN_PSW 10040: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
1005 be BASED(2f)
1006 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
1007 c %r12,BASED(.Lmck_old_psw)
1008 la %r12,__LC_SAVE_AREA+32
1009 be BASED(1f)
1010 la %r12,__LC_SAVE_AREA+16
10111: mvc 0(16,%r12),SP_R12(%r15)
1012 lm %r0,%r11,SP_R0(%r15)
1013 l %r15,SP_R15(%r15)
10142: la %r12,__LC_RETURN_PSW
832 br %r14 1015 br %r14
833cleanup_sysc_restore_insn: 1016cleanup_sysc_restore_insn:
834 .long sysc_done - 4 + 0x80000000 1017 .long sysc_done - 4 + 0x80000000
1018 .long sysc_done - 8 + 0x80000000
835 1019
836cleanup_io_tif: 1020cleanup_io_tif:
837 l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000 1021 mvc __LC_RETURN_PSW(4),0(%r12)
1022 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
1023 la %r12,__LC_RETURN_PSW
838 br %r14 1024 br %r14
839 1025
840cleanup_io_restore: 1026cleanup_io_restore:
841 cl %r9,BASED(cleanup_io_restore_insn) 1027 clc 4(4,%r12),BASED(cleanup_io_restore_insn)
842 jhe 0f 1028 be BASED(1f)
843 l %r9,12(%r11) # get saved r11 pointer to pt_regs 1029 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
844 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 1030 clc 4(4,%r12),BASED(cleanup_io_restore_insn+4)
845 mvc 0(32,%r11),__PT_R8(%r9) 1031 be BASED(1f)
846 lm %r0,%r7,__PT_R0(%r9) 1032 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
8470: lm %r8,%r9,__LC_RETURN_PSW 1033 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
1034 lm %r0,%r11,SP_R0(%r15)
1035 l %r15,SP_R15(%r15)
10361: la %r12,__LC_RETURN_PSW
848 br %r14 1037 br %r14
849cleanup_io_restore_insn: 1038cleanup_io_restore_insn:
850 .long io_done - 4 + 0x80000000 1039 .long io_done - 4 + 0x80000000
851 1040 .long io_done - 8 + 0x80000000
852cleanup_idle:
853 # copy interrupt clock & cpu timer
854 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
855 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
856 chi %r11,__LC_SAVE_AREA_ASYNC
857 je 0f
858 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
859 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
8600: # check if stck has been executed
861 cl %r9,BASED(cleanup_idle_insn)
862 jhe 1f
863 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
864 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
8651: # account system time going idle
866 lm %r9,%r10,__LC_STEAL_TIMER
867 ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
868 SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
869 stm %r9,%r10,__LC_STEAL_TIMER
870 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
871 lm %r9,%r10,__LC_SYSTEM_TIMER
872 ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
873 SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
874 stm %r9,%r10,__LC_SYSTEM_TIMER
875 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
876 # prepare return psw
877 n %r8,BASED(cleanup_idle_wait) # clear wait state bit
878 l %r9,24(%r11) # return from psw_idle
879 br %r14
880cleanup_idle_insn:
881 .long psw_idle_lpsw + 0x80000000
882cleanup_idle_wait:
883 .long 0xfffdffff
884 1041
885/* 1042/*
886 * Integer constants 1043 * Integer constants
887 */ 1044 */
888 .align 4 1045 .align 4
889.Lnr_syscalls: 1046.Lc_spsize: .long SP_SIZE
890 .long NR_syscalls 1047.Lc_overhead: .long STACK_FRAME_OVERHEAD
891.Lvtimer_max: 1048.Lnr_syscalls: .long NR_syscalls
892 .quad 0x7fffffffffffffff 1049.L0x018: .short 0x018
1050.L0x020: .short 0x020
1051.L0x028: .short 0x028
1052.L0x030: .short 0x030
1053.L0x038: .short 0x038
1054.Lc_1: .long 1
893 1055
894/* 1056/*
895 * Symbol constants 1057 * Symbol constants
896 */ 1058 */
897.Ldo_machine_check: .long s390_do_machine_check 1059.Ls390_mcck: .long s390_do_machine_check
898.Lhandle_mcck: .long s390_handle_mcck 1060.Ls390_handle_mcck:
899.Ldo_IRQ: .long do_IRQ 1061 .long s390_handle_mcck
900.Ldo_extint: .long do_extint 1062.Lmck_old_psw: .long __LC_MCK_OLD_PSW
901.Ldo_signal: .long do_signal 1063.Ldo_IRQ: .long do_IRQ
902.Ldo_notify_resume: .long do_notify_resume 1064.Ldo_extint: .long do_extint
903.Ldo_per_trap: .long do_per_trap 1065.Ldo_signal: .long do_signal
904.Ljump_table: .long pgm_check_table 1066.Ldo_notify_resume:
905.Lschedule: .long schedule 1067 .long do_notify_resume
1068.Lhandle_per: .long do_per_trap
1069.Ldo_execve: .long do_execve
1070.Lexecve_tail: .long execve_tail
1071.Ljump_table: .long pgm_check_table
1072.Lschedule: .long schedule
906#ifdef CONFIG_PREEMPT 1073#ifdef CONFIG_PREEMPT
907.Lpreempt_irq: .long preempt_schedule_irq 1074.Lpreempt_schedule_irq:
1075 .long preempt_schedule_irq
908#endif 1076#endif
909.Ltrace_enter: .long do_syscall_trace_enter 1077.Ltrace_entry: .long do_syscall_trace_enter
910.Ltrace_exit: .long do_syscall_trace_exit 1078.Ltrace_exit: .long do_syscall_trace_exit
911.Lschedule_tail: .long schedule_tail 1079.Lschedtail: .long schedule_tail
912.Lsys_call_table: .long sys_call_table 1080.Lsysc_table: .long sys_call_table
913.Lsysc_per: .long sysc_per + 0x80000000
914#ifdef CONFIG_TRACE_IRQFLAGS 1081#ifdef CONFIG_TRACE_IRQFLAGS
915.Lhardirqs_on: .long trace_hardirqs_on_caller 1082.Ltrace_irq_on_caller:
916.Lhardirqs_off: .long trace_hardirqs_off_caller 1083 .long trace_hardirqs_on_caller
1084.Ltrace_irq_off_caller:
1085 .long trace_hardirqs_off_caller
917#endif 1086#endif
918#ifdef CONFIG_LOCKDEP 1087#ifdef CONFIG_LOCKDEP
919.Llockdep_sys_exit: .long lockdep_sys_exit 1088.Llockdep_sys_exit:
1089 .long lockdep_sys_exit
920#endif 1090#endif
921.Lcritical_start: .long __critical_start + 0x80000000 1091.Lcritical_start:
922.Lcritical_length: .long __critical_end - __critical_start 1092 .long __critical_start + 0x80000000
1093.Lcritical_end:
1094 .long __critical_end + 0x80000000
1095.Lcleanup_critical:
1096 .long cleanup_critical
923 1097
924 .section .rodata, "a" 1098 .section .rodata, "a"
925#define SYSCALL(esa,esame,emu) .long esa 1099#define SYSCALL(esa,esame,emu) .long esa
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 2711936fe70..66729eb7bbc 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -4,61 +4,24 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/signal.h> 5#include <linux/signal.h>
6#include <asm/ptrace.h> 6#include <asm/ptrace.h>
7#include <asm/cputime.h>
8 7
9extern void *restart_stack; 8void do_protection_exception(struct pt_regs *, long, unsigned long);
9void do_dat_exception(struct pt_regs *, long, unsigned long);
10void do_asce_exception(struct pt_regs *, long, unsigned long);
10 11
11void system_call(void); 12extern int sysctl_userprocess_debug;
12void pgm_check_handler(void);
13void ext_int_handler(void);
14void io_int_handler(void);
15void mcck_int_handler(void);
16void restart_int_handler(void);
17void restart_call_handler(void);
18void psw_idle(struct s390_idle_data *, unsigned long);
19
20asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
21asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
22
23void do_protection_exception(struct pt_regs *regs);
24void do_dat_exception(struct pt_regs *regs);
25void do_asce_exception(struct pt_regs *regs);
26
27void addressing_exception(struct pt_regs *regs);
28void data_exception(struct pt_regs *regs);
29void default_trap_handler(struct pt_regs *regs);
30void divide_exception(struct pt_regs *regs);
31void execute_exception(struct pt_regs *regs);
32void hfp_divide_exception(struct pt_regs *regs);
33void hfp_overflow_exception(struct pt_regs *regs);
34void hfp_significance_exception(struct pt_regs *regs);
35void hfp_sqrt_exception(struct pt_regs *regs);
36void hfp_underflow_exception(struct pt_regs *regs);
37void illegal_op(struct pt_regs *regs);
38void operand_exception(struct pt_regs *regs);
39void overflow_exception(struct pt_regs *regs);
40void privileged_op(struct pt_regs *regs);
41void space_switch_exception(struct pt_regs *regs);
42void special_op_exception(struct pt_regs *regs);
43void specification_exception(struct pt_regs *regs);
44void transaction_exception(struct pt_regs *regs);
45void translation_exception(struct pt_regs *regs);
46 13
47void do_per_trap(struct pt_regs *regs); 14void do_per_trap(struct pt_regs *regs);
48void syscall_trace(struct pt_regs *regs, int entryexit); 15void syscall_trace(struct pt_regs *regs, int entryexit);
49void kernel_stack_overflow(struct pt_regs * regs); 16void kernel_stack_overflow(struct pt_regs * regs);
50void do_signal(struct pt_regs *regs); 17void do_signal(struct pt_regs *regs);
51void handle_signal32(unsigned long sig, struct k_sigaction *ka, 18int handle_signal32(unsigned long sig, struct k_sigaction *ka,
52 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); 19 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
53void do_notify_resume(struct pt_regs *regs);
54 20
55struct ext_code; 21void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
56void do_extint(struct pt_regs *regs, struct ext_code, unsigned int, unsigned long); 22int __cpuinit start_secondary(void *cpuvoid);
57void do_restart(void);
58void __init startup_init(void); 23void __init startup_init(void);
59void die(struct pt_regs *regs, const char *str); 24void die(const char * str, struct pt_regs * regs, long err);
60
61void __init time_init(void);
62 25
63struct s390_mmap_arg_struct; 26struct s390_mmap_arg_struct;
64struct fadvise64_64_args; 27struct fadvise64_64_args;
@@ -73,6 +36,13 @@ long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
73long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args); 36long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
74long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, 37long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
75 u32 len_low); 38 u32 len_low);
39long sys_fork(void);
40long sys_clone(unsigned long newsp, unsigned long clone_flags,
41 int __user *parent_tidptr, int __user *child_tidptr);
42long sys_vfork(void);
43void execve_tail(void);
44long sys_execve(const char __user *name, const char __user *const __user *argv,
45 const char __user *const __user *envp);
76long sys_sigsuspend(int history0, int history1, old_sigset_t mask); 46long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
77long sys_sigaction(int sig, const struct old_sigaction __user *act, 47long sys_sigaction(int sig, const struct old_sigaction __user *act,
78 struct old_sigaction __user *oact); 48 struct old_sigaction __user *oact);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 6d34e0c97a3..713da076053 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -1,7 +1,8 @@
1/* 1/*
2 * arch/s390/kernel/entry64.S
2 * S390 low-level entry points. 3 * S390 low-level entry points.
3 * 4 *
4 * Copyright IBM Corp. 1999, 2012 5 * Copyright (C) IBM Corp. 1999,2010
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Hartmut Penner (hp@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com),
7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -10,7 +11,6 @@
10 11
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/linkage.h> 13#include <linux/linkage.h>
13#include <asm/processor.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15#include <asm/errno.h> 15#include <asm/errno.h>
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
@@ -18,148 +18,196 @@
18#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#include <asm/unistd.h> 19#include <asm/unistd.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/sigp.h> 21
22 22/*
23__PT_R0 = __PT_GPRS 23 * Stack layout for the system_call stack entry.
24__PT_R1 = __PT_GPRS + 8 24 * The first few entries are identical to the user_regs_struct.
25__PT_R2 = __PT_GPRS + 16 25 */
26__PT_R3 = __PT_GPRS + 24 26SP_PTREGS = STACK_FRAME_OVERHEAD
27__PT_R4 = __PT_GPRS + 32 27SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
28__PT_R5 = __PT_GPRS + 40 28SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
29__PT_R6 = __PT_GPRS + 48 29SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
30__PT_R7 = __PT_GPRS + 56 30SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
31__PT_R8 = __PT_GPRS + 64 31SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
32__PT_R9 = __PT_GPRS + 72 32SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
33__PT_R10 = __PT_GPRS + 80 33SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
34__PT_R11 = __PT_GPRS + 88 34SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
35__PT_R12 = __PT_GPRS + 96 35SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
36__PT_R13 = __PT_GPRS + 104 36SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
37__PT_R14 = __PT_GPRS + 112 37SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
38__PT_R15 = __PT_GPRS + 120 38SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
39SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
40SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
41SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
42SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
43SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
44SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
45SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
46SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
47SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
48SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
39 49
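The SP_* equates above are byte offsets of pt_regs fields relative to the stack pointer. A minimal C sketch of the layout they describe, assuming the 2.6-era field order and the 160-byte 64-bit frame overhead (the struct and sizes are illustrative, not the kernel's authoritative asm-offsets values):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: a 64-bit s390 stack frame is followed by pt_regs;
 * the member order mirrors the SP_* equates in the hunk above. */
#define STACK_FRAME_OVERHEAD 160

struct sketch_pt_regs {
	uint64_t args[1];	/* SP_ARGS */
	uint64_t psw[2];	/* SP_PSW, the 16-byte PSW */
	uint64_t gprs[16];	/* SP_R0 .. SP_R15 */
	uint64_t orig_gpr2;	/* SP_ORIG_R2 */
	uint16_t ilc;		/* SP_ILC */
	uint16_t svcnr;		/* SP_SVCNR */
};

int main(void)
{
	/* each SP_x is STACK_FRAME_OVERHEAD plus the member offset */
	printf("SP_R0 ~ %zu\n", STACK_FRAME_OVERHEAD +
	       offsetof(struct sketch_pt_regs, gprs));
	return 0;
}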
40STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 50STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
41STACK_SIZE = 1 << STACK_SHIFT 51STACK_SIZE = 1 << STACK_SHIFT
42 52
43_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 53_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
44 _TIF_MCCK_PENDING | _TIF_PER_TRAP ) 54 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
45_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
46 _TIF_MCCK_PENDING) 56 _TIF_MCCK_PENDING)
47_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
48 _TIF_SYSCALL_TRACEPOINT) 58 _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
49_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) 59_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
50 60
51#define BASED(name) name-system_call(%r13) 61#define BASED(name) name-system_call(%r13)
52 62
53 .macro TRACE_IRQS_ON 63 .macro SPP newpp
64#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
65 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
66 jz .+8
67 .insn s,0xb2800000,\newpp
68#endif
69 .endm
70
71 .macro HANDLE_SIE_INTERCEPT
72#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
73 tm __TI_flags+6(%r12),_TIF_SIE>>8
74 jz 0f
75 SPP __LC_CMF_HPP # set host id
76 clc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
77 jl 0f
78 clc SP_PSW+8(8,%r15),BASED(.Lsie_done)
79 jhe 0f
80 mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
810:
82#endif
83 .endm
84
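The HANDLE_SIE_INTERCEPT macro introduced on the right is a plain address-range test on the saved PSW: if the task was running a KVM guest and the interrupt landed inside the SIE critical region, the PSW is rewound to .Lsie_loop so the host resumes at a safe point. A C sketch of that check (names and types are illustrative):

#include <stdint.h>
#include <stdbool.h>

/* Sketch: rewind the saved PSW address if it lies in [sie_loop, sie_done). */
static void handle_sie_intercept(uint64_t *saved_psw_addr, bool tif_sie,
				 uint64_t sie_loop, uint64_t sie_done)
{
	if (!tif_sie)			/* tm __TI_flags+6(%r12),_TIF_SIE>>8 */
		return;
	/* SPP __LC_CMF_HPP would restore the host id here */
	if (*saved_psw_addr >= sie_loop && *saved_psw_addr < sie_done)
		*saved_psw_addr = sie_loop;	/* mvc SP_PSW+8(8,%r15),.Lsie_loop */
}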
54#ifdef CONFIG_TRACE_IRQFLAGS 85#ifdef CONFIG_TRACE_IRQFLAGS
86 .macro TRACE_IRQS_ON
55 basr %r2,%r0 87 basr %r2,%r0
56 brasl %r14,trace_hardirqs_on_caller 88 brasl %r14,trace_hardirqs_on_caller
57#endif
58 .endm 89 .endm
59 90
60 .macro TRACE_IRQS_OFF 91 .macro TRACE_IRQS_OFF
61#ifdef CONFIG_TRACE_IRQFLAGS
62 basr %r2,%r0 92 basr %r2,%r0
63 brasl %r14,trace_hardirqs_off_caller 93 brasl %r14,trace_hardirqs_off_caller
64#endif
65 .endm 94 .endm
95#else
96#define TRACE_IRQS_ON
97#define TRACE_IRQS_OFF
98#endif
66 99
67 .macro LOCKDEP_SYS_EXIT
68#ifdef CONFIG_LOCKDEP 100#ifdef CONFIG_LOCKDEP
69 tm __PT_PSW+1(%r11),0x01 # returning to user ? 101 .macro LOCKDEP_SYS_EXIT
70 jz .+10 102 tm SP_PSW+1(%r15),0x01 # returning to user ?
103 jz 0f
71 brasl %r14,lockdep_sys_exit 104 brasl %r14,lockdep_sys_exit
72#endif 1050:
73 .endm 106 .endm
74 107#else
75 .macro SPP newpp 108#define LOCKDEP_SYS_EXIT
76#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
77 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
78 jz .+8
79 .insn s,0xb2800000,\newpp
80#endif 109#endif
110
111 .macro UPDATE_VTIME lc_from,lc_to,lc_sum
112 lg %r10,\lc_from
113 slg %r10,\lc_to
114 alg %r10,\lc_sum
115 stg %r10,\lc_sum
81 .endm 116 .endm
82 117
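The new UPDATE_VTIME macro accumulates the difference of two lowcore CPU-timer snapshots into a running sum. A one-line C equivalent (a sketch; the s390 CPU timer counts down, so an earlier snapshot minus a later one yields elapsed time):

#include <stdint.h>

/* UPDATE_VTIME lc_from,lc_to,lc_sum in C: the lg/slg/alg/stg sequence. */
static inline void update_vtime(uint64_t from, uint64_t to, uint64_t *sum)
{
	*sum += from - to;
}

/* e.g. sysc_vtime: user_timer += exit_timer - sync_enter_timer */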
83 .macro HANDLE_SIE_INTERCEPT scratch,pgmcheck 118/*
84#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 119 * Register usage in interrupt handlers:
85 tmhh %r8,0x0001 # interrupting from user ? 120 * R9 - pointer to current task structure
86 jnz .+42 121 * R13 - pointer to literal pool
87 lgr \scratch,%r9 122 * R14 - return register for function calls
88 slg \scratch,BASED(.Lsie_loop) 123 * R15 - kernel stack pointer
89 clg \scratch,BASED(.Lsie_length) 124 */
90 .if \pgmcheck 125
91 # Some program interrupts are suppressing (e.g. protection). 126 .macro SAVE_ALL_SVC psworg,savearea
92 # We must also check the instruction after SIE in that case. 127 stmg %r11,%r15,\savearea
93 # do_protection_exception will rewind to rewind_pad 128 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
94 jh .+22 129 aghi %r15,-SP_SIZE # make room for registers & psw
95 .else 130 lg %r11,__LC_LAST_BREAK
96 jhe .+22
97 .endif
98 lg %r9,BASED(.Lsie_loop)
99 SPP BASED(.Lhost_id) # set host id
100#endif
101 .endm 131 .endm
102 132
103 .macro CHECK_STACK stacksize,savearea 133 .macro SAVE_ALL_PGM psworg,savearea
134 stmg %r11,%r15,\savearea
135 tm \psworg+1,0x01 # test problem state bit
104#ifdef CONFIG_CHECK_STACK 136#ifdef CONFIG_CHECK_STACK
105 tml %r15,\stacksize - CONFIG_STACK_GUARD 137 jnz 1f
106 lghi %r14,\savearea 138 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
107 jz stack_overflow 139 jnz 2f
140 la %r12,\psworg
141 j stack_overflow
142#else
143 jz 2f
108#endif 144#endif
1451: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
1462: aghi %r15,-SP_SIZE # make room for registers & psw
147 larl %r13,system_call
148 lg %r11,__LC_LAST_BREAK
109 .endm 149 .endm
110 150
111 .macro SWITCH_ASYNC savearea,stack,shift 151 .macro SAVE_ALL_ASYNC psworg,savearea
112 tmhh %r8,0x0001 # interrupting from user ? 152 stmg %r11,%r15,\savearea
113 jnz 1f 153 larl %r13,system_call
114 lgr %r14,%r9 154 lg %r11,__LC_LAST_BREAK
115 slg %r14,BASED(.Lcritical_start) 155 la %r12,\psworg
116 clg %r14,BASED(.Lcritical_length) 156 tm \psworg+1,0x01 # test problem state bit
157 jnz 1f # from user -> load kernel stack
158 clc \psworg+8(8),BASED(.Lcritical_end)
117 jhe 0f 159 jhe 0f
118 lghi %r11,\savearea # inside critical section, do cleanup 160 clc \psworg+8(8),BASED(.Lcritical_start)
161 jl 0f
119 brasl %r14,cleanup_critical 162 brasl %r14,cleanup_critical
120 tmhh %r8,0x0001 # retest problem state after cleanup 163 tm 1(%r12),0x01 # retest problem state after cleanup
121 jnz 1f 164 jnz 1f
1220: lg %r14,\stack # are we already on the target stack? 1650: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
123 slgr %r14,%r15 166 slgr %r14,%r15
124 srag %r14,%r14,\shift 167 srag %r14,%r14,STACK_SHIFT
168#ifdef CONFIG_CHECK_STACK
125 jnz 1f 169 jnz 1f
126 CHECK_STACK 1<<\shift,\savearea 170 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
127 j 2f 171 jnz 2f
1281: lg %r15,\stack # load target stack 172 j stack_overflow
1292: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 173#else
130 la %r11,STACK_FRAME_OVERHEAD(%r15) 174 jz 2f
175#endif
1761: lg %r15,__LC_ASYNC_STACK # load async stack
1772: aghi %r15,-SP_SIZE # make room for registers & psw
131 .endm 178 .endm
132 179
133 .macro UPDATE_VTIME scratch,enter_timer 180 .macro CREATE_STACK_FRAME savearea
134 lg \scratch,__LC_EXIT_TIMER 181 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
135 slg \scratch,\enter_timer 182 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
136 alg \scratch,__LC_USER_TIMER 183 mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
137 stg \scratch,__LC_USER_TIMER 184 stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
138 lg \scratch,__LC_LAST_UPDATE_TIMER
139 slg \scratch,__LC_EXIT_TIMER
140 alg \scratch,__LC_SYSTEM_TIMER
141 stg \scratch,__LC_SYSTEM_TIMER
142 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
143 .endm 185 .endm
144 186
145 .macro LAST_BREAK scratch 187 .macro RESTORE_ALL psworg,sync
146 srag \scratch,%r10,23 188 mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
147 jz .+10 189 .if !\sync
148 stg %r10,__TI_last_break(%r12) 190 ni \psworg+1,0xfd # clear wait state bit
191 .endif
192 lg %r14,__LC_VDSO_PER_CPU
193 lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user
194 stpt __LC_EXIT_TIMER
195 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 196	lmg	%r14,%r15,SP_R14(%r15) # load gprs 14-15 of user
197 lpswe \psworg # back to caller
149 .endm 198 .endm
150 199
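RESTORE_ALL on the right bundles the whole return path: it moves the saved PSW back to the lowcore slot, clears the wait-state bit for asynchronous returns, restores the registers and leaves through lpswe. A C sketch of the PSW handling only (struct and names are illustrative; the register restore is summarized in a comment):

#include <string.h>

struct sk_psw { unsigned char b[16]; };	/* 16-byte PSW image */

/* Sketch of RESTORE_ALL psworg,sync: only the PSW part is modelled. */
static void restore_all_sketch(struct sk_psw *lowcore_slot,
			       const struct sk_psw *stack_psw, int sync)
{
	memcpy(lowcore_slot, stack_psw, sizeof(*stack_psw));
	if (!sync)
		lowcore_slot->b[1] &= 0xfd;	/* ni \psworg+1,0xfd: clear wait bit */
	/* then: load the vdso per-cpu base, lmg gprs 0-13, stpt the
	 * exit timer, mirror it into the vdso ECTG area, lmg 14-15, lpswe */
}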
151 .macro REENABLE_IRQS 200 .macro LAST_BREAK
152 stg %r8,__LC_RETURN_PSW 201 srag %r10,%r11,23
153 ni __LC_RETURN_PSW,0xbf 202 jz 0f
154 ssm __LC_RETURN_PSW 203 stg %r11,__TI_last_break(%r12)
2040:
155 .endm 205 .endm
156 206
157 .macro STCK savearea 207 .macro REENABLE_IRQS
158#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 208 mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
159 .insn s,0xb27c0000,\savearea # store clock fast 209 ni __SF_EMPTY(%r15),0xbf
160#else 210 ssm __SF_EMPTY(%r15)
161 .insn s,0xb2050000,\savearea # store clock
162#endif
163 .endm 211 .endm
164 212
165 .section .kprobes.text, "ax" 213 .section .kprobes.text, "ax"
@@ -172,23 +220,22 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
172 * gpr2 = prev 220 * gpr2 = prev
173 */ 221 */
174ENTRY(__switch_to) 222ENTRY(__switch_to)
175 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
176 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
177 lg %r4,__THREAD_info(%r2) # get thread_info of prev 223 lg %r4,__THREAD_info(%r2) # get thread_info of prev
178 lg %r5,__THREAD_info(%r3) # get thread_info of next 224 lg %r5,__THREAD_info(%r3) # get thread_info of next
179 lgr %r15,%r5
180 aghi %r15,STACK_SIZE # end of kernel stack of next
181 stg %r3,__LC_CURRENT # store task struct of next
182 stg %r5,__LC_THREAD_INFO # store thread info of next
183 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
184 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
185 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
186 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
187 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? 225 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
188 jz 0f 226 jz 0f
189 ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 227 ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
190 oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next 228 oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next
1910: lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 2290: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
230 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
231 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
232 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
233 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
234 stg %r3,__LC_CURRENT # store task struct of next
235 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
236 stg %r5,__LC_THREAD_INFO # store thread info of next
237 aghi %r5,STACK_SIZE # end of kernel stack of next
238 stg %r5,__LC_KERNEL_STACK # store end of kernel stack
192 br %r14 239 br %r14
193 240
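__switch_to is reordered but performs the same bookkeeping. A C sketch of the flag migration and stack swap, with simplified types (the bit value is illustrative; the lowcore updates for current, pid and thread_info are summarized in a comment):

#include <stdint.h>

struct sk_task {
	uint64_t ksp;		/* __THREAD_ksp */
	uint64_t *ti_flags;	/* __TI_flags of its thread_info */
};

#define SK_TIF_MCCK_PENDING (1UL << 7)	/* illustrative bit only */

/* Sketch of __switch_to: migrate the pending machine-check flag
 * from prev to next, then swap kernel stack pointers. */
static void switch_to_sketch(struct sk_task *prev, struct sk_task *next,
			     uint64_t current_sp)
{
	if (*prev->ti_flags & SK_TIF_MCCK_PENDING) {
		*prev->ti_flags &= ~SK_TIF_MCCK_PENDING;
		*next->ti_flags |= SK_TIF_MCCK_PENDING;
	}
	prev->ksp = current_sp;		/* stg %r15,__THREAD_ksp(%r2) */
	/* the asm then loads %r15 from next->ksp, reloads the saved
	 * gprs, and updates lowcore: current task, pid (also into
	 * control register 4), thread_info and kernel stack end */
}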
194__critical_start: 241__critical_start:
@@ -199,82 +246,76 @@ __critical_start:
199 246
200ENTRY(system_call) 247ENTRY(system_call)
201 stpt __LC_SYNC_ENTER_TIMER 248 stpt __LC_SYNC_ENTER_TIMER
202sysc_stmg: 249sysc_saveall:
203 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 250 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
204 lg %r10,__LC_LAST_BREAK 251 CREATE_STACK_FRAME __LC_SAVE_AREA
205 lg %r12,__LC_THREAD_INFO 252 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
206 larl %r13,system_call 253 mvc SP_ILC(4,%r15),__LC_SVC_ILC
207sysc_per: 254 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
208 lg %r15,__LC_KERNEL_STACK
209 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
210 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
211sysc_vtime: 255sysc_vtime:
212 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER 256 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
213 LAST_BREAK %r13 257sysc_stime:
214 stmg %r0,%r7,__PT_R0(%r11) 258 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
215 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 259sysc_update:
216 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW 260 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
217 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 261 LAST_BREAK
218sysc_do_svc: 262sysc_do_svc:
219 oi __TI_flags+7(%r12),_TIF_SYSCALL 263 llgh %r7,SP_SVCNR(%r15)
220 llgh %r8,__PT_INT_CODE+2(%r11) 264 slag %r7,%r7,2 # shift and test for svc 0
221 slag %r8,%r8,2 # shift and test for svc 0
222 jnz sysc_nr_ok 265 jnz sysc_nr_ok
223 # svc 0: system call number in %r1 266 # svc 0: system call number in %r1
224 llgfr %r1,%r1 # clear high word in r1 267 llgfr %r1,%r1 # clear high word in r1
225 cghi %r1,NR_syscalls 268 cghi %r1,NR_syscalls
226 jnl sysc_nr_ok 269 jnl sysc_nr_ok
227 sth %r1,__PT_INT_CODE+2(%r11) 270 sth %r1,SP_SVCNR(%r15)
228 slag %r8,%r1,2 271 slag %r7,%r1,2 # shift and test for svc 0
229sysc_nr_ok: 272sysc_nr_ok:
230 larl %r10,sys_call_table # 64 bit system call table 273 larl %r10,sys_call_table
231#ifdef CONFIG_COMPAT 274#ifdef CONFIG_COMPAT
232 tm __TI_flags+5(%r12),(_TIF_31BIT>>16) 275 tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ?
233 jno sysc_noemu 276 jno sysc_noemu
234 larl %r10,sys_call_table_emu # 31 bit system call table 277 larl %r10,sys_call_table_emu # use 31 bit emulation system calls
235sysc_noemu: 278sysc_noemu:
236#endif 279#endif
237 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 280 tm __TI_flags+6(%r12),_TIF_SYSCALL
238 stg %r2,__PT_ORIG_GPR2(%r11) 281 mvc SP_ARGS(8,%r15),SP_R7(%r15)
239 stg %r7,STACK_FRAME_OVERHEAD(%r15) 282 lgf %r8,0(%r7,%r10) # load address of system call routine
240 lgf %r9,0(%r8,%r10) # get system call add.
241 tm __TI_flags+6(%r12),_TIF_TRACE >> 8
242 jnz sysc_tracesys 283 jnz sysc_tracesys
243 basr %r14,%r9 # call sys_xxxx 284 basr %r14,%r8 # call sys_xxxx
244 stg %r2,__PT_R2(%r11) # store return value 285 stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
245 286
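Both variants index a table of 4-byte (.long) routine addresses, hence the shift by 2 in slag; svc 0 passes the real number in %r1 and is re-checked against NR_syscalls. A self-contained C sketch of the dispatch (table contents and the -ENOSYS fallback are illustrative):

#include <stdio.h>

/* Sketch of sysc_do_svc/sysc_nr_ok: the real table holds 31-bit
 * .long addresses; plain function pointers stand in for that here. */
typedef long (*svc_fn)(void);

static long sys_ni(void)    { return -38; }	/* -ENOSYS */
static long sys_dummy(void) { return 0; }

static svc_fn sys_call_table[] = { sys_ni, sys_dummy };
#define SK_NR_SYSCALLS (sizeof(sys_call_table) / sizeof(sys_call_table[0]))

static long dispatch_svc(unsigned int svcnr, unsigned long r1)
{
	if (svcnr == 0 && r1 < SK_NR_SYSCALLS)	/* svc 0: number in %r1 */
		svcnr = (unsigned int)r1;	/* sth %r1,SP_SVCNR(%r15) */
	if (svcnr >= SK_NR_SYSCALLS)
		svcnr = 0;			/* out of range -> svc 0 path */
	return sys_call_table[svcnr]();		/* lgf + basr %r14,%r8 */
}

int main(void)
{
	printf("%ld\n", dispatch_svc(0, 1));	/* runs sys_dummy */
	return 0;
}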
246sysc_return: 287sysc_return:
247 LOCKDEP_SYS_EXIT 288 LOCKDEP_SYS_EXIT
248sysc_tif: 289sysc_tif:
249 tm __PT_PSW+1(%r11),0x01 # returning to user ?
250 jno sysc_restore
251 tm __TI_flags+7(%r12),_TIF_WORK_SVC 290 tm __TI_flags+7(%r12),_TIF_WORK_SVC
252 jnz sysc_work # check for work 291 jnz sysc_work # there is work to do (signals etc.)
253 ni __TI_flags+7(%r12),255-_TIF_SYSCALL
254sysc_restore: 292sysc_restore:
255 lg %r14,__LC_VDSO_PER_CPU 293 RESTORE_ALL __LC_RETURN_PSW,1
256 lmg %r0,%r10,__PT_R0(%r11)
257 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
258 stpt __LC_EXIT_TIMER
259 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
260 lmg %r11,%r15,__PT_R11(%r11)
261 lpswe __LC_RETURN_PSW
262sysc_done: 294sysc_done:
263 295
264# 296#
265# One of the work bits is on. Find out which one. 297# There is work to do, but first we need to check if we return to userspace.
266# 298#
267sysc_work: 299sysc_work:
300 tm SP_PSW+1(%r15),0x01 # returning to user ?
301 jno sysc_restore
302
303#
304# One of the work bits is on. Find out which one.
305#
306sysc_work_tif:
268 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING 307 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
269 jo sysc_mcck_pending 308 jo sysc_mcck_pending
270 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 309 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
271 jo sysc_reschedule 310 jo sysc_reschedule
272 tm __TI_flags+7(%r12),_TIF_PER_TRAP
273 jo sysc_singlestep
274 tm __TI_flags+7(%r12),_TIF_SIGPENDING 311 tm __TI_flags+7(%r12),_TIF_SIGPENDING
275 jo sysc_sigpending 312 jo sysc_sigpending
276 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 313 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
277 jo sysc_notify_resume 314 jo sysc_notify_resume
315 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
316 jo sysc_restart
317 tm __TI_flags+7(%r12),_TIF_PER_TRAP
318 jo sysc_singlestep
278 j sysc_return # beware of critical section cleanup 319 j sysc_return # beware of critical section cleanup
279 320
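The work-bit tests above run in a fixed priority order, and every handler path returns through sysc_return, so the flags are re-examined until none is set. A C sketch of the right-hand ordering (the bit positions are illustrative, not the real TIF layout; the strings name the labels taken):

#include <stdint.h>

#define TIF_MCCK_PENDING  (1u << 0)	/* illustrative bits only */
#define TIF_NEED_RESCHED  (1u << 1)
#define TIF_SIGPENDING    (1u << 2)
#define TIF_NOTIFY_RESUME (1u << 3)
#define TIF_RESTART_SVC   (1u << 4)
#define TIF_PER_TRAP      (1u << 5)

/* Sketch of sysc_work_tif: exactly one handler runs per pass. */
static const char *pick_work(uint32_t tif)
{
	if (tif & TIF_MCCK_PENDING)  return "sysc_mcck_pending";
	if (tif & TIF_NEED_RESCHED)  return "sysc_reschedule";
	if (tif & TIF_SIGPENDING)    return "sysc_sigpending";
	if (tif & TIF_NOTIFY_RESUME) return "sysc_notify_resume";
	if (tif & TIF_RESTART_SVC)   return "sysc_restart";
	if (tif & TIF_PER_TRAP)      return "sysc_singlestep";
	return "sysc_return";		/* nothing left to do */
}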
280# 321#
@@ -282,7 +323,7 @@ sysc_work:
282# 323#
283sysc_reschedule: 324sysc_reschedule:
284 larl %r14,sysc_return 325 larl %r14,sysc_return
285 jg schedule 326 jg schedule # return point is sysc_return
286 327
287# 328#
288# _TIF_MCCK_PENDING is set, call handler 329# _TIF_MCCK_PENDING is set, call handler
@@ -295,33 +336,43 @@ sysc_mcck_pending:
295# _TIF_SIGPENDING is set, call do_signal 336# _TIF_SIGPENDING is set, call do_signal
296# 337#
297sysc_sigpending: 338sysc_sigpending:
298 lgr %r2,%r11 # pass pointer to pt_regs 339 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
299 brasl %r14,do_signal 340 la %r2,SP_PTREGS(%r15) # load pt_regs
300 tm __TI_flags+7(%r12),_TIF_SYSCALL 341 brasl %r14,do_signal # call do_signal
301 jno sysc_return 342 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
302 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 343 jo sysc_restart
303 lghi %r8,0 # svc 0 returns -ENOSYS 344 tm __TI_flags+7(%r12),_TIF_PER_TRAP
304 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number 345 jo sysc_singlestep
305 cghi %r1,NR_syscalls 346 j sysc_return
306 jnl sysc_nr_ok # invalid svc number -> do svc 0
307 slag %r8,%r1,2
308 j sysc_nr_ok # restart svc
309 347
310# 348#
311# _TIF_NOTIFY_RESUME is set, call do_notify_resume 349# _TIF_NOTIFY_RESUME is set, call do_notify_resume
312# 350#
313sysc_notify_resume: 351sysc_notify_resume:
314 lgr %r2,%r11 # pass pointer to pt_regs 352 la %r2,SP_PTREGS(%r15) # load pt_regs
315 larl %r14,sysc_return 353 larl %r14,sysc_return
316 jg do_notify_resume 354 jg do_notify_resume # call do_notify_resume
355
356#
357# _TIF_RESTART_SVC is set, set up registers and restart svc
358#
359sysc_restart:
360 ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
361 lg %r7,SP_R2(%r15) # load new svc number
362 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
363 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
364 sth %r7,SP_SVCNR(%r15)
365 slag %r7,%r7,2
366 j sysc_nr_ok # restart svc
317 367
318# 368#
319# _TIF_PER_TRAP is set, call do_per_trap 369# _TIF_PER_TRAP is set, call do_per_trap
320# 370#
321sysc_singlestep: 371sysc_singlestep:
322 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP 372 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
323 lgr %r2,%r11 # pass pointer to pt_regs 373 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
324 larl %r14,sysc_return 374 la %r2,SP_PTREGS(%r15) # address of register-save area
375 larl %r14,sysc_return # load adr. of system return
325 jg do_per_trap 376 jg do_per_trap
326 377
327# 378#
@@ -329,148 +380,223 @@ sysc_singlestep:
329# and after the system call 380# and after the system call
330# 381#
331sysc_tracesys: 382sysc_tracesys:
332 lgr %r2,%r11 # pass pointer to pt_regs 383 la %r2,SP_PTREGS(%r15) # load pt_regs
333 la %r3,0 384 la %r3,0
334 llgh %r0,__PT_INT_CODE+2(%r11) 385 llgh %r0,SP_SVCNR(%r15)
335 stg %r0,__PT_R2(%r11) 386 stg %r0,SP_R2(%r15)
336 brasl %r14,do_syscall_trace_enter 387 brasl %r14,do_syscall_trace_enter
337 lghi %r0,NR_syscalls 388 lghi %r0,NR_syscalls
338 clgr %r0,%r2 389 clgr %r0,%r2
339 jnh sysc_tracenogo 390 jnh sysc_tracenogo
340 sllg %r8,%r2,2 391 sllg %r7,%r2,2 # svc number *4
341 lgf %r9,0(%r8,%r10) 392 lgf %r8,0(%r7,%r10)
342sysc_tracego: 393sysc_tracego:
343 lmg %r3,%r7,__PT_R3(%r11) 394 lmg %r3,%r6,SP_R3(%r15)
344 stg %r7,STACK_FRAME_OVERHEAD(%r15) 395 mvc SP_ARGS(8,%r15),SP_R7(%r15)
345 lg %r2,__PT_ORIG_GPR2(%r11) 396 lg %r2,SP_ORIG_R2(%r15)
346 basr %r14,%r9 # call sys_xxx 397 basr %r14,%r8 # call sys_xxx
347 stg %r2,__PT_R2(%r11) # store return value 398 stg %r2,SP_R2(%r15) # store return value
348sysc_tracenogo: 399sysc_tracenogo:
349 tm __TI_flags+6(%r12),_TIF_TRACE >> 8 400 tm __TI_flags+6(%r12),_TIF_SYSCALL
350 jz sysc_return 401 jz sysc_return
351 lgr %r2,%r11 # pass pointer to pt_regs 402 la %r2,SP_PTREGS(%r15) # load pt_regs
352 larl %r14,sysc_return 403 larl %r14,sysc_return # return point is sysc_return
353 jg do_syscall_trace_exit 404 jg do_syscall_trace_exit
354 405
355# 406#
356# a new process exits the kernel with ret_from_fork 407# a new process exits the kernel with ret_from_fork
357# 408#
358ENTRY(ret_from_fork) 409ENTRY(ret_from_fork)
359 la %r11,STACK_FRAME_OVERHEAD(%r15) 410 lg %r13,__LC_SVC_NEW_PSW+8
360 lg %r12,__LC_THREAD_INFO 411 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
361 brasl %r14,schedule_tail 412 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
413 jo 0f
414 stg %r15,SP_R15(%r15) # store stack pointer for new kthread
4150: brasl %r14,schedule_tail
362 TRACE_IRQS_ON 416 TRACE_IRQS_ON
363 ssm __LC_SVC_NEW_PSW # reenable interrupts 417 stosm 24(%r15),0x03 # reenable interrupts
364 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
365 jne sysc_tracenogo
366 # it's a kernel thread
367 lmg %r9,%r10,__PT_R9(%r11) # load gprs
368ENTRY(kernel_thread_starter)
369 la %r2,0(%r10)
370 basr %r14,%r9
371 j sysc_tracenogo 418 j sysc_tracenogo
372 419
420#
421# kernel_execve function needs to deal with pt_regs that is not
422# at the usual place
423#
424ENTRY(kernel_execve)
425 stmg %r12,%r15,96(%r15)
426 lgr %r14,%r15
427 aghi %r15,-SP_SIZE
428 stg %r14,__SF_BACKCHAIN(%r15)
429 la %r12,SP_PTREGS(%r15)
430 xc 0(__PT_SIZE,%r12),0(%r12)
431 lgr %r5,%r12
432 brasl %r14,do_execve
433 ltgfr %r2,%r2
434 je 0f
435 aghi %r15,SP_SIZE
436 lmg %r12,%r15,96(%r15)
437 br %r14
438 # execve succeeded.
4390: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
440 lg %r15,__LC_KERNEL_STACK # load ksp
441 aghi %r15,-SP_SIZE # make room for registers & psw
442 lg %r13,__LC_SVC_NEW_PSW+8
443 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
444 lg %r12,__LC_THREAD_INFO
445 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
446 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
447 brasl %r14,execve_tail
448 j sysc_return
449
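As the comment says, kernel_execve keeps its pt_regs in its own frame and only migrates it to the kernel stack once do_execve succeeds. A hedged C sketch of that control flow (the stub and the trimmed pt_regs are illustrative, not kernel code):

#include <stdio.h>
#include <string.h>

struct sk_pt_regs { unsigned long gprs[16]; };	/* trimmed for the sketch */

static int do_execve_stub(const char *name, struct sk_pt_regs *regs)
{
	(void)name; (void)regs;
	return 0;				/* pretend success */
}

/* Sketch of ENTRY(kernel_execve): pt_regs lives in the local frame
 * until do_execve succeeds, then moves to the kernel stack. */
static long kernel_execve_sketch(const char *name,
				 struct sk_pt_regs *kernel_stack_regs)
{
	struct sk_pt_regs regs;
	memset(&regs, 0, sizeof(regs));		/* xc 0(__PT_SIZE,%r12),0(%r12) */
	int rc = do_execve_stub(name, &regs);
	if (rc)
		return rc;			/* failure: plain return to caller */
	/* success: irqs off, switch to the kernel stack, copy the
	 * regs over, irqs on, execve_tail(), leave via sysc_return */
	memcpy(kernel_stack_regs, &regs, sizeof(regs));
	return 0;
}

int main(void)
{
	struct sk_pt_regs kregs;
	printf("%ld\n", kernel_execve_sketch("/bin/true", &kregs));
	return 0;
}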
373/* 450/*
374 * Program check handler routine 451 * Program check handler routine
375 */ 452 */
376 453
377ENTRY(pgm_check_handler) 454ENTRY(pgm_check_handler)
455/*
456 * First we need to check for a special case:
457 * Single stepping an instruction that disables the PER event mask will
458 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
459 * For a single stepped SVC the program check handler gets control after
460 * the SVC new PSW has been loaded. But we want to execute the SVC first and
461 * then handle the PER event. Therefore we update the SVC old PSW to point
462 * to the pgm_check_handler and branch to the SVC handler after we checked
463 * if we have to load the kernel stack register.
464 * For every other possible cause for PER event without the PER mask set
465 * we just ignore the PER event (FIXME: is there anything we have to do
466 * for LPSW?).
467 */
378 stpt __LC_SYNC_ENTER_TIMER 468 stpt __LC_SYNC_ENTER_TIMER
379 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 469 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
380 lg %r10,__LC_LAST_BREAK 470 jnz pgm_per # got per exception -> special case
381 lg %r12,__LC_THREAD_INFO 471 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
382 larl %r13,system_call 472 CREATE_STACK_FRAME __LC_SAVE_AREA
383 lmg %r8,%r9,__LC_PGM_OLD_PSW 473 xc SP_ILC(4,%r15),SP_ILC(%r15)
384 HANDLE_SIE_INTERCEPT %r14,1 474 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
385 tmhh %r8,0x0001 # test problem state bit 475 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
386 jnz 1f # -> fault in user space 476 HANDLE_SIE_INTERCEPT
387 tmhh %r8,0x4000 # PER bit set in old PSW ? 477 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
388 jnz 0f # -> enabled, can't be a double fault 478 jz pgm_no_vtime
389 tm __LC_PGM_ILC+3,0x80 # check for per exception 479 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
390 jnz pgm_svcper # -> single stepped svc 480 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
3910: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 481 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
392 j 2f 482 LAST_BREAK
3931: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER 483pgm_no_vtime:
394 LAST_BREAK %r14 484 stg %r11,SP_ARGS(%r15)
395 lg %r15,__LC_KERNEL_STACK 485 lgf %r3,__LC_PGM_ILC # load program interruption code
396 lg %r14,__TI_task(%r12) 486 lg %r4,__LC_TRANS_EXC_CODE
397 lghi %r13,__LC_PGM_TDB 487 REENABLE_IRQS
398 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 488 lghi %r8,0x7f
399 jz 2f 489 ngr %r8,%r3
400 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 490 sll %r8,3
4012: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
402 la %r11,STACK_FRAME_OVERHEAD(%r15)
403 stmg %r0,%r7,__PT_R0(%r11)
404 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
405 stmg %r8,%r9,__PT_PSW(%r11)
406 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
407 mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
408 stg %r10,__PT_ARGS(%r11)
409 tm __LC_PGM_ILC+3,0x80 # check for per exception
410 jz 0f
411 tmhh %r8,0x0001 # kernel per event ?
412 jz pgm_kprobe
413 oi __TI_flags+7(%r12),_TIF_PER_TRAP
414 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
415 mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE
416 mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID
4170: REENABLE_IRQS
418 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
419 larl %r1,pgm_check_table 491 larl %r1,pgm_check_table
420 llgh %r10,__PT_INT_CODE+2(%r11) 492 lg %r1,0(%r8,%r1) # load address of handler routine
421 nill %r10,0x007f 493 la %r2,SP_PTREGS(%r15) # address of register-save area
422 sll %r10,2
423 je sysc_return
424 lgf %r1,0(%r10,%r1) # load address of handler routine
425 lgr %r2,%r11 # pass pointer to pt_regs
426 basr %r14,%r1 # branch to interrupt-handler 494 basr %r14,%r1 # branch to interrupt-handler
495pgm_exit:
427 j sysc_return 496 j sysc_return
428 497
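The dispatch above masks the interruption code to its low 7 bits and uses it as an index into pgm_check_table (8-byte entries on the right, hence the sll by 3). A C sketch with an illustrative table, using the GCC range-initializer extension:

#include <stdio.h>
#include <stddef.h>

/* Sketch of the program-check dispatch: handler = table[code & 0x7f]. */
typedef void (*pgm_handler)(void *pt_regs, long ilc, unsigned long texc);

static void default_trap(void *regs, long ilc, unsigned long texc)
{
	(void)regs; (void)texc;
	printf("program check 0x%lx\n", (unsigned long)(ilc & 0x7f));
}

static pgm_handler pgm_check_table[128] = {
	[0 ... 127] = default_trap,	/* GCC range initializer */
};

static void do_pgm_check(void *pt_regs, long ilc, unsigned long texc)
{
	pgm_check_table[ilc & 0x7f](pt_regs, ilc, texc);
}

int main(void)
{
	do_pgm_check(NULL, 0x11, 0);	/* 0x11: page-translation exception */
	return 0;
}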
429# 498#
430# PER event in supervisor state, must be kprobes 499# handle per exception
500#
501pgm_per:
502 tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
503 jnz pgm_per_std # ok, normal per event from user space
504# ok it's one of the special cases, now we need to find out which one
505 clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
506 je pgm_svcper
507# no interesting special case, ignore PER event
508 lpswe __LC_PGM_OLD_PSW
509
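pgm_per encodes the three-way decision from the comment block as two tests: a PER event with the PER mask still on in the old PSW is a normal event, an old PSW identical to the SVC new PSW is the single-stepped-SVC special case, and anything else is ignored. In C (a sketch; the 16-byte PSW compare mirrors the clc):

#include <string.h>

struct psw_image { unsigned char mask[8]; unsigned long addr; };

enum per_action { PER_NORMAL, PER_SVC, PER_IGNORE };

/* Sketch of pgm_per: classify a PER program check. */
static enum per_action classify_per(const struct psw_image *pgm_old,
				    const struct psw_image *svc_new)
{
	if (pgm_old->mask[0] & 0x40)	/* tm __LC_PGM_OLD_PSW,0x40 */
		return PER_NORMAL;	/* -> pgm_per_std */
	if (memcmp(pgm_old, svc_new, sizeof(*pgm_old)) == 0)
		return PER_SVC;		/* clc ...(16),__LC_SVC_NEW_PSW -> pgm_svcper */
	return PER_IGNORE;		/* lpswe __LC_PGM_OLD_PSW */
}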
510#
511# Normal per exception
431# 512#
432pgm_kprobe: 513pgm_per_std:
514 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
515 CREATE_STACK_FRAME __LC_SAVE_AREA
516 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
517 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
518 HANDLE_SIE_INTERCEPT
519 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
520 jz pgm_no_vtime2
521 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
522 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
523 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
524 LAST_BREAK
525pgm_no_vtime2:
526 lg %r1,__TI_task(%r12)
527 tm SP_PSW+1(%r15),0x01 # kernel per event ?
528 jz kernel_per
529 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
530 mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
531 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
532 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
533 lgf %r3,__LC_PGM_ILC # load program interruption code
534 lg %r4,__LC_TRANS_EXC_CODE
433 REENABLE_IRQS 535 REENABLE_IRQS
434 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 536 lghi %r8,0x7f
435 lgr %r2,%r11 # pass pointer to pt_regs 537 ngr %r8,%r3 # clear per-event-bit and ilc
436 brasl %r14,do_per_trap 538 je pgm_exit2
539 sll %r8,3
540 larl %r1,pgm_check_table
541 lg %r1,0(%r8,%r1) # load address of handler routine
542 la %r2,SP_PTREGS(%r15) # address of register-save area
543 basr %r14,%r1 # branch to interrupt-handler
544pgm_exit2:
437 j sysc_return 545 j sysc_return
438 546
439# 547#
440# single stepped system call 548# it was a single stepped SVC that is causing all the trouble
441# 549#
442pgm_svcper: 550pgm_svcper:
443 oi __TI_flags+7(%r12),_TIF_PER_TRAP 551 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
444 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW 552 CREATE_STACK_FRAME __LC_SAVE_AREA
445 larl %r14,sysc_per 553 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
446 stg %r14,__LC_RETURN_PSW+8 554 mvc SP_ILC(4,%r15),__LC_SVC_ILC
447 lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs 555 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
556 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
557 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
558 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
559 LAST_BREAK
560 lg %r8,__TI_task(%r12)
561 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
562 mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
563 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
564 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
565 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
566 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
567 j sysc_do_svc
568
569#
570# per was called from kernel, must be kprobes
571#
572kernel_per:
573 REENABLE_IRQS
574 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
575 la %r2,SP_PTREGS(%r15) # address of register-save area
576 brasl %r14,do_per_trap
577 j pgm_exit
448 578
449/* 579/*
450 * IO interrupt handler routine 580 * IO interrupt handler routine
451 */ 581 */
452ENTRY(io_int_handler) 582ENTRY(io_int_handler)
453 STCK __LC_INT_CLOCK 583 stck __LC_INT_CLOCK
454 stpt __LC_ASYNC_ENTER_TIMER 584 stpt __LC_ASYNC_ENTER_TIMER
455 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 585 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
456 lg %r10,__LC_LAST_BREAK 586 CREATE_STACK_FRAME __LC_SAVE_AREA+40
457 lg %r12,__LC_THREAD_INFO 587 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
458 larl %r13,system_call 588 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
459 lmg %r8,%r9,__LC_IO_OLD_PSW 589 HANDLE_SIE_INTERCEPT
460 HANDLE_SIE_INTERCEPT %r14,0 590 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
461 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 591 jz io_no_vtime
462 tmhh %r8,0x0001 # interrupting from user? 592 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
463 jz io_skip 593 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
464 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER 594 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
465 LAST_BREAK %r14 595 LAST_BREAK
466io_skip: 596io_no_vtime:
467 stmg %r0,%r7,__PT_R0(%r11)
468 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
469 stmg %r8,%r9,__PT_PSW(%r11)
470 TRACE_IRQS_OFF 597 TRACE_IRQS_OFF
471 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 598 la %r2,SP_PTREGS(%r15) # address of register-save area
472 lgr %r2,%r11 # pass pointer to pt_regs 599 brasl %r14,do_IRQ # call standard irq handler
473 brasl %r14,do_IRQ
474io_return: 600io_return:
475 LOCKDEP_SYS_EXIT 601 LOCKDEP_SYS_EXIT
476 TRACE_IRQS_ON 602 TRACE_IRQS_ON
@@ -478,13 +604,7 @@ io_tif:
478 tm __TI_flags+7(%r12),_TIF_WORK_INT 604 tm __TI_flags+7(%r12),_TIF_WORK_INT
479 jnz io_work # there is work to do (signals etc.) 605 jnz io_work # there is work to do (signals etc.)
480io_restore: 606io_restore:
481 lg %r14,__LC_VDSO_PER_CPU 607 RESTORE_ALL __LC_RETURN_PSW,0
482 lmg %r0,%r10,__PT_R0(%r11)
483 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
484 stpt __LC_EXIT_TIMER
485 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
486 lmg %r11,%r15,__PT_R11(%r11)
487 lpswe __LC_RETURN_PSW
488io_done: 608io_done:
489 609
490# 610#
@@ -497,7 +617,7 @@ io_done:
497# Before any work can be done, a switch to the kernel stack is required. 617# Before any work can be done, a switch to the kernel stack is required.
498# 618#
499io_work: 619io_work:
500 tm __PT_PSW+1(%r11),0x01 # returning to user ? 620 tm SP_PSW+1(%r15),0x01 # returning to user ?
501 jo io_work_user # yes -> do resched & signal 621 jo io_work_user # yes -> do resched & signal
502#ifdef CONFIG_PREEMPT 622#ifdef CONFIG_PREEMPT
503 # check for preemptive scheduling 623 # check for preemptive scheduling
@@ -506,11 +626,10 @@ io_work:
506 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 626 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
507 jno io_restore 627 jno io_restore
508 # switch to kernel stack 628 # switch to kernel stack
509 lg %r1,__PT_R15(%r11) 629 lg %r1,SP_R15(%r15)
510 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 630 aghi %r1,-SP_SIZE
511 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 631 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
512 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 632 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
513 la %r11,STACK_FRAME_OVERHEAD(%r1)
514 lgr %r15,%r1 633 lgr %r15,%r1
515 # TRACE_IRQS_ON already done at io_return, call 634 # TRACE_IRQS_ON already done at io_return, call
516 # TRACE_IRQS_OFF to keep things symmetrical 635 # TRACE_IRQS_OFF to keep things symmetrical
@@ -526,10 +645,9 @@ io_work:
526# 645#
527io_work_user: 646io_work_user:
528 lg %r1,__LC_KERNEL_STACK 647 lg %r1,__LC_KERNEL_STACK
529 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 648 aghi %r1,-SP_SIZE
530 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 649 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
531 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 650 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
532 la %r11,STACK_FRAME_OVERHEAD(%r1)
533 lgr %r15,%r1 651 lgr %r15,%r1
534 652
535# 653#
@@ -562,9 +680,9 @@ io_mcck_pending:
562# 680#
563io_reschedule: 681io_reschedule:
564 # TRACE_IRQS_ON already done at io_return 682 # TRACE_IRQS_ON already done at io_return
565 ssm __LC_SVC_NEW_PSW # reenable interrupts 683 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
566 brasl %r14,schedule # call scheduler 684 brasl %r14,schedule # call scheduler
567 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 685 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
568 TRACE_IRQS_OFF 686 TRACE_IRQS_OFF
569 j io_return 687 j io_return
570 688
@@ -573,10 +691,10 @@ io_reschedule:
573# 691#
574io_sigpending: 692io_sigpending:
575 # TRACE_IRQS_ON already done at io_return 693 # TRACE_IRQS_ON already done at io_return
576 ssm __LC_SVC_NEW_PSW # reenable interrupts 694 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
577 lgr %r2,%r11 # pass pointer to pt_regs 695 la %r2,SP_PTREGS(%r15) # load pt_regs
578 brasl %r14,do_signal 696 brasl %r14,do_signal # call do_signal
579 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 697 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
580 TRACE_IRQS_OFF 698 TRACE_IRQS_OFF
581 j io_return 699 j io_return
582 700
@@ -585,10 +703,10 @@ io_sigpending:
585# 703#
586io_notify_resume: 704io_notify_resume:
587 # TRACE_IRQS_ON already done at io_return 705 # TRACE_IRQS_ON already done at io_return
588 ssm __LC_SVC_NEW_PSW # reenable interrupts 706 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
589 lgr %r2,%r11 # pass pointer to pt_regs 707 la %r2,SP_PTREGS(%r15) # load pt_regs
590 brasl %r14,do_notify_resume 708 brasl %r14,do_notify_resume # call do_notify_resume
591 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 709 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
592 TRACE_IRQS_OFF 710 TRACE_IRQS_OFF
593 j io_return 711 j io_return
594 712
@@ -596,154 +714,176 @@ io_notify_resume:
596 * External interrupt handler routine 714 * External interrupt handler routine
597 */ 715 */
598ENTRY(ext_int_handler) 716ENTRY(ext_int_handler)
599 STCK __LC_INT_CLOCK 717 stck __LC_INT_CLOCK
600 stpt __LC_ASYNC_ENTER_TIMER 718 stpt __LC_ASYNC_ENTER_TIMER
601 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 719 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
602 lg %r10,__LC_LAST_BREAK 720 CREATE_STACK_FRAME __LC_SAVE_AREA+40
603 lg %r12,__LC_THREAD_INFO 721 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
604 larl %r13,system_call 722 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
605 lmg %r8,%r9,__LC_EXT_OLD_PSW 723 HANDLE_SIE_INTERCEPT
606 HANDLE_SIE_INTERCEPT %r14,0 724 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
607 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 725 jz ext_no_vtime
608 tmhh %r8,0x0001 # interrupting from user ? 726 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
609 jz ext_skip 727 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
610 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER 728 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
611 LAST_BREAK %r14 729 LAST_BREAK
612ext_skip: 730ext_no_vtime:
613 stmg %r0,%r7,__PT_R0(%r11)
614 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
615 stmg %r8,%r9,__PT_PSW(%r11)
616 TRACE_IRQS_OFF 731 TRACE_IRQS_OFF
617 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
618 lghi %r1,4096 732 lghi %r1,4096
619 lgr %r2,%r11 # pass pointer to pt_regs 733 la %r2,SP_PTREGS(%r15) # address of register-save area
620 llgf %r3,__LC_EXT_CPU_ADDR # get cpu address + interruption code 734 llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
621 llgf %r4,__LC_EXT_PARAMS # get external parameter 735 llgf %r4,__LC_EXT_PARAMS # get external parameter
622 lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter 736 lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter
623 brasl %r14,do_extint 737 brasl %r14,do_extint
624 j io_return 738 j io_return
625 739
626/*
627 * Load idle PSW. The second "half" of this function is in cleanup_idle.
628 */
629ENTRY(psw_idle)
630 stg %r3,__SF_EMPTY(%r15)
631 larl %r1,psw_idle_lpsw+4
632 stg %r1,__SF_EMPTY+8(%r15)
633 STCK __CLOCK_IDLE_ENTER(%r2)
634 stpt __TIMER_IDLE_ENTER(%r2)
635psw_idle_lpsw:
636 lpswe __SF_EMPTY(%r15)
637 br %r14
638psw_idle_end:
639
640__critical_end: 740__critical_end:
641 741
642/* 742/*
643 * Machine check handler routines 743 * Machine check handler routines
644 */ 744 */
645ENTRY(mcck_int_handler) 745ENTRY(mcck_int_handler)
646 STCK __LC_MCCK_CLOCK 746 stck __LC_MCCK_CLOCK
647 la %r1,4095 # revalidate r1 747 la %r1,4095 # revalidate r1
648 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer 748 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
649 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs 749 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
650 lg %r10,__LC_LAST_BREAK 750 stmg %r11,%r15,__LC_SAVE_AREA+80
651 lg %r12,__LC_THREAD_INFO
652 larl %r13,system_call 751 larl %r13,system_call
653 lmg %r8,%r9,__LC_MCK_OLD_PSW 752 lg %r11,__LC_LAST_BREAK
654 HANDLE_SIE_INTERCEPT %r14,0 753 la %r12,__LC_MCK_OLD_PSW
655 tm __LC_MCCK_CODE,0x80 # system damage? 754 tm __LC_MCCK_CODE,0x80 # system damage?
656 jo mcck_panic # yes -> rest of mcck code invalid 755 jo mcck_int_main # yes -> rest of mcck code invalid
657 lghi %r14,__LC_CPU_TIMER_SAVE_AREA 756 la %r14,4095
658 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 757 mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
659 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 758 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
660 jo 3f 759 jo 1f
661 la %r14,__LC_SYNC_ENTER_TIMER 760 la %r14,__LC_SYNC_ENTER_TIMER
662 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER 761 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
663 jl 0f 762 jl 0f
664 la %r14,__LC_ASYNC_ENTER_TIMER 763 la %r14,__LC_ASYNC_ENTER_TIMER
6650: clc 0(8,%r14),__LC_EXIT_TIMER 7640: clc 0(8,%r14),__LC_EXIT_TIMER
666 jl 1f 765 jl 0f
667 la %r14,__LC_EXIT_TIMER 766 la %r14,__LC_EXIT_TIMER
6681: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER 7670: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
669 jl 2f 768 jl 0f
670 la %r14,__LC_LAST_UPDATE_TIMER 769 la %r14,__LC_LAST_UPDATE_TIMER
6712: spt 0(%r14) 7700: spt 0(%r14)
672 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 771 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
6733: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7721: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
674 jno mcck_panic # no -> skip cleanup critical 773 jno mcck_int_main # no -> skip cleanup critical
675 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT 774 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
676 tm %r8,0x0001 # interrupting from user ? 775 jnz mcck_int_main # from user -> load kernel stack
677 jz mcck_skip 776 clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
678 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER 777 jhe mcck_int_main
679 LAST_BREAK %r14 778 clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
680mcck_skip: 779 jl mcck_int_main
681 lghi %r14,__LC_GPREGS_SAVE_AREA 780 brasl %r14,cleanup_critical
682 mvc __PT_R0(128,%r11),0(%r14) 781mcck_int_main:
683 stmg %r8,%r9,__PT_PSW(%r11) 782 lg %r14,__LC_PANIC_STACK # are we already on the panic stack?
684 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 783 slgr %r14,%r15
685 lgr %r2,%r11 # pass pointer to pt_regs 784 srag %r14,%r14,PAGE_SHIFT
785 jz 0f
786 lg %r15,__LC_PANIC_STACK # load panic stack
7870: aghi %r15,-SP_SIZE # make room for registers & psw
788 CREATE_STACK_FRAME __LC_SAVE_AREA+80
789 mvc SP_PSW(16,%r15),0(%r12)
790 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
791 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
792 jno mcck_no_vtime # no -> no timer update
793 HANDLE_SIE_INTERCEPT
794 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
795 jz mcck_no_vtime
796 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
797 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
798 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
799 LAST_BREAK
800mcck_no_vtime:
801 la %r2,SP_PTREGS(%r15) # load pt_regs
686 brasl %r14,s390_do_machine_check 802 brasl %r14,s390_do_machine_check
687 tm __PT_PSW+1(%r11),0x01 # returning to user ? 803 tm SP_PSW+1(%r15),0x01 # returning to user ?
688 jno mcck_return 804 jno mcck_return
689 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 805 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
690 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 806 aghi %r1,-SP_SIZE
691 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 807 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
692 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 808 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
693 la %r11,STACK_FRAME_OVERHEAD(%r1)
694 lgr %r15,%r1 809 lgr %r15,%r1
695 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 810 stosm __SF_EMPTY(%r15),0x04 # turn dat on
696 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING 811 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
697 jno mcck_return 812 jno mcck_return
698 TRACE_IRQS_OFF 813 TRACE_IRQS_OFF
699 brasl %r14,s390_handle_mcck 814 brasl %r14,s390_handle_mcck
700 TRACE_IRQS_ON 815 TRACE_IRQS_ON
701mcck_return: 816mcck_return:
702 lg %r14,__LC_VDSO_PER_CPU 817 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
703 lmg %r0,%r10,__PT_R0(%r11) 818 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
704 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 819 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
705 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 820 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
706 jno 0f 821 jno 0f
707 stpt __LC_EXIT_TIMER 822 stpt __LC_EXIT_TIMER
708 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 8230: lpswe __LC_RETURN_MCCK_PSW # back to caller
7090: lmg %r11,%r15,__PT_R11(%r11) 824mcck_done:
710 lpswe __LC_RETURN_MCCK_PSW
711 825
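Earlier in mcck_int_handler, when the machine-check code flags the stored CPU timer as invalid, the clc/jl/la cascade selects the smallest of four saved snapshots; since the CPU timer counts down, the smallest value is the most recently stored one, and spt reloads the timer from it. A C sketch of that selection:

#include <stdint.h>

/* Sketch of the timer revalidation in mcck_int_handler: take the
 * smallest (most recent) of four lowcore snapshots. */
static uint64_t pick_mcck_enter_timer(uint64_t sync_enter,
				      uint64_t async_enter,
				      uint64_t exit_timer,
				      uint64_t last_update)
{
	uint64_t t = sync_enter;		/* la %r14,__LC_SYNC_ENTER_TIMER */
	if (async_enter < t) t = async_enter;	/* clc/jl/la cascade */
	if (exit_timer < t)  t = exit_timer;
	if (last_update < t) t = last_update;
	return t;	/* spt 0(%r14); also copied to __LC_MCCK_ENTER_TIMER */
}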
712mcck_panic: 826/*
713 lg %r14,__LC_PANIC_STACK 827 * Restart interruption handler, kick starter for additional CPUs
714 slgr %r14,%r15 828 */
715 srag %r14,%r14,PAGE_SHIFT 829#ifdef CONFIG_SMP
716 jz 0f 830 __CPUINIT
717 lg %r15,__LC_PANIC_STACK 831ENTRY(restart_int_handler)
7180: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 832 basr %r1,0
719 j mcck_skip 833restart_base:
834 spt restart_vtime-restart_base(%r1)
835 stck __LC_LAST_UPDATE_CLOCK
836 mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
837 mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
838 lg %r15,__LC_SAVE_AREA+120 # load ksp
839 lghi %r10,__LC_CREGS_SAVE_AREA
840 lctlg %c0,%c15,0(%r10) # get new ctl regs
841 lghi %r10,__LC_AREGS_SAVE_AREA
842 lam %a0,%a15,0(%r10)
843 lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
844 lg %r1,__LC_THREAD_INFO
845 mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
846 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
847 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
848 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
849 brasl %r14,start_secondary
850 .align 8
851restart_vtime:
852 .long 0x7fffffff,0xffffffff
853 .previous
854#else
855/*
856 * If we do not run with SMP enabled, let the new CPU crash ...
857 */
858ENTRY(restart_int_handler)
859 basr %r1,0
860restart_base:
861 lpswe restart_crash-restart_base(%r1)
862 .align 8
863restart_crash:
864 .long 0x000a0000,0x00000000,0x00000000,0x00000000
865restart_go:
866#endif
720 867
721# 868#
722# PSW restart interrupt handler 869# PSW restart interrupt handler
723# 870#
724ENTRY(restart_int_handler) 871ENTRY(psw_restart_int_handler)
725 stg %r15,__LC_SAVE_AREA_RESTART 872 stg %r15,__LC_SAVE_AREA_64(%r0) # save r15
726 lg %r15,__LC_RESTART_STACK 873 larl %r15,restart_stack # load restart stack
727 aghi %r15,-__PT_SIZE # create pt_regs on stack 874 lg %r15,0(%r15)
728 xc 0(__PT_SIZE,%r15),0(%r15) 875 aghi %r15,-SP_SIZE # make room for pt_regs
729 stmg %r0,%r14,__PT_R0(%r15) 876 stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
730 mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 877 mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
731 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw 878 mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
732 aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack 879 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
733 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 880 brasl %r14,do_restart
734 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu 881
735 lg %r2,__LC_RESTART_DATA 882 larl %r14,restart_psw_crash # load disabled wait PSW if
736 lg %r3,__LC_RESTART_SOURCE 883 lpswe 0(%r14) # do_restart returns
737 ltgr %r3,%r3 # test source cpu address 884 .align 8
738 jm 1f # negative -> skip source stop 885restart_psw_crash:
7390: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu 886 .quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash
740 brc 10,0b # wait for status stored
7411: basr %r14,%r1 # call function
742 stap __SF_EMPTY(%r15) # store cpu address
743 llgh %r3,__SF_EMPTY(%r15)
7442: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
745 brc 2,2b
7463: j 3b
747 887
748 .section .kprobes.text, "ax" 888 .section .kprobes.text, "ax"
749 889
@@ -754,189 +894,170 @@ ENTRY(restart_int_handler)
754 * Setup a pt_regs so that show_trace can provide a good call trace. 894 * Setup a pt_regs so that show_trace can provide a good call trace.
755 */ 895 */
756stack_overflow: 896stack_overflow:
757 lg %r11,__LC_PANIC_STACK # change to panic stack 897 lg %r15,__LC_PANIC_STACK # change to panic stack
758 aghi %r11,-__PT_SIZE # create pt_regs 898 aghi %r15,-SP_SIZE
759 stmg %r0,%r7,__PT_R0(%r11) 899 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
760 stmg %r8,%r9,__PT_PSW(%r11) 900 stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
761 mvc __PT_R8(64,%r11),0(%r14) 901 la %r1,__LC_SAVE_AREA
762 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 902 chi %r12,__LC_SVC_OLD_PSW
763 lgr %r15,%r11 903 je 0f
764 aghi %r15,-STACK_FRAME_OVERHEAD 904 chi %r12,__LC_PGM_OLD_PSW
765 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 905 je 0f
766 lgr %r2,%r11 # pass pointer to pt_regs 906 la %r1,__LC_SAVE_AREA+40
9070: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack
908 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
909 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
910 la %r2,SP_PTREGS(%r15) # load pt_regs
767 jg kernel_stack_overflow 911 jg kernel_stack_overflow
768#endif 912#endif
769 913
770 .align 8 914cleanup_table_system_call:
771cleanup_table: 915 .quad system_call, sysc_do_svc
772 .quad system_call 916cleanup_table_sysc_tif:
773 .quad sysc_do_svc 917 .quad sysc_tif, sysc_restore
774 .quad sysc_tif 918cleanup_table_sysc_restore:
775 .quad sysc_restore 919 .quad sysc_restore, sysc_done
776 .quad sysc_done 920cleanup_table_io_tif:
777 .quad io_tif 921 .quad io_tif, io_restore
778 .quad io_restore 922cleanup_table_io_restore:
779 .quad io_done 923 .quad io_restore, io_done
780 .quad psw_idle
781 .quad psw_idle_end
782 924
783cleanup_critical: 925cleanup_critical:
784 clg %r9,BASED(cleanup_table) # system_call 926 clc 8(8,%r12),BASED(cleanup_table_system_call)
785 jl 0f 927 jl 0f
786 clg %r9,BASED(cleanup_table+8) # sysc_do_svc 928 clc 8(8,%r12),BASED(cleanup_table_system_call+8)
787 jl cleanup_system_call 929 jl cleanup_system_call
788 clg %r9,BASED(cleanup_table+16) # sysc_tif 9300:
931 clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
789 jl 0f 932 jl 0f
790 clg %r9,BASED(cleanup_table+24) # sysc_restore 933 clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8)
791 jl cleanup_sysc_tif 934 jl cleanup_sysc_tif
792 clg %r9,BASED(cleanup_table+32) # sysc_done 9350:
936 clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
937 jl 0f
938 clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
793 jl cleanup_sysc_restore 939 jl cleanup_sysc_restore
794 clg %r9,BASED(cleanup_table+40) # io_tif 9400:
941 clc 8(8,%r12),BASED(cleanup_table_io_tif)
795 jl 0f 942 jl 0f
796 clg %r9,BASED(cleanup_table+48) # io_restore 943 clc 8(8,%r12),BASED(cleanup_table_io_tif+8)
797 jl cleanup_io_tif 944 jl cleanup_io_tif
798 clg %r9,BASED(cleanup_table+56) # io_done 9450:
799 jl cleanup_io_restore 946 clc 8(8,%r12),BASED(cleanup_table_io_restore)
800 clg %r9,BASED(cleanup_table+64) # psw_idle
801 jl 0f 947 jl 0f
802 clg %r9,BASED(cleanup_table+72) # psw_idle_end 948 clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
803 jl cleanup_idle 949 jl cleanup_io_restore
8040: br %r14 9500:
805 951 br %r14
806 952
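cleanup_critical below is an interval classifier: it compares the interrupted address against start/end pairs and branches to the fixup routine for the range that contains it. A C sketch of the lookup (one .quad pair per cleanup_table_* label on the right; the table contents here are placeholders):

#include <stdint.h>
#include <stddef.h>

typedef void (*fixup_fn)(void);

struct crit_range {
	uint64_t start, end;	/* e.g. cleanup_table_system_call */
	fixup_fn fixup;
};

/* Sketch of cleanup_critical: run the fixup for the range that
 * contains the interrupted PSW address, if any. */
static void cleanup_critical_sketch(uint64_t psw_addr,
				    const struct crit_range *tab, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (psw_addr >= tab[i].start && psw_addr < tab[i].end) {
			tab[i].fixup();		/* jl cleanup_xxx */
			return;
		}
	}
	/* no match: br %r14 with nothing to repair */
}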
807cleanup_system_call: 953cleanup_system_call:
808 # check if stpt has been executed 954 mvc __LC_RETURN_PSW(16),0(%r12)
809 clg %r9,BASED(cleanup_system_call_insn) 955 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
810 jh 0f 956 jh 0f
957 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
958 cghi %r12,__LC_MCK_OLD_PSW
959 je 0f
811 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 960 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
812 cghi %r11,__LC_SAVE_AREA_ASYNC 9610: cghi %r12,__LC_MCK_OLD_PSW
962 la %r12,__LC_SAVE_AREA+80
813 je 0f 963 je 0f
814 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 964 la %r12,__LC_SAVE_AREA+40
8150: # check if stmg has been executed 9650: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
816 clg %r9,BASED(cleanup_system_call_insn+8) 966 jhe cleanup_vtime
817 jh 0f 967 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
818 mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
8190: # check if base register setup + TIF bit load has been done
820 clg %r9,BASED(cleanup_system_call_insn+16)
821 jhe 0f
822 # set up saved registers r10 and r12
823 stg %r10,16(%r11) # r10 last break
824 stg %r12,32(%r11) # r12 thread-info pointer
8250: # check if the user time update has been done
826 clg %r9,BASED(cleanup_system_call_insn+24)
827 jh 0f 968 jh 0f
828 lg %r15,__LC_EXIT_TIMER 969 mvc __LC_SAVE_AREA(40),0(%r12)
829 slg %r15,__LC_SYNC_ENTER_TIMER 9700: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
830 alg %r15,__LC_USER_TIMER 971 aghi %r15,-SP_SIZE # make room for registers & psw
831 stg %r15,__LC_USER_TIMER 972 stg %r15,32(%r12)
8320: # check if the system time update has been done 973 stg %r11,0(%r12)
833 clg %r9,BASED(cleanup_system_call_insn+32) 974 CREATE_STACK_FRAME __LC_SAVE_AREA
834 jh 0f 975 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
835 lg %r15,__LC_LAST_UPDATE_TIMER 976 mvc SP_ILC(4,%r15),__LC_SVC_ILC
836 slg %r15,__LC_EXIT_TIMER 977 mvc 8(8,%r12),__LC_THREAD_INFO
837 alg %r15,__LC_SYSTEM_TIMER 978cleanup_vtime:
838 stg %r15,__LC_SYSTEM_TIMER 979 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
8390: # update accounting time stamp 980 jhe cleanup_stime
981 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
982cleanup_stime:
983 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
984 jh cleanup_update
985 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
986cleanup_update:
840 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 987 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
841 # do LAST_BREAK 988 srag %r12,%r11,23
842 lg %r9,16(%r11) 989 lg %r12,__LC_THREAD_INFO
843 srag %r9,%r9,23
844 jz 0f 990 jz 0f
845 mvc __TI_last_break(8,%r12),16(%r11) 991 stg %r11,__TI_last_break(%r12)
8460: # set up saved register r11 9920: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
847 lg %r15,__LC_KERNEL_STACK 993 la %r12,__LC_RETURN_PSW
848 aghi %r15,-__PT_SIZE
849 stg %r15,24(%r11) # r11 pt_regs pointer
850 # fill pt_regs
851 mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
852 stmg %r0,%r7,__PT_R0(%r15)
853 mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
854 mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
855 # setup saved register r15
856 aghi %r15,-STACK_FRAME_OVERHEAD
857 stg %r15,56(%r11) # r15 stack pointer
858 # set new psw address and exit
859 larl %r9,sysc_do_svc
860 br %r14 994 br %r14
861cleanup_system_call_insn: 995cleanup_system_call_insn:
996 .quad sysc_saveall
862 .quad system_call 997 .quad system_call
863 .quad sysc_stmg 998 .quad sysc_vtime
864 .quad sysc_per 999 .quad sysc_stime
865 .quad sysc_vtime+18 1000 .quad sysc_update
866 .quad sysc_vtime+42
867 1001
868cleanup_sysc_tif: 1002cleanup_sysc_tif:
869 larl %r9,sysc_tif 1003 mvc __LC_RETURN_PSW(8),0(%r12)
1004 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
1005 la %r12,__LC_RETURN_PSW
870 br %r14 1006 br %r14
871 1007
872cleanup_sysc_restore: 1008cleanup_sysc_restore:
873 clg %r9,BASED(cleanup_sysc_restore_insn) 1009 clc 8(8,%r12),BASED(cleanup_sysc_restore_insn)
1010 je 2f
1011 clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
1012 jhe 0f
1013 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1014 cghi %r12,__LC_MCK_OLD_PSW
874 je 0f 1015 je 0f
875 lg %r9,24(%r11) # get saved pointer to pt_regs 1016 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
876 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 10170: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
877 mvc 0(64,%r11),__PT_R8(%r9) 1018 cghi %r12,__LC_MCK_OLD_PSW
878 lmg %r0,%r7,__PT_R0(%r9) 1019 la %r12,__LC_SAVE_AREA+80
8790: lmg %r8,%r9,__LC_RETURN_PSW 1020 je 1f
1021 la %r12,__LC_SAVE_AREA+40
10221: mvc 0(40,%r12),SP_R11(%r15)
1023 lmg %r0,%r10,SP_R0(%r15)
1024 lg %r15,SP_R15(%r15)
10252: la %r12,__LC_RETURN_PSW
880 br %r14 1026 br %r14
881cleanup_sysc_restore_insn: 1027cleanup_sysc_restore_insn:
882 .quad sysc_done - 4 1028 .quad sysc_done - 4
1029 .quad sysc_done - 16
883 1030
884cleanup_io_tif: 1031cleanup_io_tif:
885 larl %r9,io_tif 1032 mvc __LC_RETURN_PSW(8),0(%r12)
1033 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
1034 la %r12,__LC_RETURN_PSW
886 br %r14 1035 br %r14
887 1036
888cleanup_io_restore: 1037cleanup_io_restore:
889 clg %r9,BASED(cleanup_io_restore_insn) 1038 clc 8(8,%r12),BASED(cleanup_io_restore_insn)
890 je 0f 1039 je 1f
891 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 1040 clc 8(8,%r12),BASED(cleanup_io_restore_insn+8)
892 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1041 jhe 0f
893 mvc 0(64,%r11),__PT_R8(%r9) 1042 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
894 lmg %r0,%r7,__PT_R0(%r9) 10430: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
8950: lmg %r8,%r9,__LC_RETURN_PSW 1044 mvc __LC_SAVE_AREA+80(40),SP_R11(%r15)
1045 lmg %r0,%r10,SP_R0(%r15)
1046 lg %r15,SP_R15(%r15)
10471: la %r12,__LC_RETURN_PSW
896 br %r14 1048 br %r14
897cleanup_io_restore_insn: 1049cleanup_io_restore_insn:
898 .quad io_done - 4 1050 .quad io_done - 4
899 1051 .quad io_done - 16
900cleanup_idle:
901 # copy interrupt clock & cpu timer
902 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
903 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
904 cghi %r11,__LC_SAVE_AREA_ASYNC
905 je 0f
906 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
907 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
9080: # check if stck & stpt have been executed
909 clg %r9,BASED(cleanup_idle_insn)
910 jhe 1f
911 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
912 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
9131: # account system time going idle
914 lg %r9,__LC_STEAL_TIMER
915 alg %r9,__CLOCK_IDLE_ENTER(%r2)
916 slg %r9,__LC_LAST_UPDATE_CLOCK
917 stg %r9,__LC_STEAL_TIMER
918 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
919 lg %r9,__LC_SYSTEM_TIMER
920 alg %r9,__LC_LAST_UPDATE_TIMER
921 slg %r9,__TIMER_IDLE_ENTER(%r2)
922 stg %r9,__LC_SYSTEM_TIMER
923 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
924 # prepare return psw
925 nihh %r8,0xfffd # clear wait state bit
926 lg %r9,48(%r11) # return from psw_idle
927 br %r14
928cleanup_idle_insn:
929 .quad psw_idle_lpsw
930 1052
931/* 1053/*
932 * Integer constants 1054 * Integer constants
933 */ 1055 */
934 .align 8 1056 .align 4
935.Lcritical_start: 1057.Lcritical_start:
936 .quad __critical_start 1058 .quad __critical_start
937.Lcritical_length: 1059.Lcritical_end:
938 .quad __critical_end - __critical_start 1060 .quad __critical_end
939
940 1061
941#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 1062#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
942/* 1063/*
@@ -948,15 +1069,9 @@ ENTRY(sie64a)
948 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers 1069 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
949 stg %r2,__SF_EMPTY(%r15) # save control block pointer 1070 stg %r2,__SF_EMPTY(%r15) # save control block pointer
950 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area 1071 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
951 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
952 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 1072 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
953# some program checks are suppressing. C code (e.g. do_protection_exception) 1073 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
954# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other 1074 oi __TI_flags+6(%r14),_TIF_SIE>>8
955# instructions in the sie_loop should not cause program interrupts. So
956# let's use a nop (47 00 00 00) as a landing pad.
957# See also HANDLE_SIE_INTERCEPT
958rewind_pad:
959 nop 0
960sie_loop: 1075sie_loop:
961 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 1076 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
962 tm __TI_flags+7(%r14),_TIF_EXIT_SIE 1077 tm __TI_flags+7(%r14),_TIF_EXIT_SIE
@@ -970,18 +1085,19 @@ sie_gmap:
970 SPP __SF_EMPTY(%r15) # set guest id 1085 SPP __SF_EMPTY(%r15) # set guest id
971 sie 0(%r14) 1086 sie 0(%r14)
972sie_done: 1087sie_done:
973 SPP __SF_EMPTY+16(%r15) # set host id 1088 SPP __LC_CMF_HPP # set host id
974 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 1089 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
975sie_exit: 1090sie_exit:
976 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1091 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1092 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
977 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 1093 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
978 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 1094 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
979 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 1095 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
980 lghi %r2,0 1096 lghi %r2,0
981 br %r14 1097 br %r14
982sie_fault: 1098sie_fault:
983 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
984 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 1099 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
1100 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
985 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 1101 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
986 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 1102 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
987 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 1103 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
@@ -991,13 +1107,12 @@ sie_fault:
991 .align 8 1107 .align 8
992.Lsie_loop: 1108.Lsie_loop:
993 .quad sie_loop 1109 .quad sie_loop
994.Lsie_length: 1110.Lsie_done:
995 .quad sie_done - sie_loop 1111 .quad sie_done
996.Lhost_id:
997 .quad 0
998 1112
999 EX_TABLE(rewind_pad,sie_fault) 1113 .section __ex_table,"a"
1000 EX_TABLE(sie_loop,sie_fault) 1114 .quad sie_loop,sie_fault
1115 .previous
1001#endif 1116#endif
1002 1117
1003 .section .rodata, "a" 1118 .section .rodata, "a"
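(The timer fixups in cleanup_system_call above — the lg/slg/alg/stg sequences on __LC_USER_TIMER and __LC_SYSTEM_TIMER, folded into UPDATE_VTIME on the replacement side — are plain delta arithmetic on the down-counting s390 CPU timer. A minimal C sketch of that accounting, with invented field names standing in for the lowcore locations:

    #include <stdint.h>

    /* Hypothetical stand-ins for the lowcore timer fields used above. */
    struct vtime_snapshot {
    	uint64_t sync_enter_timer;   /* CPU timer sampled at syscall entry */
    	uint64_t exit_timer;         /* CPU timer sampled at last kernel exit */
    	uint64_t last_update_timer;  /* CPU timer at the last accounting run */
    	uint64_t user_timer;         /* accumulated user CPU time */
    	uint64_t system_timer;       /* accumulated system CPU time */
    };

    /*
     * The CPU timer counts down, so "earlier - later" is elapsed time.
     * Mirrors the user-time and system-time updates done by hand above.
     */
    static void account_system_call(struct vtime_snapshot *vt)
    {
    	vt->user_timer += vt->exit_timer - vt->sync_enter_timer;
    	vt->system_timer += vt->last_update_timer - vt->exit_timer;
    	vt->last_update_timer = vt->sync_enter_timer;
    }

This is an illustration of the arithmetic only, not the kernel's actual vtime implementation.)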
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index fd8db63dfc9..2d781bab37b 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 1999, 2010 2 * Copyright IBM Corp. 1999,2010
3 * 3 *
4 * Author(s): Hartmut Penner <hp@de.ibm.com> 4 * Author(s): Hartmut Penner <hp@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -34,7 +34,125 @@
34#endif 34#endif
35 35
36__HEAD 36__HEAD
37#ifndef CONFIG_IPL
38 .org 0
39 .long 0x00080000,0x80000000+startup # Just a restart PSW
40#else
41#ifdef CONFIG_IPL_TAPE
42#define IPL_BS 1024
43 .org 0
44 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
45 .long 0x27000000,0x60000001 # by ipl to addresses 0-23.
46 .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
47 .long 0x00000000,0x00000000 # external old psw
48 .long 0x00000000,0x00000000 # svc old psw
49 .long 0x00000000,0x00000000 # program check old psw
50 .long 0x00000000,0x00000000 # machine check old psw
51 .long 0x00000000,0x00000000 # io old psw
52 .long 0x00000000,0x00000000
53 .long 0x00000000,0x00000000
54 .long 0x00000000,0x00000000
55 .long 0x000a0000,0x00000058 # external new psw
56 .long 0x000a0000,0x00000060 # svc new psw
57 .long 0x000a0000,0x00000068 # program check new psw
58 .long 0x000a0000,0x00000070 # machine check new psw
59 .long 0x00080000,0x80000000+.Lioint # io new psw
60
61 .org 0x100
62#
63# subroutine for loading from tape
64# Parameters:
65# R1 = device number
66# R2 = load address
67.Lloader:
68 st %r14,.Lldret
69 la %r3,.Lorbread # r3 = address of orb
70 la %r5,.Lirb # r5 = address of irb
71 st %r2,.Lccwread+4 # initialize CCW data addresses
72 lctl %c6,%c6,.Lcr6
73 slr %r2,%r2
74.Lldlp:
75 la %r6,3 # 3 retries
76.Lssch:
77 ssch 0(%r3) # load chunk of IPL_BS bytes
78 bnz .Llderr
79.Lw4end:
80 bas %r14,.Lwait4io
81 tm 8(%r5),0x82 # do we have a problem ?
82 bnz .Lrecov
83 slr %r7,%r7
84 icm %r7,3,10(%r5) # get residual count
85 lcr %r7,%r7
86 la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read
87 ar %r2,%r7 # add to total size
88 tm 8(%r5),0x01 # found a tape mark ?
89 bnz .Ldone
90 l %r0,.Lccwread+4 # update CCW data addresses
91 ar %r0,%r7
92 st %r0,.Lccwread+4
93 b .Lldlp
94.Ldone:
95 l %r14,.Lldret
96 br %r14 # r2 contains the total size
97.Lrecov:
98 bas %r14,.Lsense # do the sensing
99 bct %r6,.Lssch # dec. retry count & branch
100 b .Llderr
101#
102# Sense subroutine
103#
104.Lsense:
105 st %r14,.Lsnsret
106 la %r7,.Lorbsense
107 ssch 0(%r7) # start sense command
108 bnz .Llderr
109 bas %r14,.Lwait4io
110 l %r14,.Lsnsret
111 tm 8(%r5),0x82 # do we have a problem ?
112 bnz .Llderr
113 br %r14
114#
115# Wait for interrupt subroutine
116#
117.Lwait4io:
118 lpsw .Lwaitpsw
119.Lioint:
120 c %r1,0xb8 # compare subchannel number
121 bne .Lwait4io
122 tsch 0(%r5)
123 slr %r0,%r0
124 tm 8(%r5),0x82 # do we have a problem ?
125 bnz .Lwtexit
126 tm 8(%r5),0x04 # got device end ?
127 bz .Lwait4io
128.Lwtexit:
129 br %r14
130.Llderr:
131 lpsw .Lcrash
37 132
133 .align 8
134.Lorbread:
135 .long 0x00000000,0x0080ff00,.Lccwread
136 .align 8
137.Lorbsense:
138 .long 0x00000000,0x0080ff00,.Lccwsense
139 .align 8
140.Lccwread:
141 .long 0x02200000+IPL_BS,0x00000000
142.Lccwsense:
143 .long 0x04200001,0x00000000
144.Lwaitpsw:
145 .long 0x020a0000,0x80000000+.Lioint
146
147.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
148.Lcr6: .long 0xff000000
149 .align 8
150.Lcrash:.long 0x000a0000,0x00000000
151.Lldret:.long 0
152.Lsnsret: .long 0
153#endif /* CONFIG_IPL_TAPE */
154
155#ifdef CONFIG_IPL_VM
38#define IPL_BS 0x730 156#define IPL_BS 0x730
39 .org 0 157 .org 0
40 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded 158 .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
@@ -52,7 +170,7 @@ __HEAD
52 .long 0x02000370,0x60000050 # the channel program the PSW 170 .long 0x02000370,0x60000050 # the channel program the PSW
53 .long 0x020003c0,0x60000050 # at location 0 is loaded. 171 .long 0x020003c0,0x60000050 # at location 0 is loaded.
54 .long 0x02000410,0x60000050 # Initial processing starts 172 .long 0x02000410,0x60000050 # Initial processing starts
55 .long 0x02000460,0x60000050 # at 0x200 = iplstart. 173 .long 0x02000460,0x60000050 # at 0xf0 = iplstart.
56 .long 0x020004b0,0x60000050 174 .long 0x020004b0,0x60000050
57 .long 0x02000500,0x60000050 175 .long 0x02000500,0x60000050
58 .long 0x02000550,0x60000050 176 .long 0x02000550,0x60000050
@@ -62,54 +180,11 @@ __HEAD
62 .long 0x02000690,0x60000050 180 .long 0x02000690,0x60000050
63 .long 0x020006e0,0x20000050 181 .long 0x020006e0,0x20000050
64 182
65 .org 0x200 183 .org 0xf0
66#
67# subroutine to set architecture mode
68#
69.Lsetmode:
70#ifdef CONFIG_64BIT
71 mvi __LC_AR_MODE_ID,1 # set esame flag
72 slr %r0,%r0 # set cpuid to zero
73 lhi %r1,2 # mode 2 = esame (dump)
74 sigp %r1,%r0,0x12 # switch to esame mode
75 bras %r13,0f
76 .fill 16,4,0x0
770: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
78 sam31 # switch to 31 bit addressing mode
79#else
80 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
81#endif
82 br %r14
83
84#
85# subroutine to wait for end I/O
86#
87.Lirqwait:
88#ifdef CONFIG_64BIT
89 mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw
90 lpsw .Lwaitpsw
91.Lioint:
92 br %r14
93 .align 8
94.Lnewpsw:
95 .quad 0x0000000080000000,.Lioint
96#else
97 mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
98 lpsw .Lwaitpsw
99.Lioint:
100 br %r14
101 .align 8
102.Lnewpsw:
103 .long 0x00080000,0x80000000+.Lioint
104#endif
105.Lwaitpsw:
106 .long 0x020a0000,0x80000000+.Lioint
107
108# 184#
109# subroutine for loading cards from the reader 185# subroutine for loading cards from the reader
110# 186#
111.Lloader: 187.Lloader:
112 la %r4,0(%r14)
 113 la %r3,.Lorb # r3 = address of orb 188 la %r3,.Lorb # r3 = address of orb
 114 la %r5,.Lirb # r5 = address of irb 189 la %r5,.Lirb # r5 = address of irb
115 la %r6,.Lccws 190 la %r6,.Lccws
@@ -126,7 +201,9 @@ __HEAD
126 ssch 0(%r3) # load chunk of 1600 bytes 201 ssch 0(%r3) # load chunk of 1600 bytes
127 bnz .Llderr 202 bnz .Llderr
128.Lwait4irq: 203.Lwait4irq:
129 bas %r14,.Lirqwait 204 mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
205 lpsw .Lwaitpsw
206.Lioint:
130 c %r1,0xb8 # compare subchannel number 207 c %r1,0xb8 # compare subchannel number
131 bne .Lwait4irq 208 bne .Lwait4irq
132 tsch 0(%r5) 209 tsch 0(%r5)
@@ -145,7 +222,7 @@ __HEAD
145 sr %r0,%r3 # #ccws*80-residual=#bytes read 222 sr %r0,%r3 # #ccws*80-residual=#bytes read
146 ar %r2,%r0 223 ar %r2,%r0
147 224
148 br %r4 # r2 contains the total size 225 br %r14 # r2 contains the total size
149 226
150.Lcont: 227.Lcont:
151 ahi %r2,0x640 # add 0x640 to total size 228 ahi %r2,0x640 # add 0x640 to total size
@@ -169,15 +246,19 @@ __HEAD
169.Lloadp:.long 0,0 246.Lloadp:.long 0,0
170 .align 8 247 .align 8
171.Lcrash:.long 0x000a0000,0x00000000 248.Lcrash:.long 0x000a0000,0x00000000
249.Lnewpsw:
250 .long 0x00080000,0x80000000+.Lioint
251.Lwaitpsw:
252 .long 0x020a0000,0x80000000+.Lioint
172 253
173 .align 8 254 .align 8
174.Lccws: .rept 19 255.Lccws: .rept 19
175 .long 0x02600050,0x00000000 256 .long 0x02600050,0x00000000
176 .endr 257 .endr
177 .long 0x02200050,0x00000000 258 .long 0x02200050,0x00000000
259#endif /* CONFIG_IPL_VM */
178 260
179iplstart: 261iplstart:
180 bas %r14,.Lsetmode # Immediately switch to 64 bit mode
181 lh %r1,0xb8 # test if subchannel number 262 lh %r1,0xb8 # test if subchannel number
182 bct %r1,.Lnoload # is valid 263 bct %r1,.Lnoload # is valid
183 l %r1,0xb8 # load ipl subchannel number 264 l %r1,0xb8 # load ipl subchannel number
@@ -244,11 +325,12 @@ iplstart:
244 clc 0(3,%r2),.L_eof 325 clc 0(3,%r2),.L_eof
245 bz .Lagain2 326 bz .Lagain2
246 327
328#ifdef CONFIG_IPL_VM
247# 329#
248# reset files in VM reader 330# reset files in VM reader
249# 331#
250 stidp .Lcpuid # store cpuid 332 stidp __LC_SAVE_AREA # store cpuid
251 tm .Lcpuid,0xff # running VM ? 333 tm __LC_SAVE_AREA,0xff # running VM ?
252 bno .Lnoreset 334 bno .Lnoreset
253 la %r2,.Lreset 335 la %r2,.Lreset
254 lhi %r3,26 336 lhi %r3,26
@@ -260,14 +342,24 @@ iplstart:
260 tm 31(%r5),0xff # bits is set in the schib 342 tm 31(%r5),0xff # bits is set in the schib
261 bz .Lnoreset 343 bz .Lnoreset
262.Lwaitforirq: 344.Lwaitforirq:
263 bas %r14,.Lirqwait # wait for IO interrupt 345 mvc 0x78(8),.Lrdrnewpsw # set up IO interrupt psw
346.Lwaitrdrirq:
347 lpsw .Lrdrwaitpsw
348.Lrdrint:
264 c %r1,0xb8 # compare subchannel number 349 c %r1,0xb8 # compare subchannel number
265 bne .Lwaitforirq 350 bne .Lwaitrdrirq
266 la %r5,.Lirb 351 la %r5,.Lirb
267 tsch 0(%r5) 352 tsch 0(%r5)
268.Lnoreset: 353.Lnoreset:
269 b .Lnoload 354 b .Lnoload
270 355
356 .align 8
357.Lrdrnewpsw:
358 .long 0x00080000,0x80000000+.Lrdrint
359.Lrdrwaitpsw:
360 .long 0x020a0000,0x80000000+.Lrdrint
361#endif
362
271# 363#
272# everything loaded, go for it 364# everything loaded, go for it
273# 365#
@@ -283,8 +375,8 @@ iplstart:
283 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" 375 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
284.L_eof: .long 0xc5d6c600 /* C'EOF' */ 376.L_eof: .long 0xc5d6c600 /* C'EOF' */
285.L_hdr: .long 0xc8c4d900 /* C'HDR' */ 377.L_hdr: .long 0xc8c4d900 /* C'HDR' */
286 .align 8 378
287.Lcpuid:.fill 8,1,0 379#endif /* CONFIG_IPL */
288 380
289# 381#
290# SALIPL loader support. Based on a patch by Rob van der Heij. 382# SALIPL loader support. Based on a patch by Rob van der Heij.
@@ -294,7 +386,6 @@ iplstart:
294 .org 0x800 386 .org 0x800
295ENTRY(start) 387ENTRY(start)
296 stm %r0,%r15,0x07b0 # store registers 388 stm %r0,%r15,0x07b0 # store registers
297 bas %r14,.Lsetmode # Immediately switch to 64 bit mode
298 basr %r12,%r0 389 basr %r12,%r0
299.base: 390.base:
300 l %r11,.parm 391 l %r11,.parm
@@ -358,70 +449,35 @@ ENTRY(start)
358# 449#
359 .org 0x10000 450 .org 0x10000
360ENTRY(startup) 451ENTRY(startup)
361 j .Lep_startup_normal
362 .org 0x10008
363#
364# This is a list of s390 kernel entry points. At address 0x1000f the number of
365# valid entry points is stored.
366#
367# IMPORTANT: Do not change this table, it is s390 kernel ABI!
368#
369 .ascii "S390EP"
370 .byte 0x00,0x01
371#
372# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
373#
374 .org 0x10010
375ENTRY(startup_kdump)
376 j .Lep_startup_kdump
377.Lep_startup_normal:
378#ifdef CONFIG_64BIT
379 mvi __LC_AR_MODE_ID,1 # set esame flag
380 slr %r0,%r0 # set cpuid to zero
381 lhi %r1,2 # mode 2 = esame (dump)
382 sigp %r1,%r0,0x12 # switch to esame mode
383 bras %r13,0f
384 .fill 16,4,0x0
3850: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
386 sam31 # switch to 31 bit addressing mode
387#else
388 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
389#endif
390 basr %r13,0 # get base 452 basr %r13,0 # get base
391.LPG0: 453.LPG0:
392 xc 0x200(256),0x200 # partially clear lowcore 454 xc 0x200(256),0x200 # partially clear lowcore
393 xc 0x300(256),0x300 455 xc 0x300(256),0x300
394 xc 0xe00(256),0xe00
395 stck __LC_LAST_UPDATE_CLOCK 456 stck __LC_LAST_UPDATE_CLOCK
396 spt 6f-.LPG0(%r13) 457 spt 5f-.LPG0(%r13)
397 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) 458 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
399#ifndef CONFIG_MARCH_G5 459#ifndef CONFIG_MARCH_G5
400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 460 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
461 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
401 .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list 462 .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 463 tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
403 jz 0f 464 jz 0f
404 la %r0,1 465 la %r0,0
405 .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended 466 .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended
406 # verify if all required facilities are supported by the machine 4670: l %r0,__LC_STFL_FAC_LIST
4070: la %r1,__LC_STFL_FAC_LIST 468 n %r0,2f+8-.LPG0(%r13)
408 la %r2,3f+8-.LPG0(%r13) 469 cl %r0,2f+8-.LPG0(%r13)
409 l %r3,0(%r2) 470 jne 1f
4101: l %r0,0(%r1) 471 l %r0,__LC_STFL_FAC_LIST+4
411 n %r0,4(%r2) 472 n %r0,2f+12-.LPG0(%r13)
412 cl %r0,4(%r2) 473 cl %r0,2f+12-.LPG0(%r13)
413 jne 2f 474 je 3f
414 la %r1,4(%r1) 4751: l %r15,.Lstack-.LPG0(%r13)
415 la %r2,4(%r2)
416 ahi %r3,-1
417 jnz 1b
418 j 4f
4192: l %r15,.Lstack-.LPG0(%r13)
420 ahi %r15,-96 476 ahi %r15,-96
421 la %r2,.Lals_string-.LPG0(%r13) 477 la %r2,.Lals_string-.LPG0(%r13)
422 l %r3,.Lsclp_print-.LPG0(%r13) 478 l %r3,.Lsclp_print-.LPG0(%r13)
423 basr %r14,%r3 479 basr %r14,%r3
424 lpsw 3f-.LPG0(%r13) # machine type not good enough, crash 480 lpsw 2f-.LPG0(%r13) # machine type not good enough, crash
425.Lals_string: 481.Lals_string:
426 .asciz "The Linux kernel requires more recent processor hardware" 482 .asciz "The Linux kernel requires more recent processor hardware"
427.Lsclp_print: 483.Lsclp_print:
@@ -429,60 +485,54 @@ ENTRY(startup_kdump)
429.Lstack: 485.Lstack:
430 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) 486 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
431 .align 16 487 .align 16
4323: .long 0x000a0000,0x8badcccc 4882: .long 0x000a0000,0x8badcccc
433
434# List of facilities that are required. If not all facilities are present
435# the kernel will crash. Format is number of facility words with bits set,
436# followed by the facility words.
437
438#if defined(CONFIG_64BIT) 489#if defined(CONFIG_64BIT)
439#if defined(CONFIG_MARCH_ZEC12) 490#if defined(CONFIG_MARCH_Z196)
440 .long 3, 0xc100efe3, 0xf46ce000, 0x00400000 491 .long 0xc100efe3, 0xf46c0000
441#elif defined(CONFIG_MARCH_Z196)
442 .long 2, 0xc100efe3, 0xf46c0000
443#elif defined(CONFIG_MARCH_Z10) 492#elif defined(CONFIG_MARCH_Z10)
444 .long 2, 0xc100efe3, 0xf0680000 493 .long 0xc100efe3, 0xf0680000
445#elif defined(CONFIG_MARCH_Z9_109) 494#elif defined(CONFIG_MARCH_Z9_109)
446 .long 1, 0xc100efc3 495 .long 0xc100efc3, 0x00000000
447#elif defined(CONFIG_MARCH_Z990) 496#elif defined(CONFIG_MARCH_Z990)
448 .long 1, 0xc0002000 497 .long 0xc0002000, 0x00000000
449#elif defined(CONFIG_MARCH_Z900) 498#elif defined(CONFIG_MARCH_Z900)
450 .long 1, 0xc0000000 499 .long 0xc0000000, 0x00000000
451#endif 500#endif
452#else 501#else
453#if defined(CONFIG_MARCH_ZEC12) 502#if defined(CONFIG_MARCH_Z196)
454 .long 1, 0x8100c880 503 .long 0x8100c880, 0x00000000
455#elif defined(CONFIG_MARCH_Z196)
456 .long 1, 0x8100c880
457#elif defined(CONFIG_MARCH_Z10) 504#elif defined(CONFIG_MARCH_Z10)
458 .long 1, 0x8100c880 505 .long 0x8100c880, 0x00000000
459#elif defined(CONFIG_MARCH_Z9_109) 506#elif defined(CONFIG_MARCH_Z9_109)
460 .long 1, 0x8100c880 507 .long 0x8100c880, 0x00000000
461#elif defined(CONFIG_MARCH_Z990) 508#elif defined(CONFIG_MARCH_Z990)
462 .long 1, 0x80002000 509 .long 0x80002000, 0x00000000
463#elif defined(CONFIG_MARCH_Z900) 510#elif defined(CONFIG_MARCH_Z900)
464 .long 1, 0x80000000 511 .long 0x80000000, 0x00000000
465#endif 512#endif
466#endif 513#endif
4674: 5143:
468#endif 515#endif
469 516
470#ifdef CONFIG_64BIT 517#ifdef CONFIG_64BIT
471 /* Continue with 64bit startup code in head64.S */ 518 mvi __LC_AR_MODE_ID,1 # set esame flag
519 slr %r0,%r0 # set cpuid to zero
520 lhi %r1,2 # mode 2 = esame (dump)
521 sigp %r1,%r0,0x12 # switch to esame mode
472 sam64 # switch to 64 bit mode 522 sam64 # switch to 64 bit mode
523 larl %r13,4f
524 lmh %r0,%r15,0(%r13) # clear high-order half
473 jg startup_continue 525 jg startup_continue
5264: .fill 16,4,0x0
474#else 527#else
475 /* Continue with 31bit startup code in head31.S */ 528 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
476 l %r13,5f-.LPG0(%r13) 529 l %r13,4f-.LPG0(%r13)
477 b 0(%r13) 530 b 0(%r13)
478 .align 8 531 .align 8
4795: .long startup_continue 5324: .long startup_continue
480#endif 533#endif
481
482 .align 8 534 .align 8
4836: .long 0x7fffffff,0xffffffff 5355: .long 0x7fffffff,0xffffffff
484
485#include "head_kdump.S"
486 536
487# 537#
488# params at 10400 (setup.h) 538# params at 10400 (setup.h)
@@ -491,8 +541,6 @@ ENTRY(startup_kdump)
491 .long 0,0 # IPL_DEVICE 541 .long 0,0 # IPL_DEVICE
492 .long 0,0 # INITRD_START 542 .long 0,0 # INITRD_START
493 .long 0,0 # INITRD_SIZE 543 .long 0,0 # INITRD_SIZE
494 .long 0,0 # OLDMEM_BASE
495 .long 0,0 # OLDMEM_SIZE
496 544
497 .org COMMAND_LINE 545 .org COMMAND_LINE
498 .byte "root=/dev/ram0 ro" 546 .byte "root=/dev/ram0 ro"
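(Both variants of the startup facility check above implement the same test: the facility bits stored by stfl/stfle, ANDed with the architecture-level requirement words, must reproduce those words exactly, or the kernel stops with the "more recent processor hardware" message. A hedged C sketch of the test — the word count and mask layout differ between the two sides of the diff:

    #include <stdint.h>

    /* Returns 1 if every required facility bit is present in the stored list. */
    static int facilities_ok(const uint32_t *stfl_fac_list,
    			 const uint32_t *required, int words)
    {
    	int i;

    	for (i = 0; i < words; i++) {
    		/* n + cl in the assembly: AND, then compare with the mask */
    		if ((stfl_fac_list[i] & required[i]) != required[i])
    			return 0;
    	}
    	return 1;
    }

The old side stores the word count in front of the mask words; the new side hard-codes two words per machine generation.)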
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 9a99856df1c..f21954b44dc 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 2005, 2010 2 * arch/s390/kernel/head31.S
3 *
4 * Copyright (C) IBM Corp. 2005,2010
3 * 5 *
4 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -78,7 +80,10 @@ ENTRY(startup_continue)
78 80
79ENTRY(_ehead) 81ENTRY(_ehead)
80 82
83#ifdef CONFIG_SHARED_KERNEL
81 .org 0x100000 - 0x11000 # head.o ends at 0x11000 84 .org 0x100000 - 0x11000 # head.o ends at 0x11000
85#endif
86
82# 87#
83# startup-code, running in absolute addressing mode 88# startup-code, running in absolute addressing mode
84# 89#
@@ -87,7 +92,7 @@ ENTRY(_stext)
87.LPG3: 92.LPG3:
88# check control registers 93# check control registers
89 stctl %c0,%c15,0(%r15) 94 stctl %c0,%c15,0(%r15)
90 oi 2(%r15),0x60 # enable sigp emergency & external call 95 oi 2(%r15),0x40 # enable sigp emergency signal
91 oi 0(%r15),0x10 # switch on low address protection 96 oi 0(%r15),0x10 # switch on low address protection
92 lctl %c0,%c15,0(%r15) 97 lctl %c0,%c15,0(%r15)
93 98
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae2579..ae5d492b069 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 1999, 2010 2 * arch/s390/kernel/head64.S
3 *
4 * Copyright (C) IBM Corp. 1999,2010
3 * 5 *
4 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -76,7 +78,10 @@ ENTRY(startup_continue)
76 78
77ENTRY(_ehead) 79ENTRY(_ehead)
78 80
81#ifdef CONFIG_SHARED_KERNEL
79 .org 0x100000 - 0x11000 # head.o ends at 0x11000 82 .org 0x100000 - 0x11000 # head.o ends at 0x11000
83#endif
84
80# 85#
81# startup-code, running in absolute addressing mode 86# startup-code, running in absolute addressing mode
82# 87#
@@ -85,7 +90,7 @@ ENTRY(_stext)
85.LPG3: 90.LPG3:
86# check control registers 91# check control registers
87 stctg %c0,%c15,0(%r15) 92 stctg %c0,%c15,0(%r15)
88 oi 6(%r15),0x60 # enable sigp emergency & external call 93 oi 6(%r15),0x40 # enable sigp emergency signal
 89 oi 4(%r15),0x10 # switch on low address protection 94 oi 4(%r15),0x10 # switch on low address protection
90 lctlg %c0,%c15,0(%r15) 95 lctlg %c0,%c15,0(%r15)
91 96
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
deleted file mode 100644
index 085a95eb315..00000000000
--- a/arch/s390/kernel/head_kdump.S
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * S390 kdump lowlevel functions (new kernel)
3 *
4 * Copyright IBM Corp. 2011
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#include <asm/sigp.h>
9
10#define DATAMOVER_ADDR 0x4000
11#define COPY_PAGE_ADDR 0x6000
12
13#ifdef CONFIG_CRASH_DUMP
14
15#
16# kdump entry (new kernel - not yet relocated)
17#
18# Note: This code has to be position independent
19#
20
21.align 2
22.Lep_startup_kdump:
23 lhi %r1,2 # mode 2 = esame (dump)
24 sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to esame mode
25 sam64 # Switch to 64 bit addressing
26 basr %r13,0
27.Lbase:
28 larl %r2,.Lbase_addr # Check, if we have been
29 lg %r2,0(%r2) # already relocated:
30 clgr %r2,%r13 #
31 jne .Lrelocate # No : Start data mover
32 lghi %r2,0 # Yes: Start kdump kernel
33 brasl %r14,startup_kdump_relocated
34
35.Lrelocate:
36 larl %r4,startup
37 lg %r2,0x418(%r4) # Get kdump base
38 lg %r3,0x420(%r4) # Get kdump size
39
40 larl %r10,.Lcopy_start # Source of data mover
41 lghi %r8,DATAMOVER_ADDR # Target of data mover
42 mvc 0(256,%r8),0(%r10) # Copy data mover code
43
44 agr %r8,%r2 # Copy data mover to
45 mvc 0(256,%r8),0(%r10) # reserved mem
46
47 lghi %r14,DATAMOVER_ADDR # Jump to copied data mover
48 basr %r14,%r14
49.Lbase_addr:
50 .quad .Lbase
51
52#
53# kdump data mover code (runs at address DATAMOVER_ADDR)
54#
55# r2: kdump base address
56# r3: kdump size
57#
58.Lcopy_start:
59 basr %r13,0 # Base
600:
61 lgr %r11,%r2 # Save kdump base address
62 lgr %r12,%r2
63 agr %r12,%r3 # Compute kdump end address
64
65 lghi %r5,0
66 lghi %r10,COPY_PAGE_ADDR # Load copy page address
671:
68 mvc 0(256,%r10),0(%r5) # Copy old kernel to tmp
69 mvc 0(256,%r5),0(%r11) # Copy new kernel to old
70 mvc 0(256,%r11),0(%r10) # Copy tmp to new
71 aghi %r11,256
72 aghi %r5,256
73 clgr %r11,%r12
74 jl 1b
75
76 lg %r14,.Lstartup_kdump-0b(%r13)
77 basr %r14,%r14 # Start relocated kernel
78.Lstartup_kdump:
79 .long 0x00000000,0x00000000 + startup_kdump_relocated
80.Lcopy_end:
81
82#
83# Startup of kdump (relocated new kernel)
84#
85.align 2
86startup_kdump_relocated:
87 basr %r13,0
880: lpswe .Lrestart_psw-0b(%r13) # Start new kernel...
89.align 8
90.Lrestart_psw:
91 .quad 0x0000000080000000,0x0000000000000000 + startup
92#else
93.align 2
94.Lep_startup_kdump:
95#ifdef CONFIG_64BIT
96 larl %r13,startup_kdump_crash
97 lpswe 0(%r13)
98.align 8
99startup_kdump_crash:
100 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
101#else
102 basr %r13,0
1030: lpsw startup_kdump_crash-0b(%r13)
104.align 8
105startup_kdump_crash:
106 .long 0x000a0000,0x00000000 + startup_kdump_crash
107#endif /* CONFIG_64BIT */
108#endif /* CONFIG_CRASH_DUMP */
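(The deleted data mover above exchanges the memory at absolute address 0 with the relocated kdump kernel image, 256 bytes at a time, through a scratch page at COPY_PAGE_ADDR. A rough C equivalent, for illustration only — names invented, the real code runs position-independent with mvc:

    #include <stddef.h>
    #include <string.h>

    #define CHUNK 256

    static void swap_kernels(char *low, char *kdump_base, size_t size, char *tmp)
    {
    	size_t off;

    	for (off = 0; off < size; off += CHUNK) {
    		memcpy(tmp, low + off, CHUNK);              /* old kernel -> tmp */
    		memcpy(low + off, kdump_base + off, CHUNK); /* new kernel -> old */
    		memcpy(kdump_base + off, tmp, CHUNK);       /* tmp -> new */
    	}
    }

After the swap the relocated kernel continues at startup_kdump_relocated, as the deleted assembly shows.)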
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6ffcd320321..48c71020636 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * arch/s390/kernel/ipl.c
2 * ipl/reipl/dump support for Linux on s390. 3 * ipl/reipl/dump support for Linux on s390.
3 * 4 *
4 * Copyright IBM Corp. 2005, 2012 5 * Copyright IBM Corp. 2005,2007
5 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
6 * Heiko Carstens <heiko.carstens@de.ibm.com> 7 * Heiko Carstens <heiko.carstens@de.ibm.com>
7 * Volker Sameske <sameske@de.ibm.com> 8 * Volker Sameske <sameske@de.ibm.com>
@@ -15,8 +16,6 @@
15#include <linux/ctype.h> 16#include <linux/ctype.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
17#include <linux/gfp.h> 18#include <linux/gfp.h>
18#include <linux/crash_dump.h>
19#include <linux/debug_locks.h>
20#include <asm/ipl.h> 19#include <asm/ipl.h>
21#include <asm/smp.h> 20#include <asm/smp.h>
22#include <asm/setup.h> 21#include <asm/setup.h>
@@ -25,10 +24,8 @@
25#include <asm/ebcdic.h> 24#include <asm/ebcdic.h>
26#include <asm/reset.h> 25#include <asm/reset.h>
27#include <asm/sclp.h> 26#include <asm/sclp.h>
27#include <asm/sigp.h>
28#include <asm/checksum.h> 28#include <asm/checksum.h>
29#include <asm/debug.h>
30#include <asm/os_info.h>
31#include "entry.h"
32 29
33#define IPL_PARM_BLOCK_VERSION 0 30#define IPL_PARM_BLOCK_VERSION 0
34 31
@@ -278,8 +275,8 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
278static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); 275static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
279 276
280/* VM IPL PARM routines */ 277/* VM IPL PARM routines */
281static size_t reipl_get_ascii_vmparm(char *dest, size_t size, 278size_t reipl_get_ascii_vmparm(char *dest, size_t size,
282 const struct ipl_parameter_block *ipb) 279 const struct ipl_parameter_block *ipb)
283{ 280{
284 int i; 281 int i;
285 size_t len; 282 size_t len;
@@ -341,8 +338,8 @@ static size_t scpdata_length(const char* buf, size_t count)
341 return count; 338 return count;
342} 339}
343 340
344static size_t reipl_append_ascii_scpdata(char *dest, size_t size, 341size_t reipl_append_ascii_scpdata(char *dest, size_t size,
345 const struct ipl_parameter_block *ipb) 342 const struct ipl_parameter_block *ipb)
346{ 343{
347 size_t count; 344 size_t count;
348 size_t i; 345 size_t i;
@@ -572,7 +569,7 @@ static void __ipl_run(void *unused)
572 569
573static void ipl_run(struct shutdown_trigger *trigger) 570static void ipl_run(struct shutdown_trigger *trigger)
574{ 571{
575 smp_call_ipl_cpu(__ipl_run, NULL); 572 smp_switch_to_ipl_cpu(__ipl_run, NULL);
576} 573}
577 574
578static int __init ipl_init(void) 575static int __init ipl_init(void)
@@ -951,13 +948,6 @@ static struct attribute_group reipl_nss_attr_group = {
951 .attrs = reipl_nss_attrs, 948 .attrs = reipl_nss_attrs,
952}; 949};
953 950
954static void set_reipl_block_actual(struct ipl_parameter_block *reipl_block)
955{
956 reipl_block_actual = reipl_block;
957 os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual,
958 reipl_block->hdr.len);
959}
960
961/* reipl type */ 951/* reipl type */
962 952
963static int reipl_set_type(enum ipl_type type) 953static int reipl_set_type(enum ipl_type type)
@@ -973,7 +963,7 @@ static int reipl_set_type(enum ipl_type type)
973 reipl_method = REIPL_METHOD_CCW_VM; 963 reipl_method = REIPL_METHOD_CCW_VM;
974 else 964 else
975 reipl_method = REIPL_METHOD_CCW_CIO; 965 reipl_method = REIPL_METHOD_CCW_CIO;
976 set_reipl_block_actual(reipl_block_ccw); 966 reipl_block_actual = reipl_block_ccw;
977 break; 967 break;
978 case IPL_TYPE_FCP: 968 case IPL_TYPE_FCP:
979 if (diag308_set_works) 969 if (diag308_set_works)
@@ -982,7 +972,7 @@ static int reipl_set_type(enum ipl_type type)
982 reipl_method = REIPL_METHOD_FCP_RO_VM; 972 reipl_method = REIPL_METHOD_FCP_RO_VM;
983 else 973 else
984 reipl_method = REIPL_METHOD_FCP_RO_DIAG; 974 reipl_method = REIPL_METHOD_FCP_RO_DIAG;
985 set_reipl_block_actual(reipl_block_fcp); 975 reipl_block_actual = reipl_block_fcp;
986 break; 976 break;
987 case IPL_TYPE_FCP_DUMP: 977 case IPL_TYPE_FCP_DUMP:
988 reipl_method = REIPL_METHOD_FCP_DUMP; 978 reipl_method = REIPL_METHOD_FCP_DUMP;
@@ -992,7 +982,7 @@ static int reipl_set_type(enum ipl_type type)
992 reipl_method = REIPL_METHOD_NSS_DIAG; 982 reipl_method = REIPL_METHOD_NSS_DIAG;
993 else 983 else
994 reipl_method = REIPL_METHOD_NSS; 984 reipl_method = REIPL_METHOD_NSS;
995 set_reipl_block_actual(reipl_block_nss); 985 reipl_block_actual = reipl_block_nss;
996 break; 986 break;
997 case IPL_TYPE_UNKNOWN: 987 case IPL_TYPE_UNKNOWN:
998 reipl_method = REIPL_METHOD_DEFAULT; 988 reipl_method = REIPL_METHOD_DEFAULT;
@@ -1109,7 +1099,7 @@ static void __reipl_run(void *unused)
1109 1099
1110static void reipl_run(struct shutdown_trigger *trigger) 1100static void reipl_run(struct shutdown_trigger *trigger)
1111{ 1101{
1112 smp_call_ipl_cpu(__reipl_run, NULL); 1102 smp_switch_to_ipl_cpu(__reipl_run, NULL);
1113} 1103}
1114 1104
1115static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) 1105static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
@@ -1264,29 +1254,6 @@ static int __init reipl_fcp_init(void)
1264 return 0; 1254 return 0;
1265} 1255}
1266 1256
1267static int __init reipl_type_init(void)
1268{
1269 enum ipl_type reipl_type = ipl_info.type;
1270 struct ipl_parameter_block *reipl_block;
1271 unsigned long size;
1272
1273 reipl_block = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size);
1274 if (!reipl_block)
1275 goto out;
1276 /*
1277 * If we have an OS info reipl block, this will be used
1278 */
1279 if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) {
1280 memcpy(reipl_block_fcp, reipl_block, size);
1281 reipl_type = IPL_TYPE_FCP;
1282 } else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) {
1283 memcpy(reipl_block_ccw, reipl_block, size);
1284 reipl_type = IPL_TYPE_CCW;
1285 }
1286out:
1287 return reipl_set_type(reipl_type);
1288}
1289
1290static int __init reipl_init(void) 1257static int __init reipl_init(void)
1291{ 1258{
1292 int rc; 1259 int rc;
@@ -1308,7 +1275,10 @@ static int __init reipl_init(void)
1308 rc = reipl_nss_init(); 1275 rc = reipl_nss_init();
1309 if (rc) 1276 if (rc)
1310 return rc; 1277 return rc;
1311 return reipl_type_init(); 1278 rc = reipl_set_type(ipl_info.type);
1279 if (rc)
1280 return rc;
1281 return 0;
1312} 1282}
1313 1283
1314static struct shutdown_action __refdata reipl_action = { 1284static struct shutdown_action __refdata reipl_action = {
@@ -1449,7 +1419,7 @@ static void dump_run(struct shutdown_trigger *trigger)
1449 if (dump_method == DUMP_METHOD_NONE) 1419 if (dump_method == DUMP_METHOD_NONE)
1450 return; 1420 return;
1451 smp_send_stop(); 1421 smp_send_stop();
1452 smp_call_ipl_cpu(__dump_run, NULL); 1422 smp_switch_to_ipl_cpu(__dump_run, NULL);
1453} 1423}
1454 1424
1455static int __init dump_ccw_init(void) 1425static int __init dump_ccw_init(void)
@@ -1527,12 +1497,30 @@ static struct shutdown_action __refdata dump_action = {
1527 1497
1528static void dump_reipl_run(struct shutdown_trigger *trigger) 1498static void dump_reipl_run(struct shutdown_trigger *trigger)
1529{ 1499{
1530 unsigned long ipib = (unsigned long) reipl_block_actual; 1500 preempt_disable();
1531 unsigned int csum; 1501 /*
1532 1502 * Bypass dynamic address translation (DAT) when storing IPL parameter
1533 csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); 1503 * information block address and checksum into the prefix area
1534 mem_assign_absolute(S390_lowcore.ipib, ipib); 1504 * (corresponding to absolute addresses 0-8191).
1535 mem_assign_absolute(S390_lowcore.ipib_checksum, csum); 1505 * When enhanced DAT applies and the STE format control in one,
1506 * the absolute address is formed without prefixing. In this case a
1507 * normal store (stg/st) into the prefix area would no more match to
1508 * absolute addresses 0-8191.
1509 */
1510#ifdef CONFIG_64BIT
1511 asm volatile("sturg %0,%1"
1512 :: "a" ((unsigned long) reipl_block_actual),
1513 "a" (&lowcore_ptr[smp_processor_id()]->ipib));
1514#else
1515 asm volatile("stura %0,%1"
1516 :: "a" ((unsigned long) reipl_block_actual),
1517 "a" (&lowcore_ptr[smp_processor_id()]->ipib));
1518#endif
1519 asm volatile("stura %0,%1"
1520 :: "a" (csum_partial(reipl_block_actual,
1521 reipl_block_actual->hdr.len, 0)),
1522 "a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum));
1523 preempt_enable();
1536 dump_run(trigger); 1524 dump_run(trigger);
1537} 1525}
1538 1526
@@ -1583,7 +1571,7 @@ static struct kset *vmcmd_kset;
1583 1571
1584static void vmcmd_run(struct shutdown_trigger *trigger) 1572static void vmcmd_run(struct shutdown_trigger *trigger)
1585{ 1573{
1586 char *cmd; 1574 char *cmd, *next_cmd;
1587 1575
1588 if (strcmp(trigger->name, ON_REIPL_STR) == 0) 1576 if (strcmp(trigger->name, ON_REIPL_STR) == 0)
1589 cmd = vmcmd_on_reboot; 1577 cmd = vmcmd_on_reboot;
@@ -1600,7 +1588,15 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
1600 1588
1601 if (strlen(cmd) == 0) 1589 if (strlen(cmd) == 0)
1602 return; 1590 return;
1603 __cpcmd(cmd, NULL, 0, NULL); 1591 do {
1592 next_cmd = strchr(cmd, '\n');
1593 if (next_cmd) {
1594 next_cmd[0] = 0;
1595 next_cmd += 1;
1596 }
1597 __cpcmd(cmd, NULL, 0, NULL);
1598 cmd = next_cmd;
1599 } while (cmd != NULL);
1604} 1600}
1605 1601
1606static int vmcmd_init(void) 1602static int vmcmd_init(void)
@@ -1625,7 +1621,9 @@ static void stop_run(struct shutdown_trigger *trigger)
1625 if (strcmp(trigger->name, ON_PANIC_STR) == 0 || 1621 if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
1626 strcmp(trigger->name, ON_RESTART_STR) == 0) 1622 strcmp(trigger->name, ON_RESTART_STR) == 0)
1627 disabled_wait((unsigned long) __builtin_return_address(0)); 1623 disabled_wait((unsigned long) __builtin_return_address(0));
1628 smp_stop_cpu(); 1624 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
1625 cpu_relax();
1626 for (;;);
1629} 1627}
1630 1628
1631static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, 1629static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
@@ -1713,7 +1711,6 @@ static struct kobj_attribute on_panic_attr =
1713 1711
1714static void do_panic(void) 1712static void do_panic(void)
1715{ 1713{
1716 lgr_info_log();
1717 on_panic_trigger.action->fn(&on_panic_trigger); 1714 on_panic_trigger.action->fn(&on_panic_trigger);
1718 stop_run(&on_panic_trigger); 1715 stop_run(&on_panic_trigger);
1719} 1716}
@@ -1739,25 +1736,13 @@ static ssize_t on_restart_store(struct kobject *kobj,
1739static struct kobj_attribute on_restart_attr = 1736static struct kobj_attribute on_restart_attr =
1740 __ATTR(on_restart, 0644, on_restart_show, on_restart_store); 1737 __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
1741 1738
1742static void __do_restart(void *ignore) 1739void do_restart(void)
1743{ 1740{
1744 __arch_local_irq_stosm(0x04); /* enable DAT */
1745 smp_send_stop(); 1741 smp_send_stop();
1746#ifdef CONFIG_CRASH_DUMP
1747 crash_kexec(NULL);
1748#endif
1749 on_restart_trigger.action->fn(&on_restart_trigger); 1742 on_restart_trigger.action->fn(&on_restart_trigger);
1750 stop_run(&on_restart_trigger); 1743 stop_run(&on_restart_trigger);
1751} 1744}
1752 1745
1753void do_restart(void)
1754{
1755 tracing_off();
1756 debug_locks_off();
1757 lgr_info_log();
1758 smp_call_online_cpu(__do_restart, NULL);
1759}
1760
1761/* on halt */ 1746/* on halt */
1762 1747
1763static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action}; 1748static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
@@ -2024,7 +2009,7 @@ static void do_reset_calls(void)
2024 2009
2025u32 dump_prefix_page; 2010u32 dump_prefix_page;
2026 2011
2027void s390_reset_system(void (*func)(void *), void *data) 2012void s390_reset_system(void)
2028{ 2013{
2029 struct _lowcore *lc; 2014 struct _lowcore *lc;
2030 2015
@@ -2043,19 +2028,15 @@ void s390_reset_system(void (*func)(void *), void *data)
2043 __ctl_clear_bit(0,28); 2028 __ctl_clear_bit(0,28);
2044 2029
2045 /* Set new machine check handler */ 2030 /* Set new machine check handler */
2046 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; 2031 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
2047 S390_lowcore.mcck_new_psw.addr = 2032 S390_lowcore.mcck_new_psw.addr =
2048 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; 2033 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
2049 2034
2050 /* Set new program check handler */ 2035 /* Set new program check handler */
2051 S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; 2036 S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
2052 S390_lowcore.program_new_psw.addr = 2037 S390_lowcore.program_new_psw.addr =
2053 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; 2038 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
2054 2039
2055 /* Store status at absolute zero */
2056 store_status();
2057
2058 do_reset_calls(); 2040 do_reset_calls();
2059 if (func)
2060 func(data);
2061} 2041}
2042
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 9df824ea166..1f4050d45f7 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2004, 2011 2 * Copyright IBM Corp. 2004,2011
3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, 3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
4 * Holger Smolinski <Holger.Smolinski@de.ibm.com>, 4 * Holger Smolinski <Holger.Smolinski@de.ibm.com>,
5 * Thomas Spatzier <tspat@de.ibm.com>, 5 * Thomas Spatzier <tspat@de.ibm.com>,
@@ -24,65 +24,36 @@
24#include <asm/irq.h> 24#include <asm/irq.h>
25#include "entry.h" 25#include "entry.h"
26 26
27DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
28EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
29
30struct irq_class { 27struct irq_class {
31 char *name; 28 char *name;
32 char *desc; 29 char *desc;
33}; 30};
34 31
35/* 32static const struct irq_class intrclass_names[] = {
 36 * The list of "main" irq classes on s390. This is the list of interrupts 33 {.name = "EXT" },
37 * that appear both in /proc/stat ("intr" line) and /proc/interrupts. 34 {.name = "I/O" },
38 * Historically only external and I/O interrupts have been part of /proc/stat. 35 {.name = "CLK", .desc = "[EXT] Clock Comparator" },
39 * We can't add the split external and I/O sub classes since the first field 36 {.name = "IPI", .desc = "[EXT] Signal Processor" },
40 * in the "intr" line in /proc/stat is supposed to be the sum of all other 37 {.name = "TMR", .desc = "[EXT] CPU Timer" },
41 * fields. 38 {.name = "TAL", .desc = "[EXT] Timing Alert" },
42 * Since the external and I/O interrupt fields are already sums we would end 39 {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
 43 * up with a sum that counts each interrupt twice. 40 {.name = "DSD", .desc = "[EXT] DASD Diag" },
44 */ 41 {.name = "VRT", .desc = "[EXT] Virtio" },
45static const struct irq_class irqclass_main_desc[NR_IRQS] = { 42 {.name = "SCP", .desc = "[EXT] Service Call" },
46 [EXTERNAL_INTERRUPT] = {.name = "EXT"}, 43 {.name = "IUC", .desc = "[EXT] IUCV" },
47 [IO_INTERRUPT] = {.name = "I/O"} 44 {.name = "CPM", .desc = "[EXT] CPU Measurement" },
48}; 45 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
49 46 {.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
50/* 47 {.name = "DAS", .desc = "[I/O] DASD" },
51 * The list of split external and I/O interrupts that appear only in 48 {.name = "C15", .desc = "[I/O] 3215" },
52 * /proc/interrupts. 49 {.name = "C70", .desc = "[I/O] 3270" },
53 * In addition this list contains non external / I/O events like NMIs. 50 {.name = "TAP", .desc = "[I/O] Tape" },
54 */ 51 {.name = "VMR", .desc = "[I/O] Unit Record Devices" },
55static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { 52 {.name = "LCS", .desc = "[I/O] LCS" },
56 [IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"}, 53 {.name = "CLW", .desc = "[I/O] CLAW" },
57 [IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"}, 54 {.name = "CTC", .desc = "[I/O] CTC" },
58 [IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"}, 55 {.name = "APB", .desc = "[I/O] AP Bus" },
59 [IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"}, 56 {.name = "NMI", .desc = "[NMI] Machine Check" },
60 [IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
61 [IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
62 [IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
63 [IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
64 [IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
65 [IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
66 [IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
67 [IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
68 [IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
69 [IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
70 [IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
71 [IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
72 [IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"},
73 [IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"},
74 [IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
75 [IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
76 [IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
77 [IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
78 [IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
79 [IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
80 [IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
81 [IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
82 [IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
83 [IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
84 [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
85 [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
86}; 57};
87 58
88/* 59/*
@@ -90,34 +61,30 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
90 */ 61 */
91int show_interrupts(struct seq_file *p, void *v) 62int show_interrupts(struct seq_file *p, void *v)
92{ 63{
93 int irq = *(loff_t *) v; 64 int i = *(loff_t *) v, j;
94 int cpu;
95 65
96 get_online_cpus(); 66 get_online_cpus();
97 if (irq == 0) { 67 if (i == 0) {
98 seq_puts(p, " "); 68 seq_puts(p, " ");
99 for_each_online_cpu(cpu) 69 for_each_online_cpu(j)
100 seq_printf(p, "CPU%d ", cpu); 70 seq_printf(p, "CPU%d ",j);
101 seq_putc(p, '\n'); 71 seq_putc(p, '\n');
102 } 72 }
103 if (irq < NR_IRQS) { 73
104 seq_printf(p, "%s: ", irqclass_main_desc[irq].name); 74 if (i < NR_IRQS) {
105 for_each_online_cpu(cpu) 75 seq_printf(p, "%s: ", intrclass_names[i].name);
106 seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]); 76#ifndef CONFIG_SMP
107 seq_putc(p, '\n'); 77 seq_printf(p, "%10u ", kstat_irqs(i));
108 goto skip_arch_irqs; 78#else
109 } 79 for_each_online_cpu(j)
110 for (irq = 0; irq < NR_ARCH_IRQS; irq++) { 80 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
111 seq_printf(p, "%s: ", irqclass_sub_desc[irq].name); 81#endif
112 for_each_online_cpu(cpu) 82 if (intrclass_names[i].desc)
113 seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]); 83 seq_printf(p, " %s", intrclass_names[i].desc);
114 if (irqclass_sub_desc[irq].desc) 84 seq_putc(p, '\n');
115 seq_printf(p, " %s", irqclass_sub_desc[irq].desc); 85 }
116 seq_putc(p, '\n');
117 }
118skip_arch_irqs:
119 put_online_cpus(); 86 put_online_cpus();
120 return 0; 87 return 0;
121} 88}
122 89
123/* 90/*
@@ -149,10 +116,9 @@ asmlinkage void do_softirq(void)
149 "a" (__do_softirq) 116 "a" (__do_softirq)
150 : "0", "1", "2", "3", "4", "5", "14", 117 : "0", "1", "2", "3", "4", "5", "14",
151 "cc", "memory" ); 118 "cc", "memory" );
152 } else { 119 } else
153 /* We are already on the async stack. */ 120 /* We are already on the async stack. */
154 __do_softirq(); 121 __do_softirq();
155 }
156 } 122 }
157 123
158 local_irq_restore(flags); 124 local_irq_restore(flags);
@@ -197,6 +163,13 @@ static inline int ext_hash(u16 code)
197 return (code + (code >> 9)) & 0xff; 163 return (code + (code >> 9)) & 0xff;
198} 164}
199 165
166static void ext_int_hash_update(struct rcu_head *head)
167{
168 struct ext_int_info *p = container_of(head, struct ext_int_info, rcu);
169
170 kfree(p);
171}
172
200int register_external_interrupt(u16 code, ext_int_handler_t handler) 173int register_external_interrupt(u16 code, ext_int_handler_t handler)
201{ 174{
202 struct ext_int_info *p; 175 struct ext_int_info *p;
@@ -224,39 +197,41 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
224 int index = ext_hash(code); 197 int index = ext_hash(code);
225 198
226 spin_lock_irqsave(&ext_int_hash_lock, flags); 199 spin_lock_irqsave(&ext_int_hash_lock, flags);
227 list_for_each_entry_rcu(p, &ext_int_hash[index], entry) { 200 list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
228 if (p->code == code && p->handler == handler) { 201 if (p->code == code && p->handler == handler) {
229 list_del_rcu(&p->entry); 202 list_del_rcu(&p->entry);
230 kfree_rcu(p, rcu); 203 call_rcu(&p->rcu, ext_int_hash_update);
231 } 204 }
232 }
233 spin_unlock_irqrestore(&ext_int_hash_lock, flags); 205 spin_unlock_irqrestore(&ext_int_hash_lock, flags);
234 return 0; 206 return 0;
235} 207}
236EXPORT_SYMBOL(unregister_external_interrupt); 208EXPORT_SYMBOL(unregister_external_interrupt);
237 209
238void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code, 210void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
239 unsigned int param32, unsigned long param64) 211 unsigned int param32, unsigned long param64)
240{ 212{
241 struct pt_regs *old_regs; 213 struct pt_regs *old_regs;
214 unsigned short code;
242 struct ext_int_info *p; 215 struct ext_int_info *p;
243 int index; 216 int index;
244 217
218 code = (unsigned short) ext_int_code;
245 old_regs = set_irq_regs(regs); 219 old_regs = set_irq_regs(regs);
220 s390_idle_check(regs, S390_lowcore.int_clock,
221 S390_lowcore.async_enter_timer);
246 irq_enter(); 222 irq_enter();
247 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) { 223 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
248 /* Serve timer interrupts first. */ 224 /* Serve timer interrupts first. */
249 clock_comparator_work(); 225 clock_comparator_work();
250 } 226 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
251 kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL); 227 if (code != 0x1004)
252 if (ext_code.code != 0x1004)
253 __get_cpu_var(s390_idle).nohz_delay = 1; 228 __get_cpu_var(s390_idle).nohz_delay = 1;
254 229
255 index = ext_hash(ext_code.code); 230 index = ext_hash(code);
256 rcu_read_lock(); 231 rcu_read_lock();
257 list_for_each_entry_rcu(p, &ext_int_hash[index], entry) 232 list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
258 if (likely(p->code == ext_code.code)) 233 if (likely(p->code == code))
259 p->handler(ext_code, param32, param64); 234 p->handler(ext_int_code, param32, param64);
260 rcu_read_unlock(); 235 rcu_read_unlock();
261 irq_exit(); 236 irq_exit();
262 set_irq_regs(old_regs); 237 set_irq_regs(old_regs);
@@ -289,26 +264,3 @@ void service_subclass_irq_unregister(void)
289 spin_unlock(&sc_irq_lock); 264 spin_unlock(&sc_irq_lock);
290} 265}
291EXPORT_SYMBOL(service_subclass_irq_unregister); 266EXPORT_SYMBOL(service_subclass_irq_unregister);
292
293static DEFINE_SPINLOCK(ma_subclass_lock);
294static int ma_subclass_refcount;
295
296void measurement_alert_subclass_register(void)
297{
298 spin_lock(&ma_subclass_lock);
299 if (!ma_subclass_refcount)
300 ctl_set_bit(0, 5);
301 ma_subclass_refcount++;
302 spin_unlock(&ma_subclass_lock);
303}
304EXPORT_SYMBOL(measurement_alert_subclass_register);
305
306void measurement_alert_subclass_unregister(void)
307{
308 spin_lock(&ma_subclass_lock);
309 ma_subclass_refcount--;
310 if (!ma_subclass_refcount)
311 ctl_clear_bit(0, 5);
312 spin_unlock(&ma_subclass_lock);
313}
314EXPORT_SYMBOL(measurement_alert_subclass_unregister);
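(Both versions of the external-interrupt dispatch above look up handlers through ext_hash(), which folds the 16-bit interrupt code into one of 256 hash buckets of ext_int_hash[]. A standalone illustration; the sample value is the clock-comparator code 0x1004 checked in do_extint:

    #include <stdint.h>
    #include <stdio.h>

    /* Same folding as ext_hash() in the diff: 16-bit code -> bucket 0..255. */
    static unsigned int ext_hash(uint16_t code)
    {
    	return (code + (code >> 9)) & 0xff;
    }

    int main(void)
    {
    	/* 0x1004 + (0x1004 >> 9) = 0x100c, masked to bucket 0x0c. */
    	printf("bucket %#x\n", ext_hash(0x1004));
    	return 0;
    }
)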
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index b987ab2c154..44cc06bedf7 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -18,35 +18,18 @@ struct insn {
18} __packed; 18} __packed;
19 19
20struct insn_args { 20struct insn_args {
21 struct jump_entry *entry; 21 unsigned long *target;
22 enum jump_label_type type; 22 struct insn *insn;
23 ssize_t size;
23}; 24};
24 25
25static void __jump_label_transform(struct jump_entry *entry, 26static int __arch_jump_label_transform(void *data)
26 enum jump_label_type type)
27{ 27{
28 struct insn insn; 28 struct insn_args *args = data;
29 int rc; 29 int rc;
30 30
31 if (type == JUMP_LABEL_ENABLE) { 31 rc = probe_kernel_write(args->target, args->insn, args->size);
32 /* brcl 15,offset */
33 insn.opcode = 0xc0f4;
34 insn.offset = (entry->target - entry->code) >> 1;
35 } else {
36 /* brcl 0,0 */
37 insn.opcode = 0xc004;
38 insn.offset = 0;
39 }
40
41 rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
42 WARN_ON_ONCE(rc < 0); 32 WARN_ON_ONCE(rc < 0);
43}
44
45static int __sm_arch_jump_label_transform(void *data)
46{
47 struct insn_args *args = data;
48
49 __jump_label_transform(args->entry, args->type);
50 return 0; 33 return 0;
51} 34}
52 35
@@ -54,17 +37,23 @@ void arch_jump_label_transform(struct jump_entry *entry,
54 enum jump_label_type type) 37 enum jump_label_type type)
55{ 38{
56 struct insn_args args; 39 struct insn_args args;
40 struct insn insn;
57 41
58 args.entry = entry; 42 if (type == JUMP_LABEL_ENABLE) {
59 args.type = type; 43 /* brcl 15,offset */
44 insn.opcode = 0xc0f4;
45 insn.offset = (entry->target - entry->code) >> 1;
46 } else {
47 /* brcl 0,0 */
48 insn.opcode = 0xc004;
49 insn.offset = 0;
50 }
60 51
61 stop_machine(__sm_arch_jump_label_transform, &args, NULL); 52 args.target = (void *) entry->code;
62} 53 args.insn = &insn;
54 args.size = JUMP_LABEL_NOP_SIZE;
63 55
64void arch_jump_label_transform_static(struct jump_entry *entry, 56 stop_machine(__arch_jump_label_transform, &args, NULL);
65 enum jump_label_type type)
66{
67 __jump_label_transform(entry, type);
68} 57}
69 58
70#endif 59#endif
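(Both sides of the jump_label.c diff build the same 6-byte instruction before patching it in with probe_kernel_write(): brcl 15,<offset> to enable the jump, brcl 0,0 as the nop to disable it, with the offset counted in halfwords. A sketch of just the encoding step, reusing the struct from the file; the helper name is invented:

    #include <stdint.h>

    struct insn {
    	uint16_t opcode;
    	int32_t  offset;
    } __attribute__((packed));		/* 6 bytes, as in the file */

    static void build_jump_insn(struct insn *insn, unsigned long code,
    			    unsigned long target, int enable)
    {
    	if (enable) {
    		insn->opcode = 0xc0f4;			/* brcl 15,offset */
    		insn->offset = (target - code) >> 1;	/* halfword units */
    	} else {
    		insn->opcode = 0xc004;			/* brcl 0,0 (nop) */
    		insn->offset = 0;
    	}
    }

The two versions differ only in where this runs relative to the stop_machine() boundary, not in the instruction produced.)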
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d1c7214e157..1d05d669107 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -15,7 +15,7 @@
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 * 17 *
18 * Copyright IBM Corp. 2002, 2006 18 * Copyright (C) IBM Corporation, 2002, 2006
19 * 19 *
20 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com> 20 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
21 */ 21 */
@@ -547,7 +547,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
547 */ 547 */
548 entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 548 entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
549 if (entry) { 549 if (entry) {
550 regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE; 550 regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
551 return 1; 551 return 1;
552 } 552 }
553 553
@@ -635,7 +635,7 @@ void __kprobes jprobe_return(void)
635 asm volatile(".word 0x0002"); 635 asm volatile(".word 0x0002");
636} 636}
637 637
638static void __used __kprobes jprobe_return_end(void) 638void __kprobes jprobe_return_end(void)
639{ 639{
640 asm volatile("bcr 0,0"); 640 asm volatile("bcr 0,0");
641} 641}
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
deleted file mode 100644
index 6ea6d69339b..00000000000
--- a/arch/s390/kernel/lgr.c
+++ /dev/null
@@ -1,186 +0,0 @@
1/*
2 * Linux Guest Relocation (LGR) detection
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#include <linux/module.h>
9#include <linux/timer.h>
10#include <linux/slab.h>
11#include <asm/facility.h>
12#include <asm/sysinfo.h>
13#include <asm/ebcdic.h>
14#include <asm/debug.h>
15#include <asm/ipl.h>
16
17#define LGR_TIMER_INTERVAL_SECS (30 * 60)
18#define VM_LEVEL_MAX 2 /* Maximum is 8, but we only record two levels */
19
20/*
21 * LGR info: Contains stfle and stsi data
22 */
23struct lgr_info {
24 /* Bit field with facility information: 4 DWORDs are stored */
25 u64 stfle_fac_list[4];
26 /* Level of system (1 = CEC, 2 = LPAR, 3 = z/VM) */
27 u32 level;
28 /* Level 1: CEC info (stsi 1.1.1) */
29 char manufacturer[16];
30 char type[4];
31 char sequence[16];
32 char plant[4];
33 char model[16];
34 /* Level 2: LPAR info (stsi 2.2.2) */
35 u16 lpar_number;
36 char name[8];
37 /* Level 3: VM info (stsi 3.2.2) */
38 u8 vm_count;
39 struct {
40 char name[8];
41 char cpi[16];
42 } vm[VM_LEVEL_MAX];
43} __packed __aligned(8);
44
45/*
46 * LGR globals
47 */
48static char lgr_page[PAGE_SIZE] __aligned(PAGE_SIZE);
49static struct lgr_info lgr_info_last;
50static struct lgr_info lgr_info_cur;
51static struct debug_info *lgr_dbf;
52
53/*
54 * Copy buffer and then convert it to ASCII
55 */
56static void cpascii(char *dst, char *src, int size)
57{
58 memcpy(dst, src, size);
59 EBCASC(dst, size);
60}
61
62/*
63 * Fill LGR info with 1.1.1 stsi data
64 */
65static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
66{
67 struct sysinfo_1_1_1 *si = (void *) lgr_page;
68
69 if (stsi(si, 1, 1, 1))
70 return;
71 cpascii(lgr_info->manufacturer, si->manufacturer,
72 sizeof(si->manufacturer));
73 cpascii(lgr_info->type, si->type, sizeof(si->type));
74 cpascii(lgr_info->model, si->model, sizeof(si->model));
75 cpascii(lgr_info->sequence, si->sequence, sizeof(si->sequence));
76 cpascii(lgr_info->plant, si->plant, sizeof(si->plant));
77}
78
79/*
80 * Fill LGR info with 2.2.2 stsi data
81 */
82static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
83{
84 struct sysinfo_2_2_2 *si = (void *) lgr_page;
85
86 if (stsi(si, 2, 2, 2))
87 return;
88 cpascii(lgr_info->name, si->name, sizeof(si->name));
89 memcpy(&lgr_info->lpar_number, &si->lpar_number,
90 sizeof(lgr_info->lpar_number));
91}
92
93/*
94 * Fill LGR info with 3.2.2 stsi data
95 */
96static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
97{
98 struct sysinfo_3_2_2 *si = (void *) lgr_page;
99 int i;
100
101 if (stsi(si, 3, 2, 2))
102 return;
103 for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) {
104 cpascii(lgr_info->vm[i].name, si->vm[i].name,
105 sizeof(si->vm[i].name));
106 cpascii(lgr_info->vm[i].cpi, si->vm[i].cpi,
107 sizeof(si->vm[i].cpi));
108 }
109 lgr_info->vm_count = si->count;
110}
111
112/*
113 * Fill LGR info with current data
114 */
115static void lgr_info_get(struct lgr_info *lgr_info)
116{
117 int level;
118
119 memset(lgr_info, 0, sizeof(*lgr_info));
120 stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list));
121 level = stsi(NULL, 0, 0, 0);
122 lgr_info->level = level;
123 if (level >= 1)
124 lgr_stsi_1_1_1(lgr_info);
125 if (level >= 2)
126 lgr_stsi_2_2_2(lgr_info);
127 if (level >= 3)
128 lgr_stsi_3_2_2(lgr_info);
129}
130
131/*
132 * Check if LGR info has changed and if yes log new LGR info to s390dbf
133 */
134void lgr_info_log(void)
135{
136 static DEFINE_SPINLOCK(lgr_info_lock);
137 unsigned long flags;
138
139 if (!spin_trylock_irqsave(&lgr_info_lock, flags))
140 return;
141 lgr_info_get(&lgr_info_cur);
142 if (memcmp(&lgr_info_last, &lgr_info_cur, sizeof(lgr_info_cur)) != 0) {
143 debug_event(lgr_dbf, 1, &lgr_info_cur, sizeof(lgr_info_cur));
144 lgr_info_last = lgr_info_cur;
145 }
146 spin_unlock_irqrestore(&lgr_info_lock, flags);
147}
148EXPORT_SYMBOL_GPL(lgr_info_log);
149
150static void lgr_timer_set(void);
151
152/*
153 * LGR timer callback
154 */
155static void lgr_timer_fn(unsigned long ignored)
156{
157 lgr_info_log();
158 lgr_timer_set();
159}
160
161static struct timer_list lgr_timer =
162 TIMER_DEFERRED_INITIALIZER(lgr_timer_fn, 0, 0);
163
164/*
165 * Setup next LGR timer
166 */
167static void lgr_timer_set(void)
168{
169 mod_timer(&lgr_timer, jiffies + LGR_TIMER_INTERVAL_SECS * HZ);
170}
171
172/*
173 * Initialize LGR: Add s390dbf, write initial lgr_info and setup timer
174 */
175static int __init lgr_init(void)
176{
177 lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info));
178 if (!lgr_dbf)
179 return -ENOMEM;
180 debug_register_view(lgr_dbf, &debug_hex_ascii_view);
181 lgr_info_get(&lgr_info_last);
182 debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last));
183 lgr_timer_set();
184 return 0;
185}
186module_init(lgr_init);
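/*
 * Userspace sketch of the change-detection pattern the deleted lgr.c
 * implemented: periodically take a snapshot, compare it with the last one,
 * and record it only when something changed. The struct and fake_level are
 * illustrative stand-ins for the stsi-derived lgr_info data.
 */
#include <stdio.h>
#include <string.h>

struct snapshot { int level; char name[8]; };

static struct snapshot last, cur;
static int fake_level;

static void snapshot_get(struct snapshot *s)
{
	memset(s, 0, sizeof(*s));
	s->level = fake_level;		/* a real version would call stsi() */
}

static void snapshot_log(void)
{
	snapshot_get(&cur);
	if (memcmp(&last, &cur, sizeof(cur)) != 0) {
		printf("configuration changed, logging new snapshot\n");
		last = cur;
	}
}

int main(void)
{
	snapshot_log();			/* no change: both still zeroed */
	fake_level = 2;
	snapshot_log();			/* logs exactly once */
	return 0;
}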
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b3de2770001..b09b9c62573 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * Copyright IBM Corp. 2005, 2011 2 * arch/s390/kernel/machine_kexec.c
3 *
4 * Copyright IBM Corp. 2005,2006
3 * 5 *
4 * Author(s): Rolf Adelsberger, 6 * Author(s): Rolf Adelsberger,
5 * Heiko Carstens <heiko.carstens@de.ibm.com> 7 * Heiko Carstens <heiko.carstens@de.ibm.com>
6 * Michael Holzheu <holzheu@linux.vnet.ibm.com>
7 */ 8 */
8 9
9#include <linux/device.h> 10#include <linux/device.h>
@@ -12,156 +13,27 @@
12#include <linux/delay.h> 13#include <linux/delay.h>
13#include <linux/reboot.h> 14#include <linux/reboot.h>
14#include <linux/ftrace.h> 15#include <linux/ftrace.h>
15#include <linux/debug_locks.h>
16#include <asm/cio.h> 16#include <asm/cio.h>
17#include <asm/setup.h> 17#include <asm/setup.h>
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/pgalloc.h> 19#include <asm/pgalloc.h>
20#include <asm/system.h>
20#include <asm/smp.h> 21#include <asm/smp.h>
21#include <asm/reset.h> 22#include <asm/reset.h>
22#include <asm/ipl.h> 23#include <asm/ipl.h>
23#include <asm/diag.h>
24#include <asm/elf.h>
25#include <asm/asm-offsets.h>
26#include <asm/os_info.h>
27 24
28typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); 25typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
29 26
30extern const unsigned char relocate_kernel[]; 27extern const unsigned char relocate_kernel[];
31extern const unsigned long long relocate_kernel_len; 28extern const unsigned long long relocate_kernel_len;
32 29
33#ifdef CONFIG_CRASH_DUMP
34
35/*
36 * Create ELF notes for one CPU
37 */
38static void add_elf_notes(int cpu)
39{
40 struct save_area *sa = (void *) 4608 + store_prefix();
41 void *ptr;
42
43 memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
44 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
45 ptr = fill_cpu_elf_notes(ptr, sa);
46 memset(ptr, 0, sizeof(struct elf_note));
47}
48
49/*
50 * Initialize CPU ELF notes
51 */
52void setup_regs(void)
53{
54 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
55 int cpu, this_cpu;
56
57 this_cpu = smp_find_processor_id(stap());
58 add_elf_notes(this_cpu);
59 for_each_online_cpu(cpu) {
60 if (cpu == this_cpu)
61 continue;
62 if (smp_store_status(cpu))
63 continue;
64 add_elf_notes(cpu);
65 }
66 /* Copy dump CPU store status info to absolute zero */
67 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
68}
69
70#endif
71
72/*
73 * Start kdump: We expect here that a store status has been done on our CPU
74 */
75static void __do_machine_kdump(void *image)
76{
77#ifdef CONFIG_CRASH_DUMP
78 int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
79
80 setup_regs();
81 __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
82 start_kdump(1);
83#endif
84}
85
86/*
87 * Check if kdump checksums are valid: We call purgatory with parameter "0"
88 */
89static int kdump_csum_valid(struct kimage *image)
90{
91#ifdef CONFIG_CRASH_DUMP
92 int (*start_kdump)(int) = (void *)image->start;
93 int rc;
94
95 __arch_local_irq_stnsm(0xfb); /* disable DAT */
96 rc = start_kdump(0);
97 __arch_local_irq_stosm(0x04); /* enable DAT */
98 return rc ? 0 : -EINVAL;
99#else
100 return -EINVAL;
101#endif
102}
103
104/*
105 * Map or unmap crashkernel memory
106 */
107static void crash_map_pages(int enable)
108{
109 unsigned long size = resource_size(&crashk_res);
110
111 BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
112 size % KEXEC_CRASH_MEM_ALIGN);
113 if (enable)
114 vmem_add_mapping(crashk_res.start, size);
115 else {
116 vmem_remove_mapping(crashk_res.start, size);
117 if (size)
118 os_info_crashkernel_add(crashk_res.start, size);
119 else
120 os_info_crashkernel_add(0, 0);
121 }
122}
123
124/*
125 * Map crashkernel memory
126 */
127void crash_map_reserved_pages(void)
128{
129 crash_map_pages(1);
130}
131
132/*
133 * Unmap crashkernel memory
134 */
135void crash_unmap_reserved_pages(void)
136{
137 crash_map_pages(0);
138}
139
140/*
141 * Give back memory to hypervisor before new kdump is loaded
142 */
143static int machine_kexec_prepare_kdump(void)
144{
145#ifdef CONFIG_CRASH_DUMP
146 if (MACHINE_IS_VM)
147 diag10_range(PFN_DOWN(crashk_res.start),
148 PFN_DOWN(crashk_res.end - crashk_res.start + 1));
149 return 0;
150#else
151 return -EINVAL;
152#endif
153}
154
155int machine_kexec_prepare(struct kimage *image) 30int machine_kexec_prepare(struct kimage *image)
156{ 31{
157 void *reboot_code_buffer; 32 void *reboot_code_buffer;
158 33
159 /* Can't replace kernel image since it is read-only. */ 34 /* Can't replace kernel image since it is read-only. */
160 if (ipl_flags & IPL_NSS_VALID) 35 if (ipl_flags & IPL_NSS_VALID)
161 return -EOPNOTSUPP; 36 return -ENOSYS;
162
163 if (image->type == KEXEC_TYPE_CRASH)
164 return machine_kexec_prepare_kdump();
165 37
166 /* We don't support anything but the default image type for now. */ 38 /* We don't support anything but the default image type for now. */
167 if (image->type != KEXEC_TYPE_DEFAULT) 39 if (image->type != KEXEC_TYPE_DEFAULT)
@@ -179,64 +51,28 @@ void machine_kexec_cleanup(struct kimage *image)
179{ 51{
180} 52}
181 53
182void arch_crash_save_vmcoreinfo(void)
183{
184 VMCOREINFO_SYMBOL(lowcore_ptr);
185 VMCOREINFO_SYMBOL(high_memory);
186 VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
187}
188
189void machine_shutdown(void) 54void machine_shutdown(void)
190{ 55{
191} 56}
192 57
193void machine_crash_shutdown(struct pt_regs *regs) 58static void __machine_kexec(void *data)
194{
195}
196
197/*
198 * Do normal kexec
199 */
200static void __do_machine_kexec(void *data)
201{ 59{
202 relocate_kernel_t data_mover; 60 relocate_kernel_t data_mover;
203 struct kimage *image = data; 61 struct kimage *image = data;
204 62
63 pfault_fini();
64 s390_reset_system();
65
205 data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); 66 data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
206 67
207 /* Call the moving routine */ 68 /* Call the moving routine */
208 (*data_mover)(&image->head, image->start); 69 (*data_mover)(&image->head, image->start);
70 for (;;);
209} 71}
210 72
211/*
212 * Reset system and call either kdump or normal kexec
213 */
214static void __machine_kexec(void *data)
215{
216 struct kimage *image = data;
217
218 __arch_local_irq_stosm(0x04); /* enable DAT */
219 pfault_fini();
220 tracing_off();
221 debug_locks_off();
222 if (image->type == KEXEC_TYPE_CRASH) {
223 lgr_info_log();
224 s390_reset_system(__do_machine_kdump, data);
225 } else {
226 s390_reset_system(__do_machine_kexec, data);
227 }
228 disabled_wait((unsigned long) __builtin_return_address(0));
229}
230
231/*
232 * Do either kdump or normal kexec. In case of kdump we first ask
233 * purgatory, if kdump checksums are valid.
234 */
235void machine_kexec(struct kimage *image) 73void machine_kexec(struct kimage *image)
236{ 74{
237 if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
238 return;
239 tracer_disable(); 75 tracer_disable();
240 smp_send_stop(); 76 smp_send_stop();
241 smp_call_ipl_cpu(__machine_kexec, image); 77 smp_switch_to_ipl_cpu(__machine_kexec, image);
242} 78}
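/*
 * Condensed sketch (kernel context, not standalone) of the hand-off done by
 * __machine_kexec() above: after quiescing the system, the kernel enters the
 * position-independent relocation stub in the control page through a typed
 * function pointer and never returns.
 */
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

static void enter_data_mover(struct kimage *image)
{
	relocate_kernel_t data_mover =
		(relocate_kernel_t) page_to_phys(image->control_code_page);

	pfault_fini();				/* stop pfault handshake */
	s390_reset_system();			/* reset I/O and CPU state */
	(*data_mover)(&image->head, image->start);	/* does not return */
	for (;;)
		;
}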
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 4567ce20d90..7e2c38ba137 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2008, 2009 2 * Copyright IBM Corp. 2008,2009
3 * 3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5 * 5 *
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index 11332193db3..f70cadec68f 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2008, 2009 2 * Copyright IBM Corp. 2008,2009
3 * 3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5 * 5 *
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 22d502e885e..0fbe4e32f7b 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -62,84 +62,3 @@ void detect_memory_layout(struct mem_chunk chunk[])
62 arch_local_irq_restore(flags); 62 arch_local_irq_restore(flags);
63} 63}
64EXPORT_SYMBOL(detect_memory_layout); 64EXPORT_SYMBOL(detect_memory_layout);
65
66/*
67 * Move memory chunks array from index "from" to index "to"
68 */
69static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
70{
71 int cnt = MEMORY_CHUNKS - to;
72
73 memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
74}
75
76/*
77 * Initialize memory chunk
78 */
79static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
80 unsigned long size, int type)
81{
82 chunk->type = type;
83 chunk->addr = addr;
84 chunk->size = size;
85}
86
87/*
88 * Create memory hole with given address, size, and type
89 */
90void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
91 unsigned long size, int type)
92{
93 unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
94 int i, ch_type;
95
96 for (i = 0; i < MEMORY_CHUNKS; i++) {
97 if (chunk[i].size == 0)
98 continue;
99
100 /* Define chunk properties */
101 ch_start = chunk[i].addr;
102 ch_size = chunk[i].size;
103 ch_end = ch_start + ch_size - 1;
104 ch_type = chunk[i].type;
105
106 /* Is memory chunk hit by memory hole? */
107 if (addr + size <= ch_start)
108 continue; /* No: memory hole in front of chunk */
109 if (addr > ch_end)
110 continue; /* No: memory hole after chunk */
111
112 /* Yes: Define local hole properties */
113 lh_start = max(addr, chunk[i].addr);
114 lh_end = min(addr + size - 1, ch_end);
115 lh_size = lh_end - lh_start + 1;
116
117 if (lh_start == ch_start && lh_end == ch_end) {
118 /* Hole covers complete memory chunk */
119 mem_chunk_init(&chunk[i], lh_start, lh_size, type);
120 } else if (lh_end == ch_end) {
121 /* Hole starts in memory chunk and covers chunk end */
122 mem_chunk_move(chunk, i + 1, i);
123 mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
124 ch_type);
125 mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
126 i += 1;
127 } else if (lh_start == ch_start) {
128 /* Hole ends in memory chunk */
129 mem_chunk_move(chunk, i + 1, i);
130 mem_chunk_init(&chunk[i], lh_start, lh_size, type);
131 mem_chunk_init(&chunk[i + 1], lh_end + 1,
132 ch_size - lh_size, ch_type);
133 break;
134 } else {
135 /* Hole splits memory chunk */
136 mem_chunk_move(chunk, i + 2, i);
137 mem_chunk_init(&chunk[i], ch_start,
138 lh_start - ch_start, ch_type);
139 mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
140 mem_chunk_init(&chunk[i + 2], lh_end + 1,
141 ch_end - lh_end, ch_type);
142 break;
143 }
144 }
145}
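/*
 * Standalone sketch of the "hole splits memory chunk" case handled by the
 * create_mem_hole() code removed above: a hole strictly inside a chunk
 * yields three pieces (low remainder, hole, high remainder); the boundary
 * cases collapse to two pieces or one. Types are simplified for
 * illustration.
 */
#include <stdio.h>

struct chunk { unsigned long addr, size; int type; };

int main(void)
{
	struct chunk c = { .addr = 0x0, .size = 0x1000, .type = 0 };
	unsigned long lh_start = 0x400, lh_size = 0x200;	/* local hole */
	unsigned long lh_end = lh_start + lh_size - 1;
	unsigned long ch_end = c.addr + c.size - 1;
	int hole_type = 1;					/* made up */

	printf("low:  addr=%#lx size=%#lx type=%d\n",
	       c.addr, lh_start - c.addr, c.type);
	printf("hole: addr=%#lx size=%#lx type=%d\n",
	       lh_start, lh_size, hole_type);
	printf("high: addr=%#lx size=%#lx type=%d\n",
	       lh_end + 1, ch_end - lh_end, c.type);
	return 0;
}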
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 4610deafd95..dfcb3436bad 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * Kernel module help for s390. 2 * arch/s390/kernel/module.c - Kernel module help for s390.
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright IBM Corp. 2002, 2003 5 * Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
6 * IBM Corporation
6 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 7 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * 9 *
@@ -44,17 +45,6 @@
44#define PLT_ENTRY_SIZE 20 45#define PLT_ENTRY_SIZE 20
45#endif /* CONFIG_64BIT */ 46#endif /* CONFIG_64BIT */
46 47
47#ifdef CONFIG_64BIT
48void *module_alloc(unsigned long size)
49{
50 if (PAGE_ALIGN(size) > MODULES_LEN)
51 return NULL;
52 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
53 GFP_KERNEL, PAGE_KERNEL, -1,
54 __builtin_return_address(0));
55}
56#endif
57
58/* Free memory returned from module_alloc */ 48/* Free memory returned from module_alloc */
59void module_free(struct module *mod, void *module_region) 49void module_free(struct module *mod, void *module_region)
60{ 50{
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 7918fbea36b..fab88431a06 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Machine check handler 2 * Machine check handler
3 * 3 *
4 * Copyright IBM Corp. 2000, 2009 4 * Copyright IBM Corp. 2000,2009
5 * Author(s): Ingo Adlung <adlung@de.ibm.com>, 5 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Cornelia Huck <cornelia.huck@de.ibm.com>, 7 * Cornelia Huck <cornelia.huck@de.ibm.com>,
@@ -30,7 +30,7 @@ struct mcck_struct {
30 30
31static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); 31static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
32 32
33static void s390_handle_damage(char *msg) 33static NORET_TYPE void s390_handle_damage(char *msg)
34{ 34{
35 smp_send_stop(); 35 smp_send_stop();
36 disabled_wait((unsigned long) __builtin_return_address(0)); 36 disabled_wait((unsigned long) __builtin_return_address(0));
@@ -254,7 +254,9 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
254 int umode; 254 int umode;
255 255
256 nmi_enter(); 256 nmi_enter();
257 inc_irq_stat(NMI_NMI); 257 s390_idle_check(regs, S390_lowcore.mcck_clock,
258 S390_lowcore.mcck_enter_timer);
259 kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
258 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
259 mcck = &__get_cpu_var(cpu_mcck); 261 mcck = &__get_cpu_var(cpu_mcck);
260 umode = user_mode(regs); 262 umode = user_mode(regs);
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
deleted file mode 100644
index 46480d81df0..00000000000
--- a/arch/s390/kernel/os_info.c
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * OS info memory interface
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
6 */
7
8#define KMSG_COMPONENT "os_info"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/crash_dump.h>
12#include <linux/kernel.h>
13#include <asm/checksum.h>
14#include <asm/lowcore.h>
15#include <asm/os_info.h>
16
17/*
18 * OS info structure has to be page aligned
19 */
20static struct os_info os_info __page_aligned_data;
21
22/*
23 * Compute checksum over OS info structure
24 */
25u32 os_info_csum(struct os_info *os_info)
26{
27 int size = sizeof(*os_info) - offsetof(struct os_info, version_major);
28 return csum_partial(&os_info->version_major, size, 0);
29}
30
31/*
32 * Add crashkernel info to OS info and update checksum
33 */
34void os_info_crashkernel_add(unsigned long base, unsigned long size)
35{
36 os_info.crashkernel_addr = (u64)(unsigned long)base;
37 os_info.crashkernel_size = (u64)(unsigned long)size;
38 os_info.csum = os_info_csum(&os_info);
39}
40
41/*
42 * Add OS info entry and update checksum
43 */
44void os_info_entry_add(int nr, void *ptr, u64 size)
45{
46 os_info.entry[nr].addr = (u64)(unsigned long)ptr;
47 os_info.entry[nr].size = size;
48 os_info.entry[nr].csum = csum_partial(ptr, size, 0);
49 os_info.csum = os_info_csum(&os_info);
50}
51
52/*
53 * Initialize OS info structure and set lowcore pointer
54 */
55void __init os_info_init(void)
56{
57 void *ptr = &os_info;
58
59 os_info.version_major = OS_INFO_VERSION_MAJOR;
60 os_info.version_minor = OS_INFO_VERSION_MINOR;
61 os_info.magic = OS_INFO_MAGIC;
62 os_info.csum = os_info_csum(&os_info);
63 mem_assign_absolute(S390_lowcore.os_info, (unsigned long) ptr);
64}
65
66#ifdef CONFIG_CRASH_DUMP
67
68static struct os_info *os_info_old;
69
70/*
71 * Allocate and copy OS info entry from oldmem
72 */
73static void os_info_old_alloc(int nr, int align)
74{
75 unsigned long addr, size = 0;
76 char *buf, *buf_align, *msg;
77 u32 csum;
78
79 addr = os_info_old->entry[nr].addr;
80 if (!addr) {
81 msg = "not available";
82 goto fail;
83 }
84 size = os_info_old->entry[nr].size;
85 buf = kmalloc(size + align - 1, GFP_KERNEL);
86 if (!buf) {
87 msg = "alloc failed";
88 goto fail;
89 }
90 buf_align = PTR_ALIGN(buf, align);
91 if (copy_from_oldmem(buf_align, (void *) addr, size)) {
92 msg = "copy failed";
93 goto fail_free;
94 }
95 csum = csum_partial(buf_align, size, 0);
96 if (csum != os_info_old->entry[nr].csum) {
97 msg = "checksum failed";
98 goto fail_free;
99 }
100 os_info_old->entry[nr].addr = (u64)(unsigned long)buf_align;
101 msg = "copied";
102 goto out;
103fail_free:
104 kfree(buf);
105fail:
106 os_info_old->entry[nr].addr = 0;
107out:
108 pr_info("entry %i: %s (addr=0x%lx size=%lu)\n",
109 nr, msg, addr, size);
110}
111
112/*
113 * Initialize os info and os info entries from oldmem
114 */
115static void os_info_old_init(void)
116{
117 static int os_info_init;
118 unsigned long addr;
119
120 if (os_info_init)
121 return;
122 if (!OLDMEM_BASE)
123 goto fail;
124 if (copy_from_oldmem(&addr, &S390_lowcore.os_info, sizeof(addr)))
125 goto fail;
126 if (addr == 0 || addr % PAGE_SIZE)
127 goto fail;
128 os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL);
129 if (!os_info_old)
130 goto fail;
131 if (copy_from_oldmem(os_info_old, (void *) addr, sizeof(*os_info_old)))
132 goto fail_free;
133 if (os_info_old->magic != OS_INFO_MAGIC)
134 goto fail_free;
135 if (os_info_old->csum != os_info_csum(os_info_old))
136 goto fail_free;
137 if (os_info_old->version_major > OS_INFO_VERSION_MAJOR)
138 goto fail_free;
139 os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
140 os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
141 pr_info("crashkernel: addr=0x%lx size=%lu\n",
142 (unsigned long) os_info_old->crashkernel_addr,
143 (unsigned long) os_info_old->crashkernel_size);
144 os_info_init = 1;
145 return;
146fail_free:
147 kfree(os_info_old);
148fail:
149 os_info_init = 1;
150 os_info_old = NULL;
151}
152
153/*
154 * Return pointer to OS info entry and its size
155 */
156void *os_info_old_entry(int nr, unsigned long *size)
157{
158 os_info_old_init();
159
160 if (!os_info_old)
161 return NULL;
162 if (!os_info_old->entry[nr].addr)
163 return NULL;
164 *size = (unsigned long) os_info_old->entry[nr].size;
165 return (void *)(unsigned long)os_info_old->entry[nr].addr;
166}
167#endif
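/*
 * Runnable sketch of the "checksum everything after the csum field" layout
 * used by the deleted os_info.c: the checksum is stored ahead of
 * version_major, and os_info_csum() covers sizeof(*os_info) minus that
 * offset, so the csum field never checksums itself. A toy additive sum
 * stands in for csum_partial(); the struct here is illustrative, not the
 * real struct os_info.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct os_info_demo {
	uint64_t magic;
	uint32_t csum;		/* covers everything from version_major on */
	uint16_t version_major;
	uint16_t version_minor;
	uint64_t payload;
};

static uint32_t toy_csum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

static uint32_t demo_csum(struct os_info_demo *oi)
{
	size_t size = sizeof(*oi) -
		      offsetof(struct os_info_demo, version_major);

	return toy_csum(&oi->version_major, size);
}

int main(void)
{
	struct os_info_demo oi = { .magic = 0x4f53494e, .version_major = 1 };

	oi.csum = demo_csum(&oi);
	printf("csum=%#x\n", oi.csum);
	return 0;
}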
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
deleted file mode 100644
index 86ec7447e1f..00000000000
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ /dev/null
@@ -1,694 +0,0 @@
1/*
2 * Performance event support for s390x - CPU-measurement Counter Facility
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License (version 2 only)
9 * as published by the Free Software Foundation.
10 */
11#define KMSG_COMPONENT "cpum_cf"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/kernel.h>
15#include <linux/kernel_stat.h>
16#include <linux/perf_event.h>
17#include <linux/percpu.h>
18#include <linux/notifier.h>
19#include <linux/init.h>
20#include <linux/export.h>
21#include <asm/ctl_reg.h>
22#include <asm/irq.h>
23#include <asm/cpu_mf.h>
24
25/* CPU-measurement counter facility supports these CPU counter sets:
26 * The counter numbers for each set are:
27 * Basic counter set: 0-31
28 * Problem-state counter set: 32-63
29 * Crypto-activity counter set: 64-127
30 * Extended counter set: 128-159
31 */
32enum cpumf_ctr_set {
33 /* CPU counter sets */
34 CPUMF_CTR_SET_BASIC = 0,
35 CPUMF_CTR_SET_USER = 1,
36 CPUMF_CTR_SET_CRYPTO = 2,
37 CPUMF_CTR_SET_EXT = 3,
38
39 /* Maximum number of counter sets */
40 CPUMF_CTR_SET_MAX,
41};
42
43#define CPUMF_LCCTL_ENABLE_SHIFT 16
44#define CPUMF_LCCTL_ACTCTL_SHIFT 0
45static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
46 [CPUMF_CTR_SET_BASIC] = 0x02,
47 [CPUMF_CTR_SET_USER] = 0x04,
48 [CPUMF_CTR_SET_CRYPTO] = 0x08,
49 [CPUMF_CTR_SET_EXT] = 0x01,
50};
51
52static void ctr_set_enable(u64 *state, int ctr_set)
53{
54 *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
55}
56static void ctr_set_disable(u64 *state, int ctr_set)
57{
58 *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
59}
60static void ctr_set_start(u64 *state, int ctr_set)
61{
62 *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
63}
64static void ctr_set_stop(u64 *state, int ctr_set)
65{
66 *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
67}
68
69/* Local CPUMF event structure */
70struct cpu_hw_events {
71 struct cpumf_ctr_info info;
72 atomic_t ctr_set[CPUMF_CTR_SET_MAX];
73 u64 state, tx_state;
74 unsigned int flags;
75};
76static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
77 .ctr_set = {
78 [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
79 [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
80 [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
81 [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0),
82 },
83 .state = 0,
84 .flags = 0,
85};
86
87static int get_counter_set(u64 event)
88{
89 int set = -1;
90
91 if (event < 32)
92 set = CPUMF_CTR_SET_BASIC;
93 else if (event < 64)
94 set = CPUMF_CTR_SET_USER;
95 else if (event < 128)
96 set = CPUMF_CTR_SET_CRYPTO;
97 else if (event < 256)
98 set = CPUMF_CTR_SET_EXT;
99
100 return set;
101}
102
103static int validate_event(const struct hw_perf_event *hwc)
104{
105 switch (hwc->config_base) {
106 case CPUMF_CTR_SET_BASIC:
107 case CPUMF_CTR_SET_USER:
108 case CPUMF_CTR_SET_CRYPTO:
109 case CPUMF_CTR_SET_EXT:
110 /* check for reserved counters */
111 if ((hwc->config >= 6 && hwc->config <= 31) ||
112 (hwc->config >= 38 && hwc->config <= 63) ||
113 (hwc->config >= 80 && hwc->config <= 127))
114 return -EOPNOTSUPP;
115 break;
116 default:
117 return -EINVAL;
118 }
119
120 return 0;
121}
122
123static int validate_ctr_version(const struct hw_perf_event *hwc)
124{
125 struct cpu_hw_events *cpuhw;
126 int err = 0;
127
128 cpuhw = &get_cpu_var(cpu_hw_events);
129
130 /* check required version for counter sets */
131 switch (hwc->config_base) {
132 case CPUMF_CTR_SET_BASIC:
133 case CPUMF_CTR_SET_USER:
134 if (cpuhw->info.cfvn < 1)
135 err = -EOPNOTSUPP;
136 break;
137 case CPUMF_CTR_SET_CRYPTO:
138 case CPUMF_CTR_SET_EXT:
139 if (cpuhw->info.csvn < 1)
140 err = -EOPNOTSUPP;
141 if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
142 (cpuhw->info.csvn == 2 && hwc->config > 175) ||
143 (cpuhw->info.csvn > 2 && hwc->config > 255))
144 err = -EOPNOTSUPP;
145 break;
146 }
147
148 put_cpu_var(cpu_hw_events);
149 return err;
150}
151
152static int validate_ctr_auth(const struct hw_perf_event *hwc)
153{
154 struct cpu_hw_events *cpuhw;
155 u64 ctrs_state;
156 int err = 0;
157
158 cpuhw = &get_cpu_var(cpu_hw_events);
159
160 /* check authorization for cpu counter sets */
161 ctrs_state = cpumf_state_ctl[hwc->config_base];
162 if (!(ctrs_state & cpuhw->info.auth_ctl))
163 err = -EPERM;
164
165 put_cpu_var(cpu_hw_events);
166 return err;
167}
168
169/*
170 * Change the CPUMF state to active.
171 * Enable and activate the CPU-counter sets according
172 * to the per-cpu control state.
173 */
174static void cpumf_pmu_enable(struct pmu *pmu)
175{
176 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
177 int err;
178
179 if (cpuhw->flags & PMU_F_ENABLED)
180 return;
181
182 err = lcctl(cpuhw->state);
183 if (err) {
184 pr_err("Enabling the performance measuring unit "
185 "failed with rc=%x\n", err);
186 return;
187 }
188
189 cpuhw->flags |= PMU_F_ENABLED;
190}
191
192/*
193 * Change the CPUMF state to inactive.
194 * Disable and enable (inactive) the CPU-counter sets according
195 * to the per-cpu control state.
196 */
197static void cpumf_pmu_disable(struct pmu *pmu)
198{
199 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
200 int err;
201 u64 inactive;
202
203 if (!(cpuhw->flags & PMU_F_ENABLED))
204 return;
205
206 inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
207 err = lcctl(inactive);
208 if (err) {
209 pr_err("Disabling the performance measuring unit "
210 "failed with rc=%x\n", err);
211 return;
212 }
213
214 cpuhw->flags &= ~PMU_F_ENABLED;
215}
216
217
218/* Number of perf events counting hardware events */
219static atomic_t num_events = ATOMIC_INIT(0);
220/* Used to avoid races in calling reserve/release_cpumf_hardware */
221static DEFINE_MUTEX(pmc_reserve_mutex);
222
223/* CPU-measurement alerts for the counter facility */
224static void cpumf_measurement_alert(struct ext_code ext_code,
225 unsigned int alert, unsigned long unused)
226{
227 struct cpu_hw_events *cpuhw;
228
229 if (!(alert & CPU_MF_INT_CF_MASK))
230 return;
231
232 inc_irq_stat(IRQEXT_CMC);
233 cpuhw = &__get_cpu_var(cpu_hw_events);
234
235 /* Measurement alerts are shared and might happen when the PMU
236 * is not reserved. Ignore these alerts in this case. */
237 if (!(cpuhw->flags & PMU_F_RESERVED))
238 return;
239
240 /* counter authorization change alert */
241 if (alert & CPU_MF_INT_CF_CACA)
242 qctri(&cpuhw->info);
243
244 /* loss of counter data alert */
245 if (alert & CPU_MF_INT_CF_LCDA)
246 pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
247}
248
249#define PMC_INIT 0
250#define PMC_RELEASE 1
251static void setup_pmc_cpu(void *flags)
252{
253 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
254
255 switch (*((int *) flags)) {
256 case PMC_INIT:
257 memset(&cpuhw->info, 0, sizeof(cpuhw->info));
258 qctri(&cpuhw->info);
259 cpuhw->flags |= PMU_F_RESERVED;
260 break;
261
262 case PMC_RELEASE:
263 cpuhw->flags &= ~PMU_F_RESERVED;
264 break;
265 }
266
267 /* Disable CPU counter sets */
268 lcctl(0);
269}
270
271/* Initialize the CPU-measurement facility */
272static int reserve_pmc_hardware(void)
273{
274 int flags = PMC_INIT;
275
276 on_each_cpu(setup_pmc_cpu, &flags, 1);
277 measurement_alert_subclass_register();
278
279 return 0;
280}
281
282/* Release the CPU-measurement facility */
283static void release_pmc_hardware(void)
284{
285 int flags = PMC_RELEASE;
286
287 on_each_cpu(setup_pmc_cpu, &flags, 1);
288 measurement_alert_subclass_unregister();
289}
290
291/* Release the PMU if event is the last perf event */
292static void hw_perf_event_destroy(struct perf_event *event)
293{
294 if (!atomic_add_unless(&num_events, -1, 1)) {
295 mutex_lock(&pmc_reserve_mutex);
296 if (atomic_dec_return(&num_events) == 0)
297 release_pmc_hardware();
298 mutex_unlock(&pmc_reserve_mutex);
299 }
300}
301
302/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
303static const int cpumf_generic_events_basic[] = {
304 [PERF_COUNT_HW_CPU_CYCLES] = 0,
305 [PERF_COUNT_HW_INSTRUCTIONS] = 1,
306 [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
307 [PERF_COUNT_HW_CACHE_MISSES] = -1,
308 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
309 [PERF_COUNT_HW_BRANCH_MISSES] = -1,
310 [PERF_COUNT_HW_BUS_CYCLES] = -1,
311};
312/* CPUMF <-> perf event mappings for userspace (problem-state set) */
313static const int cpumf_generic_events_user[] = {
314 [PERF_COUNT_HW_CPU_CYCLES] = 32,
315 [PERF_COUNT_HW_INSTRUCTIONS] = 33,
316 [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
317 [PERF_COUNT_HW_CACHE_MISSES] = -1,
318 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
319 [PERF_COUNT_HW_BRANCH_MISSES] = -1,
320 [PERF_COUNT_HW_BUS_CYCLES] = -1,
321};
322
323static int __hw_perf_event_init(struct perf_event *event)
324{
325 struct perf_event_attr *attr = &event->attr;
326 struct hw_perf_event *hwc = &event->hw;
327 int err;
328 u64 ev;
329
330 switch (attr->type) {
331 case PERF_TYPE_RAW:
332 /* Raw events are used to access counters directly,
333 * hence do not permit excludes */
334 if (attr->exclude_kernel || attr->exclude_user ||
335 attr->exclude_hv)
336 return -EOPNOTSUPP;
337 ev = attr->config;
338 break;
339
340 case PERF_TYPE_HARDWARE:
341 ev = attr->config;
342 /* Count user space (problem-state) only */
343 if (!attr->exclude_user && attr->exclude_kernel) {
344 if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
345 return -EOPNOTSUPP;
346 ev = cpumf_generic_events_user[ev];
347
348 /* No support for kernel space counters only */
349 } else if (!attr->exclude_kernel && attr->exclude_user) {
350 return -EOPNOTSUPP;
351
352 /* Count user and kernel space */
353 } else {
354 if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
355 return -EOPNOTSUPP;
356 ev = cpumf_generic_events_basic[ev];
357 }
358 break;
359
360 default:
361 return -ENOENT;
362 }
363
364 if (ev == -1)
365 return -ENOENT;
366
367 if (ev >= PERF_CPUM_CF_MAX_CTR)
368 return -EINVAL;
369
370 /* The CPU measurement counter facility does not have any interrupts
371 * to do sampling. Sampling must be provided by external means,
372 * for example, by timers.
373 */
374 if (hwc->sample_period)
375 return -EINVAL;
376
377 /* Use the hardware perf event structure to store the counter number
378 * in 'config' member and the counter set to which the counter belongs
379 * in the 'config_base'. The counter set (config_base) is then used
380 * to enable/disable the counters.
381 */
382 hwc->config = ev;
383 hwc->config_base = get_counter_set(ev);
384
385 /* Validate the counter that is assigned to this event.
386 * Because the counter facility can use numerous counters at the
387 * same time without constraints, it is not necessary to explicitly
388 * validate event groups (event->group_leader != event).
389 */
390 err = validate_event(hwc);
391 if (err)
392 return err;
393
394 /* Initialize for using the CPU-measurement counter facility */
395 if (!atomic_inc_not_zero(&num_events)) {
396 mutex_lock(&pmc_reserve_mutex);
397 if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
398 err = -EBUSY;
399 else
400 atomic_inc(&num_events);
401 mutex_unlock(&pmc_reserve_mutex);
402 }
403 event->destroy = hw_perf_event_destroy;
404
405 /* Finally, validate version and authorization of the counter set */
406 err = validate_ctr_auth(hwc);
407 if (!err)
408 err = validate_ctr_version(hwc);
409
410 return err;
411}
412
413static int cpumf_pmu_event_init(struct perf_event *event)
414{
415 int err;
416
417 switch (event->attr.type) {
418 case PERF_TYPE_HARDWARE:
419 case PERF_TYPE_HW_CACHE:
420 case PERF_TYPE_RAW:
421 err = __hw_perf_event_init(event);
422 break;
423 default:
424 return -ENOENT;
425 }
426
427 if (unlikely(err) && event->destroy)
428 event->destroy(event);
429
430 return err;
431}
432
433static int hw_perf_event_reset(struct perf_event *event)
434{
435 u64 prev, new;
436 int err;
437
438 do {
439 prev = local64_read(&event->hw.prev_count);
440 err = ecctr(event->hw.config, &new);
441 if (err) {
442 if (err != 3)
443 break;
444 /* The counter is not (yet) available. This
445 * might happen if the counter set to which
446 * this counter belongs is in the disabled
447 * state.
448 */
449 new = 0;
450 }
451 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
452
453 return err;
454}
455
456static int hw_perf_event_update(struct perf_event *event)
457{
458 u64 prev, new, delta;
459 int err;
460
461 do {
462 prev = local64_read(&event->hw.prev_count);
463 err = ecctr(event->hw.config, &new);
464 if (err)
465 goto out;
466 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
467
468 delta = (prev <= new) ? new - prev
469 : (-1ULL - prev) + new + 1; /* overflow */
470 local64_add(delta, &event->count);
471out:
472 return err;
473}
474
475static void cpumf_pmu_read(struct perf_event *event)
476{
477 if (event->hw.state & PERF_HES_STOPPED)
478 return;
479
480 hw_perf_event_update(event);
481}
482
483static void cpumf_pmu_start(struct perf_event *event, int flags)
484{
485 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
486 struct hw_perf_event *hwc = &event->hw;
487
488 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
489 return;
490
491 if (WARN_ON_ONCE(hwc->config == -1))
492 return;
493
494 if (flags & PERF_EF_RELOAD)
495 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
496
497 hwc->state = 0;
498
499 /* (Re-)enable and activate the counter set */
500 ctr_set_enable(&cpuhw->state, hwc->config_base);
501 ctr_set_start(&cpuhw->state, hwc->config_base);
502
503 /* The counter set to which this counter belongs can already be active.
504 * Because all counters in a set are active, the event->hw.prev_count
505 * needs to be synchronized. At this point, the counter set can be in
506 * the inactive or disabled state.
507 */
508 hw_perf_event_reset(event);
509
510 /* increment refcount for this counter set */
511 atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
512}
513
514static void cpumf_pmu_stop(struct perf_event *event, int flags)
515{
516 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
517 struct hw_perf_event *hwc = &event->hw;
518
519 if (!(hwc->state & PERF_HES_STOPPED)) {
520 /* Decrement reference count for this counter set and if this
521 * is the last used counter in the set, clear activation
522 * control and set the counter set state to inactive.
523 */
524 if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
525 ctr_set_stop(&cpuhw->state, hwc->config_base);
526 event->hw.state |= PERF_HES_STOPPED;
527 }
528
529 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
530 hw_perf_event_update(event);
531 event->hw.state |= PERF_HES_UPTODATE;
532 }
533}
534
535static int cpumf_pmu_add(struct perf_event *event, int flags)
536{
537 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
538
539 /* Check authorization for the counter set to which this
540 * counter belongs.
541 * For group events transaction, the authorization check is
542 * done in cpumf_pmu_commit_txn().
543 */
544 if (!(cpuhw->flags & PERF_EVENT_TXN))
545 if (validate_ctr_auth(&event->hw))
546 return -EPERM;
547
548 ctr_set_enable(&cpuhw->state, event->hw.config_base);
549 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
550
551 if (flags & PERF_EF_START)
552 cpumf_pmu_start(event, PERF_EF_RELOAD);
553
554 perf_event_update_userpage(event);
555
556 return 0;
557}
558
559static void cpumf_pmu_del(struct perf_event *event, int flags)
560{
561 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
562
563 cpumf_pmu_stop(event, PERF_EF_UPDATE);
564
565 /* Check if any counter in the counter set is still used. If not used,
566 * change the counter set to the disabled state. This also clears the
567 * content of all counters in the set.
568 *
569 * When a new perf event has been added but not yet started, this can
570 * clear enable control and reset all counters in a set. Therefore,
571 * cpumf_pmu_start() always has to reenable a counter set.
572 */
573 if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
574 ctr_set_disable(&cpuhw->state, event->hw.config_base);
575
576 perf_event_update_userpage(event);
577}
578
579/*
580 * Start group events scheduling transaction.
581 * Set flags to perform a single test at commit time.
582 */
583static void cpumf_pmu_start_txn(struct pmu *pmu)
584{
585 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
586
587 perf_pmu_disable(pmu);
588 cpuhw->flags |= PERF_EVENT_TXN;
589 cpuhw->tx_state = cpuhw->state;
590}
591
592/*
593 * Stop and cancel a group events scheduling transaction.
594 * Assumes cpumf_pmu_del() is called for each successful added
595 * cpumf_pmu_add() during the transaction.
596 */
597static void cpumf_pmu_cancel_txn(struct pmu *pmu)
598{
599 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
600
601 WARN_ON(cpuhw->tx_state != cpuhw->state);
602
603 cpuhw->flags &= ~PERF_EVENT_TXN;
604 perf_pmu_enable(pmu);
605}
606
607/*
608 * Commit the group events scheduling transaction. On success, the
609 * transaction is closed. On error, the transaction is kept open
610 * until cpumf_pmu_cancel_txn() is called.
611 */
612static int cpumf_pmu_commit_txn(struct pmu *pmu)
613{
614 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
615 u64 state;
616
617 /* check if the updated state can be scheduled */
618 state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
619 state >>= CPUMF_LCCTL_ENABLE_SHIFT;
620 if ((state & cpuhw->info.auth_ctl) != state)
621 return -EPERM;
622
623 cpuhw->flags &= ~PERF_EVENT_TXN;
624 perf_pmu_enable(pmu);
625 return 0;
626}
627
628/* Performance monitoring unit for s390x */
629static struct pmu cpumf_pmu = {
630 .pmu_enable = cpumf_pmu_enable,
631 .pmu_disable = cpumf_pmu_disable,
632 .event_init = cpumf_pmu_event_init,
633 .add = cpumf_pmu_add,
634 .del = cpumf_pmu_del,
635 .start = cpumf_pmu_start,
636 .stop = cpumf_pmu_stop,
637 .read = cpumf_pmu_read,
638 .start_txn = cpumf_pmu_start_txn,
639 .commit_txn = cpumf_pmu_commit_txn,
640 .cancel_txn = cpumf_pmu_cancel_txn,
641};
642
643static int __cpuinit cpumf_pmu_notifier(struct notifier_block *self,
644 unsigned long action, void *hcpu)
645{
646 unsigned int cpu = (long) hcpu;
647 int flags;
648
649 switch (action & ~CPU_TASKS_FROZEN) {
650 case CPU_ONLINE:
651 flags = PMC_INIT;
652 smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
653 break;
654 case CPU_DOWN_PREPARE:
655 flags = PMC_RELEASE;
656 smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
657 break;
658 default:
659 break;
660 }
661
662 return NOTIFY_OK;
663}
664
665static int __init cpumf_pmu_init(void)
666{
667 int rc;
668
669 if (!cpum_cf_avail())
670 return -ENODEV;
671
672 /* clear bit 15 of cr0 to unauthorize problem-state to
673 * extract measurement counters */
674 ctl_clear_bit(0, 48);
675
676 /* register handler for measurement-alert interruptions */
677 rc = register_external_interrupt(0x1407, cpumf_measurement_alert);
678 if (rc) {
679 pr_err("Registering for CPU-measurement alerts "
680 "failed with rc=%i\n", rc);
681 goto out;
682 }
683
684 rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
685 if (rc) {
686 pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
687 unregister_external_interrupt(0x1407, cpumf_measurement_alert);
688 goto out;
689 }
690 perf_cpu_notifier(cpumf_pmu_notifier);
691out:
692 return rc;
693}
694early_initcall(cpumf_pmu_init);
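/*
 * Runnable sketch of the wraparound-safe delta computation used by
 * hw_perf_event_update() in the deleted file above: if the 64-bit counter
 * wrapped between reads, the delta is the distance from prev to the top of
 * the range plus the new value plus one.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t counter_delta(uint64_t prev, uint64_t new)
{
	return (prev <= new) ? new - prev
			     : (UINT64_MAX - prev) + new + 1;	/* overflow */
}

int main(void)
{
	/* plain case: 250 - 100 */
	printf("%llu\n", (unsigned long long) counter_delta(100, 250));
	/* wrapped case: max-1 -> max -> 0 -> 1 -> 2 -> 3 is five steps */
	printf("%llu\n", (unsigned long long) counter_delta(UINT64_MAX - 1, 3));
	return 0;
}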
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
deleted file mode 100644
index f58f37f6682..00000000000
--- a/arch/s390/kernel/perf_event.c
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * Performance event support for s390x
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License (version 2 only)
9 * as published by the Free Software Foundation.
10 */
11#define KMSG_COMPONENT "perf"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/kernel.h>
15#include <linux/perf_event.h>
16#include <linux/percpu.h>
17#include <linux/export.h>
18#include <asm/irq.h>
19#include <asm/cpu_mf.h>
20#include <asm/lowcore.h>
21#include <asm/processor.h>
22
23const char *perf_pmu_name(void)
24{
25 if (cpum_cf_avail() || cpum_sf_avail())
26 return "CPU-measurement facilities (CPUMF)";
27 return "pmu";
28}
29EXPORT_SYMBOL(perf_pmu_name);
30
31int perf_num_counters(void)
32{
33 int num = 0;
34
35 if (cpum_cf_avail())
36 num += PERF_CPUM_CF_MAX_CTR;
37
38 return num;
39}
40EXPORT_SYMBOL(perf_num_counters);
41
42void perf_event_print_debug(void)
43{
44 struct cpumf_ctr_info cf_info;
45 unsigned long flags;
46 int cpu;
47
48 if (!cpum_cf_avail())
49 return;
50
51 local_irq_save(flags);
52
53 cpu = smp_processor_id();
54 memset(&cf_info, 0, sizeof(cf_info));
55 if (!qctri(&cf_info)) {
56 pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
57 cpu, cf_info.cfvn, cf_info.csvn,
58 cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
59 print_hex_dump_bytes("CPUMF Query: ", DUMP_PREFIX_OFFSET,
60 &cf_info, sizeof(cf_info));
61 }
62
63 local_irq_restore(flags);
64}
65
66/* See also arch/s390/kernel/traps.c */
67static unsigned long __store_trace(struct perf_callchain_entry *entry,
68 unsigned long sp,
69 unsigned long low, unsigned long high)
70{
71 struct stack_frame *sf;
72 struct pt_regs *regs;
73
74 while (1) {
75 sp = sp & PSW_ADDR_INSN;
76 if (sp < low || sp > high - sizeof(*sf))
77 return sp;
78 sf = (struct stack_frame *) sp;
79 perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
80 /* Follow the backchain. */
81 while (1) {
82 low = sp;
83 sp = sf->back_chain & PSW_ADDR_INSN;
84 if (!sp)
85 break;
86 if (sp <= low || sp > high - sizeof(*sf))
87 return sp;
88 sf = (struct stack_frame *) sp;
89 perf_callchain_store(entry,
90 sf->gprs[8] & PSW_ADDR_INSN);
91 }
92 /* Zero backchain detected, check for interrupt frame. */
93 sp = (unsigned long) (sf + 1);
94 if (sp <= low || sp > high - sizeof(*regs))
95 return sp;
96 regs = (struct pt_regs *) sp;
97 perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
98 low = sp;
99 sp = regs->gprs[15];
100 }
101}
102
103void perf_callchain_kernel(struct perf_callchain_entry *entry,
104 struct pt_regs *regs)
105{
106 unsigned long head;
107 struct stack_frame *head_sf;
108
109 if (user_mode(regs))
110 return;
111
112 head = regs->gprs[15];
113 head_sf = (struct stack_frame *) head;
114
115 if (!head_sf || !head_sf->back_chain)
116 return;
117
118 head = head_sf->back_chain;
119 head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
120 S390_lowcore.async_stack);
121
122 __store_trace(entry, head, S390_lowcore.thread_info,
123 S390_lowcore.thread_info + THREAD_SIZE);
124}
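/*
 * Minimal userspace model of the backchain walk performed by the deleted
 * __store_trace(): on s390, each stack frame records its caller's frame in
 * back_chain and a return address in gprs[8]; a zero backchain ends the
 * walk. Types and values here are simplified for illustration.
 */
#include <stdio.h>

struct frame {
	struct frame *back_chain;	/* caller's frame, NULL at the bottom */
	unsigned long ret_addr;		/* saved return address (gprs[8]) */
};

static void walk(const struct frame *sf)
{
	while (sf) {
		printf("return address: %#lx\n", sf->ret_addr);
		sf = sf->back_chain;
	}
}

int main(void)
{
	struct frame bottom = { NULL, 0x1000 };
	struct frame top = { &bottom, 0x2000 };

	walk(&top);
	return 0;
}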
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
deleted file mode 100644
index 14bdecb6192..00000000000
--- a/arch/s390/kernel/pgm_check.S
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * Program check table.
3 *
4 * Copyright IBM Corp. 2012
5 */
6
7#include <linux/linkage.h>
8
9#ifdef CONFIG_32BIT
10#define PGM_CHECK_64BIT(handler) .long default_trap_handler
11#else
12#define PGM_CHECK_64BIT(handler) .long handler
13#endif
14
15#define PGM_CHECK(handler) .long handler
16#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
17
18/*
19 * The program check table contains exactly 128 (0x00-0x7f) entries. Each
20 * line defines the 31 and/or 64 bit function to be called corresponding
21 * to the program check interruption code.
22 */
23.section .rodata, "a"
24ENTRY(pgm_check_table)
25PGM_CHECK_DEFAULT /* 00 */
26PGM_CHECK(illegal_op) /* 01 */
27PGM_CHECK(privileged_op) /* 02 */
28PGM_CHECK(execute_exception) /* 03 */
29PGM_CHECK(do_protection_exception) /* 04 */
30PGM_CHECK(addressing_exception) /* 05 */
31PGM_CHECK(specification_exception) /* 06 */
32PGM_CHECK(data_exception) /* 07 */
33PGM_CHECK(overflow_exception) /* 08 */
34PGM_CHECK(divide_exception) /* 09 */
35PGM_CHECK(overflow_exception) /* 0a */
36PGM_CHECK(divide_exception) /* 0b */
37PGM_CHECK(hfp_overflow_exception) /* 0c */
38PGM_CHECK(hfp_underflow_exception) /* 0d */
39PGM_CHECK(hfp_significance_exception) /* 0e */
40PGM_CHECK(hfp_divide_exception) /* 0f */
41PGM_CHECK(do_dat_exception) /* 10 */
42PGM_CHECK(do_dat_exception) /* 11 */
43PGM_CHECK(translation_exception) /* 12 */
44PGM_CHECK(special_op_exception) /* 13 */
45PGM_CHECK_DEFAULT /* 14 */
46PGM_CHECK(operand_exception) /* 15 */
47PGM_CHECK_DEFAULT /* 16 */
48PGM_CHECK_DEFAULT /* 17 */
49PGM_CHECK_64BIT(transaction_exception) /* 18 */
50PGM_CHECK_DEFAULT /* 19 */
51PGM_CHECK_DEFAULT /* 1a */
52PGM_CHECK_DEFAULT /* 1b */
53PGM_CHECK(space_switch_exception) /* 1c */
54PGM_CHECK(hfp_sqrt_exception) /* 1d */
55PGM_CHECK_DEFAULT /* 1e */
56PGM_CHECK_DEFAULT /* 1f */
57PGM_CHECK_DEFAULT /* 20 */
58PGM_CHECK_DEFAULT /* 21 */
59PGM_CHECK_DEFAULT /* 22 */
60PGM_CHECK_DEFAULT /* 23 */
61PGM_CHECK_DEFAULT /* 24 */
62PGM_CHECK_DEFAULT /* 25 */
63PGM_CHECK_DEFAULT /* 26 */
64PGM_CHECK_DEFAULT /* 27 */
65PGM_CHECK_DEFAULT /* 28 */
66PGM_CHECK_DEFAULT /* 29 */
67PGM_CHECK_DEFAULT /* 2a */
68PGM_CHECK_DEFAULT /* 2b */
69PGM_CHECK_DEFAULT /* 2c */
70PGM_CHECK_DEFAULT /* 2d */
71PGM_CHECK_DEFAULT /* 2e */
72PGM_CHECK_DEFAULT /* 2f */
73PGM_CHECK_DEFAULT /* 30 */
74PGM_CHECK_DEFAULT /* 31 */
75PGM_CHECK_DEFAULT /* 32 */
76PGM_CHECK_DEFAULT /* 33 */
77PGM_CHECK_DEFAULT /* 34 */
78PGM_CHECK_DEFAULT /* 35 */
79PGM_CHECK_DEFAULT /* 36 */
80PGM_CHECK_DEFAULT /* 37 */
81PGM_CHECK_64BIT(do_asce_exception) /* 38 */
82PGM_CHECK_64BIT(do_dat_exception) /* 39 */
83PGM_CHECK_64BIT(do_dat_exception) /* 3a */
84PGM_CHECK_64BIT(do_dat_exception) /* 3b */
85PGM_CHECK_DEFAULT /* 3c */
86PGM_CHECK_DEFAULT /* 3d */
87PGM_CHECK_DEFAULT /* 3e */
88PGM_CHECK_DEFAULT /* 3f */
89PGM_CHECK_DEFAULT /* 40 */
90PGM_CHECK_DEFAULT /* 41 */
91PGM_CHECK_DEFAULT /* 42 */
92PGM_CHECK_DEFAULT /* 43 */
93PGM_CHECK_DEFAULT /* 44 */
94PGM_CHECK_DEFAULT /* 45 */
95PGM_CHECK_DEFAULT /* 46 */
96PGM_CHECK_DEFAULT /* 47 */
97PGM_CHECK_DEFAULT /* 48 */
98PGM_CHECK_DEFAULT /* 49 */
99PGM_CHECK_DEFAULT /* 4a */
100PGM_CHECK_DEFAULT /* 4b */
101PGM_CHECK_DEFAULT /* 4c */
102PGM_CHECK_DEFAULT /* 4d */
103PGM_CHECK_DEFAULT /* 4e */
104PGM_CHECK_DEFAULT /* 4f */
105PGM_CHECK_DEFAULT /* 50 */
106PGM_CHECK_DEFAULT /* 51 */
107PGM_CHECK_DEFAULT /* 52 */
108PGM_CHECK_DEFAULT /* 53 */
109PGM_CHECK_DEFAULT /* 54 */
110PGM_CHECK_DEFAULT /* 55 */
111PGM_CHECK_DEFAULT /* 56 */
112PGM_CHECK_DEFAULT /* 57 */
113PGM_CHECK_DEFAULT /* 58 */
114PGM_CHECK_DEFAULT /* 59 */
115PGM_CHECK_DEFAULT /* 5a */
116PGM_CHECK_DEFAULT /* 5b */
117PGM_CHECK_DEFAULT /* 5c */
118PGM_CHECK_DEFAULT /* 5d */
119PGM_CHECK_DEFAULT /* 5e */
120PGM_CHECK_DEFAULT /* 5f */
121PGM_CHECK_DEFAULT /* 60 */
122PGM_CHECK_DEFAULT /* 61 */
123PGM_CHECK_DEFAULT /* 62 */
124PGM_CHECK_DEFAULT /* 63 */
125PGM_CHECK_DEFAULT /* 64 */
126PGM_CHECK_DEFAULT /* 65 */
127PGM_CHECK_DEFAULT /* 66 */
128PGM_CHECK_DEFAULT /* 67 */
129PGM_CHECK_DEFAULT /* 68 */
130PGM_CHECK_DEFAULT /* 69 */
131PGM_CHECK_DEFAULT /* 6a */
132PGM_CHECK_DEFAULT /* 6b */
133PGM_CHECK_DEFAULT /* 6c */
134PGM_CHECK_DEFAULT /* 6d */
135PGM_CHECK_DEFAULT /* 6e */
136PGM_CHECK_DEFAULT /* 6f */
137PGM_CHECK_DEFAULT /* 70 */
138PGM_CHECK_DEFAULT /* 71 */
139PGM_CHECK_DEFAULT /* 72 */
140PGM_CHECK_DEFAULT /* 73 */
141PGM_CHECK_DEFAULT /* 74 */
142PGM_CHECK_DEFAULT /* 75 */
143PGM_CHECK_DEFAULT /* 76 */
144PGM_CHECK_DEFAULT /* 77 */
145PGM_CHECK_DEFAULT /* 78 */
146PGM_CHECK_DEFAULT /* 79 */
147PGM_CHECK_DEFAULT /* 7a */
148PGM_CHECK_DEFAULT /* 7b */
149PGM_CHECK_DEFAULT /* 7c */
150PGM_CHECK_DEFAULT /* 7d */
151PGM_CHECK_DEFAULT /* 7e */
152PGM_CHECK_DEFAULT /* 7f */
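/*
 * Hypothetical C view (kernel context, not standalone) of how the table
 * above is consumed: the low byte of the program check interruption code
 * indexes the 128-entry handler array, so every code 0x00-0x7f resolves to
 * either a specific handler or default_trap_handler. The real dispatch is
 * done in the low-level entry code; the names below are illustrative.
 */
struct pt_regs;

typedef void (*pgm_check_handler_t)(struct pt_regs *);
extern pgm_check_handler_t pgm_check_table[128];

static void dispatch_pgm_check(struct pt_regs *regs, unsigned int int_code)
{
	pgm_check_table[int_code & 0x7f](regs);
}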
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 536d64579d9..541a7509fae 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This file handles the architecture dependent parts of process handling. 2 * This file handles the architecture dependent parts of process handling.
3 * 3 *
4 * Copyright IBM Corp. 1999, 2009 4 * Copyright IBM Corp. 1999,2009
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, 5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
6 * Hartmut Penner <hp@de.ibm.com>, 6 * Hartmut Penner <hp@de.ibm.com>,
7 * Denis Joseph Barrow, 7 * Denis Joseph Barrow,
@@ -12,7 +12,6 @@
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/elfcore.h>
16#include <linux/smp.h> 15#include <linux/smp.h>
17#include <linux/slab.h> 16#include <linux/slab.h>
18#include <linux/interrupt.h> 17#include <linux/interrupt.h>
@@ -23,15 +22,14 @@
23#include <linux/kprobes.h> 22#include <linux/kprobes.h>
24#include <linux/random.h> 23#include <linux/random.h>
25#include <linux/module.h> 24#include <linux/module.h>
25#include <asm/system.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/processor.h> 27#include <asm/processor.h>
28#include <asm/vtimer.h>
29#include <asm/exec.h>
30#include <asm/irq.h> 28#include <asm/irq.h>
29#include <asm/timer.h>
31#include <asm/nmi.h> 30#include <asm/nmi.h>
31#include <asm/compat.h>
32#include <asm/smp.h> 32#include <asm/smp.h>
33#include <asm/switch_to.h>
34#include <asm/runtime_instr.h>
35#include "entry.h" 33#include "entry.h"
36 34
37asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 35asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -77,35 +75,66 @@ static void default_idle(void)
77 if (test_thread_flag(TIF_MCCK_PENDING)) { 75 if (test_thread_flag(TIF_MCCK_PENDING)) {
78 local_mcck_enable(); 76 local_mcck_enable();
79 local_irq_enable(); 77 local_irq_enable();
78 s390_handle_mcck();
80 return; 79 return;
81 } 80 }
82 /* Halt the cpu and keep track of cpu time accounting. */ 81 trace_hardirqs_on();
82 /* Don't trace preempt off for idle. */
83 stop_critical_timings();
84 /* Stop virtual timer and halt the cpu. */
83 vtime_stop_cpu(); 85 vtime_stop_cpu();
86 /* Reenable preemption tracer. */
87 start_critical_timings();
84} 88}
85 89
86void cpu_idle(void) 90void cpu_idle(void)
87{ 91{
88 for (;;) { 92 for (;;) {
89 tick_nohz_idle_enter(); 93 tick_nohz_stop_sched_tick(1);
90 rcu_idle_enter(); 94 while (!need_resched())
91 while (!need_resched() && !test_thread_flag(TIF_MCCK_PENDING))
92 default_idle(); 95 default_idle();
93 rcu_idle_exit(); 96 tick_nohz_restart_sched_tick();
94 tick_nohz_idle_exit(); 97 preempt_enable_no_resched();
95 if (test_thread_flag(TIF_MCCK_PENDING)) 98 schedule();
96 s390_handle_mcck(); 99 preempt_disable();
97 schedule_preempt_disabled();
98 } 100 }
99} 101}
100 102
101extern void __kprobes kernel_thread_starter(void); 103extern void __kprobes kernel_thread_starter(void);
102 104
105asm(
106 ".section .kprobes.text, \"ax\"\n"
107 ".global kernel_thread_starter\n"
108 "kernel_thread_starter:\n"
109 " la 2,0(10)\n"
110 " basr 14,9\n"
111 " la 2,0\n"
112 " br 11\n"
113 ".previous\n");
114
115int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
116{
117 struct pt_regs regs;
118
119 memset(&regs, 0, sizeof(regs));
120 regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
121 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
122 regs.gprs[9] = (unsigned long) fn;
123 regs.gprs[10] = (unsigned long) arg;
124 regs.gprs[11] = (unsigned long) do_exit;
125 regs.orig_gpr2 = -1;
126
127 /* Ok, create the new process.. */
128 return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
129 0, &regs, 0, NULL, NULL);
130}
131EXPORT_SYMBOL(kernel_thread);
132
103/* 133/*
104 * Free current thread data structures etc.. 134 * Free current thread data structures etc..
105 */ 135 */
106void exit_thread(void) 136void exit_thread(void)
107{ 137{
108 exit_thread_runtime_instr();
109} 138}
110 139
111void flush_thread(void) 140void flush_thread(void)
@@ -117,7 +146,8 @@ void release_thread(struct task_struct *dead_task)
117} 146}
118 147
119int copy_thread(unsigned long clone_flags, unsigned long new_stackp, 148int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
120 unsigned long arg, struct task_struct *p) 149 unsigned long unused,
150 struct task_struct *p, struct pt_regs *regs)
121{ 151{
122 struct thread_info *ti; 152 struct thread_info *ti;
123 struct fake_frame 153 struct fake_frame
@@ -128,50 +158,20 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
128 158
129 frame = container_of(task_pt_regs(p), struct fake_frame, childregs); 159 frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
130 p->thread.ksp = (unsigned long) frame; 160 p->thread.ksp = (unsigned long) frame;
131 /* Save access registers to new thread structure. */ 161 /* Store access registers to kernel stack of new process. */
132 save_access_regs(&p->thread.acrs[0]); 162 frame->childregs = *regs;
133 /* start new process with ar4 pointing to the correct address space */ 163 frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */
134 p->thread.mm_segment = get_fs(); 164 frame->childregs.gprs[15] = new_stackp;
135 /* Don't copy debug registers */
136 memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
137 memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
138 clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
139 clear_tsk_thread_flag(p, TIF_PER_TRAP);
140 /* Initialize per thread user and system timer values */
141 ti = task_thread_info(p);
142 ti->user_timer = 0;
143 ti->system_timer = 0;
144
145 frame->sf.back_chain = 0; 165 frame->sf.back_chain = 0;
166
146 /* new return point is ret_from_fork */ 167 /* new return point is ret_from_fork */
147 frame->sf.gprs[8] = (unsigned long) ret_from_fork; 168 frame->sf.gprs[8] = (unsigned long) ret_from_fork;
169
148 /* fake return stack for resume(), don't go back to schedule */ 170 /* fake return stack for resume(), don't go back to schedule */
149 frame->sf.gprs[9] = (unsigned long) frame; 171 frame->sf.gprs[9] = (unsigned long) frame;
150 172
151 /* Store access registers to kernel stack of new process. */ 173 /* Save access registers to new thread structure. */
152 if (unlikely(p->flags & PF_KTHREAD)) { 174 save_access_regs(&p->thread.acrs[0]);
153 /* kernel thread */
154 memset(&frame->childregs, 0, sizeof(struct pt_regs));
155 frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT |
156 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
157 frame->childregs.psw.addr = PSW_ADDR_AMODE |
158 (unsigned long) kernel_thread_starter;
159 frame->childregs.gprs[9] = new_stackp; /* function */
160 frame->childregs.gprs[10] = arg;
161 frame->childregs.gprs[11] = (unsigned long) do_exit;
162 frame->childregs.orig_gpr2 = -1;
163
164 return 0;
165 }
166 frame->childregs = *current_pt_regs();
167 frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */
168 if (new_stackp)
169 frame->childregs.gprs[15] = new_stackp;
170
171 /* Don't copy runtime instrumentation info */
172 p->thread.ri_cb = NULL;
173 p->thread.ri_signum = 0;
174 frame->childregs.psw.mask &= ~PSW_MASK_RI;
175 175
176#ifndef CONFIG_64BIT 176#ifndef CONFIG_64BIT
177 /* 177 /*
@@ -183,24 +183,68 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
183 sizeof(s390_fp_regs)); 183 sizeof(s390_fp_regs));
184 /* Set a new TLS ? */ 184 /* Set a new TLS ? */
185 if (clone_flags & CLONE_SETTLS) 185 if (clone_flags & CLONE_SETTLS)
186 p->thread.acrs[0] = frame->childregs.gprs[6]; 186 p->thread.acrs[0] = regs->gprs[6];
187#else /* CONFIG_64BIT */ 187#else /* CONFIG_64BIT */
188 /* Save the fpu registers to new thread structure. */ 188 /* Save the fpu registers to new thread structure. */
189 save_fp_regs(&p->thread.fp_regs); 189 save_fp_regs(&p->thread.fp_regs);
190 /* Set a new TLS ? */ 190 /* Set a new TLS ? */
191 if (clone_flags & CLONE_SETTLS) { 191 if (clone_flags & CLONE_SETTLS) {
192 unsigned long tls = frame->childregs.gprs[6];
193 if (is_compat_task()) { 192 if (is_compat_task()) {
194 p->thread.acrs[0] = (unsigned int)tls; 193 p->thread.acrs[0] = (unsigned int) regs->gprs[6];
195 } else { 194 } else {
196 p->thread.acrs[0] = (unsigned int)(tls >> 32); 195 p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
197 p->thread.acrs[1] = (unsigned int)tls; 196 p->thread.acrs[1] = (unsigned int) regs->gprs[6];
198 } 197 }
199 } 198 }
200#endif /* CONFIG_64BIT */ 199#endif /* CONFIG_64BIT */
200 /* start new process with ar4 pointing to the correct address space */
201 p->thread.mm_segment = get_fs();
202 /* Don't copy debug registers */
203 memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
204 memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
205 clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
206 clear_tsk_thread_flag(p, TIF_PER_TRAP);
207 /* Initialize per thread user and system timer values */
208 ti = task_thread_info(p);
209 ti->user_timer = 0;
210 ti->system_timer = 0;
201 return 0; 211 return 0;
202} 212}
203 213
214SYSCALL_DEFINE0(fork)
215{
216 struct pt_regs *regs = task_pt_regs(current);
217 return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
218}
219
220SYSCALL_DEFINE4(clone, unsigned long, newsp, unsigned long, clone_flags,
221 int __user *, parent_tidptr, int __user *, child_tidptr)
222{
223 struct pt_regs *regs = task_pt_regs(current);
224
225 if (!newsp)
226 newsp = regs->gprs[15];
227 return do_fork(clone_flags, newsp, regs, 0,
228 parent_tidptr, child_tidptr);
229}
230
231/*
232 * This is trivial, and on the face of it looks like it
233 * could equally well be done in user mode.
234 *
235 * Not so, for quite unobvious reasons - register pressure.
236 * In user mode vfork() cannot have a stack frame, and if
237 * done by calling the "clone()" system call directly, you
238 * do not have enough call-clobbered registers to hold all
239 * the information you need.
240 */
241SYSCALL_DEFINE0(vfork)
242{
243 struct pt_regs *regs = task_pt_regs(current);
244 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
245 regs->gprs[15], regs, 0, NULL, NULL);
246}
247
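The comment above sys_vfork() explains why the call needs kernel help: in user mode a vfork() wrapper cannot afford a stack frame. A small userspace example of the contract it implements, where the child borrows the parent's address space and the parent blocks until the child execs or exits:

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = vfork();

	if (pid == 0) {
		/* Only exec or _exit are safe here; the child still
		 * runs on the parent's stack frame. */
		execlp("echo", "echo", "spawned via vfork", (char *)NULL);
		_exit(127);
	}
	waitpid(pid, NULL, 0);	/* resumes only after the exec above */
	return 0;
}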
204asmlinkage void execve_tail(void) 248asmlinkage void execve_tail(void)
205{ 249{
206 current->thread.fp_regs.fpc = 0; 250 current->thread.fp_regs.fpc = 0;
@@ -209,6 +253,31 @@ asmlinkage void execve_tail(void)
209} 253}
210 254
211/* 255/*
256 * sys_execve() executes a new program.
257 */
258SYSCALL_DEFINE3(execve, const char __user *, name,
259 const char __user *const __user *, argv,
260 const char __user *const __user *, envp)
261{
262 struct pt_regs *regs = task_pt_regs(current);
263 char *filename;
264 long rc;
265
266 filename = getname(name);
267 rc = PTR_ERR(filename);
268 if (IS_ERR(filename))
269 return rc;
270 rc = do_execve(filename, argv, envp, regs);
271 if (rc)
272 goto out;
273 execve_tail();
274 rc = regs->gprs[2];
275out:
276 putname(filename);
277 return rc;
278}
279
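sys_execve() above follows the classic arch wrapper discipline: resolve the user filename with getname(), run do_execve() against the saved register image, let execve_tail() reset the FPU control word, and always release the name via putname(). The same "a successful exec never returns" shape from userspace:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "ls", "-l", NULL };
	char *envp[] = { "PATH=/bin:/usr/bin", NULL };

	execve("/bin/ls", argv, envp);
	/* Reached only on failure, like the "if (rc) goto out" leg. */
	fprintf(stderr, "execve failed: %s\n", strerror(errno));
	return 127;
}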
280/*
212 * fill in the FPU structure for a core dump. 281 * fill in the FPU structure for a core dump.
213 */ 282 */
214int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) 283int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 753c41d0ffd..311e9d71288 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * arch/s390/kernel/processor.c
3 *
2 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008
3 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
4 */ 6 */
@@ -23,15 +25,13 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
23 */ 25 */
24void __cpuinit cpu_init(void) 26void __cpuinit cpu_init(void)
25{ 27{
26 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 28 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
27 struct cpuid *id = &__get_cpu_var(cpu_id);
28 29
29 get_cpu_id(id); 30 get_cpu_id(id);
30 atomic_inc(&init_mm.mm_count); 31 atomic_inc(&init_mm.mm_count);
31 current->active_mm = &init_mm; 32 current->active_mm = &init_mm;
32 BUG_ON(current->mm); 33 BUG_ON(current->mm);
33 enter_lazy_tlb(&init_mm, current); 34 enter_lazy_tlb(&init_mm, current);
34 memset(idle, 0, sizeof(*idle));
35} 35}
36 36
37/* 37/*
@@ -39,9 +39,9 @@ void __cpuinit cpu_init(void)
39 */ 39 */
40static int show_cpuinfo(struct seq_file *m, void *v) 40static int show_cpuinfo(struct seq_file *m, void *v)
41{ 41{
42 static const char *hwcap_str[] = { 42 static const char *hwcap_str[10] = {
43 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 43 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
44 "edat", "etf3eh", "highgprs", "te" 44 "edat", "etf3eh", "highgprs"
45 }; 45 };
46 unsigned long n = (unsigned long) v - 1; 46 unsigned long n = (unsigned long) v - 1;
47 int i; 47 int i;
@@ -54,11 +54,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
54 num_online_cpus(), loops_per_jiffy/(500000/HZ), 54 num_online_cpus(), loops_per_jiffy/(500000/HZ),
55 (loops_per_jiffy/(5000/HZ))%100); 55 (loops_per_jiffy/(5000/HZ))%100);
56 seq_puts(m, "features\t: "); 56 seq_puts(m, "features\t: ");
57 for (i = 0; i < ARRAY_SIZE(hwcap_str); i++) 57 for (i = 0; i < 10; i++)
58 if (hwcap_str[i] && (elf_hwcap & (1UL << i))) 58 if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
59 seq_printf(m, "%s ", hwcap_str[i]); 59 seq_printf(m, "%s ", hwcap_str[i]);
60 seq_puts(m, "\n"); 60 seq_puts(m, "\n");
61 show_cacheinfo(m);
62 } 61 }
63 get_online_cpus(); 62 get_online_cpus();
64 if (cpu_online(n)) { 63 if (cpu_online(n)) {
@@ -75,7 +74,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
75 74
76static void *c_start(struct seq_file *m, loff_t *pos) 75static void *c_start(struct seq_file *m, loff_t *pos)
77{ 76{
78 return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; 77 return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
79} 78}
80 79
81static void *c_next(struct seq_file *m, void *v, loff_t *pos) 80static void *c_next(struct seq_file *m, void *v, loff_t *pos)
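Each feature string show_cpuinfo() prints corresponds to one bit of the ELF hwcap word, in the order of hwcap_str[]. The same bits are visible to userspace through the auxiliary vector; a compilable sketch:

#include <stdio.h>
#include <sys/auxv.h>

static const char *const hwcap_str[] = {
	"esan3", "zarch", "stfle", "msa", "ldisp",
	"eimm", "dfp", "edat", "etf3eh", "highgprs",
};

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);
	unsigned int i;

	for (i = 0; i < sizeof(hwcap_str) / sizeof(hwcap_str[0]); i++)
		if (hwcap & (1UL << i))
			printf("%s ", hwcap_str[i]);
	printf("\n");
	return 0;
}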
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a314c57f4e9..5804cfa7cba 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Ptrace user space interface. 2 * Ptrace user space interface.
3 * 3 *
4 * Copyright IBM Corp. 1999, 2010 4 * Copyright IBM Corp. 1999,2010
5 * Author(s): Denis Joseph Barrow 5 * Author(s): Denis Joseph Barrow
6 * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 */ 7 */
@@ -20,15 +20,15 @@
20#include <linux/regset.h> 20#include <linux/regset.h>
21#include <linux/tracehook.h> 21#include <linux/tracehook.h>
22#include <linux/seccomp.h> 22#include <linux/seccomp.h>
23#include <linux/compat.h>
24#include <trace/syscall.h> 23#include <trace/syscall.h>
24#include <asm/compat.h>
25#include <asm/segment.h> 25#include <asm/segment.h>
26#include <asm/page.h> 26#include <asm/page.h>
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
29#include <asm/system.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <asm/unistd.h> 31#include <asm/unistd.h>
31#include <asm/switch_to.h>
32#include "entry.h" 32#include "entry.h"
33 33
34#ifdef CONFIG_COMPAT 34#ifdef CONFIG_COMPAT
@@ -42,8 +42,6 @@ enum s390_regset {
42 REGSET_GENERAL, 42 REGSET_GENERAL,
43 REGSET_FP, 43 REGSET_FP,
44 REGSET_LAST_BREAK, 44 REGSET_LAST_BREAK,
45 REGSET_TDB,
46 REGSET_SYSTEM_CALL,
47 REGSET_GENERAL_EXTENDED, 45 REGSET_GENERAL_EXTENDED,
48}; 46};
49 47
@@ -53,22 +51,6 @@ void update_per_regs(struct task_struct *task)
53 struct thread_struct *thread = &task->thread; 51 struct thread_struct *thread = &task->thread;
54 struct per_regs old, new; 52 struct per_regs old, new;
55 53
56#ifdef CONFIG_64BIT
57 /* Take care of the enable/disable of transactional execution. */
58 if (MACHINE_HAS_TE) {
59 unsigned long cr0, cr0_new;
60
61 __ctl_store(cr0, 0, 0);
62 /* set or clear transaction execution bits 8 and 9. */
63 if (task->thread.per_flags & PER_FLAG_NO_TE)
64 cr0_new = cr0 & ~(3UL << 54);
65 else
66 cr0_new = cr0 | (3UL << 54);
67 /* Only load control register 0 if necessary. */
68 if (cr0 != cr0_new)
69 __ctl_load(cr0_new, 0, 0);
70 }
71#endif
72 /* Copy user specified PER registers */ 54 /* Copy user specified PER registers */
73 new.control = thread->per_user.control; 55 new.control = thread->per_user.control;
74 new.start = thread->per_user.start; 56 new.start = thread->per_user.start;
@@ -77,10 +59,6 @@ void update_per_regs(struct task_struct *task)
77 /* merge TIF_SINGLE_STEP into user specified PER registers. */ 59 /* merge TIF_SINGLE_STEP into user specified PER registers. */
78 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { 60 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
79 new.control |= PER_EVENT_IFETCH; 61 new.control |= PER_EVENT_IFETCH;
80#ifdef CONFIG_64BIT
81 new.control |= PER_CONTROL_SUSPENSION;
82 new.control |= PER_EVENT_TRANSACTION_END;
83#endif
84 new.start = 0; 62 new.start = 0;
85 new.end = PSW_ADDR_INSN; 63 new.end = PSW_ADDR_INSN;
86 } 64 }
@@ -121,7 +99,6 @@ void ptrace_disable(struct task_struct *task)
121 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); 99 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
122 clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 100 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
123 clear_tsk_thread_flag(task, TIF_PER_TRAP); 101 clear_tsk_thread_flag(task, TIF_PER_TRAP);
124 task->thread.per_flags = 0;
125} 102}
126 103
127#ifndef CONFIG_64BIT 104#ifndef CONFIG_64BIT
@@ -191,8 +168,8 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
191 */ 168 */
192 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); 169 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
193 if (addr == (addr_t) &dummy->regs.psw.mask) 170 if (addr == (addr_t) &dummy->regs.psw.mask)
194 /* Return a clean psw mask. */ 171 /* Remove per bit from user psw. */
195 tmp = psw_user_bits | (tmp & PSW_MASK_USER); 172 tmp &= ~PSW_MASK_PER;
196 173
197 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { 174 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
198 /* 175 /*
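The __peek_user() hunk above reverts from rebuilding a clean user PSW mask to merely hiding the PER bit. A minimal sketch of the two policies side by side; the mask constants are stand-ins for the real asm/ptrace.h definitions:

typedef unsigned long addr_t;

#define PSW_MASK_PER	0x4000000000000000UL	/* stand-in value */
#define PSW_MASK_USER	0x000000000000e000UL	/* stand-in value */

static addr_t peek_mask_new(addr_t tmp)
{
	return tmp & ~PSW_MASK_PER;	/* just hide the PER bit */
}

static addr_t peek_mask_old(addr_t tmp, addr_t psw_user_bits)
{
	/* rebuild from the template, keeping only user-settable bits */
	return psw_user_bits | (tmp & PSW_MASK_USER);
}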
@@ -314,10 +291,18 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
314 * psw and gprs are stored on the stack 291 * psw and gprs are stored on the stack
315 */ 292 */
316 if (addr == (addr_t) &dummy->regs.psw.mask && 293 if (addr == (addr_t) &dummy->regs.psw.mask &&
317 ((data & ~PSW_MASK_USER) != psw_user_bits || 294#ifdef CONFIG_COMPAT
318 ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) 295 data != PSW_MASK_MERGE(psw_user32_bits, data) &&
296#endif
297 data != PSW_MASK_MERGE(psw_user_bits, data))
319 /* Invalid psw mask. */ 298 /* Invalid psw mask. */
320 return -EINVAL; 299 return -EINVAL;
300#ifndef CONFIG_64BIT
301 if (addr == (addr_t) &dummy->regs.psw.addr)
302 /* I'd like to reject addresses without the
303 high order bit but older gdb's rely on it */
304 data |= PSW_ADDR_AMODE;
305#endif
321 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; 306 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
322 307
323 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 308 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -438,16 +423,6 @@ long arch_ptrace(struct task_struct *child, long request,
438 put_user(task_thread_info(child)->last_break, 423 put_user(task_thread_info(child)->last_break,
439 (unsigned long __user *) data); 424 (unsigned long __user *) data);
440 return 0; 425 return 0;
441 case PTRACE_ENABLE_TE:
442 if (!MACHINE_HAS_TE)
443 return -EIO;
444 child->thread.per_flags &= ~PER_FLAG_NO_TE;
445 return 0;
446 case PTRACE_DISABLE_TE:
447 if (!MACHINE_HAS_TE)
448 return -EIO;
449 child->thread.per_flags |= PER_FLAG_NO_TE;
450 return 0;
451 default: 426 default:
452 /* Removing high order bit from addr (only for 31 bit). */ 427 /* Removing high order bit from addr (only for 31 bit). */
453 addr &= PSW_ADDR_INSN; 428 addr &= PSW_ADDR_INSN;
@@ -522,21 +497,21 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
522 __u32 tmp; 497 __u32 tmp;
523 498
524 if (addr < (addr_t) &dummy32->regs.acrs) { 499 if (addr < (addr_t) &dummy32->regs.acrs) {
525 struct pt_regs *regs = task_pt_regs(child);
526 /* 500 /*
527 * psw and gprs are stored on the stack 501 * psw and gprs are stored on the stack
528 */ 502 */
529 if (addr == (addr_t) &dummy32->regs.psw.mask) { 503 if (addr == (addr_t) &dummy32->regs.psw.mask) {
530 /* Fake a 31 bit psw mask. */ 504 /* Fake a 31 bit psw mask. */
531 tmp = (__u32)(regs->psw.mask >> 32); 505 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
532 tmp = psw32_user_bits | (tmp & PSW32_MASK_USER); 506 tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
533 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 507 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
534 /* Fake a 31 bit psw address. */ 508 /* Fake a 31 bit psw address. */
535 tmp = (__u32) regs->psw.addr | 509 tmp = (__u32) task_pt_regs(child)->psw.addr |
536 (__u32)(regs->psw.mask & PSW_MASK_BA); 510 PSW32_ADDR_AMODE31;
537 } else { 511 } else {
538 /* gpr 0-15 */ 512 /* gpr 0-15 */
539 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4); 513 tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
514 addr*2 + 4);
540 } 515 }
541 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 516 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
542 /* 517 /*
@@ -621,27 +596,24 @@ static int __poke_user_compat(struct task_struct *child,
621 addr_t offset; 596 addr_t offset;
622 597
623 if (addr < (addr_t) &dummy32->regs.acrs) { 598 if (addr < (addr_t) &dummy32->regs.acrs) {
624 struct pt_regs *regs = task_pt_regs(child);
625 /* 599 /*
626 * psw, gprs, acrs and orig_gpr2 are stored on the stack 600 * psw, gprs, acrs and orig_gpr2 are stored on the stack
627 */ 601 */
628 if (addr == (addr_t) &dummy32->regs.psw.mask) { 602 if (addr == (addr_t) &dummy32->regs.psw.mask) {
629 /* Build a 64 bit psw mask from 31 bit mask. */ 603 /* Build a 64 bit psw mask from 31 bit mask. */
630 if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits) 604 if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
631 /* Invalid psw mask. */ 605 /* Invalid psw mask. */
632 return -EINVAL; 606 return -EINVAL;
633 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 607 task_pt_regs(child)->psw.mask =
634 (regs->psw.mask & PSW_MASK_BA) | 608 PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
635 (__u64)(tmp & PSW32_MASK_USER) << 32;
636 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 609 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
637 /* Build a 64 bit psw address from 31 bit address. */ 610 /* Build a 64 bit psw address from 31 bit address. */
638 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; 611 task_pt_regs(child)->psw.addr =
639 /* Transfer 31 bit amode bit to psw mask. */ 612 (__u64) tmp & PSW32_ADDR_INSN;
640 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
641 (__u64)(tmp & PSW32_ADDR_AMODE);
642 } else { 613 } else {
643 /* gpr 0-15 */ 614 /* gpr 0-15 */
644 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; 615 *(__u32*)((addr_t) &task_pt_regs(child)->psw
616 + addr*2 + 4) = tmp;
645 } 617 }
646 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 618 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
647 /* 619 /*
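The restored __poke_user_compat() validates a poked mask with PSW32_MASK_MERGE(): a value is accepted only if merging it into the user-bits template changes nothing, i.e. no privileged bits were touched. A sketch under the assumption that the macro keeps the template's privileged bits and the value's user bits (the USER mask below is a stand-in, not the real constant):

#define PSW32_MASK_USER	0x0000ff00U	/* stand-in value */
#define PSW32_MASK_MERGE(tmpl, val) \
	(((tmpl) & ~PSW32_MASK_USER) | ((val) & PSW32_MASK_USER))

static int poke_psw_mask_ok(unsigned int psw32_user_bits, unsigned int tmp)
{
	/* valid only if the caller touched no privileged bits */
	return tmp == PSW32_MASK_MERGE(psw32_user_bits, tmp);
}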
@@ -751,11 +723,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
751 long ret = 0; 723 long ret = 0;
752 724
753 /* Do the secure computing check first. */ 725 /* Do the secure computing check first. */
754 if (secure_computing(regs->gprs[2])) { 726 secure_computing(regs->gprs[2]);
755 /* seccomp failures shouldn't expose any additional code. */
756 ret = -1;
757 goto out;
758 }
759 727
760 /* 728 /*
761 * The sysc_tracesys code in entry.S stored the system 729 * The sysc_tracesys code in entry.S stored the system
@@ -769,25 +737,27 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
769 * debugger stored an invalid system call number. Skip 737 * debugger stored an invalid system call number. Skip
770 * the system call and the system call restart handling. 738 * the system call and the system call restart handling.
771 */ 739 */
772 clear_thread_flag(TIF_SYSCALL); 740 regs->svcnr = 0;
773 ret = -1; 741 ret = -1;
774 } 742 }
775 743
776 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 744 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
777 trace_sys_enter(regs, regs->gprs[2]); 745 trace_sys_enter(regs, regs->gprs[2]);
778 746
779 audit_syscall_entry(is_compat_task() ? 747 if (unlikely(current->audit_context))
780 AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, 748 audit_syscall_entry(is_compat_task() ?
781 regs->gprs[2], regs->orig_gpr2, 749 AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
782 regs->gprs[3], regs->gprs[4], 750 regs->gprs[2], regs->orig_gpr2,
783 regs->gprs[5]); 751 regs->gprs[3], regs->gprs[4],
784out: 752 regs->gprs[5]);
785 return ret ?: regs->gprs[2]; 753 return ret ?: regs->gprs[2];
786} 754}
787 755
788asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) 756asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
789{ 757{
790 audit_syscall_exit(regs); 758 if (unlikely(current->audit_context))
759 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
760 regs->gprs[2]);
791 761
792 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 762 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
793 trace_sys_exit(regs, regs->gprs[2]); 763 trace_sys_exit(regs, regs->gprs[2]);
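The "return ret ?: regs->gprs[2]" in do_syscall_trace_enter() uses the GNU C conditional-with-omitted-operand extension: "a ?: b" means "a ? a : b" without evaluating a twice. A tiny example:

/* GNU extension: returns ret when non-zero, else the syscall number. */
static long pick(long ret, long syscall_nr)
{
	return ret ?: syscall_nr;	/* -1 skips the syscall */
}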
@@ -935,50 +905,8 @@ static int s390_last_break_set(struct task_struct *target,
935 return 0; 905 return 0;
936} 906}
937 907
938static int s390_tdb_get(struct task_struct *target,
939 const struct user_regset *regset,
940 unsigned int pos, unsigned int count,
941 void *kbuf, void __user *ubuf)
942{
943 struct pt_regs *regs = task_pt_regs(target);
944 unsigned char *data;
945
946 if (!(regs->int_code & 0x200))
947 return -ENODATA;
948 data = target->thread.trap_tdb;
949 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
950}
951
952static int s390_tdb_set(struct task_struct *target,
953 const struct user_regset *regset,
954 unsigned int pos, unsigned int count,
955 const void *kbuf, const void __user *ubuf)
956{
957 return 0;
958}
959
960#endif 908#endif
961 909
962static int s390_system_call_get(struct task_struct *target,
963 const struct user_regset *regset,
964 unsigned int pos, unsigned int count,
965 void *kbuf, void __user *ubuf)
966{
967 unsigned int *data = &task_thread_info(target)->system_call;
968 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
969 data, 0, sizeof(unsigned int));
970}
971
972static int s390_system_call_set(struct task_struct *target,
973 const struct user_regset *regset,
974 unsigned int pos, unsigned int count,
975 const void *kbuf, const void __user *ubuf)
976{
977 unsigned int *data = &task_thread_info(target)->system_call;
978 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
979 data, 0, sizeof(unsigned int));
980}
981
982static const struct user_regset s390_regsets[] = { 910static const struct user_regset s390_regsets[] = {
983 [REGSET_GENERAL] = { 911 [REGSET_GENERAL] = {
984 .core_note_type = NT_PRSTATUS, 912 .core_note_type = NT_PRSTATUS,
@@ -1005,23 +933,7 @@ static const struct user_regset s390_regsets[] = {
1005 .get = s390_last_break_get, 933 .get = s390_last_break_get,
1006 .set = s390_last_break_set, 934 .set = s390_last_break_set,
1007 }, 935 },
1008 [REGSET_TDB] = {
1009 .core_note_type = NT_S390_TDB,
1010 .n = 1,
1011 .size = 256,
1012 .align = 1,
1013 .get = s390_tdb_get,
1014 .set = s390_tdb_set,
1015 },
1016#endif 936#endif
1017 [REGSET_SYSTEM_CALL] = {
1018 .core_note_type = NT_S390_SYSTEM_CALL,
1019 .n = 1,
1020 .size = sizeof(unsigned int),
1021 .align = sizeof(unsigned int),
1022 .get = s390_system_call_get,
1023 .set = s390_system_call_set,
1024 },
1025}; 937};
1026 938
1027static const struct user_regset_view user_s390_view = { 939static const struct user_regset_view user_s390_view = {
@@ -1210,22 +1122,6 @@ static const struct user_regset s390_compat_regsets[] = {
1210 .get = s390_compat_last_break_get, 1122 .get = s390_compat_last_break_get,
1211 .set = s390_compat_last_break_set, 1123 .set = s390_compat_last_break_set,
1212 }, 1124 },
1213 [REGSET_TDB] = {
1214 .core_note_type = NT_S390_TDB,
1215 .n = 1,
1216 .size = 256,
1217 .align = 1,
1218 .get = s390_tdb_get,
1219 .set = s390_tdb_set,
1220 },
1221 [REGSET_SYSTEM_CALL] = {
1222 .core_note_type = NT_S390_SYSTEM_CALL,
1223 .n = 1,
1224 .size = sizeof(compat_uint_t),
1225 .align = sizeof(compat_uint_t),
1226 .get = s390_system_call_get,
1227 .set = s390_system_call_set,
1228 },
1229 [REGSET_GENERAL_EXTENDED] = { 1125 [REGSET_GENERAL_EXTENDED] = {
1230 .core_note_type = NT_S390_HIGH_GPRS, 1126 .core_note_type = NT_S390_HIGH_GPRS,
1231 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1127 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index dd8016b0477..303d961c3bb 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -1,18 +1,13 @@
1/* 1/*
2 * arch/s390/kernel/reipl.S
3 *
2 * S390 version 4 * S390 version
3 * Copyright IBM Corp. 2000 5 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
4 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) 6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
5 */ 7 */
6 8
7#include <linux/linkage.h> 9#include <linux/linkage.h>
8#include <asm/asm-offsets.h> 10#include <asm/asm-offsets.h>
9#include <asm/sigp.h>
10
11#
12# store_status: Empty implementation until kdump is supported on 31 bit
13#
14ENTRY(store_status)
15 br %r14
16 11
17# 12#
18# do_reipl_asm 13# do_reipl_asm
@@ -59,7 +54,7 @@ ENTRY(do_reipl_asm)
59 bas %r14,.Ldisab-.Lpg0(%r13) 54 bas %r14,.Ldisab-.Lpg0(%r13)
60.L003: st %r1,__LC_SUBCHANNEL_ID 55.L003: st %r1,__LC_SUBCHANNEL_ID
61 lpsw 0 56 lpsw 0
62 sigp 0,0,SIGP_RESTART 57 sigp 0,0,0(6)
63.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) 58.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
64 lpsw .Ldispsw-.Lpg0(%r13) 59 lpsw .Ldispsw-.Lpg0(%r13)
65 .align 8 60 .align 8
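These assembly hunks trade the symbolic SIGP order names from asm/sigp.h for raw order codes. A hedged C sketch of the mapping, with values inferred only from the substitutions visible in this diff:

/* SIGP order codes behind the symbols removed in these hunks. */
enum sigp_order {
	SIGP_RESTART		= 0x06,	/* "sigp 0,0,0(6)" above */
	SIGP_SET_ARCHITECTURE	= 0x12,	/* esa/esame mode switch */
};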
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index dc3b1273c4d..e690975403f 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -1,12 +1,11 @@
1/* 1/*
2 * Copyright IBM Corp 2000, 2011 2 * Copyright IBM Corp 2000,2011
3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, 3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
4 * Denis Joseph Barrow, 4 * Denis Joseph Barrow,
5 */ 5 */
6 6
7#include <linux/linkage.h> 7#include <linux/linkage.h>
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9#include <asm/sigp.h>
10 9
11# 10#
12# store_status 11# store_status
@@ -18,11 +17,11 @@
18# 17#
19ENTRY(store_status) 18ENTRY(store_status)
20 /* Save register one and load save area base */ 19 /* Save register one and load save area base */
21 stg %r1,__LC_SAVE_AREA_RESTART 20 stg %r1,__LC_SAVE_AREA_64(%r0)
22 lghi %r1,SAVE_AREA_BASE 21 lghi %r1,SAVE_AREA_BASE
23 /* General purpose registers */ 22 /* General purpose registers */
24 stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) 23 stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
25 lg %r2,__LC_SAVE_AREA_RESTART 24 lg %r2,__LC_SAVE_AREA_64(%r0)
26 stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) 25 stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
27 /* Control registers */ 26 /* Control registers */
28 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) 27 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
@@ -63,11 +62,8 @@ ENTRY(store_status)
63 larl %r2,store_status 62 larl %r2,store_status
64 stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) 63 stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
65 br %r14 64 br %r14
66 65.align 8
67 .section .bss
68 .align 8
69.Lclkcmp: .quad 0x0000000000000000 66.Lclkcmp: .quad 0x0000000000000000
70 .previous
71 67
72# 68#
73# do_reipl_asm 69# do_reipl_asm
@@ -107,7 +103,7 @@ ENTRY(do_reipl_asm)
107.L003: st %r1,__LC_SUBCHANNEL_ID 103.L003: st %r1,__LC_SUBCHANNEL_ID
108 lhi %r1,0 # mode 0 = esa 104 lhi %r1,0 # mode 0 = esa
109 slr %r0,%r0 # set cpuid to zero 105 slr %r0,%r0 # set cpuid to zero
110 sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode 106 sigp %r1,%r0,0x12 # switch to esa mode
111 lpsw 0 107 lpsw 0
112.Ldisab: sll %r14,1 108.Ldisab: sll %r14,1
113 srl %r14,1 # need to kill hi bit to avoid specification exceptions. 109 srl %r14,1 # need to kill hi bit to avoid specification exceptions.
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index f4e6f20e117..c91d70aede9 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 2005 2 * arch/s390/kernel/relocate_kernel.S
3 *
4 * (C) Copyright IBM Corp. 2005
3 * 5 *
4 * Author(s): Rolf Adelsberger, 6 * Author(s): Rolf Adelsberger,
5 * Heiko Carstens <heiko.carstens@de.ibm.com> 7 * Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -7,7 +9,6 @@
7 */ 9 */
8 10
9#include <linux/linkage.h> 11#include <linux/linkage.h>
10#include <asm/sigp.h>
11 12
12/* 13/*
13 * moves the new kernel to its destination... 14 * moves the new kernel to its destination...
@@ -92,7 +93,7 @@ ENTRY(relocate_kernel)
92 .no_diag308: 93 .no_diag308:
93 sr %r1,%r1 # clear %r1 94 sr %r1,%r1 # clear %r1
94 sr %r2,%r2 # clear %r2 95 sr %r2,%r2 # clear %r2
95 sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero 96 sigp %r1,%r2,0x12 # set cpuid to zero
96 lpsw 0 # hopefully start new kernel... 97 lpsw 0 # hopefully start new kernel...
97 98
98 .align 8 99 .align 8
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index cfac28330b0..7c3ce589a7f 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 2005 2 * arch/s390/kernel/relocate_kernel64.S
3 *
4 * (C) Copyright IBM Corp. 2005
3 * 5 *
4 * Author(s): Rolf Adelsberger, 6 * Author(s): Rolf Adelsberger,
5 * Heiko Carstens <heiko.carstens@de.ibm.com> 7 * Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -7,7 +9,6 @@
7 */ 9 */
8 10
9#include <linux/linkage.h> 11#include <linux/linkage.h>
10#include <asm/sigp.h>
11 12
12/* 13/*
13 * moves the new kernel to its destination... 14 * moves the new kernel to its destination...
@@ -44,7 +45,7 @@ ENTRY(relocate_kernel)
44 diag %r0,%r0,0x308 45 diag %r0,%r0,0x308
45 .back: 46 .back:
46 lhi %r1,1 # mode 1 = esame 47 lhi %r1,1 # mode 1 = esame
47 sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode 48 sigp %r1,%r0,0x12 # switch to esame mode
48 sam64 # switch to 64 bit addressing mode 49 sam64 # switch to 64 bit addressing mode
49 basr %r13,0 50 basr %r13,0
50 .back_base: 51 .back_base:
@@ -95,7 +96,7 @@ ENTRY(relocate_kernel)
95 sam31 # 31 bit mode 96 sam31 # 31 bit mode
96 sr %r1,%r1 # erase register r1 97 sr %r1,%r1 # erase register r1
97 sr %r2,%r2 # erase register r2 98 sr %r2,%r2 # erase register r2
98 sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero 99 sigp %r1,%r2,0x12 # set cpuid to zero
99 lpsw 0 # hopefully start new kernel... 100 lpsw 0 # hopefully start new kernel...
100 101
101 .align 8 102 .align 8
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
deleted file mode 100644
index 077a99389b0..00000000000
--- a/arch/s390/kernel/runtime_instr.c
+++ /dev/null
@@ -1,150 +0,0 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
4 */
5
6#include <linux/kernel.h>
7#include <linux/syscalls.h>
8#include <linux/signal.h>
9#include <linux/mm.h>
10#include <linux/slab.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/kernel_stat.h>
14#include <asm/runtime_instr.h>
15#include <asm/cpu_mf.h>
16#include <asm/irq.h>
17
18/* empty control block to disable RI by loading it */
19struct runtime_instr_cb runtime_instr_empty_cb;
20
21static int runtime_instr_avail(void)
22{
23 return test_facility(64);
24}
25
26static void disable_runtime_instr(void)
27{
28 struct pt_regs *regs = task_pt_regs(current);
29
30 load_runtime_instr_cb(&runtime_instr_empty_cb);
31
32 /*
33 * Make sure the RI bit is deleted from the PSW. If the user did not
34 * switch off RI before the system call the process will get a
35 * specification exception otherwise.
36 */
37 regs->psw.mask &= ~PSW_MASK_RI;
38}
39
40static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
41{
42 cb->buf_limit = 0xfff;
43 if (s390_user_mode == HOME_SPACE_MODE)
44 cb->home_space = 1;
45 cb->int_requested = 1;
46 cb->pstate = 1;
47 cb->pstate_set_buf = 1;
48 cb->pstate_sample = 1;
49 cb->pstate_collect = 1;
50 cb->key = PAGE_DEFAULT_KEY;
51 cb->valid = 1;
52}
53
54void exit_thread_runtime_instr(void)
55{
56 struct task_struct *task = current;
57
58 if (!task->thread.ri_cb)
59 return;
60 disable_runtime_instr();
61 kfree(task->thread.ri_cb);
62 task->thread.ri_signum = 0;
63 task->thread.ri_cb = NULL;
64}
65
66static void runtime_instr_int_handler(struct ext_code ext_code,
67 unsigned int param32, unsigned long param64)
68{
69 struct siginfo info;
70
71 if (!(param32 & CPU_MF_INT_RI_MASK))
72 return;
73
74 inc_irq_stat(IRQEXT_CMR);
75
76 if (!current->thread.ri_cb)
77 return;
78 if (current->thread.ri_signum < SIGRTMIN ||
79 current->thread.ri_signum > SIGRTMAX) {
80 WARN_ON_ONCE(1);
81 return;
82 }
83
84 memset(&info, 0, sizeof(info));
85 info.si_signo = current->thread.ri_signum;
86 info.si_code = SI_QUEUE;
87 if (param32 & CPU_MF_INT_RI_BUF_FULL)
88 info.si_int = ENOBUFS;
89 else if (param32 & CPU_MF_INT_RI_HALTED)
90 info.si_int = ECANCELED;
91 else
92 return; /* unknown reason */
93
94 send_sig_info(current->thread.ri_signum, &info, current);
95}
96
97SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
98{
99 struct runtime_instr_cb *cb;
100
101 if (!runtime_instr_avail())
102 return -EOPNOTSUPP;
103
104 if (command == S390_RUNTIME_INSTR_STOP) {
105 preempt_disable();
106 exit_thread_runtime_instr();
107 preempt_enable();
108 return 0;
109 }
110
111 if (command != S390_RUNTIME_INSTR_START ||
112 (signum < SIGRTMIN || signum > SIGRTMAX))
113 return -EINVAL;
114
115 if (!current->thread.ri_cb) {
116 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
117 if (!cb)
118 return -ENOMEM;
119 } else {
120 cb = current->thread.ri_cb;
121 memset(cb, 0, sizeof(*cb));
122 }
123
124 init_runtime_instr_cb(cb);
125 current->thread.ri_signum = signum;
126
127 /* now load the control block to make it available */
128 preempt_disable();
129 current->thread.ri_cb = cb;
130 load_runtime_instr_cb(cb);
131 preempt_enable();
132 return 0;
133}
134
135static int __init runtime_instr_init(void)
136{
137 int rc;
138
139 if (!runtime_instr_avail())
140 return 0;
141
142 measurement_alert_subclass_register();
143 rc = register_external_interrupt(0x1407, runtime_instr_int_handler);
144 if (rc)
145 measurement_alert_subclass_unregister();
146 else
147 pr_info("Runtime instrumentation facility initialized\n");
148 return rc;
149}
150device_initcall(runtime_instr_init);
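For context, the deleted runtime_instr.c wires the s390_runtime_instr syscall to CPU-MF facility 64: callers pass a start/stop command plus a real-time signal used for buffer-full notifications. A hedged userspace sketch built from the constants visible in the removed code; the command values and the syscall number are assumptions to check against unistd.h and asm/runtime_instr.h:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define S390_RUNTIME_INSTR_START 0x1	/* assumed command values */
#define S390_RUNTIME_INSTR_STOP  0x2
#ifndef __NR_s390_runtime_instr
#define __NR_s390_runtime_instr  342	/* placeholder, verify locally */
#endif

int main(void)
{
	int sig = SIGRTMIN;	/* must lie in SIGRTMIN..SIGRTMAX */

	if (syscall(__NR_s390_runtime_instr,
		    S390_RUNTIME_INSTR_START, sig) == -1) {
		perror("start");	/* EOPNOTSUPP without facility 64 */
		return 1;
	}
	/* ... instrumented work would run here ... */
	syscall(__NR_s390_runtime_instr, S390_RUNTIME_INSTR_STOP, 0);
	return 0;
}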
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 9bdbcef1da9..57b536649b0 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -8,5 +8,3 @@ EXPORT_SYMBOL(_mcount);
8#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 8#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
9EXPORT_SYMBOL(sie64a); 9EXPORT_SYMBOL(sie64a);
10#endif 10#endif
11EXPORT_SYMBOL(memcpy);
12EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index b6506ee32a3..95792d846bb 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * Mini SCLP driver. 2 * Mini SCLP driver.
3 * 3 *
4 * Copyright IBM Corp. 2004, 2009 4 * Copyright IBM Corp. 2004,2009
5 * 5 *
6 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>, 6 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>, 7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
@@ -44,12 +44,6 @@ _sclp_wait_int:
44#endif 44#endif
45 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) 45 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
46 mvc 0(16,%r8),0(%r9) 46 mvc 0(16,%r8),0(%r9)
47#ifdef CONFIG_64BIT
48 epsw %r6,%r7 # set current addressing mode
49 nill %r6,0x1 # in new psw (31 or 64 bit mode)
50 nilh %r7,0x8000
51 stm %r6,%r7,0(%r8)
52#endif
53 lhi %r6,0x0200 # cr mask for ext int (cr0.54) 47 lhi %r6,0x0200 # cr mask for ext int (cr0.54)
54 ltr %r2,%r2 48 ltr %r2,%r2
55 jz .LsetctS1 49 jz .LsetctS1
@@ -93,7 +87,7 @@ _sclp_wait_int:
93 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 87 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
94#ifdef CONFIG_64BIT 88#ifdef CONFIG_64BIT
95.LextpswS1_64: 89.LextpswS1_64:
96 .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit 90 .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit
97#endif 91#endif
98.LwaitpswS1: 92.LwaitpswS1:
99 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int 93 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5360de85ec..7b371c37061 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * arch/s390/kernel/setup.c
3 *
2 * S390 version 4 * S390 version
3 * Copyright IBM Corp. 1999, 2012 5 * Copyright (C) IBM Corp. 1999,2010
4 * Author(s): Hartmut Penner (hp@de.ibm.com), 6 * Author(s): Hartmut Penner (hp@de.ibm.com),
5 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
6 * 8 *
@@ -16,10 +18,9 @@
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 18#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17 19
18#include <linux/errno.h> 20#include <linux/errno.h>
19#include <linux/export.h> 21#include <linux/module.h>
20#include <linux/sched.h> 22#include <linux/sched.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/memblock.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
24#include <linux/stddef.h> 25#include <linux/stddef.h>
25#include <linux/unistd.h> 26#include <linux/unistd.h>
@@ -41,14 +42,10 @@
41#include <linux/reboot.h> 42#include <linux/reboot.h>
42#include <linux/topology.h> 43#include <linux/topology.h>
43#include <linux/ftrace.h> 44#include <linux/ftrace.h>
44#include <linux/kexec.h>
45#include <linux/crash_dump.h>
46#include <linux/memory.h>
47#include <linux/compat.h>
48 45
49#include <asm/ipl.h> 46#include <asm/ipl.h>
50#include <asm/uaccess.h> 47#include <asm/uaccess.h>
51#include <asm/facility.h> 48#include <asm/system.h>
52#include <asm/smp.h> 49#include <asm/smp.h>
53#include <asm/mmu_context.h> 50#include <asm/mmu_context.h>
54#include <asm/cpcmd.h> 51#include <asm/cpcmd.h>
@@ -58,17 +55,14 @@
58#include <asm/ptrace.h> 55#include <asm/ptrace.h>
59#include <asm/sections.h> 56#include <asm/sections.h>
60#include <asm/ebcdic.h> 57#include <asm/ebcdic.h>
58#include <asm/compat.h>
61#include <asm/kvm_virtio.h> 59#include <asm/kvm_virtio.h>
62#include <asm/diag.h>
63#include <asm/os_info.h>
64#include <asm/sclp.h>
65#include "entry.h"
66 60
67long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | 61long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
68 PSW_MASK_EA | PSW_MASK_BA; 62 PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
69long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | 63long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
70 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | 64 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
71 PSW_MASK_PSTATE | PSW_ASC_HOME; 65 PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
72 66
73/* 67/*
74 * User copy operations. 68 * User copy operations.
@@ -96,20 +90,6 @@ struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
96int __initdata memory_end_set; 90int __initdata memory_end_set;
97unsigned long __initdata memory_end; 91unsigned long __initdata memory_end;
98 92
99unsigned long VMALLOC_START;
100EXPORT_SYMBOL(VMALLOC_START);
101
102unsigned long VMALLOC_END;
103EXPORT_SYMBOL(VMALLOC_END);
104
105struct page *vmemmap;
106EXPORT_SYMBOL(vmemmap);
107
108#ifdef CONFIG_64BIT
109unsigned long MODULES_VADDR;
110unsigned long MODULES_END;
111#endif
112
113/* An array with a pointer to the lowcore of every CPU. */ 93/* An array with a pointer to the lowcore of every CPU. */
114struct _lowcore *lowcore_ptr[NR_CPUS]; 94struct _lowcore *lowcore_ptr[NR_CPUS];
115EXPORT_SYMBOL(lowcore_ptr); 95EXPORT_SYMBOL(lowcore_ptr);
@@ -142,14 +122,9 @@ __setup("condev=", condev_setup);
142 122
143static void __init set_preferred_console(void) 123static void __init set_preferred_console(void)
144{ 124{
145 if (MACHINE_IS_KVM) { 125 if (MACHINE_IS_KVM)
146 if (sclp_has_vt220()) 126 add_preferred_console("hvc", 0, NULL);
147 add_preferred_console("ttyS", 1, NULL); 127 else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
148 else if (sclp_has_linemode())
149 add_preferred_console("ttyS", 0, NULL);
150 else
151 add_preferred_console("hvc", 0, NULL);
152 } else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
153 add_preferred_console("ttyS", 0, NULL); 128 add_preferred_console("ttyS", 0, NULL);
154 else if (CONSOLE_IS_3270) 129 else if (CONSOLE_IS_3270)
155 add_preferred_console("tty3270", 0, NULL); 130 add_preferred_console("tty3270", 0, NULL);
@@ -232,8 +207,6 @@ static void __init setup_zfcpdump(unsigned int console_devno)
232 207
233 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 208 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
234 return; 209 return;
235 if (OLDMEM_BASE)
236 return;
237 if (console_devno != -1) 210 if (console_devno != -1)
238 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", 211 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
239 ipl_info.data.fcp.dev_id.devno, console_devno); 212 ipl_info.data.fcp.dev_id.devno, console_devno);
@@ -289,7 +262,6 @@ void machine_power_off(void)
289 * Dummy power off function. 262 * Dummy power off function.
290 */ 263 */
291void (*pm_power_off)(void) = machine_power_off; 264void (*pm_power_off)(void) = machine_power_off;
292EXPORT_SYMBOL_GPL(pm_power_off);
293 265
294static int __init early_parse_mem(char *p) 266static int __init early_parse_mem(char *p)
295{ 267{
@@ -299,55 +271,71 @@ static int __init early_parse_mem(char *p)
299} 271}
300early_param("mem", early_parse_mem); 272early_param("mem", early_parse_mem);
301 273
302static int __init parse_vmalloc(char *arg) 274unsigned int user_mode = HOME_SPACE_MODE;
303{ 275EXPORT_SYMBOL_GPL(user_mode);
304 if (!arg)
305 return -EINVAL;
306 VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
307 return 0;
308}
309early_param("vmalloc", parse_vmalloc);
310 276
311unsigned int s390_user_mode = PRIMARY_SPACE_MODE; 277static int set_amode_and_uaccess(unsigned long user_amode,
312EXPORT_SYMBOL_GPL(s390_user_mode); 278 unsigned long user32_amode)
313
314static void __init set_user_mode_primary(void)
315{ 279{
316 psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; 280 psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
317 psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; 281 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
282 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
318#ifdef CONFIG_COMPAT 283#ifdef CONFIG_COMPAT
319 psw32_user_bits = 284 psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
320 (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; 285 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
286 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
287 psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
288 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
289 PSW32_MASK_PSTATE;
321#endif 290#endif
322 uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt; 291 psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
292 PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
293
294 if (MACHINE_HAS_MVCOS) {
295 memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
296 return 1;
297 } else {
298 memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
299 return 0;
300 }
323} 301}
324 302
303/*
304 * Switch kernel/user addressing modes?
305 */
306static int __init early_parse_switch_amode(char *p)
307{
308 user_mode = PRIMARY_SPACE_MODE;
309 return 0;
310}
311early_param("switch_amode", early_parse_switch_amode);
312
325static int __init early_parse_user_mode(char *p) 313static int __init early_parse_user_mode(char *p)
326{ 314{
327 if (p && strcmp(p, "primary") == 0) 315 if (p && strcmp(p, "primary") == 0)
328 s390_user_mode = PRIMARY_SPACE_MODE; 316 user_mode = PRIMARY_SPACE_MODE;
329 else if (!p || strcmp(p, "home") == 0) 317 else if (!p || strcmp(p, "home") == 0)
330 s390_user_mode = HOME_SPACE_MODE; 318 user_mode = HOME_SPACE_MODE;
331 else 319 else
332 return 1; 320 return 1;
333 return 0; 321 return 0;
334} 322}
335early_param("user_mode", early_parse_user_mode); 323early_param("user_mode", early_parse_user_mode);
336 324
337static void __init setup_addressing_mode(void) 325static void setup_addressing_mode(void)
338{ 326{
339 if (s390_user_mode != PRIMARY_SPACE_MODE) 327 if (user_mode == PRIMARY_SPACE_MODE) {
340 return; 328 if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
341 set_user_mode_primary(); 329 pr_info("Address spaces switched, "
342 if (MACHINE_HAS_MVCOS) 330 "mvcos available\n");
343 pr_info("Address spaces switched, mvcos available\n"); 331 else
344 else 332 pr_info("Address spaces switched, "
345 pr_info("Address spaces switched, mvcos not available\n"); 333 "mvcos not available\n");
334 }
346} 335}
347 336
348void *restart_stack __attribute__((__section__(".data"))); 337static void __init
349 338setup_lowcore(void)
350static void __init setup_lowcore(void)
351{ 339{
352 struct _lowcore *lc; 340 struct _lowcore *lc;
353 341
@@ -356,25 +344,24 @@ static void __init setup_lowcore(void)
356 */ 344 */
357 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); 345 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
358 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 346 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
359 lc->restart_psw.mask = psw_kernel_bits; 347 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
360 lc->restart_psw.addr = 348 lc->restart_psw.addr =
361 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 349 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
362 lc->external_new_psw.mask = psw_kernel_bits | 350 if (user_mode != HOME_SPACE_MODE)
363 PSW_MASK_DAT | PSW_MASK_MCHECK; 351 lc->restart_psw.mask |= PSW_ASC_HOME;
352 lc->external_new_psw.mask = psw_kernel_bits;
364 lc->external_new_psw.addr = 353 lc->external_new_psw.addr =
365 PSW_ADDR_AMODE | (unsigned long) ext_int_handler; 354 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
366 lc->svc_new_psw.mask = psw_kernel_bits | 355 lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
367 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
368 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; 356 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
369 lc->program_new_psw.mask = psw_kernel_bits | 357 lc->program_new_psw.mask = psw_kernel_bits;
370 PSW_MASK_DAT | PSW_MASK_MCHECK;
371 lc->program_new_psw.addr = 358 lc->program_new_psw.addr =
372 PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; 359 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
373 lc->mcck_new_psw.mask = psw_kernel_bits; 360 lc->mcck_new_psw.mask =
361 psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
374 lc->mcck_new_psw.addr = 362 lc->mcck_new_psw.addr =
375 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 363 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
376 lc->io_new_psw.mask = psw_kernel_bits | 364 lc->io_new_psw.mask = psw_kernel_bits;
377 PSW_MASK_DAT | PSW_MASK_MCHECK;
378 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 365 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
379 lc->clock_comparator = -1ULL; 366 lc->clock_comparator = -1ULL;
380 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 367 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
@@ -396,6 +383,7 @@ static void __init setup_lowcore(void)
396 __ctl_set_bit(14, 29); 383 __ctl_set_bit(14, 29);
397 } 384 }
398#else 385#else
386 lc->cmf_hpp = -1ULL;
399 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 387 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
400#endif 388#endif
401 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 389 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -407,27 +395,6 @@ static void __init setup_lowcore(void)
407 lc->last_update_timer = S390_lowcore.last_update_timer; 395 lc->last_update_timer = S390_lowcore.last_update_timer;
408 lc->last_update_clock = S390_lowcore.last_update_clock; 396 lc->last_update_clock = S390_lowcore.last_update_clock;
409 lc->ftrace_func = S390_lowcore.ftrace_func; 397 lc->ftrace_func = S390_lowcore.ftrace_func;
410
411 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
412 restart_stack += ASYNC_SIZE;
413
414 /*
415 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
 416 * restart data to the absolute zero lowcore. This is necessary if
417 * PSW restart is done on an offline CPU that has lowcore zero.
418 */
419 lc->restart_stack = (unsigned long) restart_stack;
420 lc->restart_fn = (unsigned long) do_restart;
421 lc->restart_data = 0;
422 lc->restart_source = -1UL;
423
424 /* Setup absolute zero lowcore */
425 mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
426 mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
427 mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
428 mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
429 mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
430
431 set_prefix((u32)(unsigned long) lc); 398 set_prefix((u32)(unsigned long) lc);
432 lowcore_ptr[0] = lc; 399 lowcore_ptr[0] = lc;
433} 400}
@@ -468,14 +435,10 @@ static void __init setup_resources(void)
468 for (i = 0; i < MEMORY_CHUNKS; i++) { 435 for (i = 0; i < MEMORY_CHUNKS; i++) {
469 if (!memory_chunk[i].size) 436 if (!memory_chunk[i].size)
470 continue; 437 continue;
471 if (memory_chunk[i].type == CHUNK_OLDMEM ||
472 memory_chunk[i].type == CHUNK_CRASHK)
473 continue;
474 res = alloc_bootmem_low(sizeof(*res)); 438 res = alloc_bootmem_low(sizeof(*res));
475 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 439 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
476 switch (memory_chunk[i].type) { 440 switch (memory_chunk[i].type) {
477 case CHUNK_READ_WRITE: 441 case CHUNK_READ_WRITE:
478 case CHUNK_CRASHK:
479 res->name = "System RAM"; 442 res->name = "System RAM";
480 break; 443 break;
481 case CHUNK_READ_ONLY: 444 case CHUNK_READ_ONLY:
@@ -512,19 +475,22 @@ EXPORT_SYMBOL_GPL(real_memory_size);
512 475
513static void __init setup_memory_end(void) 476static void __init setup_memory_end(void)
514{ 477{
515 unsigned long vmax, vmalloc_size, tmp; 478 unsigned long memory_size;
479 unsigned long max_mem;
516 int i; 480 int i;
517 481
518
519#ifdef CONFIG_ZFCPDUMP 482#ifdef CONFIG_ZFCPDUMP
520 if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) { 483 if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
521 memory_end = ZFCPDUMP_HSA_SIZE; 484 memory_end = ZFCPDUMP_HSA_SIZE;
522 memory_end_set = 1; 485 memory_end_set = 1;
523 } 486 }
524#endif 487#endif
525 real_memory_size = 0; 488 memory_size = 0;
526 memory_end &= PAGE_MASK; 489 memory_end &= PAGE_MASK;
527 490
491 max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
492 memory_end = min(max_mem, memory_end);
493
528 /* 494 /*
529 * Make sure all chunks are MAX_ORDER aligned so we don't need the 495 * Make sure all chunks are MAX_ORDER aligned so we don't need the
530 * extra checks that HOLES_IN_ZONE would require. 496 * extra checks that HOLES_IN_ZONE would require.
@@ -544,241 +510,48 @@ static void __init setup_memory_end(void)
544 chunk->addr = start; 510 chunk->addr = start;
545 chunk->size = end - start; 511 chunk->size = end - start;
546 } 512 }
547 real_memory_size = max(real_memory_size,
548 chunk->addr + chunk->size);
549 } 513 }
550 514
551 /* Choose kernel address space layout: 2, 3, or 4 levels. */
552#ifdef CONFIG_64BIT
553 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
554 tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
555 tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
556 if (tmp <= (1UL << 42))
557 vmax = 1UL << 42; /* 3-level kernel page table */
558 else
559 vmax = 1UL << 53; /* 4-level kernel page table */
560 /* module area is at the end of the kernel address space. */
561 MODULES_END = vmax;
562 MODULES_VADDR = MODULES_END - MODULES_LEN;
563 VMALLOC_END = MODULES_VADDR;
564#else
565 vmalloc_size = VMALLOC_END ?: 96UL << 20;
566 vmax = 1UL << 31; /* 2-level kernel page table */
567 /* vmalloc area is at the end of the kernel address space. */
568 VMALLOC_END = vmax;
569#endif
570 VMALLOC_START = vmax - vmalloc_size;
571
572 /* Split remaining virtual space between 1:1 mapping & vmemmap array */
573 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
574 tmp = VMALLOC_START - tmp * sizeof(struct page);
575 tmp &= ~((vmax >> 11) - 1); /* align to page table level */
576 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
577 vmemmap = (struct page *) tmp;
578
579 /* Take care that memory_end is set and <= vmemmap */
580 memory_end = min(memory_end ?: real_memory_size, tmp);
581
582 /* Fixup memory chunk array to fit into 0..memory_end */
583 for (i = 0; i < MEMORY_CHUNKS; i++) { 515 for (i = 0; i < MEMORY_CHUNKS; i++) {
584 struct mem_chunk *chunk = &memory_chunk[i]; 516 struct mem_chunk *chunk = &memory_chunk[i];
585 517
586 if (chunk->addr >= memory_end) { 518 real_memory_size = max(real_memory_size,
519 chunk->addr + chunk->size);
520 if (chunk->addr >= max_mem) {
587 memset(chunk, 0, sizeof(*chunk)); 521 memset(chunk, 0, sizeof(*chunk));
588 continue; 522 continue;
589 } 523 }
590 if (chunk->addr + chunk->size > memory_end) 524 if (chunk->addr + chunk->size > max_mem)
591 chunk->size = memory_end - chunk->addr; 525 chunk->size = max_mem - chunk->addr;
526 memory_size = max(memory_size, chunk->addr + chunk->size);
592 } 527 }
528 if (!memory_end)
529 memory_end = memory_size;
593} 530}
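The removed (left-hand) side of this hunk sized the kernel address space by asking whether RAM, its struct-page array, and the vmalloc area fit below 2^42, choosing a 3-level kernel page table if so and a 4-level one otherwise. A standalone sketch of that decision; sizeof(struct page) is assumed to be 64 bytes here:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define STRUCT_PAGE_SZ	64UL	/* assumed sizeof(struct page) */

static unsigned long choose_vmax(unsigned long mem_bytes,
				 unsigned long vmalloc_size)
{
	unsigned long need = mem_bytes / PAGE_SIZE;

	need = need * (STRUCT_PAGE_SZ + PAGE_SIZE) + vmalloc_size;
	return need <= (1UL << 42) ? (1UL << 42)	/* 3 levels */
				   : (1UL << 53);	/* 4 levels */
}

int main(void)
{
	/* e.g. 64 GiB of RAM with a 128 GiB vmalloc area */
	printf("vmax = %#lx\n", choose_vmax(64UL << 30, 128UL << 30));
	return 0;
}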
594 531
595static void __init setup_vmcoreinfo(void) 532void *restart_stack __attribute__((__section__(".data")));
596{
597 mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
598}
599
600#ifdef CONFIG_CRASH_DUMP
601 533
602/* 534/*
603 * Find suitable location for crashkernel memory 535 * Setup new PSW and allocate stack for PSW restart interrupt
604 */ 536 */
605static unsigned long __init find_crash_base(unsigned long crash_size, 537static void __init setup_restart_psw(void)
606 char **msg)
607{ 538{
608 unsigned long crash_base; 539 psw_t psw;
609 struct mem_chunk *chunk;
610 int i;
611
612 if (memory_chunk[0].size < crash_size) {
613 *msg = "first memory chunk must be at least crashkernel size";
614 return 0;
615 }
616 if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
617 return OLDMEM_BASE;
618
619 for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
620 chunk = &memory_chunk[i];
621 if (chunk->size == 0)
622 continue;
623 if (chunk->type != CHUNK_READ_WRITE)
624 continue;
625 if (chunk->size < crash_size)
626 continue;
627 crash_base = (chunk->addr + chunk->size) - crash_size;
628 if (crash_base < crash_size)
629 continue;
630 if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
631 continue;
632 if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
633 continue;
634 return crash_base;
635 }
636 *msg = "no suitable area found";
637 return 0;
638}
639 540
640/* 541 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
641 * Check if crash_base and crash_size is valid 542 restart_stack += ASYNC_SIZE;
642 */
643static int __init verify_crash_base(unsigned long crash_base,
644 unsigned long crash_size,
645 char **msg)
646{
647 struct mem_chunk *chunk;
648 int i;
649 543
650 /* 544 /*
 651 * Because we do the swap to zero, we must have at least 'crash_size' 545 * Setup restart PSW for absolute zero lowcore. This is necessary
652 * bytes free space before crash_base 546 * if PSW restart is done on an offline CPU that has lowcore zero
653 */ 547 */
654 if (crash_size > crash_base) { 548 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
655 *msg = "crashkernel offset must be greater than size"; 549 psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
656 return -EINVAL; 550 copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
657 }
658
659 /* First memory chunk must be at least crash_size */
660 if (memory_chunk[0].size < crash_size) {
661 *msg = "first memory chunk must be at least crashkernel size";
662 return -EINVAL;
663 }
664 /* Check if we fit into the respective memory chunk */
665 for (i = 0; i < MEMORY_CHUNKS; i++) {
666 chunk = &memory_chunk[i];
667 if (chunk->size == 0)
668 continue;
669 if (crash_base < chunk->addr)
670 continue;
671 if (crash_base >= chunk->addr + chunk->size)
672 continue;
673 /* we have found the memory chunk */
674 if (crash_base + crash_size > chunk->addr + chunk->size) {
675 *msg = "selected memory chunk is too small for "
676 "crashkernel memory";
677 return -EINVAL;
678 }
679 return 0;
680 }
681 *msg = "invalid memory range specified";
682 return -EINVAL;
683}
684
685/*
686 * Reserve kdump memory by creating a memory hole in the mem_chunk array
687 */
688static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
689 int type)
690{
691 create_mem_hole(memory_chunk, addr, size, type);
692} 551}
693 552
694/* 553static void __init
695 * When kdump is enabled, we have to ensure that no memory from 554setup_memory(void)
696 * the area [0 - crashkernel memory size] and
697 * [crashk_res.start - crashk_res.end] is set offline.
698 */
699static int kdump_mem_notifier(struct notifier_block *nb,
700 unsigned long action, void *data)
701{
702 struct memory_notify *arg = data;
703
704 if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
705 return NOTIFY_BAD;
706 if (arg->start_pfn > PFN_DOWN(crashk_res.end))
707 return NOTIFY_OK;
708 if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
709 return NOTIFY_OK;
710 return NOTIFY_BAD;
711}
712
713static struct notifier_block kdump_mem_nb = {
714 .notifier_call = kdump_mem_notifier,
715};
716
717#endif
718
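kdump_mem_notifier() above rejects any hotplug range that intersects either the low [0, crashkernel size) area or the crashk_res region. The test reduces to a plain half-open interval overlap check, sketched here with illustrative names:

/* Two half-open ranges [a, a+alen) and [b, b+blen) overlap iff: */
static int ranges_overlap(unsigned long a, unsigned long alen,
			  unsigned long b, unsigned long blen)
{
	return a < b + blen && b < a + alen;
}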
719/*
720 * Make sure that oldmem, where the dump is stored, is protected
721 */
722static void reserve_oldmem(void)
723{
724#ifdef CONFIG_CRASH_DUMP
725 if (!OLDMEM_BASE)
726 return;
727
728 reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
729 reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
730 CHUNK_OLDMEM);
731 if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
732 saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
733 else
734 saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
735#endif
736}
737
738/*
739 * Reserve memory for kdump kernel to be loaded with kexec
740 */
741static void __init reserve_crashkernel(void)
742{
743#ifdef CONFIG_CRASH_DUMP
744 unsigned long long crash_base, crash_size;
745 char *msg = NULL;
746 int rc;
747
748 rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
749 &crash_base);
750 if (rc || crash_size == 0)
751 return;
752 crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
753 crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
754 if (register_memory_notifier(&kdump_mem_nb))
755 return;
756 if (!crash_base)
757 crash_base = find_crash_base(crash_size, &msg);
758 if (!crash_base) {
759 pr_info("crashkernel reservation failed: %s\n", msg);
760 unregister_memory_notifier(&kdump_mem_nb);
761 return;
762 }
763 if (verify_crash_base(crash_base, crash_size, &msg)) {
764 pr_info("crashkernel reservation failed: %s\n", msg);
765 unregister_memory_notifier(&kdump_mem_nb);
766 return;
767 }
768 if (!OLDMEM_BASE && MACHINE_IS_VM)
769 diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
770 crashk_res.start = crash_base;
771 crashk_res.end = crash_base + crash_size - 1;
772 insert_resource(&iomem_resource, &crashk_res);
773 reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
774 pr_info("Reserving %lluMB of memory at %lluMB "
775 "for crashkernel (System RAM: %luMB)\n",
776 crash_size >> 20, crash_base >> 20, memory_end >> 20);
777 os_info_crashkernel_add(crash_base, crash_size);
778#endif
779}
780
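reserve_crashkernel() rounds both base and size with ALIGN() before registering the resource. A hedged reminder of the power-of-two round-up it relies on (ALIGN_UP is an illustrative macro, not the kernel's):

#include <assert.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static void align_demo(void)
{
	/* e.g. rounding up to a 4 KiB-style alignment boundary */
	assert(ALIGN_UP(0x12345UL, 0x1000UL) == 0x13000UL);
}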
781static void __init setup_memory(void)
782{ 555{
783 unsigned long bootmap_size; 556 unsigned long bootmap_size;
784 unsigned long start_pfn, end_pfn; 557 unsigned long start_pfn, end_pfn;
@@ -807,14 +580,6 @@ static void __init setup_memory(void)
807 if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { 580 if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
808 start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; 581 start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
809 582
810#ifdef CONFIG_CRASH_DUMP
811 if (OLDMEM_BASE) {
812 /* Move initrd behind kdump oldmem */
813 if (start + INITRD_SIZE > OLDMEM_BASE &&
814 start < OLDMEM_BASE + OLDMEM_SIZE)
815 start = OLDMEM_BASE + OLDMEM_SIZE;
816 }
817#endif
818 if (start + INITRD_SIZE > memory_end) { 583 if (start + INITRD_SIZE > memory_end) {
819 pr_err("initrd extends beyond end of " 584 pr_err("initrd extends beyond end of "
820 "memory (0x%08lx > 0x%08lx) " 585 "memory (0x%08lx > 0x%08lx) "
@@ -845,18 +610,18 @@ static void __init setup_memory(void)
845 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 610 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
846 unsigned long start_chunk, end_chunk, pfn; 611 unsigned long start_chunk, end_chunk, pfn;
847 612
848 if (memory_chunk[i].type != CHUNK_READ_WRITE && 613 if (memory_chunk[i].type != CHUNK_READ_WRITE)
849 memory_chunk[i].type != CHUNK_CRASHK)
850 continue; 614 continue;
851 start_chunk = PFN_DOWN(memory_chunk[i].addr); 615 start_chunk = PFN_DOWN(memory_chunk[i].addr);
852 end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); 616 end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
853 end_chunk = min(end_chunk, end_pfn); 617 end_chunk = min(end_chunk, end_pfn);
854 if (start_chunk >= end_chunk) 618 if (start_chunk >= end_chunk)
855 continue; 619 continue;
856 memblock_add_node(PFN_PHYS(start_chunk), 620 add_active_range(0, start_chunk, end_chunk);
857 PFN_PHYS(end_chunk - start_chunk), 0);
858 pfn = max(start_chunk, start_pfn); 621 pfn = max(start_chunk, start_pfn);
859 storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk)); 622 for (; pfn < end_chunk; pfn++)
623 page_set_storage_key(PFN_PHYS(pfn),
624 PAGE_DEFAULT_KEY, 0);
860 } 625 }
861 626
862 psw_set_key(PAGE_DEFAULT_KEY); 627 psw_set_key(PAGE_DEFAULT_KEY);
@@ -879,15 +644,6 @@ static void __init setup_memory(void)
879 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, 644 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
880 BOOTMEM_DEFAULT); 645 BOOTMEM_DEFAULT);
881 646
882#ifdef CONFIG_CRASH_DUMP
883 if (crashk_res.start)
884 reserve_bootmem(crashk_res.start,
885 crashk_res.end - crashk_res.start + 1,
886 BOOTMEM_DEFAULT);
887 if (is_kdump_kernel())
888 reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
889 PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
890#endif
891#ifdef CONFIG_BLK_DEV_INITRD 647#ifdef CONFIG_BLK_DEV_INITRD
892 if (INITRD_START && INITRD_SIZE) { 648 if (INITRD_START && INITRD_SIZE) {
893 if (INITRD_START + INITRD_SIZE <= memory_end) { 649 if (INITRD_START + INITRD_SIZE <= memory_end) {
@@ -962,20 +718,12 @@ static void __init setup_hwcaps(void)
962 if (MACHINE_HAS_HPAGE) 718 if (MACHINE_HAS_HPAGE)
963 elf_hwcap |= HWCAP_S390_HPAGE; 719 elf_hwcap |= HWCAP_S390_HPAGE;
964 720
965#if defined(CONFIG_64BIT)
966 /* 721 /*
967 * 64-bit register support for 31-bit processes 722 * 64-bit register support for 31-bit processes
968 * HWCAP_S390_HIGH_GPRS is bit 9. 723 * HWCAP_S390_HIGH_GPRS is bit 9.
969 */ 724 */
970 elf_hwcap |= HWCAP_S390_HIGH_GPRS; 725 elf_hwcap |= HWCAP_S390_HIGH_GPRS;
971 726
972 /*
973 * Transactional execution support HWCAP_S390_TE is bit 10.
974 */
975 if (test_facility(50) && test_facility(73))
976 elf_hwcap |= HWCAP_S390_TE;
977#endif
978
979 get_cpu_id(&cpu_id); 727 get_cpu_id(&cpu_id);
980 switch (cpu_id.machine) { 728 switch (cpu_id.machine) {
981 case 0x9672: 729 case 0x9672:
@@ -1007,9 +755,6 @@ static void __init setup_hwcaps(void)
1007 case 0x2818: 755 case 0x2818:
1008 strcpy(elf_platform, "z196"); 756 strcpy(elf_platform, "z196");
1009 break; 757 break;
1010 case 0x2827:
1011 strcpy(elf_platform, "zEC12");
1012 break;
1013 } 758 }
1014} 759}
1015 760
@@ -1018,7 +763,8 @@ static void __init setup_hwcaps(void)
1018 * was printed. 763 * was printed.
1019 */ 764 */
1020 765
1021void __init setup_arch(char **cmdline_p) 766void __init
767setup_arch(char **cmdline_p)
1022{ 768{
1023 /* 769 /*
1024 * print what head.S has found out about the machine 770 * print what head.S has found out about the machine
@@ -1063,15 +809,12 @@ void __init setup_arch(char **cmdline_p)
1063 809
1064 parse_early_param(); 810 parse_early_param();
1065 811
1066 os_info_init();
1067 setup_ipl(); 812 setup_ipl();
1068 setup_memory_end(); 813 setup_memory_end();
1069 setup_addressing_mode(); 814 setup_addressing_mode();
1070 reserve_oldmem();
1071 reserve_crashkernel();
1072 setup_memory(); 815 setup_memory();
1073 setup_resources(); 816 setup_resources();
1074 setup_vmcoreinfo(); 817 setup_restart_psw();
1075 setup_lowcore(); 818 setup_lowcore();
1076 819
1077 cpu_init(); 820 cpu_init();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index c3ff70a7b24..9a40e1cc5ec 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 1999, 2006 2 * arch/s390/kernel/signal.c
3 *
4 * Copyright (C) IBM Corp. 1999,2006
3 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) 5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
4 * 6 *
5 * Based on Intel version 7 * Based on Intel version
@@ -28,9 +30,11 @@
28#include <asm/ucontext.h> 30#include <asm/ucontext.h>
29#include <asm/uaccess.h> 31#include <asm/uaccess.h>
30#include <asm/lowcore.h> 32#include <asm/lowcore.h>
31#include <asm/switch_to.h>
32#include "entry.h" 33#include "entry.h"
33 34
35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
36
37
34typedef struct 38typedef struct
35{ 39{
36 __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; 40 __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
@@ -54,8 +58,15 @@ typedef struct
54SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask) 58SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
55{ 59{
56 sigset_t blocked; 60 sigset_t blocked;
61
62 current->saved_sigmask = current->blocked;
63 mask &= _BLOCKABLE;
57 siginitset(&blocked, mask); 64 siginitset(&blocked, mask);
58 return sigsuspend(&blocked); 65 set_current_blocked(&blocked);
66 set_current_state(TASK_INTERRUPTIBLE);
67 schedule();
68 set_restore_sigmask();
69 return -ERESTARTNOHAND;
59} 70}
60 71
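The right-hand side restores the classic sigsuspend flow: save the old mask, install the new one, sleep interruptibly, and let the signal path put the saved mask back via TIF_RESTORE_SIGMASK. A userspace analogue of the same contract, using the POSIX API rather than the kernel internals above:

#include <signal.h>

static void on_sigint(int sig) { (void)sig; }	/* just interrupt the wait */

static void wait_for_sigint(void)
{
	struct sigaction sa = { .sa_handler = on_sigint };
	sigset_t wait_mask;

	sigaction(SIGINT, &sa, NULL);
	sigfillset(&wait_mask);
	sigdelset(&wait_mask, SIGINT);	/* only SIGINT may wake us */
	sigsuspend(&wait_mask);		/* returns -1/EINTR after the handler */
	/* the caller's original signal mask is restored on return */
}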
61SYSCALL_DEFINE3(sigaction, int, sig, const struct old_sigaction __user *, act, 72SYSCALL_DEFINE3(sigaction, int, sig, const struct old_sigaction __user *, act,
@@ -105,8 +116,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
105 116
106 /* Copy a 'clean' PSW mask to the user to avoid leaking 117 /* Copy a 'clean' PSW mask to the user to avoid leaking
107 information about whether PER is currently on. */ 118 information about whether PER is currently on. */
108 user_sregs.regs.psw.mask = psw_user_bits | 119 user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
109 (regs->psw.mask & PSW_MASK_USER);
110 user_sregs.regs.psw.addr = regs->psw.addr; 120 user_sregs.regs.psw.addr = regs->psw.addr;
111 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 121 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
112 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 122 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
@@ -133,17 +143,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
133 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); 143 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
134 if (err) 144 if (err)
135 return err; 145 return err;
136 /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ 146 regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask,
137 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 147 user_sregs.regs.psw.mask);
138 (user_sregs.regs.psw.mask & PSW_MASK_USER); 148 regs->psw.addr = PSW_ADDR_AMODE | user_sregs.regs.psw.addr;
139 /* Check for invalid user address space control. */
140 if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
141 regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
142 (regs->psw.mask & ~PSW_MASK_ASC);
143 /* Check for invalid amode */
144 if (regs->psw.mask & PSW_MASK_EA)
145 regs->psw.mask |= PSW_MASK_BA;
146 regs->psw.addr = user_sregs.regs.psw.addr;
147 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); 149 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
148 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, 150 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
149 sizeof(sregs->regs.acrs)); 151 sizeof(sregs->regs.acrs));
@@ -154,7 +156,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
154 current->thread.fp_regs.fpc &= FPC_VALID_MASK; 156 current->thread.fp_regs.fpc &= FPC_VALID_MASK;
155 157
156 restore_fp_regs(&current->thread.fp_regs); 158 restore_fp_regs(&current->thread.fp_regs);
157 clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ 159 regs->svcnr = 0; /* disable syscall checks */
158 return 0; 160 return 0;
159} 161}
160 162
@@ -168,6 +170,7 @@ SYSCALL_DEFINE0(sigreturn)
168 goto badframe; 170 goto badframe;
169 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) 171 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
170 goto badframe; 172 goto badframe;
173 sigdelsetmask(&set, ~_BLOCKABLE);
171 set_current_blocked(&set); 174 set_current_blocked(&set);
172 if (restore_sigregs(regs, &frame->sregs)) 175 if (restore_sigregs(regs, &frame->sregs))
173 goto badframe; 176 goto badframe;
@@ -187,6 +190,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
187 goto badframe; 190 goto badframe;
188 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set))) 191 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
189 goto badframe; 192 goto badframe;
193 sigdelsetmask(&set, ~_BLOCKABLE);
190 set_current_blocked(&set); 194 set_current_blocked(&set);
191 if (restore_sigregs(regs, &frame->uc.uc_mcontext)) 195 if (restore_sigregs(regs, &frame->uc.uc_mcontext))
192 goto badframe; 196 goto badframe;
@@ -225,6 +229,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
225 sp = current->sas_ss_sp + current->sas_ss_size; 229 sp = current->sas_ss_sp + current->sas_ss_size;
226 } 230 }
227 231
232 /* This is the legacy signal stack switching. */
233 else if (!user_mode(regs) &&
234 !(ka->sa.sa_flags & SA_RESTORER) &&
235 ka->sa.sa_restorer) {
236 sp = (unsigned long) ka->sa.sa_restorer;
237 }
238
228 return (void __user *)((sp - frame_size) & -8ul); 239 return (void __user *)((sp - frame_size) & -8ul);
229} 240}
230 241
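get_sigframe() places the frame by growing the stack down and rounding with (sp - frame_size) & -8ul. Since -8ul equals ~7ul, this is ordinary 8-byte downward alignment, as signal frames on s390 must be doubleword aligned; a one-line sketch:

static unsigned long place_frame(unsigned long sp, unsigned long frame_size)
{
	return (sp - frame_size) & ~7UL;	/* identical to & -8ul */
}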
@@ -277,10 +288,6 @@ static int setup_frame(int sig, struct k_sigaction *ka,
277 288
278 /* Set up registers for signal handler */ 289 /* Set up registers for signal handler */
279 regs->gprs[15] = (unsigned long) frame; 290 regs->gprs[15] = (unsigned long) frame;
280 /* Force default amode and default user address space control. */
281 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
282 (psw_user_bits & PSW_MASK_ASC) |
283 (regs->psw.mask & ~PSW_MASK_ASC);
284 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 291 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
285 292
286 regs->gprs[2] = map_signal(sig); 293 regs->gprs[2] = map_signal(sig);
@@ -288,13 +295,9 @@ static int setup_frame(int sig, struct k_sigaction *ka,
288 295
289 /* We forgot to include these in the sigcontext. 296 /* We forgot to include these in the sigcontext.
290 To avoid breaking binary compatibility, they are passed as args. */ 297 To avoid breaking binary compatibility, they are passed as args. */
291 if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || 298 regs->gprs[4] = current->thread.trap_no;
292 sig == SIGTRAP || sig == SIGFPE) { 299 regs->gprs[5] = current->thread.prot_addr;
293 /* set extra registers only for synchronous signals */ 300 regs->gprs[6] = task_thread_info(current)->last_break;
294 regs->gprs[4] = regs->int_code & 127;
295 regs->gprs[5] = regs->int_parm_long;
296 regs->gprs[6] = task_thread_info(current)->last_break;
297 }
298 301
299 /* Place signal number on stack to allow backtrace from handler. */ 302 /* Place signal number on stack to allow backtrace from handler. */
300 if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) 303 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
@@ -353,10 +356,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
353 356
354 /* Set up registers for signal handler */ 357 /* Set up registers for signal handler */
355 regs->gprs[15] = (unsigned long) frame; 358 regs->gprs[15] = (unsigned long) frame;
356 /* Force default amode and default user address space control. */
357 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
358 (psw_user_bits & PSW_MASK_ASC) |
359 (regs->psw.mask & ~PSW_MASK_ASC);
360 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 359 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
361 360
362 regs->gprs[2] = map_signal(sig); 361 regs->gprs[2] = map_signal(sig);
@@ -370,10 +369,11 @@ give_sigsegv:
370 return -EFAULT; 369 return -EFAULT;
371} 370}
372 371
373static void handle_signal(unsigned long sig, struct k_sigaction *ka, 372static int handle_signal(unsigned long sig, struct k_sigaction *ka,
374 siginfo_t *info, sigset_t *oldset, 373 siginfo_t *info, sigset_t *oldset,
375 struct pt_regs *regs) 374 struct pt_regs *regs)
376{ 375{
376 sigset_t blocked;
377 int ret; 377 int ret;
378 378
379 /* Set up the stack frame */ 379 /* Set up the stack frame */
@@ -382,9 +382,12 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka,
382 else 382 else
383 ret = setup_frame(sig, ka, oldset, regs); 383 ret = setup_frame(sig, ka, oldset, regs);
384 if (ret) 384 if (ret)
385 return; 385 return ret;
386 signal_delivered(sig, info, ka, regs, 386 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
387 test_thread_flag(TIF_SINGLE_STEP)); 387 if (!(ka->sa.sa_flags & SA_NODEFER))
388 sigaddset(&blocked, sig);
389 set_current_blocked(&blocked);
390 return 0;
388} 391}
389 392
390/* 393/*
@@ -398,83 +401,112 @@ static void handle_signal(unsigned long sig, struct k_sigaction *ka,
398 */ 401 */
399void do_signal(struct pt_regs *regs) 402void do_signal(struct pt_regs *regs)
400{ 403{
404 unsigned long retval = 0, continue_addr = 0, restart_addr = 0;
401 siginfo_t info; 405 siginfo_t info;
402 int signr; 406 int signr;
403 struct k_sigaction ka; 407 struct k_sigaction ka;
404 sigset_t *oldset = sigmask_to_save(); 408 sigset_t *oldset;
405 409
406 /* 410 /*
407 * Get signal to deliver. When running under ptrace, at this point 411 * We want the common case to go fast, which
408 * the debugger may change all our registers, including the system 412 * is why we may in certain cases get here from
409 * call information. 413 * kernel mode. Just return without doing anything
414 * if so.
410 */ 415 */
411 current_thread_info()->system_call = 416 if (!user_mode(regs))
412 test_thread_flag(TIF_SYSCALL) ? regs->int_code : 0; 417 return;
413 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
414 418
415 if (signr > 0) { 419 if (test_thread_flag(TIF_RESTORE_SIGMASK))
416 /* Whee! Actually deliver the signal. */ 420 oldset = &current->saved_sigmask;
417 if (current_thread_info()->system_call) { 421 else
418 regs->int_code = current_thread_info()->system_call; 422 oldset = &current->blocked;
419 /* Check for system call restarting. */
420 switch (regs->gprs[2]) {
421 case -ERESTART_RESTARTBLOCK:
422 case -ERESTARTNOHAND:
423 regs->gprs[2] = -EINTR;
424 break;
425 case -ERESTARTSYS:
426 if (!(ka.sa.sa_flags & SA_RESTART)) {
427 regs->gprs[2] = -EINTR;
428 break;
429 }
430 /* fallthrough */
431 case -ERESTARTNOINTR:
432 regs->gprs[2] = regs->orig_gpr2;
433 regs->psw.addr =
434 __rewind_psw(regs->psw,
435 regs->int_code >> 16);
436 break;
437 }
438 }
439 /* No longer in a system call */
440 clear_thread_flag(TIF_SYSCALL);
441 423
442 if (is_compat_task()) 424 /* Are we from a system call? */
443 handle_signal32(signr, &ka, &info, oldset, regs); 425 if (regs->svcnr) {
444 else 426 continue_addr = regs->psw.addr;
445 handle_signal(signr, &ka, &info, oldset, regs); 427 restart_addr = continue_addr - regs->ilc;
446 return; 428 retval = regs->gprs[2];
447 }
448 429
449 /* No handlers present - check for system call restart */ 430 /* Prepare for system call restart. We do this here so that a
450 clear_thread_flag(TIF_SYSCALL); 431 debugger will see the already changed PSW. */
451 if (current_thread_info()->system_call) { 432 switch (retval) {
452 regs->int_code = current_thread_info()->system_call;
453 switch (regs->gprs[2]) {
454 case -ERESTART_RESTARTBLOCK:
455 /* Restart with sys_restart_syscall */
456 regs->int_code = __NR_restart_syscall;
457 /* fallthrough */
458 case -ERESTARTNOHAND: 433 case -ERESTARTNOHAND:
459 case -ERESTARTSYS: 434 case -ERESTARTSYS:
460 case -ERESTARTNOINTR: 435 case -ERESTARTNOINTR:
461 /* Restart system call with magic TIF bit. */
462 regs->gprs[2] = regs->orig_gpr2; 436 regs->gprs[2] = regs->orig_gpr2;
463 set_thread_flag(TIF_SYSCALL); 437 regs->psw.addr = restart_addr;
464 if (test_thread_flag(TIF_SINGLE_STEP))
465 set_thread_flag(TIF_PER_TRAP);
466 break; 438 break;
439 case -ERESTART_RESTARTBLOCK:
440 regs->gprs[2] = -EINTR;
441 }
442 regs->svcnr = 0; /* Don't deal with this again. */
443 }
444
445 /* Get signal to deliver. When running under ptrace, at this point
446 the debugger may change all our registers ... */
447 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
448
449 /* Depending on the signal settings we may need to revert the
450 decision to restart the system call. */
451 if (signr > 0 && regs->psw.addr == restart_addr) {
452 if (retval == -ERESTARTNOHAND
453 || (retval == -ERESTARTSYS
454 && !(current->sighand->action[signr-1].sa.sa_flags
455 & SA_RESTART))) {
456 regs->gprs[2] = -EINTR;
457 regs->psw.addr = continue_addr;
458 }
459 }
460
461 if (signr > 0) {
462 /* Whee! Actually deliver the signal. */
463 int ret;
464#ifdef CONFIG_COMPAT
465 if (is_compat_task()) {
466 ret = handle_signal32(signr, &ka, &info, oldset, regs);
467 }
468 else
469#endif
470 ret = handle_signal(signr, &ka, &info, oldset, regs);
471 if (!ret) {
472 /*
473 * A signal was successfully delivered; the saved
474 * sigmask will have been stored in the signal frame,
475 * and will be restored by sigreturn, so we can simply
476 * clear the TIF_RESTORE_SIGMASK flag.
477 */
478 if (test_thread_flag(TIF_RESTORE_SIGMASK))
479 clear_thread_flag(TIF_RESTORE_SIGMASK);
480
481 /*
482 * Let tracing know that we've done the handler setup.
483 */
484 tracehook_signal_handler(signr, &info, &ka, regs,
485 test_thread_flag(TIF_SINGLE_STEP));
467 } 486 }
487 return;
468 } 488 }
469 489
470 /* 490 /*
471 * If there's no signal to deliver, we just put the saved sigmask back. 491 * If there's no signal to deliver, we just put the saved sigmask back.
472 */ 492 */
473 restore_saved_sigmask(); 493 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
494 clear_thread_flag(TIF_RESTORE_SIGMASK);
495 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
496 }
497
498 /* Restart a different system call. */
499 if (retval == -ERESTART_RESTARTBLOCK
500 && regs->psw.addr == continue_addr) {
501 regs->gprs[2] = __NR_restart_syscall;
502 set_thread_flag(TIF_RESTART_SVC);
503 }
474} 504}
475 505
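Both versions of do_signal() above implement the same restart contract: -ERESTARTNOINTR always restarts, -ERESTARTSYS restarts only when the handler has SA_RESTART set, and -ERESTARTNOHAND becomes -EINTR once a handler runs. A hedged sketch of that decision; the constants mirror the kernel's values but are local stand-ins:

enum {
	ERESTARTSYS_	= 512,	/* restart if SA_RESTART */
	ERESTARTNOINTR_	= 513,	/* always restart */
	ERESTARTNOHAND_	= 514,	/* restart only if no handler ran */
};

/* Returns 1 to rewind the PSW to the syscall, 0 to fail with -EINTR,
 * and -1 when the return value is not a restart code at all. */
static int restart_disposition(long retval, int sa_restart)
{
	switch (-retval) {
	case ERESTARTNOINTR_:
		return 1;
	case ERESTARTSYS_:
		return sa_restart ? 1 : 0;
	case ERESTARTNOHAND_:
		return 0;
	default:
		return -1;
	}
}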
476void do_notify_resume(struct pt_regs *regs) 506void do_notify_resume(struct pt_regs *regs)
477{ 507{
478 clear_thread_flag(TIF_NOTIFY_RESUME); 508 clear_thread_flag(TIF_NOTIFY_RESUME);
479 tracehook_notify_resume(regs); 509 tracehook_notify_resume(regs);
510 if (current->replacement_session_keyring)
511 key_replace_session_keyring();
480} 512}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7433a2f9e5c..07f73cde90b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,18 +1,23 @@
1/* 1/*
2 * SMP related functions 2 * arch/s390/kernel/smp.c
3 * 3 *
4 * Copyright IBM Corp. 1999, 2012 4 * Copyright IBM Corp. 1999, 2009
5 * Author(s): Denis Joseph Barrow, 5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 * Heiko Carstens <heiko.carstens@de.ibm.com>, 7 * Heiko Carstens (heiko.carstens@de.ibm.com)
8 * 8 *
9 * based on other smp stuff by 9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> 10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar 11 * (c) 1998 Ingo Molnar
12 * 12 *
13 * The code outside of smp.c uses logical cpu numbers, only smp.c does 13 * We work with logical cpu numbering everywhere we can. The only
14 * the translation of logical to physical cpu ids. All new code that 14 * functions using the real cpu address (obtained from STAP) are the sigp
15 * operates on physical cpu numbers needs to go into smp.c. 15 * functions. For all other functions we use the identity mapping.
16 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
17 * used e.g. to find the idle task belonging to a logical cpu. Every array
18 * in the kernel is sorted by the logical cpu number and not by the physical
19 * one which is causing all the confusion with __cpu_logical_map and
20 * cpu_number_map in other architectures.
16 */ 21 */
17 22
18#define KMSG_COMPONENT "cpu" 23#define KMSG_COMPONENT "cpu"
@@ -26,423 +31,141 @@
26#include <linux/spinlock.h> 31#include <linux/spinlock.h>
27#include <linux/kernel_stat.h> 32#include <linux/kernel_stat.h>
28#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/cache.h>
29#include <linux/interrupt.h> 35#include <linux/interrupt.h>
30#include <linux/irqflags.h> 36#include <linux/irqflags.h>
31#include <linux/cpu.h> 37#include <linux/cpu.h>
38#include <linux/timex.h>
39#include <linux/bootmem.h>
32#include <linux/slab.h> 40#include <linux/slab.h>
33#include <linux/crash_dump.h>
34#include <asm/asm-offsets.h> 41#include <asm/asm-offsets.h>
35#include <asm/switch_to.h>
36#include <asm/facility.h>
37#include <asm/ipl.h> 42#include <asm/ipl.h>
38#include <asm/setup.h> 43#include <asm/setup.h>
44#include <asm/sigp.h>
45#include <asm/pgalloc.h>
39#include <asm/irq.h> 46#include <asm/irq.h>
47#include <asm/cpcmd.h>
40#include <asm/tlbflush.h> 48#include <asm/tlbflush.h>
41#include <asm/vtimer.h> 49#include <asm/timer.h>
42#include <asm/lowcore.h> 50#include <asm/lowcore.h>
43#include <asm/sclp.h> 51#include <asm/sclp.h>
52#include <asm/cputime.h>
44#include <asm/vdso.h> 53#include <asm/vdso.h>
45#include <asm/debug.h> 54#include <asm/cpu.h>
46#include <asm/os_info.h>
47#include <asm/sigp.h>
48#include "entry.h" 55#include "entry.h"
49 56
50enum { 57/* logical cpu to cpu address */
51 ec_schedule = 0, 58unsigned short __cpu_logical_map[NR_CPUS];
52 ec_call_function,
53 ec_call_function_single,
54 ec_stop_cpu,
55};
56 59
57enum { 60static struct task_struct *current_set[NR_CPUS];
61
62static u8 smp_cpu_type;
63static int smp_use_sigp_detection;
64
65enum s390_cpu_state {
58 CPU_STATE_STANDBY, 66 CPU_STATE_STANDBY,
59 CPU_STATE_CONFIGURED, 67 CPU_STATE_CONFIGURED,
60}; 68};
61 69
62struct pcpu {
63 struct cpu cpu;
64 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
65 unsigned long async_stack; /* async stack for the cpu */
66 unsigned long panic_stack; /* panic stack for the cpu */
67 unsigned long ec_mask; /* bit mask for ec_xxx functions */
68 int state; /* physical cpu state */
69 int polarization; /* physical polarization */
70 u16 address; /* physical cpu address */
71};
72
73static u8 boot_cpu_type;
74static u16 boot_cpu_address;
75static struct pcpu pcpu_devices[NR_CPUS];
76
77/*
78 * The smp_cpu_state_mutex must be held when changing the state or polarization
79 * member of a pcpu data structure within the pcpu_devices array.
80 */
81DEFINE_MUTEX(smp_cpu_state_mutex); 70DEFINE_MUTEX(smp_cpu_state_mutex);
71int smp_cpu_polarization[NR_CPUS];
72static int smp_cpu_state[NR_CPUS];
73static int cpu_management;
82 74
83/* 75static DEFINE_PER_CPU(struct cpu, cpu_devices);
84 * Signal processor helper functions.
85 */
86static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
87{
88 register unsigned int reg1 asm ("1") = parm;
89 int cc;
90
91 asm volatile(
92 " sigp %1,%2,0(%3)\n"
93 " ipm %0\n"
94 " srl %0,28\n"
95 : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
96 if (status && cc == 1)
97 *status = reg1;
98 return cc;
99}
100
101static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
102{
103 int cc;
104
105 while (1) {
106 cc = __pcpu_sigp(addr, order, parm, NULL);
107 if (cc != SIGP_CC_BUSY)
108 return cc;
109 cpu_relax();
110 }
111}
112
113static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
114{
115 int cc, retry;
116
117 for (retry = 0; ; retry++) {
118 cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
119 if (cc != SIGP_CC_BUSY)
120 break;
121 if (retry >= 3)
122 udelay(10);
123 }
124 return cc;
125}
126
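__pcpu_sigp() and pcpu_sigp_retry() above wrap the sigp instruction in a busy-retry loop keyed off the condition code. A hedged sketch of that pattern, with function-pointer stand-ins for the instruction and for udelay():

enum { CC_ACCEPTED, CC_STATUS_STORED, CC_BUSY, CC_NOT_OPERATIONAL };

static int sigp_with_retry(int (*issue)(void), void (*backoff)(void))
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = issue();
		if (cc != CC_BUSY)
			break;
		if (retry >= 3)
			backoff();	/* spin a few times, then back off */
	}
	return cc;
}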
127static inline int pcpu_stopped(struct pcpu *pcpu)
128{
129 u32 uninitialized_var(status);
130
131 if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
132 0, &status) != SIGP_CC_STATUS_STORED)
133 return 0;
134 return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
135}
136
137static inline int pcpu_running(struct pcpu *pcpu)
138{
139 if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
140 0, NULL) != SIGP_CC_STATUS_STORED)
141 return 1;
142 /* Status stored condition code is equivalent to cpu not running. */
143 return 0;
144}
145
146/*
147 * Find struct pcpu by cpu address.
148 */
149static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
150{
151 int cpu;
152 76
153 for_each_cpu(cpu, mask) 77static void smp_ext_bitcall(int, int);
154 if (pcpu_devices[cpu].address == address)
155 return pcpu_devices + cpu;
156 return NULL;
157}
158 78
159static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) 79static int raw_cpu_stopped(int cpu)
160{ 80{
161 int order; 81 u32 status;
162 82
163 set_bit(ec_bit, &pcpu->ec_mask); 83 switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
164 order = pcpu_running(pcpu) ? 84 case sigp_status_stored:
165 SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; 85 /* Check for stopped and check stop state */
166 pcpu_sigp_retry(pcpu, order, 0); 86 if (status & 0x50)
167} 87 return 1;
168 88 break;
169static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) 89 default:
170{ 90 break;
171 struct _lowcore *lc;
172
173 if (pcpu != &pcpu_devices[0]) {
174 pcpu->lowcore = (struct _lowcore *)
175 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
176 pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
177 pcpu->panic_stack = __get_free_page(GFP_KERNEL);
178 if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
179 goto out;
180 }
181 lc = pcpu->lowcore;
182 memcpy(lc, &S390_lowcore, 512);
183 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
184 lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
185 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
186 lc->cpu_nr = cpu;
187#ifndef CONFIG_64BIT
188 if (MACHINE_HAS_IEEE) {
189 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
190 if (!lc->extended_save_area_addr)
191 goto out;
192 } 91 }
193#else
194 if (vdso_alloc_per_cpu(lc))
195 goto out;
196#endif
197 lowcore_ptr[cpu] = lc;
198 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
199 return 0; 92 return 0;
200out:
201 if (pcpu != &pcpu_devices[0]) {
202 free_page(pcpu->panic_stack);
203 free_pages(pcpu->async_stack, ASYNC_ORDER);
204 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
205 }
206 return -ENOMEM;
207}
208
209#ifdef CONFIG_HOTPLUG_CPU
210
211static void pcpu_free_lowcore(struct pcpu *pcpu)
212{
213 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
214 lowcore_ptr[pcpu - pcpu_devices] = NULL;
215#ifndef CONFIG_64BIT
216 if (MACHINE_HAS_IEEE) {
217 struct _lowcore *lc = pcpu->lowcore;
218
219 free_page((unsigned long) lc->extended_save_area_addr);
220 lc->extended_save_area_addr = 0;
221 }
222#else
223 vdso_free_per_cpu(pcpu->lowcore);
224#endif
225 if (pcpu != &pcpu_devices[0]) {
226 free_page(pcpu->panic_stack);
227 free_pages(pcpu->async_stack, ASYNC_ORDER);
228 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
229 }
230}
231
232#endif /* CONFIG_HOTPLUG_CPU */
233
234static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
235{
236 struct _lowcore *lc = pcpu->lowcore;
237
238 atomic_inc(&init_mm.context.attach_count);
239 lc->cpu_nr = cpu;
240 lc->percpu_offset = __per_cpu_offset[cpu];
241 lc->kernel_asce = S390_lowcore.kernel_asce;
242 lc->machine_flags = S390_lowcore.machine_flags;
243 lc->ftrace_func = S390_lowcore.ftrace_func;
244 lc->user_timer = lc->system_timer = lc->steal_timer = 0;
245 __ctl_store(lc->cregs_save_area, 0, 15);
246 save_access_regs((unsigned int *) lc->access_regs_save_area);
247 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
248 MAX_FACILITY_BIT/8);
249}
250
251static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
252{
253 struct _lowcore *lc = pcpu->lowcore;
254 struct thread_info *ti = task_thread_info(tsk);
255
256 lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
257 lc->thread_info = (unsigned long) task_thread_info(tsk);
258 lc->current_task = (unsigned long) tsk;
259 lc->user_timer = ti->user_timer;
260 lc->system_timer = ti->system_timer;
261 lc->steal_timer = 0;
262}
263
264static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
265{
266 struct _lowcore *lc = pcpu->lowcore;
267
268 lc->restart_stack = lc->kernel_stack;
269 lc->restart_fn = (unsigned long) func;
270 lc->restart_data = (unsigned long) data;
271 lc->restart_source = -1UL;
272 pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
273} 93}
274 94
275/* 95static inline int cpu_stopped(int cpu)
276 * Call function via PSW restart on pcpu and stop the current cpu.
277 */
278static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
279 void *data, unsigned long stack)
280{ 96{
281 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices]; 97 return raw_cpu_stopped(cpu_logical_map(cpu));
282 unsigned long source_cpu = stap();
283
284 __load_psw_mask(psw_kernel_bits);
285 if (pcpu->address == source_cpu)
286 func(data); /* should not return */
287 /* Stop target cpu (if func returns this stops the current cpu). */
288 pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
289 /* Restart func on the target cpu and stop the current cpu. */
290 mem_assign_absolute(lc->restart_stack, stack);
291 mem_assign_absolute(lc->restart_fn, (unsigned long) func);
292 mem_assign_absolute(lc->restart_data, (unsigned long) data);
293 mem_assign_absolute(lc->restart_source, source_cpu);
294 asm volatile(
295 "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
296 " brc 2,0b # busy, try again\n"
297 "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
298 " brc 2,1b # busy, try again\n"
299 : : "d" (pcpu->address), "d" (source_cpu),
300 "K" (SIGP_RESTART), "K" (SIGP_STOP)
301 : "0", "1", "cc");
302 for (;;) ;
303} 98}
304 99
305/* 100void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
306 * Call function on an online CPU.
307 */
308void smp_call_online_cpu(void (*func)(void *), void *data)
309{ 101{
310 struct pcpu *pcpu; 102 struct _lowcore *lc, *current_lc;
311 103 struct stack_frame *sf;
312 /* Use the current cpu if it is online. */ 104 struct pt_regs *regs;
313 pcpu = pcpu_find_address(cpu_online_mask, stap()); 105 unsigned long sp;
314 if (!pcpu) 106
315 /* Use the first online cpu. */ 107 if (smp_processor_id() == 0)
316 pcpu = pcpu_devices + cpumask_first(cpu_online_mask); 108 func(data);
317 pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack); 109 __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
318} 110 /* Disable lowcore protection */
319 111 __ctl_clear_bit(0, 28);
320/* 112 current_lc = lowcore_ptr[smp_processor_id()];
321 * Call function on the ipl CPU. 113 lc = lowcore_ptr[0];
322 */ 114 if (!lc)
323void smp_call_ipl_cpu(void (*func)(void *), void *data) 115 lc = current_lc;
324{ 116 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
325 pcpu_delegate(&pcpu_devices[0], func, data, 117 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
326 pcpu_devices->panic_stack + PAGE_SIZE); 118 if (!cpu_online(0))
327} 119 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
328 120 while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
329int smp_find_processor_id(u16 address)
330{
331 int cpu;
332
333 for_each_present_cpu(cpu)
334 if (pcpu_devices[cpu].address == address)
335 return cpu;
336 return -1;
337}
338
339int smp_vcpu_scheduled(int cpu)
340{
341 return pcpu_running(pcpu_devices + cpu);
342}
343
344void smp_yield(void)
345{
346 if (MACHINE_HAS_DIAG44)
347 asm volatile("diag 0,0,0x44");
348}
349
350void smp_yield_cpu(int cpu)
351{
352 if (MACHINE_HAS_DIAG9C)
353 asm volatile("diag %0,0,0x9c"
354 : : "d" (pcpu_devices[cpu].address));
355 else if (MACHINE_HAS_DIAG44)
356 asm volatile("diag 0,0,0x44");
357}
358
359/*
360 * Send cpus emergency shutdown signal. This gives the cpus the
361 * opportunity to complete outstanding interrupts.
362 */
363void smp_emergency_stop(cpumask_t *cpumask)
364{
365 u64 end;
366 int cpu;
367
368 end = get_clock() + (1000000UL << 12);
369 for_each_cpu(cpu, cpumask) {
370 struct pcpu *pcpu = pcpu_devices + cpu;
371 set_bit(ec_stop_cpu, &pcpu->ec_mask);
372 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
373 0, NULL) == SIGP_CC_BUSY &&
374 get_clock() < end)
375 cpu_relax();
376 }
377 while (get_clock() < end) {
378 for_each_cpu(cpu, cpumask)
379 if (pcpu_stopped(pcpu_devices + cpu))
380 cpumask_clear_cpu(cpu, cpumask);
381 if (cpumask_empty(cpumask))
382 break;
383 cpu_relax(); 121 cpu_relax();
384 } 122 sp = lc->panic_stack;
123 sp -= sizeof(struct pt_regs);
124 regs = (struct pt_regs *) sp;
125 memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
126 regs->psw = lc->psw_save_area;
127 sp -= STACK_FRAME_OVERHEAD;
128 sf = (struct stack_frame *) sp;
129 sf->back_chain = regs->gprs[15];
130 smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
385} 131}
386 132
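smp_emergency_stop() on the left bounds its wait with a clock deadline: signal every CPU, then poll until all report stopped or time runs out. A sketch of that deadline loop; now() and stopped() are illustrative stand-ins for get_clock() and the SIGP sense helper:

static int wait_all_stopped(unsigned long (*now)(void),
			    int (*stopped)(int cpu),
			    int ncpus, unsigned long deadline)
{
	int cpu, remaining;

	do {
		remaining = 0;
		for (cpu = 0; cpu < ncpus; cpu++)
			if (!stopped(cpu))
				remaining++;
	} while (remaining && now() < deadline);
	return remaining == 0;	/* 1 if every cpu stopped in time */
}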
387/*
388 * Stop all cpus but the current one.
389 */
390void smp_send_stop(void) 133void smp_send_stop(void)
391{ 134{
392 cpumask_t cpumask; 135 int cpu, rc;
393 int cpu;
394 136
395 /* Disable all interrupts/machine checks */ 137 /* Disable all interrupts/machine checks */
396 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 138 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
397 trace_hardirqs_off(); 139 trace_hardirqs_off();
398 140
399 debug_set_critical();
400 cpumask_copy(&cpumask, cpu_online_mask);
401 cpumask_clear_cpu(smp_processor_id(), &cpumask);
402
403 if (oops_in_progress)
404 smp_emergency_stop(&cpumask);
405
406 /* stop all processors */ 141 /* stop all processors */
407 for_each_cpu(cpu, &cpumask) { 142 for_each_online_cpu(cpu) {
408 struct pcpu *pcpu = pcpu_devices + cpu; 143 if (cpu == smp_processor_id())
409 pcpu_sigp_retry(pcpu, SIGP_STOP, 0); 144 continue;
410 while (!pcpu_stopped(pcpu)) 145 do {
146 rc = sigp(cpu, sigp_stop);
147 } while (rc == sigp_busy);
148
149 while (!cpu_stopped(cpu))
411 cpu_relax(); 150 cpu_relax();
412 } 151 }
413} 152}
414 153
415/* 154/*
416 * Stop the current cpu.
417 */
418void smp_stop_cpu(void)
419{
420 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
421 for (;;) ;
422}
423
424/*
425 * This is the main routine where commands issued by other 155 * This is the main routine where commands issued by other
426 * cpus are handled. 156 * cpus are handled.
427 */ 157 */
428static void do_ext_call_interrupt(struct ext_code ext_code, 158
159static void do_ext_call_interrupt(unsigned int ext_int_code,
429 unsigned int param32, unsigned long param64) 160 unsigned int param32, unsigned long param64)
430{ 161{
431 unsigned long bits; 162 unsigned long bits;
432 int cpu;
433 163
434 cpu = smp_processor_id(); 164 kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
435 if (ext_code.code == 0x1202)
436 inc_irq_stat(IRQEXT_EXC);
437 else
438 inc_irq_stat(IRQEXT_EMS);
439 /* 165 /*
440 * handle bit signal external calls 166 * handle bit signal external calls
441 */ 167 */
442 bits = xchg(&pcpu_devices[cpu].ec_mask, 0); 168 bits = xchg(&S390_lowcore.ext_call_fast, 0);
443
444 if (test_bit(ec_stop_cpu, &bits))
445 smp_stop_cpu();
446 169
447 if (test_bit(ec_schedule, &bits)) 170 if (test_bit(ec_schedule, &bits))
448 scheduler_ipi(); 171 scheduler_ipi();
@@ -452,7 +175,20 @@ static void do_ext_call_interrupt(struct ext_code ext_code,
452 175
453 if (test_bit(ec_call_function_single, &bits)) 176 if (test_bit(ec_call_function_single, &bits))
454 generic_smp_call_function_single_interrupt(); 177 generic_smp_call_function_single_interrupt();
178}
455 179
180/*
181 * Send an external call sigp to another cpu and return without waiting
182 * for its completion.
183 */
184static void smp_ext_bitcall(int cpu, int sig)
185{
186 /*
187 * Set signaling bit in lowcore of target cpu and kick it
188 */
189 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
190 while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
191 udelay(10);
456} 192}
457 193
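Both sides implement the same signaling protocol: the sender sets a request bit in a word owned by the receiver and kicks it with an emergency/external-call SIGP; the receiver atomically swaps the word to zero and services every bit it collected. A hedged sketch with C11 atomics standing in for the kernel's set_bit()/xchg():

#include <stdatomic.h>

enum { EC_SCHEDULE, EC_CALL_FUNCTION, EC_CALL_FUNCTION_SINGLE };

static _Atomic unsigned long ext_call_bits;

static void send_bitcall(int sig)
{
	atomic_fetch_or(&ext_call_bits, 1UL << sig);
	/* ...then raise the external-call interrupt on the target cpu */
}

static void handle_bitcall(void)
{
	unsigned long bits = atomic_exchange(&ext_call_bits, 0);

	if (bits & (1UL << EC_SCHEDULE))
		;	/* scheduler_ipi() */
	if (bits & (1UL << EC_CALL_FUNCTION))
		;	/* generic_smp_call_function_interrupt() */
	if (bits & (1UL << EC_CALL_FUNCTION_SINGLE))
		;	/* generic_smp_call_function_single_interrupt() */
}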
458void arch_send_call_function_ipi_mask(const struct cpumask *mask) 194void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -460,12 +196,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
460 int cpu; 196 int cpu;
461 197
462 for_each_cpu(cpu, mask) 198 for_each_cpu(cpu, mask)
463 pcpu_ec_call(pcpu_devices + cpu, ec_call_function); 199 smp_ext_bitcall(cpu, ec_call_function);
464} 200}
465 201
466void arch_send_call_function_single_ipi(int cpu) 202void arch_send_call_function_single_ipi(int cpu)
467{ 203{
468 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); 204 smp_ext_bitcall(cpu, ec_call_function_single);
469} 205}
470 206
471#ifndef CONFIG_64BIT 207#ifndef CONFIG_64BIT
@@ -491,16 +227,15 @@ EXPORT_SYMBOL(smp_ptlb_all);
491 */ 227 */
492void smp_send_reschedule(int cpu) 228void smp_send_reschedule(int cpu)
493{ 229{
494 pcpu_ec_call(pcpu_devices + cpu, ec_schedule); 230 smp_ext_bitcall(cpu, ec_schedule);
495} 231}
496 232
497/* 233/*
498 * parameter area for the set/clear control bit callbacks 234 * parameter area for the set/clear control bit callbacks
499 */ 235 */
500struct ec_creg_mask_parms { 236struct ec_creg_mask_parms {
501 unsigned long orval; 237 unsigned long orvals[16];
502 unsigned long andval; 238 unsigned long andvals[16];
503 int cr;
504}; 239};
505 240
506/* 241/*
@@ -510,9 +245,11 @@ static void smp_ctl_bit_callback(void *info)
510{ 245{
511 struct ec_creg_mask_parms *pp = info; 246 struct ec_creg_mask_parms *pp = info;
512 unsigned long cregs[16]; 247 unsigned long cregs[16];
248 int i;
513 249
514 __ctl_store(cregs, 0, 15); 250 __ctl_store(cregs, 0, 15);
515 cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval; 251 for (i = 0; i <= 15; i++)
252 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
516 __ctl_load(cregs, 0, 15); 253 __ctl_load(cregs, 0, 15);
517} 254}
518 255
@@ -521,8 +258,11 @@ static void smp_ctl_bit_callback(void *info)
521 */ 258 */
522void smp_ctl_set_bit(int cr, int bit) 259void smp_ctl_set_bit(int cr, int bit)
523{ 260{
524 struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr }; 261 struct ec_creg_mask_parms parms;
525 262
263 memset(&parms.orvals, 0, sizeof(parms.orvals));
264 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
265 parms.orvals[cr] = 1UL << bit;
526 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 266 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
527} 267}
528EXPORT_SYMBOL(smp_ctl_set_bit); 268EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -532,221 +272,367 @@ EXPORT_SYMBOL(smp_ctl_set_bit);
532 */ 272 */
533void smp_ctl_clear_bit(int cr, int bit) 273void smp_ctl_clear_bit(int cr, int bit)
534{ 274{
535 struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr }; 275 struct ec_creg_mask_parms parms;
536 276
277 memset(&parms.orvals, 0, sizeof(parms.orvals));
278 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
279 parms.andvals[cr] = ~(1UL << bit);
537 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 280 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
538} 281}
539EXPORT_SYMBOL(smp_ctl_clear_bit); 282EXPORT_SYMBOL(smp_ctl_clear_bit);
540 283
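smp_ctl_bit_callback() in both versions applies an or/and mask pair to the 16 control registers: the OR mask sets bits, the AND mask clears them, and the neutral values (0 and all-ones) leave a register untouched. The core idiom, sketched over a plain array:

static void apply_masks(unsigned long regs[16],
			const unsigned long orvals[16],
			const unsigned long andvals[16])
{
	int i;

	for (i = 0; i < 16; i++)
		regs[i] = (regs[i] & andvals[i]) | orvals[i];
}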
541#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) 284#ifdef CONFIG_ZFCPDUMP
542
543struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
544EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
545 285
546static void __init smp_get_save_area(int cpu, u16 address) 286static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
547{ 287{
548 void *lc = pcpu_devices[0].lowcore; 288 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
549 struct save_area *save_area;
550
551 if (is_kdump_kernel())
552 return;
553 if (!OLDMEM_BASE && (address == boot_cpu_address ||
554 ipl_info.type != IPL_TYPE_FCP_DUMP))
555 return; 289 return;
556 if (cpu >= NR_CPUS) { 290 if (cpu >= NR_CPUS) {
557 pr_warning("CPU %i exceeds the maximum %i and is excluded " 291 pr_warning("CPU %i exceeds the maximum %i and is excluded from "
558 "from the dump\n", cpu, NR_CPUS - 1); 292 "the dump\n", cpu, NR_CPUS - 1);
559 return;
560 }
561 save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
562 if (!save_area)
563 panic("could not allocate memory for save area\n");
564 zfcpdump_save_areas[cpu] = save_area;
565#ifdef CONFIG_CRASH_DUMP
566 if (address == boot_cpu_address) {
567 /* Copy the registers of the boot cpu. */
568 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
569 SAVE_AREA_BASE - PAGE_SIZE, 0);
570 return; 293 return;
571 } 294 }
572#endif 295 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
573 /* Get the registers of a non-boot cpu. */ 296 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
574 __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); 297 cpu_relax();
575 memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); 298 memcpy_real(zfcpdump_save_areas[cpu],
299 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
300 sizeof(struct save_area));
576} 301}
577 302
578int smp_store_status(int cpu) 303struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
579{ 304EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
580 struct pcpu *pcpu;
581
582 pcpu = pcpu_devices + cpu;
583 if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
584 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
585 return -EIO;
586 return 0;
587}
588 305
589#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ 306#else
590 307
591static inline void smp_get_save_area(int cpu, u16 address) { } 308static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
592 309
593#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ 310#endif /* CONFIG_ZFCPDUMP */
594 311
595void smp_cpu_set_polarization(int cpu, int val) 312static int cpu_known(int cpu_id)
596{ 313{
597 pcpu_devices[cpu].polarization = val; 314 int cpu;
315
316 for_each_present_cpu(cpu) {
317 if (__cpu_logical_map[cpu] == cpu_id)
318 return 1;
319 }
320 return 0;
598} 321}
599 322
600int smp_cpu_get_polarization(int cpu) 323static int smp_rescan_cpus_sigp(cpumask_t avail)
601{ 324{
602 return pcpu_devices[cpu].polarization; 325 int cpu_id, logical_cpu;
326
327 logical_cpu = cpumask_first(&avail);
328 if (logical_cpu >= nr_cpu_ids)
329 return 0;
330 for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
331 if (cpu_known(cpu_id))
332 continue;
333 __cpu_logical_map[logical_cpu] = cpu_id;
334 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
335 if (!cpu_stopped(logical_cpu))
336 continue;
337 set_cpu_present(logical_cpu, true);
338 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
339 logical_cpu = cpumask_next(logical_cpu, &avail);
340 if (logical_cpu >= nr_cpu_ids)
341 break;
342 }
343 return 0;
603} 344}
604 345
605static struct sclp_cpu_info *smp_get_cpu_info(void) 346static int smp_rescan_cpus_sclp(cpumask_t avail)
606{ 347{
607 static int use_sigp_detection;
608 struct sclp_cpu_info *info; 348 struct sclp_cpu_info *info;
609 int address; 349 int cpu_id, logical_cpu, cpu;
610 350 int rc;
611 info = kzalloc(sizeof(*info), GFP_KERNEL); 351
612 if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { 352 logical_cpu = cpumask_first(&avail);
613 use_sigp_detection = 1; 353 if (logical_cpu >= nr_cpu_ids)
614 for (address = 0; address <= MAX_CPU_ADDRESS; address++) { 354 return 0;
615 if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) == 355 info = kmalloc(sizeof(*info), GFP_KERNEL);
616 SIGP_CC_NOT_OPERATIONAL) 356 if (!info)
617 continue; 357 return -ENOMEM;
618 info->cpu[info->configured].address = address; 358 rc = sclp_get_cpu_info(info);
619 info->configured++; 359 if (rc)
620 } 360 goto out;
621 info->combined = info->configured; 361 for (cpu = 0; cpu < info->combined; cpu++) {
362 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
363 continue;
364 cpu_id = info->cpu[cpu].address;
365 if (cpu_known(cpu_id))
366 continue;
367 __cpu_logical_map[logical_cpu] = cpu_id;
368 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
369 set_cpu_present(logical_cpu, true);
370 if (cpu >= info->configured)
371 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
372 else
373 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
374 logical_cpu = cpumask_next(logical_cpu, &avail);
375 if (logical_cpu >= nr_cpu_ids)
376 break;
622 } 377 }
623 return info; 378out:
379 kfree(info);
380 return rc;
624} 381}
625 382
626static int __cpuinit smp_add_present_cpu(int cpu); 383static int __smp_rescan_cpus(void)
627
628static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
629 int sysfs_add)
630{ 384{
631 struct pcpu *pcpu;
632 cpumask_t avail; 385 cpumask_t avail;
633 int cpu, nr, i;
634 386
635 nr = 0;
636 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); 387 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
637 cpu = cpumask_first(&avail); 388 if (smp_use_sigp_detection)
638 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { 389 return smp_rescan_cpus_sigp(avail);
639 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) 390 else
640 continue; 391 return smp_rescan_cpus_sclp(avail);
641 if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
642 continue;
643 pcpu = pcpu_devices + cpu;
644 pcpu->address = info->cpu[i].address;
645 pcpu->state = (cpu >= info->configured) ?
646 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
647 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
648 set_cpu_present(cpu, true);
649 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
650 set_cpu_present(cpu, false);
651 else
652 nr++;
653 cpu = cpumask_next(cpu, &avail);
654 }
655 return nr;
656} 392}
657 393
658static void __init smp_detect_cpus(void) 394static void __init smp_detect_cpus(void)
659{ 395{
660 unsigned int cpu, c_cpus, s_cpus; 396 unsigned int cpu, c_cpus, s_cpus;
661 struct sclp_cpu_info *info; 397 struct sclp_cpu_info *info;
398 u16 boot_cpu_addr, cpu_addr;
662 399
663 info = smp_get_cpu_info(); 400 c_cpus = 1;
401 s_cpus = 0;
402 boot_cpu_addr = __cpu_logical_map[0];
403 info = kmalloc(sizeof(*info), GFP_KERNEL);
664 if (!info) 404 if (!info)
665 panic("smp_detect_cpus failed to allocate memory\n"); 405 panic("smp_detect_cpus failed to allocate memory\n");
406 /* Use sigp detection algorithm if sclp doesn't work. */
407 if (sclp_get_cpu_info(info)) {
408 smp_use_sigp_detection = 1;
409 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
410 if (cpu == boot_cpu_addr)
411 continue;
412 if (!raw_cpu_stopped(cpu))
413 continue;
414 smp_get_save_area(c_cpus, cpu);
415 c_cpus++;
416 }
417 goto out;
418 }
419
666 if (info->has_cpu_type) { 420 if (info->has_cpu_type) {
667 for (cpu = 0; cpu < info->combined; cpu++) { 421 for (cpu = 0; cpu < info->combined; cpu++) {
668 if (info->cpu[cpu].address != boot_cpu_address) 422 if (info->cpu[cpu].address == boot_cpu_addr) {
669 continue; 423 smp_cpu_type = info->cpu[cpu].type;
670 /* The boot cpu dictates the cpu type. */ 424 break;
671 boot_cpu_type = info->cpu[cpu].type; 425 }
672 break;
673 } 426 }
674 } 427 }
675 c_cpus = s_cpus = 0; 428
676 for (cpu = 0; cpu < info->combined; cpu++) { 429 for (cpu = 0; cpu < info->combined; cpu++) {
677 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) 430 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
678 continue; 431 continue;
679 if (cpu < info->configured) { 432 cpu_addr = info->cpu[cpu].address;
680 smp_get_save_area(c_cpus, info->cpu[cpu].address); 433 if (cpu_addr == boot_cpu_addr)
681 c_cpus++; 434 continue;
682 } else 435 if (!raw_cpu_stopped(cpu_addr)) {
683 s_cpus++; 436 s_cpus++;
437 continue;
438 }
439 smp_get_save_area(c_cpus, cpu_addr);
440 c_cpus++;
684 } 441 }
442out:
443 kfree(info);
685 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); 444 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
686 get_online_cpus(); 445 get_online_cpus();
687 __smp_rescan_cpus(info, 0); 446 __smp_rescan_cpus();
688 put_online_cpus(); 447 put_online_cpus();
689 kfree(info);
690} 448}
691 449
692/* 450/*
693 * Activate a secondary processor. 451 * Activate a secondary processor.
694 */ 452 */
695static void __cpuinit smp_start_secondary(void *cpuvoid) 453int __cpuinit start_secondary(void *cpuvoid)
696{ 454{
697 S390_lowcore.last_update_clock = get_clock();
698 S390_lowcore.restart_stack = (unsigned long) restart_stack;
699 S390_lowcore.restart_fn = (unsigned long) do_restart;
700 S390_lowcore.restart_data = 0;
701 S390_lowcore.restart_source = -1UL;
702 restore_access_regs(S390_lowcore.access_regs_save_area);
703 __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
704 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
705 cpu_init(); 455 cpu_init();
706 preempt_disable(); 456 preempt_disable();
707 init_cpu_timer(); 457 init_cpu_timer();
708 init_cpu_vtimer(); 458 init_cpu_vtimer();
709 pfault_init(); 459 pfault_init();
460
710 notify_cpu_starting(smp_processor_id()); 461 notify_cpu_starting(smp_processor_id());
462 ipi_call_lock();
711 set_cpu_online(smp_processor_id(), true); 463 set_cpu_online(smp_processor_id(), true);
712 inc_irq_stat(CPU_RST); 464 ipi_call_unlock();
465 __ctl_clear_bit(0, 28); /* Disable lowcore protection */
466 S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
467 S390_lowcore.restart_psw.addr =
468 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
469 __ctl_set_bit(0, 28); /* Enable lowcore protection */
713 local_irq_enable(); 470 local_irq_enable();
714 /* cpu_idle will call schedule for us */ 471 /* cpu_idle will call schedule for us */
715 cpu_idle(); 472 cpu_idle();
473 return 0;
716} 474}
717 475
718/* Upping and downing of CPUs */ 476struct create_idle {
719int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 477 struct work_struct work;
478 struct task_struct *idle;
479 struct completion done;
480 int cpu;
481};
482
483static void __cpuinit smp_fork_idle(struct work_struct *work)
720{ 484{
721 struct pcpu *pcpu; 485 struct create_idle *c_idle;
722 int rc;
723 486
724 pcpu = pcpu_devices + cpu; 487 c_idle = container_of(work, struct create_idle, work);
725 if (pcpu->state != CPU_STATE_CONFIGURED) 488 c_idle->idle = fork_idle(c_idle->cpu);
726 return -EIO; 489 complete(&c_idle->done);
727 if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != 490}
728 SIGP_CC_ORDER_CODE_ACCEPTED) 491
492static int __cpuinit smp_alloc_lowcore(int cpu)
493{
494 unsigned long async_stack, panic_stack;
495 struct _lowcore *lowcore;
496
497 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
498 if (!lowcore)
499 return -ENOMEM;
500 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
501 panic_stack = __get_free_page(GFP_KERNEL);
502 if (!panic_stack || !async_stack)
503 goto out;
504 memcpy(lowcore, &S390_lowcore, 512);
505 memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
506 lowcore->async_stack = async_stack + ASYNC_SIZE;
507 lowcore->panic_stack = panic_stack + PAGE_SIZE;
508 lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
509 lowcore->restart_psw.addr =
510 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
511 if (user_mode != HOME_SPACE_MODE)
512 lowcore->restart_psw.mask |= PSW_ASC_HOME;
513#ifndef CONFIG_64BIT
514 if (MACHINE_HAS_IEEE) {
515 unsigned long save_area;
516
517 save_area = get_zeroed_page(GFP_KERNEL);
518 if (!save_area)
519 goto out;
520 lowcore->extended_save_area_addr = (u32) save_area;
521 }
522#else
523 if (vdso_alloc_per_cpu(cpu, lowcore))
524 goto out;
525#endif
526 lowcore_ptr[cpu] = lowcore;
527 return 0;
528
529out:
530 free_page(panic_stack);
531 free_pages(async_stack, ASYNC_ORDER);
532 free_pages((unsigned long) lowcore, LC_ORDER);
533 return -ENOMEM;
534}
535
536static void smp_free_lowcore(int cpu)
537{
538 struct _lowcore *lowcore;
539
540 lowcore = lowcore_ptr[cpu];
541#ifndef CONFIG_64BIT
542 if (MACHINE_HAS_IEEE)
543 free_page((unsigned long) lowcore->extended_save_area_addr);
544#else
545 vdso_free_per_cpu(cpu, lowcore);
546#endif
547 free_page(lowcore->panic_stack - PAGE_SIZE);
548 free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
549 free_pages((unsigned long) lowcore, LC_ORDER);
550 lowcore_ptr[cpu] = NULL;
551}
552
553/* Upping and downing of CPUs */
554int __cpuinit __cpu_up(unsigned int cpu)
555{
556 struct _lowcore *cpu_lowcore;
557 struct create_idle c_idle;
558 struct task_struct *idle;
559 struct stack_frame *sf;
560 u32 lowcore;
561 int ccode;
562
563 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
729 return -EIO; 564 return -EIO;
565 idle = current_set[cpu];
566 if (!idle) {
567 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
568 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
569 c_idle.cpu = cpu;
570 schedule_work(&c_idle.work);
571 wait_for_completion(&c_idle.done);
572 if (IS_ERR(c_idle.idle))
573 return PTR_ERR(c_idle.idle);
574 idle = c_idle.idle;
575 current_set[cpu] = c_idle.idle;
576 }
577 init_idle(idle, cpu);
578 if (smp_alloc_lowcore(cpu))
579 return -ENOMEM;
580 do {
581 ccode = sigp(cpu, sigp_initial_cpu_reset);
582 if (ccode == sigp_busy)
583 udelay(10);
584 if (ccode == sigp_not_operational)
585 goto err_out;
586 } while (ccode == sigp_busy);
587
588 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
589 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
590 udelay(10);
591
592 cpu_lowcore = lowcore_ptr[cpu];
593 cpu_lowcore->kernel_stack = (unsigned long)
594 task_stack_page(idle) + THREAD_SIZE;
595 cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
596 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
597 - sizeof(struct pt_regs)
598 - sizeof(struct stack_frame));
599 memset(sf, 0, sizeof(struct stack_frame));
600 sf->gprs[9] = (unsigned long) sf;
601 cpu_lowcore->save_area[15] = (unsigned long) sf;
602 __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
603 atomic_inc(&init_mm.context.attach_count);
604 asm volatile(
605 " stam 0,15,0(%0)"
606 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
607 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
608 cpu_lowcore->current_task = (unsigned long) idle;
609 cpu_lowcore->cpu_nr = cpu;
610 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
611 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
612 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
613 memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
614 MAX_FACILITY_BIT/8);
615 eieio();
616
617 while (sigp(cpu, sigp_restart) == sigp_busy)
618 udelay(10);
730 619
731 rc = pcpu_alloc_lowcore(pcpu, cpu);
732 if (rc)
733 return rc;
734 pcpu_prepare_secondary(pcpu, cpu);
735 pcpu_attach_task(pcpu, tidle);
736 pcpu_start_fn(pcpu, smp_start_secondary, NULL);
737 while (!cpu_online(cpu)) 620 while (!cpu_online(cpu))
738 cpu_relax(); 621 cpu_relax();
739 return 0; 622 return 0;
623
624err_out:
625 smp_free_lowcore(cpu);
626 return -EIO;
740} 627}
741 628
742static int __init setup_possible_cpus(char *s) 629static int __init setup_possible_cpus(char *s)
743{ 630{
744 int max, cpu; 631 int pcpus, cpu;
745 632
746 if (kstrtoint(s, 0, &max) < 0) 633 pcpus = simple_strtoul(s, NULL, 0);
747 return 0;
748 init_cpu_possible(cpumask_of(0)); 634 init_cpu_possible(cpumask_of(0));
749 for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) 635 for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
750 set_cpu_possible(cpu, true); 636 set_cpu_possible(cpu, true);
751 return 0; 637 return 0;
752} 638}
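
Both variants of setup_possible_cpus() cap the possible mask from the "possible_cpus=" early parameter; the right column still parses it with simple_strtoul(), the left with kstrtoint(). A minimal userspace sketch, not part of this patch, to inspect the resulting mask; it only assumes the standard sysfs cpu directory layout:

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/possible", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("possible cpus: %s", buf);	/* e.g. "0-3" */
	fclose(f);
	return 0;
}
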
@@ -756,66 +642,110 @@ early_param("possible_cpus", setup_possible_cpus);
756 642
757int __cpu_disable(void) 643int __cpu_disable(void)
758{ 644{
759 unsigned long cregs[16]; 645 struct ec_creg_mask_parms cr_parms;
646 int cpu = smp_processor_id();
647
648 set_cpu_online(cpu, false);
760 649
761 set_cpu_online(smp_processor_id(), false); 650 /* Disable pfault pseudo page faults on this cpu. */
762 /* Disable pseudo page faults on this cpu. */
763 pfault_fini(); 651 pfault_fini();
764 /* Disable interrupt sources via control register. */ 652
765 __ctl_store(cregs, 0, 15); 653 memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
766 cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */ 654 memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
767 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */ 655
768 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */ 656 /* disable all external interrupts */
769 __ctl_load(cregs, 0, 15); 657 cr_parms.orvals[0] = 0;
658 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
659 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
660 1 << 4);
661 /* disable all I/O interrupts */
662 cr_parms.orvals[6] = 0;
663 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
664 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
665 /* disable most machine checks */
666 cr_parms.orvals[14] = 0;
667 cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
668 1 << 25 | 1 << 24);
669
670 smp_ctl_bit_callback(&cr_parms);
671
770 return 0; 672 return 0;
771} 673}
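
The two __cpu_disable() variants encode the same interrupt-source masks differently: the left column clears a hex constant in the stored control registers, the right builds the complement from an explicit bit list. For control register 0 the two are identical, as a quick check shows:

/* Illustrative check only: the cr0 external-interrupt bit list
 * from the right column sums to the hex mask in the left column. */
#define EXT_INT_BITS (1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | \
		      1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 | 1 << 4)
/* 0xe000 + 0x0e00 + 0x0070 = 0x0000ee70, so
 * "cregs[0] &= ~0x0000ee70UL" and "andvals[0] = ~EXT_INT_BITS"
 * clear exactly the same bits. */
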
772 674
773void __cpu_die(unsigned int cpu) 675void __cpu_die(unsigned int cpu)
774{ 676{
775 struct pcpu *pcpu;
776
777 /* Wait until target cpu is down */ 677 /* Wait until target cpu is down */
778 pcpu = pcpu_devices + cpu; 678 while (!cpu_stopped(cpu))
779 while (!pcpu_stopped(pcpu))
780 cpu_relax(); 679 cpu_relax();
781 pcpu_free_lowcore(pcpu); 680 while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
681 udelay(10);
682 smp_free_lowcore(cpu);
782 atomic_dec(&init_mm.context.attach_count); 683 atomic_dec(&init_mm.context.attach_count);
783} 684}
784 685
785void __noreturn cpu_die(void) 686void __noreturn cpu_die(void)
786{ 687{
787 idle_task_exit(); 688 idle_task_exit();
788 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); 689 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
789 for (;;) ; 690 cpu_relax();
691 for (;;);
790} 692}
791 693
792#endif /* CONFIG_HOTPLUG_CPU */ 694#endif /* CONFIG_HOTPLUG_CPU */
793 695
794void __init smp_prepare_cpus(unsigned int max_cpus) 696void __init smp_prepare_cpus(unsigned int max_cpus)
795{ 697{
698#ifndef CONFIG_64BIT
699 unsigned long save_area = 0;
700#endif
701 unsigned long async_stack, panic_stack;
702 struct _lowcore *lowcore;
703
704 smp_detect_cpus();
705
796 /* request the 0x1201 emergency signal external interrupt */ 706 /* request the 0x1201 emergency signal external interrupt */
797 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 707 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
798 panic("Couldn't request external interrupt 0x1201"); 708 panic("Couldn't request external interrupt 0x1201");
799 /* request the 0x1202 external call external interrupt */ 709
800 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) 710 /* Reallocate current lowcore, but keep its contents. */
801 panic("Couldn't request external interrupt 0x1202"); 711 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
802 smp_detect_cpus(); 712 panic_stack = __get_free_page(GFP_KERNEL);
713 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
714 BUG_ON(!lowcore || !panic_stack || !async_stack);
715#ifndef CONFIG_64BIT
716 if (MACHINE_HAS_IEEE)
717 save_area = get_zeroed_page(GFP_KERNEL);
718#endif
719 local_irq_disable();
720 local_mcck_disable();
721 lowcore_ptr[smp_processor_id()] = lowcore;
722 *lowcore = S390_lowcore;
723 lowcore->panic_stack = panic_stack + PAGE_SIZE;
724 lowcore->async_stack = async_stack + ASYNC_SIZE;
725#ifndef CONFIG_64BIT
726 if (MACHINE_HAS_IEEE)
727 lowcore->extended_save_area_addr = (u32) save_area;
728#endif
729 set_prefix((u32)(unsigned long) lowcore);
730 local_mcck_enable();
731 local_irq_enable();
732#ifdef CONFIG_64BIT
733 if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
734 BUG();
735#endif
803} 736}
804 737
805void __init smp_prepare_boot_cpu(void) 738void __init smp_prepare_boot_cpu(void)
806{ 739{
807 struct pcpu *pcpu = pcpu_devices; 740 BUG_ON(smp_processor_id() != 0);
808 741
809 boot_cpu_address = stap(); 742 current_thread_info()->cpu = 0;
810 pcpu->state = CPU_STATE_CONFIGURED;
811 pcpu->address = boot_cpu_address;
812 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
813 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
814 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
815 S390_lowcore.percpu_offset = __per_cpu_offset[0];
816 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
817 set_cpu_present(0, true); 743 set_cpu_present(0, true);
818 set_cpu_online(0, true); 744 set_cpu_online(0, true);
745 S390_lowcore.percpu_offset = __per_cpu_offset[0];
746 current_set[0] = current;
747 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
748 smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
819} 749}
820 750
821void __init smp_cpus_done(unsigned int max_cpus) 751void __init smp_cpus_done(unsigned int max_cpus)
@@ -825,6 +755,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
825void __init smp_setup_processor_id(void) 755void __init smp_setup_processor_id(void)
826{ 756{
827 S390_lowcore.cpu_nr = 0; 757 S390_lowcore.cpu_nr = 0;
758 __cpu_logical_map[0] = stap();
828} 759}
829 760
830/* 761/*
@@ -839,58 +770,55 @@ int setup_profiling_timer(unsigned int multiplier)
839} 770}
840 771
841#ifdef CONFIG_HOTPLUG_CPU 772#ifdef CONFIG_HOTPLUG_CPU
842static ssize_t cpu_configure_show(struct device *dev, 773static ssize_t cpu_configure_show(struct sys_device *dev,
843 struct device_attribute *attr, char *buf) 774 struct sysdev_attribute *attr, char *buf)
844{ 775{
845 ssize_t count; 776 ssize_t count;
846 777
847 mutex_lock(&smp_cpu_state_mutex); 778 mutex_lock(&smp_cpu_state_mutex);
848 count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state); 779 count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
849 mutex_unlock(&smp_cpu_state_mutex); 780 mutex_unlock(&smp_cpu_state_mutex);
850 return count; 781 return count;
851} 782}
852 783
853static ssize_t cpu_configure_store(struct device *dev, 784static ssize_t cpu_configure_store(struct sys_device *dev,
854 struct device_attribute *attr, 785 struct sysdev_attribute *attr,
855 const char *buf, size_t count) 786 const char *buf, size_t count)
856{ 787{
857 struct pcpu *pcpu; 788 int cpu = dev->id;
858 int cpu, val, rc; 789 int val, rc;
859 char delim; 790 char delim;
860 791
861 if (sscanf(buf, "%d %c", &val, &delim) != 1) 792 if (sscanf(buf, "%d %c", &val, &delim) != 1)
862 return -EINVAL; 793 return -EINVAL;
863 if (val != 0 && val != 1) 794 if (val != 0 && val != 1)
864 return -EINVAL; 795 return -EINVAL;
796
865 get_online_cpus(); 797 get_online_cpus();
866 mutex_lock(&smp_cpu_state_mutex); 798 mutex_lock(&smp_cpu_state_mutex);
867 rc = -EBUSY; 799 rc = -EBUSY;
868 /* disallow configuration changes of online cpus and cpu 0 */ 800 /* disallow configuration changes of online cpus and cpu 0 */
869 cpu = dev->id;
870 if (cpu_online(cpu) || cpu == 0) 801 if (cpu_online(cpu) || cpu == 0)
871 goto out; 802 goto out;
872 pcpu = pcpu_devices + cpu;
873 rc = 0; 803 rc = 0;
874 switch (val) { 804 switch (val) {
875 case 0: 805 case 0:
876 if (pcpu->state != CPU_STATE_CONFIGURED) 806 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
877 break; 807 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
878 rc = sclp_cpu_deconfigure(pcpu->address); 808 if (!rc) {
879 if (rc) 809 smp_cpu_state[cpu] = CPU_STATE_STANDBY;
880 break; 810 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
881 pcpu->state = CPU_STATE_STANDBY; 811 }
882 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 812 }
883 topology_expect_change();
884 break; 813 break;
885 case 1: 814 case 1:
886 if (pcpu->state != CPU_STATE_STANDBY) 815 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
887 break; 816 rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
888 rc = sclp_cpu_configure(pcpu->address); 817 if (!rc) {
889 if (rc) 818 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
890 break; 819 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
891 pcpu->state = CPU_STATE_CONFIGURED; 820 }
892 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 821 }
893 topology_expect_change();
894 break; 822 break;
895 default: 823 default:
896 break; 824 break;
@@ -900,21 +828,52 @@ out:
900 put_online_cpus(); 828 put_online_cpus();
901 return rc ? rc : count; 829 return rc ? rc : count;
902} 830}
903static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); 831static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
904#endif /* CONFIG_HOTPLUG_CPU */ 832#endif /* CONFIG_HOTPLUG_CPU */
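
The configure attribute accepts only "0" (deconfigure) or "1" (configure), and only for offline CPUs other than cpu 0. A hypothetical userspace sketch, assuming the usual /sys/devices/system/cpu/cpuN/configure path that this attribute creates:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* cpu1 must already be offline, per the -EBUSY check above */
	int fd = open("/sys/devices/system/cpu/cpu1/configure", O_WRONLY);

	if (fd < 0)
		return 1;
	(void) write(fd, "0", 1);	/* 0 = deconfigure, 1 = configure */
	return close(fd);
}
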
905 833
906static ssize_t show_cpu_address(struct device *dev, 834static ssize_t cpu_polarization_show(struct sys_device *dev,
907 struct device_attribute *attr, char *buf) 835 struct sysdev_attribute *attr, char *buf)
836{
837 int cpu = dev->id;
838 ssize_t count;
839
840 mutex_lock(&smp_cpu_state_mutex);
841 switch (smp_cpu_polarization[cpu]) {
842 case POLARIZATION_HRZ:
843 count = sprintf(buf, "horizontal\n");
844 break;
845 case POLARIZATION_VL:
846 count = sprintf(buf, "vertical:low\n");
847 break;
848 case POLARIZATION_VM:
849 count = sprintf(buf, "vertical:medium\n");
850 break;
851 case POLARIZATION_VH:
852 count = sprintf(buf, "vertical:high\n");
853 break;
854 default:
855 count = sprintf(buf, "unknown\n");
856 break;
857 }
858 mutex_unlock(&smp_cpu_state_mutex);
859 return count;
860}
861static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
862
863static ssize_t show_cpu_address(struct sys_device *dev,
864 struct sysdev_attribute *attr, char *buf)
908{ 865{
909 return sprintf(buf, "%d\n", pcpu_devices[dev->id].address); 866 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
910} 867}
911static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); 868static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
869
912 870
913static struct attribute *cpu_common_attrs[] = { 871static struct attribute *cpu_common_attrs[] = {
914#ifdef CONFIG_HOTPLUG_CPU 872#ifdef CONFIG_HOTPLUG_CPU
915 &dev_attr_configure.attr, 873 &attr_configure.attr,
916#endif 874#endif
917 &dev_attr_address.attr, 875 &attr_address.attr,
876 &attr_polarization.attr,
918 NULL, 877 NULL,
919}; 878};
920 879
@@ -922,45 +881,71 @@ static struct attribute_group cpu_common_attr_group = {
922 .attrs = cpu_common_attrs, 881 .attrs = cpu_common_attrs,
923}; 882};
924 883
925static ssize_t show_idle_count(struct device *dev, 884static ssize_t show_capability(struct sys_device *dev,
926 struct device_attribute *attr, char *buf) 885 struct sysdev_attribute *attr, char *buf)
886{
887 unsigned int capability;
888 int rc;
889
890 rc = get_cpu_capability(&capability);
891 if (rc)
892 return rc;
893 return sprintf(buf, "%u\n", capability);
894}
895static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
896
897static ssize_t show_idle_count(struct sys_device *dev,
898 struct sysdev_attribute *attr, char *buf)
927{ 899{
928 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 900 struct s390_idle_data *idle;
929 unsigned long long idle_count; 901 unsigned long long idle_count;
930 unsigned int sequence; 902 unsigned int sequence;
931 903
932 do { 904 idle = &per_cpu(s390_idle, dev->id);
933 sequence = ACCESS_ONCE(idle->sequence); 905repeat:
934 idle_count = ACCESS_ONCE(idle->idle_count); 906 sequence = idle->sequence;
935 if (ACCESS_ONCE(idle->clock_idle_enter)) 907 smp_rmb();
936 idle_count++; 908 if (sequence & 1)
937 } while ((sequence & 1) || (idle->sequence != sequence)); 909 goto repeat;
910 idle_count = idle->idle_count;
911 if (idle->idle_enter)
912 idle_count++;
913 smp_rmb();
914 if (idle->sequence != sequence)
915 goto repeat;
938 return sprintf(buf, "%llu\n", idle_count); 916 return sprintf(buf, "%llu\n", idle_count);
939} 917}
940static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); 918static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
941 919
942static ssize_t show_idle_time(struct device *dev, 920static ssize_t show_idle_time(struct sys_device *dev,
943 struct device_attribute *attr, char *buf) 921 struct sysdev_attribute *attr, char *buf)
944{ 922{
945 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 923 struct s390_idle_data *idle;
946 unsigned long long now, idle_time, idle_enter, idle_exit; 924 unsigned long long now, idle_time, idle_enter;
947 unsigned int sequence; 925 unsigned int sequence;
948 926
949 do { 927 idle = &per_cpu(s390_idle, dev->id);
950 now = get_clock(); 928 now = get_clock();
951 sequence = ACCESS_ONCE(idle->sequence); 929repeat:
952 idle_time = ACCESS_ONCE(idle->idle_time); 930 sequence = idle->sequence;
953 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 931 smp_rmb();
954 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 932 if (sequence & 1)
955 } while ((sequence & 1) || (idle->sequence != sequence)); 933 goto repeat;
956 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; 934 idle_time = idle->idle_time;
935 idle_enter = idle->idle_enter;
936 if (idle_enter != 0ULL && idle_enter < now)
937 idle_time += now - idle_enter;
938 smp_rmb();
939 if (idle->sequence != sequence)
940 goto repeat;
957 return sprintf(buf, "%llu\n", idle_time >> 12); 941 return sprintf(buf, "%llu\n", idle_time >> 12);
958} 942}
959static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); 943static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
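
Both versions of the idle attributes implement the same lockless sequence-counter read: sample the counter, read the data, and retry if the counter was odd (writer active) or changed underneath. Distilled into a kernel-context sketch in the left column's ACCESS_ONCE() style, reusing the struct s390_idle_data shown above:

/* Sketch of the retry protocol used by both show_idle_* readers. */
static unsigned long long read_idle_count(struct s390_idle_data *idle)
{
	unsigned long long count;
	unsigned int seq;

	do {
		seq = ACCESS_ONCE(idle->sequence);
		count = ACCESS_ONCE(idle->idle_count);
	} while ((seq & 1) || ACCESS_ONCE(idle->sequence) != seq);
	return count;
}
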
960 944
961static struct attribute *cpu_online_attrs[] = { 945static struct attribute *cpu_online_attrs[] = {
962 &dev_attr_idle_count.attr, 946 &attr_capability.attr,
963 &dev_attr_idle_time_us.attr, 947 &attr_idle_count.attr,
948 &attr_idle_time_us.attr,
964 NULL, 949 NULL,
965}; 950};
966 951
@@ -972,25 +957,34 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
972 unsigned long action, void *hcpu) 957 unsigned long action, void *hcpu)
973{ 958{
974 unsigned int cpu = (unsigned int)(long)hcpu; 959 unsigned int cpu = (unsigned int)(long)hcpu;
975 struct cpu *c = &pcpu_devices[cpu].cpu; 960 struct cpu *c = &per_cpu(cpu_devices, cpu);
976 struct device *s = &c->dev; 961 struct sys_device *s = &c->sysdev;
962 struct s390_idle_data *idle;
977 int err = 0; 963 int err = 0;
978 964
979 switch (action & ~CPU_TASKS_FROZEN) { 965 switch (action) {
980 case CPU_ONLINE: 966 case CPU_ONLINE:
967 case CPU_ONLINE_FROZEN:
968 idle = &per_cpu(s390_idle, cpu);
969 memset(idle, 0, sizeof(struct s390_idle_data));
981 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); 970 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
982 break; 971 break;
983 case CPU_DEAD: 972 case CPU_DEAD:
973 case CPU_DEAD_FROZEN:
984 sysfs_remove_group(&s->kobj, &cpu_online_attr_group); 974 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
985 break; 975 break;
986 } 976 }
987 return notifier_from_errno(err); 977 return notifier_from_errno(err);
988} 978}
989 979
990static int __cpuinit smp_add_present_cpu(int cpu) 980static struct notifier_block __cpuinitdata smp_cpu_nb = {
981 .notifier_call = smp_cpu_notify,
982};
983
984static int __devinit smp_add_present_cpu(int cpu)
991{ 985{
992 struct cpu *c = &pcpu_devices[cpu].cpu; 986 struct cpu *c = &per_cpu(cpu_devices, cpu);
993 struct device *s = &c->dev; 987 struct sys_device *s = &c->sysdev;
994 int rc; 988 int rc;
995 989
996 c->hotpluggable = 1; 990 c->hotpluggable = 1;
@@ -1000,20 +994,11 @@ static int __cpuinit smp_add_present_cpu(int cpu)
1000 rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); 994 rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
1001 if (rc) 995 if (rc)
1002 goto out_cpu; 996 goto out_cpu;
1003 if (cpu_online(cpu)) { 997 if (!cpu_online(cpu))
1004 rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); 998 goto out;
1005 if (rc) 999 rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1006 goto out_online; 1000 if (!rc)
1007 } 1001 return 0;
1008 rc = topology_cpu_init(c);
1009 if (rc)
1010 goto out_topology;
1011 return 0;
1012
1013out_topology:
1014 if (cpu_online(cpu))
1015 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
1016out_online:
1017 sysfs_remove_group(&s->kobj, &cpu_common_attr_group); 1002 sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
1018out_cpu: 1003out_cpu:
1019#ifdef CONFIG_HOTPLUG_CPU 1004#ifdef CONFIG_HOTPLUG_CPU
@@ -1027,25 +1012,33 @@ out:
1027 1012
1028int __ref smp_rescan_cpus(void) 1013int __ref smp_rescan_cpus(void)
1029{ 1014{
1030 struct sclp_cpu_info *info; 1015 cpumask_t newcpus;
1031 int nr; 1016 int cpu;
1017 int rc;
1032 1018
1033 info = smp_get_cpu_info();
1034 if (!info)
1035 return -ENOMEM;
1036 get_online_cpus(); 1019 get_online_cpus();
1037 mutex_lock(&smp_cpu_state_mutex); 1020 mutex_lock(&smp_cpu_state_mutex);
1038 nr = __smp_rescan_cpus(info, 1); 1021 cpumask_copy(&newcpus, cpu_present_mask);
1022 rc = __smp_rescan_cpus();
1023 if (rc)
1024 goto out;
1025 cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
1026 for_each_cpu(cpu, &newcpus) {
1027 rc = smp_add_present_cpu(cpu);
1028 if (rc)
1029 set_cpu_present(cpu, false);
1030 }
1031 rc = 0;
1032out:
1039 mutex_unlock(&smp_cpu_state_mutex); 1033 mutex_unlock(&smp_cpu_state_mutex);
1040 put_online_cpus(); 1034 put_online_cpus();
1041 kfree(info); 1035 if (!cpumask_empty(&newcpus))
1042 if (nr)
1043 topology_schedule_update(); 1036 topology_schedule_update();
1044 return 0; 1037 return rc;
1045} 1038}
1046 1039
1047static ssize_t __ref rescan_store(struct device *dev, 1040static ssize_t __ref rescan_store(struct sysdev_class *class,
1048 struct device_attribute *attr, 1041 struct sysdev_class_attribute *attr,
1049 const char *buf, 1042 const char *buf,
1050 size_t count) 1043 size_t count)
1051{ 1044{
@@ -1054,19 +1047,64 @@ static ssize_t __ref rescan_store(struct device *dev,
1054 rc = smp_rescan_cpus(); 1047 rc = smp_rescan_cpus();
1055 return rc ? rc : count; 1048 return rc ? rc : count;
1056} 1049}
1057static DEVICE_ATTR(rescan, 0200, NULL, rescan_store); 1050static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
1058#endif /* CONFIG_HOTPLUG_CPU */ 1051#endif /* CONFIG_HOTPLUG_CPU */
1059 1052
1060static int __init s390_smp_init(void) 1053static ssize_t dispatching_show(struct sysdev_class *class,
1054 struct sysdev_class_attribute *attr,
1055 char *buf)
1061{ 1056{
1062 int cpu, rc; 1057 ssize_t count;
1058
1059 mutex_lock(&smp_cpu_state_mutex);
1060 count = sprintf(buf, "%d\n", cpu_management);
1061 mutex_unlock(&smp_cpu_state_mutex);
1062 return count;
1063}
1064
1065static ssize_t dispatching_store(struct sysdev_class *dev,
1066 struct sysdev_class_attribute *attr,
1067 const char *buf,
1068 size_t count)
1069{
1070 int val, rc;
1071 char delim;
1072
1073 if (sscanf(buf, "%d %c", &val, &delim) != 1)
1074 return -EINVAL;
1075 if (val != 0 && val != 1)
1076 return -EINVAL;
1077 rc = 0;
1078 get_online_cpus();
1079 mutex_lock(&smp_cpu_state_mutex);
1080 if (cpu_management == val)
1081 goto out;
1082 rc = topology_set_cpu_management(val);
1083 if (!rc)
1084 cpu_management = val;
1085out:
1086 mutex_unlock(&smp_cpu_state_mutex);
1087 put_online_cpus();
1088 return rc ? rc : count;
1089}
1090static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
1091 dispatching_store);
1092
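
dispatching_store() routes a 0/1 write into topology_set_cpu_management(), switching between horizontal and vertical CPU polarization. A hypothetical userspace sketch, assuming the /sys/devices/system/cpu/dispatching file this class attribute creates:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/devices/system/cpu/dispatching", O_WRONLY);

	if (fd < 0)
		return 1;
	(void) write(fd, "1", 1);	/* 0 = horizontal, 1 = vertical */
	return close(fd);
}
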
1093static int __init topology_init(void)
1094{
1095 int cpu;
1096 int rc;
1097
1098 register_cpu_notifier(&smp_cpu_nb);
1063 1099
1064 hotcpu_notifier(smp_cpu_notify, 0);
1065#ifdef CONFIG_HOTPLUG_CPU 1100#ifdef CONFIG_HOTPLUG_CPU
1066 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); 1101 rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
1067 if (rc) 1102 if (rc)
1068 return rc; 1103 return rc;
1069#endif 1104#endif
1105 rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
1106 if (rc)
1107 return rc;
1070 for_each_present_cpu(cpu) { 1108 for_each_present_cpu(cpu) {
1071 rc = smp_add_present_cpu(cpu); 1109 rc = smp_add_present_cpu(cpu);
1072 if (rc) 1110 if (rc)
@@ -1074,4 +1112,4 @@ static int __init s390_smp_init(void)
1074 } 1112 }
1075 return 0; 1113 return 0;
1076} 1114}
1077subsys_initcall(s390_smp_init); 1115subsys_initcall(topology_init);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 1785cd82253..8841919ef7e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -1,7 +1,9 @@
1/* 1/*
2 * arch/s390/kernel/stacktrace.c
3 *
2 * Stack trace management functions 4 * Stack trace management functions
3 * 5 *
4 * Copyright IBM Corp. 2006 6 * Copyright (C) IBM Corp. 2006
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 7 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */ 8 */
7 9
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index aa1494d0e38..cf9e5c6d552 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -7,132 +7,13 @@
7 */ 7 */
8 8
9#include <linux/pfn.h> 9#include <linux/pfn.h>
10#include <linux/suspend.h> 10#include <asm/system.h>
11#include <linux/mm.h>
12#include <asm/ctl_reg.h>
13 11
14/* 12/*
15 * References to section boundaries 13 * References to section boundaries
16 */ 14 */
17extern const void __nosave_begin, __nosave_end; 15extern const void __nosave_begin, __nosave_end;
18 16
19/*
20 * The restore of the saved pages in a hibernation image will set
21 * the change and referenced bits in the storage key for each page.
22 * Overindication of the referenced bits after a hibernation cycle
23 * does not cause any harm, but the overindication of the change bits
24 * would cause trouble.
25 * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
26 * page to the most significant byte of the associated page frame
27 * number in the hibernation image.
28 */
29
30/*
31 * Key storage is allocated as a linked list of pages.
32 * The size of the keys array is (PAGE_SIZE - sizeof(long))
33 */
34struct page_key_data {
35 struct page_key_data *next;
36 unsigned char data[];
37};
38
39#define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *))
40
41static struct page_key_data *page_key_data;
42static struct page_key_data *page_key_rp, *page_key_wp;
43static unsigned long page_key_rx, page_key_wx;
44
45/*
46 * For each page in the hibernation image one additional byte is
47 * stored in the most significant byte of the page frame number.
48 * On suspend no additional memory is required but on resume the
49 * keys need to be memorized until the page data has been restored.
50 * Only then can the storage keys be set to their old state.
51 */
52unsigned long page_key_additional_pages(unsigned long pages)
53{
54 return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
55}
56
57/*
58 * Free page_key_data list of arrays.
59 */
60void page_key_free(void)
61{
62 struct page_key_data *pkd;
63
64 while (page_key_data) {
65 pkd = page_key_data;
66 page_key_data = pkd->next;
67 free_page((unsigned long) pkd);
68 }
69}
70
71/*
72 * Allocate page_key_data list of arrays with enough room to store
73 * one byte for each page in the hibernation image.
74 */
75int page_key_alloc(unsigned long pages)
76{
77 struct page_key_data *pk;
78 unsigned long size;
79
80 size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
81 while (size--) {
82 pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
83 if (!pk) {
84 page_key_free();
85 return -ENOMEM;
86 }
87 pk->next = page_key_data;
88 page_key_data = pk;
89 }
90 page_key_rp = page_key_wp = page_key_data;
91 page_key_rx = page_key_wx = 0;
92 return 0;
93}
94
95/*
96 * Save the storage key into the upper 8 bits of the page frame number.
97 */
98void page_key_read(unsigned long *pfn)
99{
100 unsigned long addr;
101
102 addr = (unsigned long) page_address(pfn_to_page(*pfn));
103 *(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
104}
105
106/*
107 * Extract the storage key from the upper 8 bits of the page frame number
108 * and store it in the page_key_data list of arrays.
109 */
110void page_key_memorize(unsigned long *pfn)
111{
112 page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
113 *(unsigned char *) pfn = 0;
114 if (++page_key_wx < PAGE_KEY_DATA_SIZE)
115 return;
116 page_key_wp = page_key_wp->next;
117 page_key_wx = 0;
118}
119
120/*
121 * Get the next key from the page_key_data list of arrays and set the
122 * storage key of the page referred by @address. If @address refers to
123 * a "safe" page the swsusp_arch_resume code will transfer the storage
124 * key from the buffer page to the original page.
125 */
126void page_key_write(void *address)
127{
128 page_set_storage_key((unsigned long) address,
129 page_key_rp->data[page_key_rx], 0);
130 if (++page_key_rx < PAGE_KEY_DATA_SIZE)
131 return;
132 page_key_rp = page_key_rp->next;
133 page_key_rx = 0;
134}
135
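The removed page_key machinery costs one byte per image page, buffered in a linked list of pages, so the extra allocation is easy to bound:

/* Worked example (illustrative, 4 KiB pages on 64-bit):
 *   PAGE_KEY_DATA_SIZE = 4096 - 8 = 4088 key bytes per list page
 * so a 1 GiB hibernation image (262144 pages) needs
 *   DIV_ROUND_UP(262144, 4088) = 65
 * extra pages, roughly 0.025% of the image size. */
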
136int pfn_is_nosave(unsigned long pfn) 17int pfn_is_nosave(unsigned long pfn)
137{ 18{
138 unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); 19 unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index d4ca4e0617b..51bcdb50a23 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -12,7 +12,6 @@
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13#include <asm/thread_info.h> 13#include <asm/thread_info.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/sigp.h>
16 15
17/* 16/*
18 * Save register context in absolute 0 lowcore and call swsusp_save() to 17 * Save register context in absolute 0 lowcore and call swsusp_save() to
@@ -43,7 +42,7 @@ ENTRY(swsusp_arch_suspend)
43 lghi %r1,0x1000 42 lghi %r1,0x1000
44 43
45 /* Save CPU address */ 44 /* Save CPU address */
46 stap __LC_EXT_CPU_ADDR(%r0) 45 stap __LC_CPU_ADDRESS(%r0)
47 46
48 /* Store registers */ 47 /* Store registers */
49 mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ 48 mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
@@ -137,14 +136,11 @@ ENTRY(swsusp_arch_resume)
1370: 1360:
138 lg %r2,8(%r1) 137 lg %r2,8(%r1)
139 lg %r4,0(%r1) 138 lg %r4,0(%r1)
140 iske %r0,%r4
141 lghi %r3,PAGE_SIZE 139 lghi %r3,PAGE_SIZE
142 lghi %r5,PAGE_SIZE 140 lghi %r5,PAGE_SIZE
1431: 1411:
144 mvcle %r2,%r4,0 142 mvcle %r2,%r4,0
145 jo 1b 143 jo 1b
146 lg %r2,8(%r1)
147 sske %r0,%r2
148 lg %r1,16(%r1) 144 lg %r1,16(%r1)
149 ltgr %r1,%r1 145 ltgr %r1,%r1
150 jnz 0b 146 jnz 0b
@@ -164,7 +160,7 @@ ENTRY(swsusp_arch_resume)
164 diag %r0,%r0,0x308 160 diag %r0,%r0,0x308
165restart_entry: 161restart_entry:
166 lhi %r1,1 162 lhi %r1,1
167 sigp %r1,%r0,SIGP_SET_ARCHITECTURE 163 sigp %r1,%r0,0x12
168 sam64 164 sam64
169 larl %r1,.Lnew_pgm_check_psw 165 larl %r1,.Lnew_pgm_check_psw
170 lpswe 0(%r1) 166 lpswe 0(%r1)
@@ -174,15 +170,15 @@ pgm_check_entry:
174 larl %r1,.Lresume_cpu /* Resume CPU address: r2 */ 170 larl %r1,.Lresume_cpu /* Resume CPU address: r2 */
175 stap 0(%r1) 171 stap 0(%r1)
176 llgh %r2,0(%r1) 172 llgh %r2,0(%r1)
177 llgh %r1,__LC_EXT_CPU_ADDR(%r0) /* Suspend CPU address: r1 */ 173 llgh %r1,__LC_CPU_ADDRESS(%r0) /* Suspend CPU address: r1 */
178 cgr %r1,%r2 174 cgr %r1,%r2
179 je restore_registers /* r1 = r2 -> nothing to do */ 175 je restore_registers /* r1 = r2 -> nothing to do */
180 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ 176 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
181 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) 177 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
1823: 1783:
183 sigp %r9,%r1,SIGP_INITIAL_CPU_RESET /* sigp initial cpu reset */ 179 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET
184 brc 8,4f /* accepted */ 180 brc 8,4f /* accepted */
185 brc 2,3b /* busy, try again */ 181 brc 2,3b /* busy, try again */
186 182
187 /* Suspend CPU not available -> panic */ 183 /* Suspend CPU not available -> panic */
188 larl %r15,init_thread_union 184 larl %r15,init_thread_union
@@ -191,16 +187,16 @@ pgm_check_entry:
191 larl %r3,_sclp_print_early 187 larl %r3,_sclp_print_early
192 lghi %r1,0 188 lghi %r1,0
193 sam31 189 sam31
194 sigp %r1,%r0,SIGP_SET_ARCHITECTURE 190 sigp %r1,%r0,0x12
195 basr %r14,%r3 191 basr %r14,%r3
196 larl %r3,.Ldisabled_wait_31 192 larl %r3,.Ldisabled_wait_31
197 lpsw 0(%r3) 193 lpsw 0(%r3)
1984: 1944:
199 /* Switch to suspend CPU */ 195 /* Switch to suspend CPU */
200 sigp %r9,%r1,SIGP_RESTART /* sigp restart to suspend CPU */ 196 sigp %r9,%r1,__SIGP_RESTART /* start suspend CPU */
201 brc 2,4b /* busy, try again */ 197 brc 2,4b /* busy, try again */
2025: 1985:
203 sigp %r9,%r2,SIGP_STOP /* sigp stop to current resume CPU */ 199 sigp %r9,%r2,__SIGP_STOP /* stop resume (current) CPU */
204 brc 2,5b /* busy, try again */ 200 brc 2,5b /* busy, try again */
2056: j 6b 2016: j 6b
206 202
@@ -208,7 +204,7 @@ restart_suspend:
208 larl %r1,.Lresume_cpu 204 larl %r1,.Lresume_cpu
209 llgh %r2,0(%r1) 205 llgh %r2,0(%r1)
2107: 2067:
211 sigp %r9,%r2,SIGP_SENSE /* sigp sense, wait for resume CPU */ 207 sigp %r9,%r2,__SIGP_SENSE /* Wait for resume CPU */
212 brc 8,7b /* accepted, status 0, still running */ 208 brc 8,7b /* accepted, status 0, still running */
213 brc 2,7b /* busy, try again */ 209 brc 2,7b /* busy, try again */
214 tmll %r9,0x40 /* Test if resume CPU is stopped */ 210 tmll %r9,0x40 /* Test if resume CPU is stopped */
@@ -258,9 +254,6 @@ restore_registers:
258 lghi %r2,0 254 lghi %r2,0
259 brasl %r14,arch_set_page_states 255 brasl %r14,arch_set_page_states
260 256
261 /* Log potential guest relocation */
262 brasl %r14,lgr_info_log
263
264 /* Reinitialize the channel subsystem */ 257 /* Reinitialize the channel subsystem */
265 brasl %r14,channel_subsystem_reinit 258 brasl %r14,channel_subsystem_reinit
266 259
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index d0964d22adb..476081440df 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * arch/s390/kernel/sys_s390.c
3 *
2 * S390 version 4 * S390 version
3 * Copyright IBM Corp. 1999, 2000 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
5 * Thomas Spatzier (tspat@de.ibm.com) 7 * Thomas Spatzier (tspat@de.ibm.com)
6 * 8 *
@@ -58,22 +60,74 @@ out:
58} 60}
59 61
60/* 62/*
61 * sys_ipc() is the de-multiplexer for the SysV IPC calls. 63 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
64 *
65 * This is really horribly ugly.
62 */ 66 */
63SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second, 67SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
64 unsigned long, third, void __user *, ptr) 68 unsigned long, third, void __user *, ptr)
65{ 69{
66 if (call >> 16) 70 struct ipc_kludge tmp;
67 return -EINVAL; 71 int ret;
68 /* The s390 sys_ipc variant has only five parameters instead of six 72
69 * like the generic variant. The only difference is the handling of 73 switch (call) {
70 * the SEMTIMEDOP subcall where on s390 the third parameter is used 74 case SEMOP:
71 * as a pointer to a struct timespec where the generic variant uses 75 return sys_semtimedop(first, (struct sembuf __user *)ptr,
72 * the fifth parameter. 76 (unsigned)second, NULL);
73 * Therefore we can call the generic variant by simply passing the 77 case SEMTIMEDOP:
74 * third parameter also as fifth parameter. 78 return sys_semtimedop(first, (struct sembuf __user *)ptr,
75 */ 79 (unsigned)second,
76 return sys_ipc(call, first, second, third, ptr, third); 80 (const struct timespec __user *) third);
81 case SEMGET:
82 return sys_semget(first, (int)second, third);
83 case SEMCTL: {
84 union semun fourth;
85 if (!ptr)
86 return -EINVAL;
87 if (get_user(fourth.__pad, (void __user * __user *) ptr))
88 return -EFAULT;
89 return sys_semctl(first, (int)second, third, fourth);
90 }
91 case MSGSND:
92 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
93 (size_t)second, third);
94 break;
95 case MSGRCV:
96 if (!ptr)
97 return -EINVAL;
98 if (copy_from_user (&tmp, (struct ipc_kludge __user *) ptr,
99 sizeof (struct ipc_kludge)))
100 return -EFAULT;
101 return sys_msgrcv (first, tmp.msgp,
102 (size_t)second, tmp.msgtyp, third);
103 case MSGGET:
104 return sys_msgget((key_t)first, (int)second);
105 case MSGCTL:
106 return sys_msgctl(first, (int)second,
107 (struct msqid_ds __user *)ptr);
108
109 case SHMAT: {
110 ulong raddr;
111 ret = do_shmat(first, (char __user *)ptr,
112 (int)second, &raddr);
113 if (ret)
114 return ret;
115 return put_user (raddr, (ulong __user *) third);
116 break;
117 }
118 case SHMDT:
119 return sys_shmdt ((char __user *)ptr);
120 case SHMGET:
121 return sys_shmget(first, (size_t)second, third);
122 case SHMCTL:
123 return sys_shmctl(first, (int)second,
124 (struct shmid_ds __user *) ptr);
125 default:
126 return -ENOSYS;
127
128 }
129
130 return -EINVAL;
77} 131}
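
The five-parameter s390 multiplexer can be driven directly with syscall(2). A hypothetical userspace sketch, assuming an architecture that exposes SYS_ipc; the subcall number is taken from linux/ipc.h:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/syscall.h>
#include <unistd.h>

#define MY_SEMGET 2	/* SEMGET subcall number from linux/ipc.h */

int main(void)
{
	/* SEMGET: first = key, second = nsems, third = semflg */
	long id = syscall(SYS_ipc, MY_SEMGET, IPC_PRIVATE, 1, 0600, NULL);

	if (id < 0) {
		perror("ipc(SEMGET)");
		return 1;
	}
	printf("semaphore set id: %ld\n", id);
	return 0;
}
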
78 132
79#ifdef CONFIG_64BIT 133#ifdef CONFIG_64BIT
@@ -81,12 +135,11 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
81{ 135{
82 unsigned int ret; 136 unsigned int ret;
83 137
84 if (personality(current->personality) == PER_LINUX32 && 138 if (current->personality == PER_LINUX32 && personality == PER_LINUX)
85 personality(personality) == PER_LINUX) 139 personality = PER_LINUX32;
86 personality |= PER_LINUX32;
87 ret = sys_personality(personality); 140 ret = sys_personality(personality);
88 if (personality(ret) == PER_LINUX32) 141 if (ret == PER_LINUX32)
89 ret &= ~PER_LINUX32; 142 ret = PER_LINUX;
90 143
91 return ret; 144 return ret;
92} 145}
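
The left column keeps a 31-bit process's personality stable across the syscall; tracing the compat case makes the masking clearer:

/* Illustrative trace of the left-column compat path:
 *   caller personality : PER_LINUX32
 *   requested value    : PER_LINUX   -> PER_LINUX32 is OR-ed back in
 *   sys_personality()  : returns the old value, PER_LINUX32
 *   value returned     : PER_LINUX   (PER_LINUX32 masked out again)
 * so a 31-bit process keeps its compat personality while observing
 * the generic constants it asked for. */
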
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 6a6c61f94dd..73eb08c874f 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -348,8 +348,3 @@ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at
348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper) 348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper) 349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper) 350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
351SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
352SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
353SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
354SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
355SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper)
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 62f89d98e88..5c9e439bf3f 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -22,41 +22,17 @@
22#include <math-emu/soft-fp.h> 22#include <math-emu/soft-fp.h>
23#include <math-emu/single.h> 23#include <math-emu/single.h>
24 24
25int topology_max_mnest; 25static inline int stsi_0(void)
26
27/*
28 * stsi - store system information
29 *
30 * Returns the current configuration level if function code 0 was specified.
31 * Otherwise returns 0 on success or a negative value on error.
32 */
33int stsi(void *sysinfo, int fc, int sel1, int sel2)
34{ 26{
35 register int r0 asm("0") = (fc << 28) | sel1; 27 int rc = stsi(NULL, 0, 0, 0);
36 register int r1 asm("1") = sel2; 28 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
37 int rc = 0;
38
39 asm volatile(
40 " stsi 0(%3)\n"
41 "0: jz 2f\n"
42 "1: lhi %1,%4\n"
43 "2:\n"
44 EX_TABLE(0b, 1b)
45 : "+d" (r0), "+d" (rc)
46 : "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
47 : "cc", "memory");
48 if (rc)
49 return rc;
50 return fc ? 0 : ((unsigned int) r0) >> 28;
51} 29}
52EXPORT_SYMBOL(stsi);
53 30
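With function code 0 the left-column stsi() stores nothing and instead reports the current configuration level from the top four bits of register 0. A minimal kernel-context sketch of that calling convention, mirroring sysinfo_show() below:

static int example_query_machine(void)
{
	/* Illustrative sketch only, kernel context assumed. */
	struct sysinfo_1_1_1 *info = (void *) get_zeroed_page(GFP_KERNEL);
	int level = stsi(NULL, 0, 0, 0);	/* fc 0: level query */

	if (info && level >= 1 && stsi(info, 1, 1, 1) == 0)
		EBCASC(info->manufacturer, sizeof(info->manufacturer));
	free_page((unsigned long) info);
	return level;
}
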
54static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info) 31static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
55{ 32{
56 int i; 33 if (stsi(info, 1, 1, 1) == -ENOSYS)
34 return len;
57 35
58 if (stsi(info, 1, 1, 1))
59 return;
60 EBCASC(info->manufacturer, sizeof(info->manufacturer)); 36 EBCASC(info->manufacturer, sizeof(info->manufacturer));
61 EBCASC(info->type, sizeof(info->type)); 37 EBCASC(info->type, sizeof(info->type));
62 EBCASC(info->model, sizeof(info->model)); 38 EBCASC(info->model, sizeof(info->model));
@@ -65,197 +41,242 @@ static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
65 EBCASC(info->model_capacity, sizeof(info->model_capacity)); 41 EBCASC(info->model_capacity, sizeof(info->model_capacity));
66 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap)); 42 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
67 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap)); 43 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
68 seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer); 44 len += sprintf(page + len, "Manufacturer: %-16.16s\n",
69 seq_printf(m, "Type: %-4.4s\n", info->type); 45 info->manufacturer);
70 /* 46 len += sprintf(page + len, "Type: %-4.4s\n",
71 * Sigh: the model field has been renamed with System z9 47 info->type);
72 * to model_capacity and a new model field has been added
73 * after the plant field. To avoid confusing older programs
74 * the "Model:" prints "model_capacity model" or just
75 * "model_capacity" if the model string is empty .
76 */
77 seq_printf(m, "Model: %-16.16s", info->model_capacity);
78 if (info->model[0] != '\0') 48 if (info->model[0] != '\0')
79 seq_printf(m, " %-16.16s", info->model); 49 /*
80 seq_putc(m, '\n'); 50 * Sigh: the model field has been renamed with System z9
81 seq_printf(m, "Sequence Code: %-16.16s\n", info->sequence); 51 * to model_capacity and a new model field has been added
82 seq_printf(m, "Plant: %-4.4s\n", info->plant); 52 * after the plant field. To avoid confusing older programs
83 seq_printf(m, "Model Capacity: %-16.16s %08u\n", 53 * the "Model:" prints "model_capacity model" or just
84 info->model_capacity, info->model_cap_rating); 54 * "model_capacity" if the model string is empty.
85 if (info->model_perm_cap_rating) 55 */
86 seq_printf(m, "Model Perm. Capacity: %-16.16s %08u\n", 56 len += sprintf(page + len,
87 info->model_perm_cap, 57 "Model: %-16.16s %-16.16s\n",
88 info->model_perm_cap_rating); 58 info->model_capacity, info->model);
89 if (info->model_temp_cap_rating) 59 else
90 seq_printf(m, "Model Temp. Capacity: %-16.16s %08u\n", 60 len += sprintf(page + len, "Model: %-16.16s\n",
91 info->model_temp_cap, 61 info->model_capacity);
92 info->model_temp_cap_rating); 62 len += sprintf(page + len, "Sequence Code: %-16.16s\n",
93 if (info->ncr) 63 info->sequence);
94 seq_printf(m, "Nominal Cap. Rating: %08u\n", info->ncr); 64 len += sprintf(page + len, "Plant: %-4.4s\n",
95 if (info->npr) 65 info->plant);
96 seq_printf(m, "Nominal Perm. Rating: %08u\n", info->npr); 66 len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
97 if (info->ntr) 67 info->model_capacity, *(u32 *) info->model_cap_rating);
98 seq_printf(m, "Nominal Temp. Rating: %08u\n", info->ntr); 68 if (info->model_perm_cap[0] != '\0')
69 len += sprintf(page + len,
70 "Model Perm. Capacity: %-16.16s %08u\n",
71 info->model_perm_cap,
72 *(u32 *) info->model_perm_cap_rating);
73 if (info->model_temp_cap[0] != '\0')
74 len += sprintf(page + len,
75 "Model Temp. Capacity: %-16.16s %08u\n",
76 info->model_temp_cap,
77 *(u32 *) info->model_temp_cap_rating);
99 if (info->cai) { 78 if (info->cai) {
100 seq_printf(m, "Capacity Adj. Ind.: %d\n", info->cai); 79 len += sprintf(page + len,
101 seq_printf(m, "Capacity Ch. Reason: %d\n", info->ccr); 80 "Capacity Adj. Ind.: %d\n",
102 seq_printf(m, "Capacity Transient: %d\n", info->t); 81 info->cai);
103 } 82 len += sprintf(page + len, "Capacity Ch. Reason: %d\n",
104 if (info->p) { 83 info->ccr);
105 for (i = 1; i <= ARRAY_SIZE(info->typepct); i++) {
106 seq_printf(m, "Type %d Percentage: %d\n",
107 i, info->typepct[i - 1]);
108 }
109 } 84 }
85 return len;
110} 86}
111 87
112static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info) 88static int stsi_15_1_x(struct sysinfo_15_1_x *info, char *page, int len)
113{ 89{
114 static int max_mnest; 90 static int max_mnest;
115 int i, rc; 91 int i, rc;
116 92
117 seq_putc(m, '\n'); 93 len += sprintf(page + len, "\n");
118 if (!MACHINE_HAS_TOPOLOGY) 94 if (!MACHINE_HAS_TOPOLOGY)
119 return; 95 return len;
120 if (stsi(info, 15, 1, topology_max_mnest)) 96 if (max_mnest) {
121 return; 97 stsi(info, 15, 1, max_mnest);
122 seq_printf(m, "CPU Topology HW: "); 98 } else {
99 for (max_mnest = 6; max_mnest > 1; max_mnest--) {
100 rc = stsi(info, 15, 1, max_mnest);
101 if (rc != -ENOSYS)
102 break;
103 }
104 }
105 len += sprintf(page + len, "CPU Topology HW: ");
123 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 106 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
124 seq_printf(m, " %d", info->mag[i]); 107 len += sprintf(page + len, " %d", info->mag[i]);
125 seq_putc(m, '\n'); 108 len += sprintf(page + len, "\n");
126#ifdef CONFIG_SCHED_MC 109#ifdef CONFIG_SCHED_MC
127 store_topology(info); 110 store_topology(info);
128 seq_printf(m, "CPU Topology SW: "); 111 len += sprintf(page + len, "CPU Topology SW: ");
129 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 112 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
130 seq_printf(m, " %d", info->mag[i]); 113 len += sprintf(page + len, " %d", info->mag[i]);
131 seq_putc(m, '\n'); 114 len += sprintf(page + len, "\n");
132#endif 115#endif
116 return len;
133} 117}
134 118
135static void stsi_1_2_2(struct seq_file *m, struct sysinfo_1_2_2 *info) 119static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
136{ 120{
137 struct sysinfo_1_2_2_extension *ext; 121 struct sysinfo_1_2_2_extension *ext;
138 int i; 122 int i;
139 123
140 if (stsi(info, 1, 2, 2)) 124 if (stsi(info, 1, 2, 2) == -ENOSYS)
141 return; 125 return len;
142 ext = (struct sysinfo_1_2_2_extension *) 126 ext = (struct sysinfo_1_2_2_extension *)
143 ((unsigned long) info + info->acc_offset); 127 ((unsigned long) info + info->acc_offset);
144 seq_printf(m, "CPUs Total: %d\n", info->cpus_total); 128
145 seq_printf(m, "CPUs Configured: %d\n", info->cpus_configured); 129 len += sprintf(page + len, "CPUs Total: %d\n",
146 seq_printf(m, "CPUs Standby: %d\n", info->cpus_standby); 130 info->cpus_total);
147 seq_printf(m, "CPUs Reserved: %d\n", info->cpus_reserved); 131 len += sprintf(page + len, "CPUs Configured: %d\n",
148 /* 132 info->cpus_configured);
149 * Sigh 2. According to the specification the alternate 133 len += sprintf(page + len, "CPUs Standby: %d\n",
150 * capability field is a 32 bit floating point number 134 info->cpus_standby);
151 * if the higher order 8 bits are not zero. Printing 135 len += sprintf(page + len, "CPUs Reserved: %d\n",
152 * a floating point number in the kernel is a no-no, 136 info->cpus_reserved);
153 * always print the number as 32 bit unsigned integer. 137
154 * The user-space needs to know about the strange 138 if (info->format == 1) {
155 * encoding of the alternate cpu capability. 139 /*
156 */ 140 * Sigh 2. According to the specification the alternate
157 seq_printf(m, "Capability: %u", info->capability); 141 * capability field is a 32 bit floating point number
158 if (info->format == 1) 142 * if the higher order 8 bits are not zero. Printing
159 seq_printf(m, " %u", ext->alt_capability); 143 * a floating point number in the kernel is a no-no,
160 seq_putc(m, '\n'); 144 * always print the number as 32 bit unsigned integer.
161 if (info->nominal_cap) 145 * The user-space needs to know about the strange
162 seq_printf(m, "Nominal Capability: %d\n", info->nominal_cap); 146 * encoding of the alternate cpu capability.
163 if (info->secondary_cap) 147 */
164 seq_printf(m, "Secondary Capability: %d\n", info->secondary_cap); 148 len += sprintf(page + len, "Capability: %u %u\n",
165 for (i = 2; i <= info->cpus_total; i++) { 149 info->capability, ext->alt_capability);
166 seq_printf(m, "Adjustment %02d-way: %u", 150 for (i = 2; i <= info->cpus_total; i++)
167 i, info->adjustment[i-2]); 151 len += sprintf(page + len,
168 if (info->format == 1) 152 "Adjustment %02d-way: %u %u\n",
169 seq_printf(m, " %u", ext->alt_adjustment[i-2]); 153 i, info->adjustment[i-2],
170 seq_putc(m, '\n'); 154 ext->alt_adjustment[i-2]);
155
156 } else {
157 len += sprintf(page + len, "Capability: %u\n",
158 info->capability);
159 for (i = 2; i <= info->cpus_total; i++)
160 len += sprintf(page + len,
161 "Adjustment %02d-way: %u\n",
162 i, info->adjustment[i-2]);
171 } 163 }
164
165 if (info->secondary_capability != 0)
166 len += sprintf(page + len, "Secondary Capability: %d\n",
167 info->secondary_capability);
168 return len;
172} 169}
173 170
174static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info) 171static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
175{ 172{
176 if (stsi(info, 2, 2, 2)) 173 if (stsi(info, 2, 2, 2) == -ENOSYS)
177 return; 174 return len;
175
178 EBCASC(info->name, sizeof(info->name)); 176 EBCASC(info->name, sizeof(info->name));
179 seq_putc(m, '\n'); 177
180 seq_printf(m, "LPAR Number: %d\n", info->lpar_number); 178 len += sprintf(page + len, "\n");
181 seq_printf(m, "LPAR Characteristics: "); 179 len += sprintf(page + len, "LPAR Number: %d\n",
180 info->lpar_number);
181
182 len += sprintf(page + len, "LPAR Characteristics: ");
182 if (info->characteristics & LPAR_CHAR_DEDICATED) 183 if (info->characteristics & LPAR_CHAR_DEDICATED)
183 seq_printf(m, "Dedicated "); 184 len += sprintf(page + len, "Dedicated ");
184 if (info->characteristics & LPAR_CHAR_SHARED) 185 if (info->characteristics & LPAR_CHAR_SHARED)
185 seq_printf(m, "Shared "); 186 len += sprintf(page + len, "Shared ");
186 if (info->characteristics & LPAR_CHAR_LIMITED) 187 if (info->characteristics & LPAR_CHAR_LIMITED)
187 seq_printf(m, "Limited "); 188 len += sprintf(page + len, "Limited ");
188 seq_putc(m, '\n'); 189 len += sprintf(page + len, "\n");
189 seq_printf(m, "LPAR Name: %-8.8s\n", info->name); 190
190 seq_printf(m, "LPAR Adjustment: %d\n", info->caf); 191 len += sprintf(page + len, "LPAR Name: %-8.8s\n",
191 seq_printf(m, "LPAR CPUs Total: %d\n", info->cpus_total); 192 info->name);
192 seq_printf(m, "LPAR CPUs Configured: %d\n", info->cpus_configured); 193
193 seq_printf(m, "LPAR CPUs Standby: %d\n", info->cpus_standby); 194 len += sprintf(page + len, "LPAR Adjustment: %d\n",
194 seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved); 195 info->caf);
195 seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated); 196
196 seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared); 197 len += sprintf(page + len, "LPAR CPUs Total: %d\n",
198 info->cpus_total);
199 len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
200 info->cpus_configured);
201 len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
202 info->cpus_standby);
203 len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
204 info->cpus_reserved);
205 len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
206 info->cpus_dedicated);
207 len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
208 info->cpus_shared);
209 return len;
197} 210}
198 211
199static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info) 212static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
200{ 213{
201 int i; 214 int i;
202 215
203 if (stsi(info, 3, 2, 2)) 216 if (stsi(info, 3, 2, 2) == -ENOSYS)
204 return; 217 return len;
205 for (i = 0; i < info->count; i++) { 218 for (i = 0; i < info->count; i++) {
206 EBCASC(info->vm[i].name, sizeof(info->vm[i].name)); 219 EBCASC(info->vm[i].name, sizeof(info->vm[i].name));
207 EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi)); 220 EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi));
208 seq_putc(m, '\n'); 221 len += sprintf(page + len, "\n");
209 seq_printf(m, "VM%02d Name: %-8.8s\n", i, info->vm[i].name); 222 len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
210 seq_printf(m, "VM%02d Control Program: %-16.16s\n", i, info->vm[i].cpi); 223 i, info->vm[i].name);
211 seq_printf(m, "VM%02d Adjustment: %d\n", i, info->vm[i].caf); 224 len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
212 seq_printf(m, "VM%02d CPUs Total: %d\n", i, info->vm[i].cpus_total); 225 i, info->vm[i].cpi);
213 seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured); 226
214 seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby); 227 len += sprintf(page + len, "VM%02d Adjustment: %d\n",
215 seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved); 228 i, info->vm[i].caf);
229
230 len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
231 i, info->vm[i].cpus_total);
232 len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
233 i, info->vm[i].cpus_configured);
234 len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
235 i, info->vm[i].cpus_standby);
236 len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
237 i, info->vm[i].cpus_reserved);
216 } 238 }
239 return len;
217} 240}
218 241
219static int sysinfo_show(struct seq_file *m, void *v) 242static int proc_read_sysinfo(char *page, char **start,
243 off_t off, int count,
244 int *eof, void *data)
220{ 245{
221 void *info = (void *)get_zeroed_page(GFP_KERNEL); 246 unsigned long info = get_zeroed_page(GFP_KERNEL);
222 int level; 247 int level, len;
223 248
224 if (!info) 249 if (!info)
225 return 0; 250 return 0;
226 level = stsi(NULL, 0, 0, 0); 251
252 len = 0;
253 level = stsi_0();
227 if (level >= 1) 254 if (level >= 1)
228 stsi_1_1_1(m, info); 255 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
256
229 if (level >= 1) 257 if (level >= 1)
230 stsi_15_1_x(m, info); 258 len = stsi_15_1_x((struct sysinfo_15_1_x *) info, page, len);
259
231 if (level >= 1) 260 if (level >= 1)
232 stsi_1_2_2(m, info); 261 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
262
233 if (level >= 2) 263 if (level >= 2)
234 stsi_2_2_2(m, info); 264 len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
265
235 if (level >= 3) 266 if (level >= 3)
236 stsi_3_2_2(m, info); 267 len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
237 free_page((unsigned long)info);
238 return 0;
239}
240 268
241static int sysinfo_open(struct inode *inode, struct file *file) 269 free_page(info);
242{ 270 return len;
243 return single_open(file, sysinfo_show, NULL);
244} 271}
245 272
246static const struct file_operations sysinfo_fops = { 273static __init int create_proc_sysinfo(void)
247 .open = sysinfo_open,
248 .read = seq_read,
249 .llseek = seq_lseek,
250 .release = single_release,
251};
252
253static int __init sysinfo_create_proc(void)
254{ 274{
255 proc_create("sysinfo", 0444, NULL, &sysinfo_fops); 275 create_proc_read_entry("sysinfo", 0444, NULL,
276 proc_read_sysinfo, NULL);
256 return 0; 277 return 0;
257} 278}
258device_initcall(sysinfo_create_proc); 279device_initcall(create_proc_sysinfo);
259 280
260/* 281/*
261 * Service levels interface. 282 * Service levels interface.
@@ -372,6 +393,27 @@ static __init int create_proc_service_level(void)
372subsys_initcall(create_proc_service_level); 393subsys_initcall(create_proc_service_level);
373 394
374/* 395/*
396 * Bogomips calculation based on cpu capability.
397 */
398int get_cpu_capability(unsigned int *capability)
399{
400 struct sysinfo_1_2_2 *info;
401 int rc;
402
403 info = (void *) get_zeroed_page(GFP_KERNEL);
404 if (!info)
405 return -ENOMEM;
406 rc = stsi(info, 1, 2, 2);
407 if (rc == -ENOSYS)
408 goto out;
409 rc = 0;
410 *capability = info->capability;
411out:
412 free_page((unsigned long) info);
413 return rc;
414}
415
416/*
375 * CPU capability might have changed. Therefore recalculate loops_per_jiffy. 417 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
376 */ 418 */
377void s390_adjust_jiffies(void) 419void s390_adjust_jiffies(void)
@@ -386,7 +428,7 @@ void s390_adjust_jiffies(void)
386 if (!info) 428 if (!info)
387 return; 429 return;
388 430
389 if (stsi(info, 1, 2, 2) == 0) { 431 if (stsi(info, 1, 2, 2) != -ENOSYS) {
390 /* 432 /*
391 * Major sigh. The cpu capability encoding is "special". 433 * Major sigh. The cpu capability encoding is "special".
392 * If the first 9 bits of info->capability are 0 then it 434 * If the first 9 bits of info->capability are 0 then it
@@ -400,7 +442,7 @@ void s390_adjust_jiffies(void)
400 */ 442 */
401 FP_UNPACK_SP(SA, &fmil); 443 FP_UNPACK_SP(SA, &fmil);
402 if ((info->capability >> 23) == 0) 444 if ((info->capability >> 23) == 0)
403 FP_FROM_INT_S(SB, (long) info->capability, 64, long); 445 FP_FROM_INT_S(SB, info->capability, 32, int);
404 else 446 else
405 FP_UNPACK_SP(SB, &info->capability); 447 FP_UNPACK_SP(SB, &info->capability);
406 FP_DIV_S(SR, SA, SB); 448 FP_DIV_S(SR, SA, SB);
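
The soft-fp sequence divides one million by the capability value, so a smaller capability means a faster CPU. A worked integer-encoded case (illustrative; values with nonzero top bits take the FP_UNPACK_SP float path instead):

/* (info->capability >> 23) == 0 means the top nine bits are clear,
 * i.e. the plain unsigned-integer encoding.  For capability = 2500:
 *     1000000 / 2500 = 400
 * which becomes the scaling factor used to recalculate
 * loops_per_jiffy for all online cpus. */
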
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index aff0e350d77..dff933065ab 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * arch/s390/kernel/time.c
2 * Time of day based timer functions. 3 * Time of day based timer functions.
3 * 4 *
4 * S390 version 5 * S390 version
@@ -26,7 +27,7 @@
26#include <linux/cpu.h> 27#include <linux/cpu.h>
27#include <linux/stop_machine.h> 28#include <linux/stop_machine.h>
28#include <linux/time.h> 29#include <linux/time.h>
29#include <linux/device.h> 30#include <linux/sysdev.h>
30#include <linux/delay.h> 31#include <linux/delay.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/smp.h> 33#include <linux/smp.h>
@@ -34,7 +35,7 @@
34#include <linux/profile.h> 35#include <linux/profile.h>
35#include <linux/timex.h> 36#include <linux/timex.h>
36#include <linux/notifier.h> 37#include <linux/notifier.h>
37#include <linux/timekeeper_internal.h> 38#include <linux/clocksource.h>
38#include <linux/clockchips.h> 39#include <linux/clockchips.h>
39#include <linux/gfp.h> 40#include <linux/gfp.h>
40#include <linux/kprobes.h> 41#include <linux/kprobes.h>
@@ -44,10 +45,9 @@
44#include <asm/vdso.h> 45#include <asm/vdso.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
46#include <asm/irq_regs.h> 47#include <asm/irq_regs.h>
47#include <asm/vtimer.h> 48#include <asm/timer.h>
48#include <asm/etr.h> 49#include <asm/etr.h>
49#include <asm/cio.h> 50#include <asm/cio.h>
50#include "entry.h"
51 51
52/* change this if you have some constant time drift */ 52/* change this if you have some constant time drift */
53#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 53#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
@@ -109,17 +109,10 @@ static void fixup_clock_comparator(unsigned long long delta)
109 set_clock_comparator(S390_lowcore.clock_comparator); 109 set_clock_comparator(S390_lowcore.clock_comparator);
110} 110}
111 111
112static int s390_next_ktime(ktime_t expires, 112static int s390_next_event(unsigned long delta,
113 struct clock_event_device *evt) 113 struct clock_event_device *evt)
114{ 114{
115 struct timespec ts; 115 S390_lowcore.clock_comparator = get_clock() + delta;
116 u64 nsecs;
117
118 ts.tv_sec = ts.tv_nsec = 0;
119 monotonic_to_bootbased(&ts);
120 nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
121 do_div(nsecs, 125);
122 S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
123 set_clock_comparator(S390_lowcore.clock_comparator); 116 set_clock_comparator(S390_lowcore.clock_comparator);
124 return 0; 117 return 0;
125} 118}
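
The removed s390_next_ktime() converts a nanosecond count into s390 TOD clock units: one TOD unit is 2^-12 microseconds, so nanoseconds scale by 4096/1000 = 512/125, which is exactly the do_div(nsecs, 125) followed by the shift left by 9. The clock event device's mult = 16777 and shift = 12 a few hunks down encode the same ratio (16777 / 2^12 ≈ 4.096). A standalone sketch of the conversion:

#include <stdint.h>

/* One s390 TOD clock unit is 2^-12 microseconds, so a nanosecond count
 * scales by 4096/1000 = 512/125 TOD units per nanosecond. */
static inline uint64_t ns_to_tod(uint64_t nsecs)
{
	return (nsecs / 125) << 9;	/* == nsecs * 512 / 125 */
}

int main(void)
{
	/* 1 ms = 4096000 TOD units (4096 units per microsecond). */
	return ns_to_tod(1000000) == 4096000ULL ? 0 : 1;
}
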
@@ -144,15 +137,14 @@ void init_cpu_timer(void)
144 cpu = smp_processor_id(); 137 cpu = smp_processor_id();
145 cd = &per_cpu(comparators, cpu); 138 cd = &per_cpu(comparators, cpu);
146 cd->name = "comparator"; 139 cd->name = "comparator";
147 cd->features = CLOCK_EVT_FEAT_ONESHOT | 140 cd->features = CLOCK_EVT_FEAT_ONESHOT;
148 CLOCK_EVT_FEAT_KTIME;
149 cd->mult = 16777; 141 cd->mult = 16777;
150 cd->shift = 12; 142 cd->shift = 12;
151 cd->min_delta_ns = 1; 143 cd->min_delta_ns = 1;
152 cd->max_delta_ns = LONG_MAX; 144 cd->max_delta_ns = LONG_MAX;
153 cd->rating = 400; 145 cd->rating = 400;
154 cd->cpumask = cpumask_of(cpu); 146 cd->cpumask = cpumask_of(cpu);
155 cd->set_next_ktime = s390_next_ktime; 147 cd->set_next_event = s390_next_event;
156 cd->set_mode = s390_set_mode; 148 cd->set_mode = s390_set_mode;
157 149
158 clockevents_register_device(cd); 150 clockevents_register_device(cd);
@@ -164,11 +156,11 @@ void init_cpu_timer(void)
164 __ctl_set_bit(0, 4); 156 __ctl_set_bit(0, 4);
165} 157}
166 158
167static void clock_comparator_interrupt(struct ext_code ext_code, 159static void clock_comparator_interrupt(unsigned int ext_int_code,
168 unsigned int param32, 160 unsigned int param32,
169 unsigned long param64) 161 unsigned long param64)
170{ 162{
171 inc_irq_stat(IRQEXT_CLK); 163 kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
172 if (S390_lowcore.clock_comparator == -1ULL) 164 if (S390_lowcore.clock_comparator == -1ULL)
173 set_clock_comparator(S390_lowcore.clock_comparator); 165 set_clock_comparator(S390_lowcore.clock_comparator);
174} 166}
@@ -176,10 +168,10 @@ static void clock_comparator_interrupt(struct ext_code ext_code,
176static void etr_timing_alert(struct etr_irq_parm *); 168static void etr_timing_alert(struct etr_irq_parm *);
177static void stp_timing_alert(struct stp_irq_parm *); 169static void stp_timing_alert(struct stp_irq_parm *);
178 170
179static void timing_alert_interrupt(struct ext_code ext_code, 171static void timing_alert_interrupt(unsigned int ext_int_code,
180 unsigned int param32, unsigned long param64) 172 unsigned int param32, unsigned long param64)
181{ 173{
182 inc_irq_stat(IRQEXT_TLA); 174 kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
183 if (param32 & 0x00c40000) 175 if (param32 & 0x00c40000)
184 etr_timing_alert((struct etr_irq_parm *) &param32); 176 etr_timing_alert((struct etr_irq_parm *) &param32);
185 if (param32 & 0x00038000) 177 if (param32 & 0x00038000)
@@ -219,7 +211,7 @@ struct clocksource * __init clocksource_default_clock(void)
219 return &clocksource_tod; 211 return &clocksource_tod;
220} 212}
221 213
222void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, 214void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
223 struct clocksource *clock, u32 mult) 215 struct clocksource *clock, u32 mult)
224{ 216{
225 if (clock != &clocksource_tod) 217 if (clock != &clocksource_tod)
@@ -329,7 +321,7 @@ static unsigned long clock_sync_flags;
329 * The synchronous get_clock function. It will write the current clock 321 * The synchronous get_clock function. It will write the current clock
330 * value to the clock pointer and return 0 if the clock is in sync with 322 * value to the clock pointer and return 0 if the clock is in sync with
331 * the external time source. If the clock mode is local it will return 323 * the external time source. If the clock mode is local it will return
332 * -EOPNOTSUPP and -EAGAIN if the clock is not in sync with the external 324 * -ENOSYS and -EAGAIN if the clock is not in sync with the external
333 * reference. 325 * reference.
334 */ 326 */
335int get_sync_clock(unsigned long long *clock) 327int get_sync_clock(unsigned long long *clock)
@@ -347,7 +339,7 @@ int get_sync_clock(unsigned long long *clock)
347 return 0; 339 return 0;
348 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) && 340 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
349 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) 341 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
350 return -EOPNOTSUPP; 342 return -ENOSYS;
351 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) && 343 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
352 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) 344 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
353 return -EACCES; 345 return -EACCES;
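
The contract spelled out in the comment above: 0 means the TOD clock is in sync, -EAGAIN means an ETR/STP configuration exists but sync has not been reached yet, and the remaining codes are final. A hedged sketch of a caller honoring that contract (wait_for_sync() and its retry bound are hypothetical, not part of the kernel API):

#include <errno.h>

/* get_sync_clock() is the kernel function above; the wrapper is not. */
extern int get_sync_clock(unsigned long long *clock);

static int wait_for_sync(unsigned long long *clock, int max_tries)
{
	int rc;

	while (max_tries--) {
		rc = get_sync_clock(clock);
		if (rc != -EAGAIN)
			return rc;	/* 0 is success; -EOPNOTSUPP/-ENOSYS
					 * and -EACCES are final */
		/* -EAGAIN: ETR/STP configured but not yet in sync; retry. */
	}
	return -EAGAIN;
}
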
@@ -1118,35 +1110,34 @@ out_unlock:
1118/* 1110/*
1119 * Sysfs interface functions 1111 * Sysfs interface functions
1120 */ 1112 */
1121static struct bus_type etr_subsys = { 1113static struct sysdev_class etr_sysclass = {
1122 .name = "etr", 1114 .name = "etr",
1123 .dev_name = "etr",
1124}; 1115};
1125 1116
1126static struct device etr_port0_dev = { 1117static struct sys_device etr_port0_dev = {
1127 .id = 0, 1118 .id = 0,
1128 .bus = &etr_subsys, 1119 .cls = &etr_sysclass,
1129}; 1120};
1130 1121
1131static struct device etr_port1_dev = { 1122static struct sys_device etr_port1_dev = {
1132 .id = 1, 1123 .id = 1,
1133 .bus = &etr_subsys, 1124 .cls = &etr_sysclass,
1134}; 1125};
1135 1126
1136/* 1127/*
1137 * ETR subsys attributes 1128 * ETR class attributes
1138 */ 1129 */
1139static ssize_t etr_stepping_port_show(struct device *dev, 1130static ssize_t etr_stepping_port_show(struct sysdev_class *class,
1140 struct device_attribute *attr, 1131 struct sysdev_class_attribute *attr,
1141 char *buf) 1132 char *buf)
1142{ 1133{
1143 return sprintf(buf, "%i\n", etr_port0.esw.p); 1134 return sprintf(buf, "%i\n", etr_port0.esw.p);
1144} 1135}
1145 1136
1146static DEVICE_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL); 1137static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1147 1138
1148static ssize_t etr_stepping_mode_show(struct device *dev, 1139static ssize_t etr_stepping_mode_show(struct sysdev_class *class,
1149 struct device_attribute *attr, 1140 struct sysdev_class_attribute *attr,
1150 char *buf) 1141 char *buf)
1151{ 1142{
1152 char *mode_str; 1143 char *mode_str;
@@ -1160,12 +1151,12 @@ static ssize_t etr_stepping_mode_show(struct device *dev,
1160 return sprintf(buf, "%s\n", mode_str); 1151 return sprintf(buf, "%s\n", mode_str);
1161} 1152}
1162 1153
1163static DEVICE_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL); 1154static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
1164 1155
1165/* 1156/*
1166 * ETR port attributes 1157 * ETR port attributes
1167 */ 1158 */
1168static inline struct etr_aib *etr_aib_from_dev(struct device *dev) 1159static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
1169{ 1160{
1170 if (dev == &etr_port0_dev) 1161 if (dev == &etr_port0_dev)
1171 return etr_port0_online ? &etr_port0 : NULL; 1162 return etr_port0_online ? &etr_port0 : NULL;
@@ -1173,8 +1164,8 @@ static inline struct etr_aib *etr_aib_from_dev(struct device *dev)
1173 return etr_port1_online ? &etr_port1 : NULL; 1164 return etr_port1_online ? &etr_port1 : NULL;
1174} 1165}
1175 1166
1176static ssize_t etr_online_show(struct device *dev, 1167static ssize_t etr_online_show(struct sys_device *dev,
1177 struct device_attribute *attr, 1168 struct sysdev_attribute *attr,
1178 char *buf) 1169 char *buf)
1179{ 1170{
1180 unsigned int online; 1171 unsigned int online;
@@ -1183,8 +1174,8 @@ static ssize_t etr_online_show(struct device *dev,
1183 return sprintf(buf, "%i\n", online); 1174 return sprintf(buf, "%i\n", online);
1184} 1175}
1185 1176
1186static ssize_t etr_online_store(struct device *dev, 1177static ssize_t etr_online_store(struct sys_device *dev,
1187 struct device_attribute *attr, 1178 struct sysdev_attribute *attr,
1188 const char *buf, size_t count) 1179 const char *buf, size_t count)
1189{ 1180{
1190 unsigned int value; 1181 unsigned int value;
@@ -1221,20 +1212,20 @@ out:
1221 return count; 1212 return count;
1222} 1213}
1223 1214
1224static DEVICE_ATTR(online, 0600, etr_online_show, etr_online_store); 1215static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
1225 1216
1226static ssize_t etr_stepping_control_show(struct device *dev, 1217static ssize_t etr_stepping_control_show(struct sys_device *dev,
1227 struct device_attribute *attr, 1218 struct sysdev_attribute *attr,
1228 char *buf) 1219 char *buf)
1229{ 1220{
1230 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? 1221 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1231 etr_eacr.e0 : etr_eacr.e1); 1222 etr_eacr.e0 : etr_eacr.e1);
1232} 1223}
1233 1224
1234static DEVICE_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL); 1225static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
1235 1226
1236static ssize_t etr_mode_code_show(struct device *dev, 1227static ssize_t etr_mode_code_show(struct sys_device *dev,
1237 struct device_attribute *attr, char *buf) 1228 struct sysdev_attribute *attr, char *buf)
1238{ 1229{
1239 if (!etr_port0_online && !etr_port1_online) 1230 if (!etr_port0_online && !etr_port1_online)
1240 /* Status word is not uptodate if both ports are offline. */ 1231 /* Status word is not uptodate if both ports are offline. */
@@ -1243,10 +1234,10 @@ static ssize_t etr_mode_code_show(struct device *dev,
1243 etr_port0.esw.psc0 : etr_port0.esw.psc1); 1234 etr_port0.esw.psc0 : etr_port0.esw.psc1);
1244} 1235}
1245 1236
1246static DEVICE_ATTR(state_code, 0400, etr_mode_code_show, NULL); 1237static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
1247 1238
1248static ssize_t etr_untuned_show(struct device *dev, 1239static ssize_t etr_untuned_show(struct sys_device *dev,
1249 struct device_attribute *attr, char *buf) 1240 struct sysdev_attribute *attr, char *buf)
1250{ 1241{
1251 struct etr_aib *aib = etr_aib_from_dev(dev); 1242 struct etr_aib *aib = etr_aib_from_dev(dev);
1252 1243
@@ -1255,10 +1246,10 @@ static ssize_t etr_untuned_show(struct device *dev,
1255 return sprintf(buf, "%i\n", aib->edf1.u); 1246 return sprintf(buf, "%i\n", aib->edf1.u);
1256} 1247}
1257 1248
1258static DEVICE_ATTR(untuned, 0400, etr_untuned_show, NULL); 1249static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
1259 1250
1260static ssize_t etr_network_id_show(struct device *dev, 1251static ssize_t etr_network_id_show(struct sys_device *dev,
1261 struct device_attribute *attr, char *buf) 1252 struct sysdev_attribute *attr, char *buf)
1262{ 1253{
1263 struct etr_aib *aib = etr_aib_from_dev(dev); 1254 struct etr_aib *aib = etr_aib_from_dev(dev);
1264 1255
@@ -1267,10 +1258,10 @@ static ssize_t etr_network_id_show(struct device *dev,
1267 return sprintf(buf, "%i\n", aib->edf1.net_id); 1258 return sprintf(buf, "%i\n", aib->edf1.net_id);
1268} 1259}
1269 1260
1270static DEVICE_ATTR(network, 0400, etr_network_id_show, NULL); 1261static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
1271 1262
1272static ssize_t etr_id_show(struct device *dev, 1263static ssize_t etr_id_show(struct sys_device *dev,
1273 struct device_attribute *attr, char *buf) 1264 struct sysdev_attribute *attr, char *buf)
1274{ 1265{
1275 struct etr_aib *aib = etr_aib_from_dev(dev); 1266 struct etr_aib *aib = etr_aib_from_dev(dev);
1276 1267
@@ -1279,10 +1270,10 @@ static ssize_t etr_id_show(struct device *dev,
1279 return sprintf(buf, "%i\n", aib->edf1.etr_id); 1270 return sprintf(buf, "%i\n", aib->edf1.etr_id);
1280} 1271}
1281 1272
1282static DEVICE_ATTR(id, 0400, etr_id_show, NULL); 1273static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
1283 1274
1284static ssize_t etr_port_number_show(struct device *dev, 1275static ssize_t etr_port_number_show(struct sys_device *dev,
1285 struct device_attribute *attr, char *buf) 1276 struct sysdev_attribute *attr, char *buf)
1286{ 1277{
1287 struct etr_aib *aib = etr_aib_from_dev(dev); 1278 struct etr_aib *aib = etr_aib_from_dev(dev);
1288 1279
@@ -1291,10 +1282,10 @@ static ssize_t etr_port_number_show(struct device *dev,
1291 return sprintf(buf, "%i\n", aib->edf1.etr_pn); 1282 return sprintf(buf, "%i\n", aib->edf1.etr_pn);
1292} 1283}
1293 1284
1294static DEVICE_ATTR(port, 0400, etr_port_number_show, NULL); 1285static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
1295 1286
1296static ssize_t etr_coupled_show(struct device *dev, 1287static ssize_t etr_coupled_show(struct sys_device *dev,
1297 struct device_attribute *attr, char *buf) 1288 struct sysdev_attribute *attr, char *buf)
1298{ 1289{
1299 struct etr_aib *aib = etr_aib_from_dev(dev); 1290 struct etr_aib *aib = etr_aib_from_dev(dev);
1300 1291
@@ -1303,10 +1294,10 @@ static ssize_t etr_coupled_show(struct device *dev,
1303 return sprintf(buf, "%i\n", aib->edf3.c); 1294 return sprintf(buf, "%i\n", aib->edf3.c);
1304} 1295}
1305 1296
1306static DEVICE_ATTR(coupled, 0400, etr_coupled_show, NULL); 1297static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
1307 1298
1308static ssize_t etr_local_time_show(struct device *dev, 1299static ssize_t etr_local_time_show(struct sys_device *dev,
1309 struct device_attribute *attr, char *buf) 1300 struct sysdev_attribute *attr, char *buf)
1310{ 1301{
1311 struct etr_aib *aib = etr_aib_from_dev(dev); 1302 struct etr_aib *aib = etr_aib_from_dev(dev);
1312 1303
@@ -1315,10 +1306,10 @@ static ssize_t etr_local_time_show(struct device *dev,
1315 return sprintf(buf, "%i\n", aib->edf3.blto); 1306 return sprintf(buf, "%i\n", aib->edf3.blto);
1316} 1307}
1317 1308
1318static DEVICE_ATTR(local_time, 0400, etr_local_time_show, NULL); 1309static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
1319 1310
1320static ssize_t etr_utc_offset_show(struct device *dev, 1311static ssize_t etr_utc_offset_show(struct sys_device *dev,
1321 struct device_attribute *attr, char *buf) 1312 struct sysdev_attribute *attr, char *buf)
1322{ 1313{
1323 struct etr_aib *aib = etr_aib_from_dev(dev); 1314 struct etr_aib *aib = etr_aib_from_dev(dev);
1324 1315
@@ -1327,64 +1318,64 @@ static ssize_t etr_utc_offset_show(struct device *dev,
1327 return sprintf(buf, "%i\n", aib->edf3.buo); 1318 return sprintf(buf, "%i\n", aib->edf3.buo);
1328} 1319}
1329 1320
1330static DEVICE_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL); 1321static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
1331 1322
1332static struct device_attribute *etr_port_attributes[] = { 1323static struct sysdev_attribute *etr_port_attributes[] = {
1333 &dev_attr_online, 1324 &attr_online,
1334 &dev_attr_stepping_control, 1325 &attr_stepping_control,
1335 &dev_attr_state_code, 1326 &attr_state_code,
1336 &dev_attr_untuned, 1327 &attr_untuned,
1337 &dev_attr_network, 1328 &attr_network,
1338 &dev_attr_id, 1329 &attr_id,
1339 &dev_attr_port, 1330 &attr_port,
1340 &dev_attr_coupled, 1331 &attr_coupled,
1341 &dev_attr_local_time, 1332 &attr_local_time,
1342 &dev_attr_utc_offset, 1333 &attr_utc_offset,
1343 NULL 1334 NULL
1344}; 1335};
1345 1336
1346static int __init etr_register_port(struct device *dev) 1337static int __init etr_register_port(struct sys_device *dev)
1347{ 1338{
1348 struct device_attribute **attr; 1339 struct sysdev_attribute **attr;
1349 int rc; 1340 int rc;
1350 1341
1351 rc = device_register(dev); 1342 rc = sysdev_register(dev);
1352 if (rc) 1343 if (rc)
1353 goto out; 1344 goto out;
1354 for (attr = etr_port_attributes; *attr; attr++) { 1345 for (attr = etr_port_attributes; *attr; attr++) {
1355 rc = device_create_file(dev, *attr); 1346 rc = sysdev_create_file(dev, *attr);
1356 if (rc) 1347 if (rc)
1357 goto out_unreg; 1348 goto out_unreg;
1358 } 1349 }
1359 return 0; 1350 return 0;
1360out_unreg: 1351out_unreg:
1361 for (; attr >= etr_port_attributes; attr--) 1352 for (; attr >= etr_port_attributes; attr--)
1362 device_remove_file(dev, *attr); 1353 sysdev_remove_file(dev, *attr);
1363 device_unregister(dev); 1354 sysdev_unregister(dev);
1364out: 1355out:
1365 return rc; 1356 return rc;
1366} 1357}
1367 1358
1368static void __init etr_unregister_port(struct device *dev) 1359static void __init etr_unregister_port(struct sys_device *dev)
1369{ 1360{
1370 struct device_attribute **attr; 1361 struct sysdev_attribute **attr;
1371 1362
1372 for (attr = etr_port_attributes; *attr; attr++) 1363 for (attr = etr_port_attributes; *attr; attr++)
1373 device_remove_file(dev, *attr); 1364 sysdev_remove_file(dev, *attr);
1374 device_unregister(dev); 1365 sysdev_unregister(dev);
1375} 1366}
1376 1367
1377static int __init etr_init_sysfs(void) 1368static int __init etr_init_sysfs(void)
1378{ 1369{
1379 int rc; 1370 int rc;
1380 1371
1381 rc = subsys_system_register(&etr_subsys, NULL); 1372 rc = sysdev_class_register(&etr_sysclass);
1382 if (rc) 1373 if (rc)
1383 goto out; 1374 goto out;
1384 rc = device_create_file(etr_subsys.dev_root, &dev_attr_stepping_port); 1375 rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
1385 if (rc) 1376 if (rc)
1386 goto out_unreg_subsys; 1377 goto out_unreg_class;
1387 rc = device_create_file(etr_subsys.dev_root, &dev_attr_stepping_mode); 1378 rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
1388 if (rc) 1379 if (rc)
1389 goto out_remove_stepping_port; 1380 goto out_remove_stepping_port;
1390 rc = etr_register_port(&etr_port0_dev); 1381 rc = etr_register_port(&etr_port0_dev);
@@ -1398,11 +1389,11 @@ static int __init etr_init_sysfs(void)
1398out_remove_port0: 1389out_remove_port0:
1399 etr_unregister_port(&etr_port0_dev); 1390 etr_unregister_port(&etr_port0_dev);
1400out_remove_stepping_mode: 1391out_remove_stepping_mode:
1401 device_remove_file(etr_subsys.dev_root, &dev_attr_stepping_mode); 1392 sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
1402out_remove_stepping_port: 1393out_remove_stepping_port:
1403 device_remove_file(etr_subsys.dev_root, &dev_attr_stepping_port); 1394 sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
1404out_unreg_subsys: 1395out_unreg_class:
1405 bus_unregister(&etr_subsys); 1396 sysdev_class_unregister(&etr_sysclass);
1406out: 1397out:
1407 return rc; 1398 return rc;
1408} 1399}
@@ -1602,15 +1593,14 @@ out_unlock:
1602} 1593}
1603 1594
1604/* 1595/*
1605 * STP subsys sysfs interface functions 1596 * STP class sysfs interface functions
1606 */ 1597 */
1607static struct bus_type stp_subsys = { 1598static struct sysdev_class stp_sysclass = {
1608 .name = "stp", 1599 .name = "stp",
1609 .dev_name = "stp",
1610}; 1600};
1611 1601
1612static ssize_t stp_ctn_id_show(struct device *dev, 1602static ssize_t stp_ctn_id_show(struct sysdev_class *class,
1613 struct device_attribute *attr, 1603 struct sysdev_class_attribute *attr,
1614 char *buf) 1604 char *buf)
1615{ 1605{
1616 if (!stp_online) 1606 if (!stp_online)
@@ -1619,10 +1609,10 @@ static ssize_t stp_ctn_id_show(struct device *dev,
1619 *(unsigned long long *) stp_info.ctnid); 1609 *(unsigned long long *) stp_info.ctnid);
1620} 1610}
1621 1611
1622static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); 1612static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
1623 1613
1624static ssize_t stp_ctn_type_show(struct device *dev, 1614static ssize_t stp_ctn_type_show(struct sysdev_class *class,
1625 struct device_attribute *attr, 1615 struct sysdev_class_attribute *attr,
1626 char *buf) 1616 char *buf)
1627{ 1617{
1628 if (!stp_online) 1618 if (!stp_online)
@@ -1630,10 +1620,10 @@ static ssize_t stp_ctn_type_show(struct device *dev,
1630 return sprintf(buf, "%i\n", stp_info.ctn); 1620 return sprintf(buf, "%i\n", stp_info.ctn);
1631} 1621}
1632 1622
1633static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); 1623static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
1634 1624
1635static ssize_t stp_dst_offset_show(struct device *dev, 1625static ssize_t stp_dst_offset_show(struct sysdev_class *class,
1636 struct device_attribute *attr, 1626 struct sysdev_class_attribute *attr,
1637 char *buf) 1627 char *buf)
1638{ 1628{
1639 if (!stp_online || !(stp_info.vbits & 0x2000)) 1629 if (!stp_online || !(stp_info.vbits & 0x2000))
@@ -1641,10 +1631,10 @@ static ssize_t stp_dst_offset_show(struct device *dev,
1641 return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); 1631 return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
1642} 1632}
1643 1633
1644static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); 1634static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
1645 1635
1646static ssize_t stp_leap_seconds_show(struct device *dev, 1636static ssize_t stp_leap_seconds_show(struct sysdev_class *class,
1647 struct device_attribute *attr, 1637 struct sysdev_class_attribute *attr,
1648 char *buf) 1638 char *buf)
1649{ 1639{
1650 if (!stp_online || !(stp_info.vbits & 0x8000)) 1640 if (!stp_online || !(stp_info.vbits & 0x8000))
@@ -1652,10 +1642,10 @@ static ssize_t stp_leap_seconds_show(struct device *dev,
1652 return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); 1642 return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
1653} 1643}
1654 1644
1655static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); 1645static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
1656 1646
1657static ssize_t stp_stratum_show(struct device *dev, 1647static ssize_t stp_stratum_show(struct sysdev_class *class,
1658 struct device_attribute *attr, 1648 struct sysdev_class_attribute *attr,
1659 char *buf) 1649 char *buf)
1660{ 1650{
1661 if (!stp_online) 1651 if (!stp_online)
@@ -1663,10 +1653,10 @@ static ssize_t stp_stratum_show(struct device *dev,
1663 return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); 1653 return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
1664} 1654}
1665 1655
1666static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL); 1656static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
1667 1657
1668static ssize_t stp_time_offset_show(struct device *dev, 1658static ssize_t stp_time_offset_show(struct sysdev_class *class,
1669 struct device_attribute *attr, 1659 struct sysdev_class_attribute *attr,
1670 char *buf) 1660 char *buf)
1671{ 1661{
1672 if (!stp_online || !(stp_info.vbits & 0x0800)) 1662 if (!stp_online || !(stp_info.vbits & 0x0800))
@@ -1674,10 +1664,10 @@ static ssize_t stp_time_offset_show(struct device *dev,
1674 return sprintf(buf, "%i\n", (int) stp_info.tto); 1664 return sprintf(buf, "%i\n", (int) stp_info.tto);
1675} 1665}
1676 1666
1677static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL); 1667static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
1678 1668
1679static ssize_t stp_time_zone_offset_show(struct device *dev, 1669static ssize_t stp_time_zone_offset_show(struct sysdev_class *class,
1680 struct device_attribute *attr, 1670 struct sysdev_class_attribute *attr,
1681 char *buf) 1671 char *buf)
1682{ 1672{
1683 if (!stp_online || !(stp_info.vbits & 0x4000)) 1673 if (!stp_online || !(stp_info.vbits & 0x4000))
@@ -1685,11 +1675,11 @@ static ssize_t stp_time_zone_offset_show(struct device *dev,
1685 return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); 1675 return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
1686} 1676}
1687 1677
1688static DEVICE_ATTR(time_zone_offset, 0400, 1678static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
1689 stp_time_zone_offset_show, NULL); 1679 stp_time_zone_offset_show, NULL);
1690 1680
1691static ssize_t stp_timing_mode_show(struct device *dev, 1681static ssize_t stp_timing_mode_show(struct sysdev_class *class,
1692 struct device_attribute *attr, 1682 struct sysdev_class_attribute *attr,
1693 char *buf) 1683 char *buf)
1694{ 1684{
1695 if (!stp_online) 1685 if (!stp_online)
@@ -1697,10 +1687,10 @@ static ssize_t stp_timing_mode_show(struct device *dev,
1697 return sprintf(buf, "%i\n", stp_info.tmd); 1687 return sprintf(buf, "%i\n", stp_info.tmd);
1698} 1688}
1699 1689
1700static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); 1690static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
1701 1691
1702static ssize_t stp_timing_state_show(struct device *dev, 1692static ssize_t stp_timing_state_show(struct sysdev_class *class,
1703 struct device_attribute *attr, 1693 struct sysdev_class_attribute *attr,
1704 char *buf) 1694 char *buf)
1705{ 1695{
1706 if (!stp_online) 1696 if (!stp_online)
@@ -1708,17 +1698,17 @@ static ssize_t stp_timing_state_show(struct device *dev,
1708 return sprintf(buf, "%i\n", stp_info.tst); 1698 return sprintf(buf, "%i\n", stp_info.tst);
1709} 1699}
1710 1700
1711static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL); 1701static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
1712 1702
1713static ssize_t stp_online_show(struct device *dev, 1703static ssize_t stp_online_show(struct sysdev_class *class,
1714 struct device_attribute *attr, 1704 struct sysdev_class_attribute *attr,
1715 char *buf) 1705 char *buf)
1716{ 1706{
1717 return sprintf(buf, "%i\n", stp_online); 1707 return sprintf(buf, "%i\n", stp_online);
1718} 1708}
1719 1709
1720static ssize_t stp_online_store(struct device *dev, 1710static ssize_t stp_online_store(struct sysdev_class *class,
1721 struct device_attribute *attr, 1711 struct sysdev_class_attribute *attr,
1722 const char *buf, size_t count) 1712 const char *buf, size_t count)
1723{ 1713{
1724 unsigned int value; 1714 unsigned int value;
@@ -1740,47 +1730,47 @@ static ssize_t stp_online_store(struct device *dev,
1740} 1730}
1741 1731
1742/* 1732/*
1743 * Can't use DEVICE_ATTR because the attribute should be named 1733 * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
1744 * stp/online but dev_attr_online already exists in this file .. 1734 * stp/online but attr_online already exists in this file ..
1745 */ 1735 */
1746static struct device_attribute dev_attr_stp_online = { 1736static struct sysdev_class_attribute attr_stp_online = {
1747 .attr = { .name = "online", .mode = 0600 }, 1737 .attr = { .name = "online", .mode = 0600 },
1748 .show = stp_online_show, 1738 .show = stp_online_show,
1749 .store = stp_online_store, 1739 .store = stp_online_store,
1750}; 1740};
1751 1741
1752static struct device_attribute *stp_attributes[] = { 1742static struct sysdev_class_attribute *stp_attributes[] = {
1753 &dev_attr_ctn_id, 1743 &attr_ctn_id,
1754 &dev_attr_ctn_type, 1744 &attr_ctn_type,
1755 &dev_attr_dst_offset, 1745 &attr_dst_offset,
1756 &dev_attr_leap_seconds, 1746 &attr_leap_seconds,
1757 &dev_attr_stp_online, 1747 &attr_stp_online,
1758 &dev_attr_stratum, 1748 &attr_stratum,
1759 &dev_attr_time_offset, 1749 &attr_time_offset,
1760 &dev_attr_time_zone_offset, 1750 &attr_time_zone_offset,
1761 &dev_attr_timing_mode, 1751 &attr_timing_mode,
1762 &dev_attr_timing_state, 1752 &attr_timing_state,
1763 NULL 1753 NULL
1764}; 1754};
1765 1755
1766static int __init stp_init_sysfs(void) 1756static int __init stp_init_sysfs(void)
1767{ 1757{
1768 struct device_attribute **attr; 1758 struct sysdev_class_attribute **attr;
1769 int rc; 1759 int rc;
1770 1760
1771 rc = subsys_system_register(&stp_subsys, NULL); 1761 rc = sysdev_class_register(&stp_sysclass);
1772 if (rc) 1762 if (rc)
1773 goto out; 1763 goto out;
1774 for (attr = stp_attributes; *attr; attr++) { 1764 for (attr = stp_attributes; *attr; attr++) {
1775 rc = device_create_file(stp_subsys.dev_root, *attr); 1765 rc = sysdev_class_create_file(&stp_sysclass, *attr);
1776 if (rc) 1766 if (rc)
1777 goto out_unreg; 1767 goto out_unreg;
1778 } 1768 }
1779 return 0; 1769 return 0;
1780out_unreg: 1770out_unreg:
1781 for (; attr >= stp_attributes; attr--) 1771 for (; attr >= stp_attributes; attr--)
1782 device_remove_file(stp_subsys.dev_root, *attr); 1772 sysdev_class_remove_file(&stp_sysclass, *attr);
1783 bus_unregister(&stp_subsys); 1773 sysdev_class_unregister(&stp_sysclass);
1784out: 1774out:
1785 return rc; 1775 return rc;
1786} 1776}
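
Both etr_register_port() above and stp_init_sysfs() use the same unwind idiom on either side of the conversion: walk a NULL-terminated attribute array forward while creating files, and on failure walk the same pointer back to remove whatever was created (removing the entry that failed is harmless for sysfs). A self-contained sketch of the idiom with stand-in create_one()/remove_one() helpers:

#include <stdio.h>

static const char *attrs[] = { "online", "stepping_control", "state_code", NULL };

static int create_one(const char *name)
{
	printf("create %s\n", name);
	return 0;	/* nonzero would trigger the unwind below */
}

static void remove_one(const char *name)
{
	printf("remove %s\n", name);
}

static int register_all(void)
{
	const char **attr;
	int rc = 0;

	for (attr = attrs; *attr; attr++) {
		rc = create_one(*attr);
		if (rc)
			goto out_unreg;
	}
	return 0;
out_unreg:
	/* Walk back from the failing entry, removing what was created. */
	for (; attr >= attrs; attr--)
		remove_one(*attr);
	return rc;
}

int main(void)
{
	return register_all();
}
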
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 4b2e3e31700..0cd340b7263 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,24 +1,22 @@
1/* 1/*
2 * Copyright IBM Corp. 2007, 2011 2 * Copyright IBM Corp. 2007
3 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 3 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
4 */ 4 */
5 5
6#define KMSG_COMPONENT "cpu" 6#define KMSG_COMPONENT "cpu"
7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
8 8
9#include <linux/workqueue.h>
10#include <linux/bootmem.h>
11#include <linux/cpuset.h>
12#include <linux/device.h>
13#include <linux/export.h>
14#include <linux/kernel.h> 9#include <linux/kernel.h>
15#include <linux/sched.h> 10#include <linux/mm.h>
16#include <linux/init.h> 11#include <linux/init.h>
17#include <linux/delay.h> 12#include <linux/device.h>
13#include <linux/bootmem.h>
14#include <linux/sched.h>
15#include <linux/workqueue.h>
18#include <linux/cpu.h> 16#include <linux/cpu.h>
19#include <linux/smp.h> 17#include <linux/smp.h>
20#include <linux/mm.h> 18#include <linux/cpuset.h>
21#include <asm/sysinfo.h> 19#include <asm/delay.h>
22 20
23#define PTF_HORIZONTAL (0UL) 21#define PTF_HORIZONTAL (0UL)
24#define PTF_VERTICAL (1UL) 22#define PTF_VERTICAL (1UL)
@@ -30,79 +28,88 @@ struct mask_info {
30 cpumask_t mask; 28 cpumask_t mask;
31}; 29};
32 30
33static void set_topology_timer(void); 31static int topology_enabled = 1;
34static void topology_work_fn(struct work_struct *work); 32static void topology_work_fn(struct work_struct *work);
35static struct sysinfo_15_1_x *tl_info; 33static struct sysinfo_15_1_x *tl_info;
36 34static struct timer_list topology_timer;
37static int topology_enabled = 1; 35static void set_topology_timer(void);
38static DECLARE_WORK(topology_work, topology_work_fn); 36static DECLARE_WORK(topology_work, topology_work_fn);
39 37/* topology_lock protects the core linked list */
40/* topology_lock protects the socket and book linked lists */
41static DEFINE_SPINLOCK(topology_lock); 38static DEFINE_SPINLOCK(topology_lock);
42static struct mask_info socket_info;
43static struct mask_info book_info;
44 39
45struct cpu_topology_s390 cpu_topology[NR_CPUS]; 40static struct mask_info core_info;
46EXPORT_SYMBOL_GPL(cpu_topology); 41cpumask_t cpu_core_map[NR_CPUS];
42unsigned char cpu_core_id[NR_CPUS];
43
44#ifdef CONFIG_SCHED_BOOK
45static struct mask_info book_info;
46cpumask_t cpu_book_map[NR_CPUS];
47unsigned char cpu_book_id[NR_CPUS];
48#endif
47 49
48static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) 50static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
49{ 51{
50 cpumask_t mask; 52 cpumask_t mask;
51 53
52 cpumask_copy(&mask, cpumask_of(cpu)); 54 cpumask_clear(&mask);
53 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) 55 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
56 cpumask_copy(&mask, cpumask_of(cpu));
54 return mask; 57 return mask;
55 for (; info; info = info->next) {
56 if (cpumask_test_cpu(cpu, &info->mask))
57 return info->mask;
58 } 58 }
59 while (info) {
60 if (cpumask_test_cpu(cpu, &info->mask)) {
61 mask = info->mask;
62 break;
63 }
64 info = info->next;
65 }
66 if (cpumask_empty(&mask))
67 cpumask_copy(&mask, cpumask_of(cpu));
59 return mask; 68 return mask;
60} 69}
61 70
62static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, 71static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
63 struct mask_info *book, 72 struct mask_info *book, struct mask_info *core)
64 struct mask_info *socket,
65 int one_socket_per_cpu)
66{ 73{
67 unsigned int cpu; 74 unsigned int cpu;
68 75
69 for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) { 76 for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
70 unsigned int rcpu; 77 cpu < TOPOLOGY_CPU_BITS;
71 int lcpu; 78 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
79 {
80 unsigned int rcpu, lcpu;
72 81
73 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; 82 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
74 lcpu = smp_find_processor_id(rcpu); 83 for_each_present_cpu(lcpu) {
75 if (lcpu < 0) 84 if (cpu_logical_map(lcpu) != rcpu)
76 continue; 85 continue;
77 cpumask_set_cpu(lcpu, &book->mask); 86#ifdef CONFIG_SCHED_BOOK
78 cpu_topology[lcpu].book_id = book->id; 87 cpumask_set_cpu(lcpu, &book->mask);
79 cpumask_set_cpu(lcpu, &socket->mask); 88 cpu_book_id[lcpu] = book->id;
80 cpu_topology[lcpu].core_id = rcpu; 89#endif
81 if (one_socket_per_cpu) { 90 cpumask_set_cpu(lcpu, &core->mask);
82 cpu_topology[lcpu].socket_id = rcpu; 91 cpu_core_id[lcpu] = core->id;
83 socket = socket->next; 92 smp_cpu_polarization[lcpu] = tl_cpu->pp;
84 } else {
85 cpu_topology[lcpu].socket_id = socket->id;
86 } 93 }
87 smp_cpu_set_polarization(lcpu, tl_cpu->pp);
88 } 94 }
89 return socket;
90} 95}
91 96
92static void clear_masks(void) 97static void clear_masks(void)
93{ 98{
94 struct mask_info *info; 99 struct mask_info *info;
95 100
96 info = &socket_info; 101 info = &core_info;
97 while (info) { 102 while (info) {
98 cpumask_clear(&info->mask); 103 cpumask_clear(&info->mask);
99 info = info->next; 104 info = info->next;
100 } 105 }
106#ifdef CONFIG_SCHED_BOOK
101 info = &book_info; 107 info = &book_info;
102 while (info) { 108 while (info) {
103 cpumask_clear(&info->mask); 109 cpumask_clear(&info->mask);
104 info = info->next; 110 info = info->next;
105 } 111 }
112#endif
106} 113}
107 114
108static union topology_entry *next_tle(union topology_entry *tle) 115static union topology_entry *next_tle(union topology_entry *tle)
@@ -112,75 +119,43 @@ static union topology_entry *next_tle(union topology_entry *tle)
112 return (union topology_entry *)((struct topology_container *)tle + 1); 119 return (union topology_entry *)((struct topology_container *)tle + 1);
113} 120}
114 121
115static void __tl_to_masks_generic(struct sysinfo_15_1_x *info) 122static void tl_to_cores(struct sysinfo_15_1_x *info)
116{ 123{
117 struct mask_info *socket = &socket_info; 124#ifdef CONFIG_SCHED_BOOK
118 struct mask_info *book = &book_info; 125 struct mask_info *book = &book_info;
126#else
127 struct mask_info *book = NULL;
128#endif
129 struct mask_info *core = &core_info;
119 union topology_entry *tle, *end; 130 union topology_entry *tle, *end;
120 131
132
133 spin_lock_irq(&topology_lock);
134 clear_masks();
121 tle = info->tle; 135 tle = info->tle;
122 end = (union topology_entry *)((unsigned long)info + info->length); 136 end = (union topology_entry *)((unsigned long)info + info->length);
123 while (tle < end) { 137 while (tle < end) {
124 switch (tle->nl) { 138 switch (tle->nl) {
139#ifdef CONFIG_SCHED_BOOK
125 case 2: 140 case 2:
126 book = book->next; 141 book = book->next;
127 book->id = tle->container.id; 142 book->id = tle->container.id;
128 break; 143 break;
144#endif
129 case 1: 145 case 1:
130 socket = socket->next; 146 core = core->next;
131 socket->id = tle->container.id; 147 core->id = tle->container.id;
132 break; 148 break;
133 case 0: 149 case 0:
134 add_cpus_to_mask(&tle->cpu, book, socket, 0); 150 add_cpus_to_mask(&tle->cpu, book, core);
135 break; 151 break;
136 default: 152 default:
137 clear_masks(); 153 clear_masks();
138 return; 154 goto out;
139 } 155 }
140 tle = next_tle(tle); 156 tle = next_tle(tle);
141 } 157 }
142} 158out:
143
144static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
145{
146 struct mask_info *socket = &socket_info;
147 struct mask_info *book = &book_info;
148 union topology_entry *tle, *end;
149
150 tle = info->tle;
151 end = (union topology_entry *)((unsigned long)info + info->length);
152 while (tle < end) {
153 switch (tle->nl) {
154 case 1:
155 book = book->next;
156 book->id = tle->container.id;
157 break;
158 case 0:
159 socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
160 break;
161 default:
162 clear_masks();
163 return;
164 }
165 tle = next_tle(tle);
166 }
167}
168
169static void tl_to_masks(struct sysinfo_15_1_x *info)
170{
171 struct cpuid cpu_id;
172
173 spin_lock_irq(&topology_lock);
174 get_cpu_id(&cpu_id);
175 clear_masks();
176 switch (cpu_id.machine) {
177 case 0x2097:
178 case 0x2098:
179 __tl_to_masks_z10(info);
180 break;
181 default:
182 __tl_to_masks_generic(info);
183 }
184 spin_unlock_irq(&topology_lock); 159 spin_unlock_irq(&topology_lock);
185} 160}
186 161
@@ -190,7 +165,7 @@ static void topology_update_polarization_simple(void)
190 165
191 mutex_lock(&smp_cpu_state_mutex); 166 mutex_lock(&smp_cpu_state_mutex);
192 for_each_possible_cpu(cpu) 167 for_each_possible_cpu(cpu)
193 smp_cpu_set_polarization(cpu, POLARIZATION_HRZ); 168 smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
194 mutex_unlock(&smp_cpu_state_mutex); 169 mutex_unlock(&smp_cpu_state_mutex);
195} 170}
196 171
@@ -209,7 +184,8 @@ static int ptf(unsigned long fc)
209 184
210int topology_set_cpu_management(int fc) 185int topology_set_cpu_management(int fc)
211{ 186{
212 int cpu, rc; 187 int cpu;
188 int rc;
213 189
214 if (!MACHINE_HAS_TOPOLOGY) 190 if (!MACHINE_HAS_TOPOLOGY)
215 return -EOPNOTSUPP; 191 return -EOPNOTSUPP;
@@ -220,53 +196,54 @@ int topology_set_cpu_management(int fc)
220 if (rc) 196 if (rc)
221 return -EBUSY; 197 return -EBUSY;
222 for_each_possible_cpu(cpu) 198 for_each_possible_cpu(cpu)
223 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 199 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
224 return rc; 200 return rc;
225} 201}
226 202
227static void update_cpu_masks(void) 203static void update_cpu_core_map(void)
228{ 204{
229 unsigned long flags; 205 unsigned long flags;
230 int cpu; 206 int cpu;
231 207
232 spin_lock_irqsave(&topology_lock, flags); 208 spin_lock_irqsave(&topology_lock, flags);
233 for_each_possible_cpu(cpu) { 209 for_each_possible_cpu(cpu) {
234 cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); 210 cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
235 cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); 211#ifdef CONFIG_SCHED_BOOK
236 if (!MACHINE_HAS_TOPOLOGY) { 212 cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
237 cpu_topology[cpu].core_id = cpu; 213#endif
238 cpu_topology[cpu].socket_id = cpu;
239 cpu_topology[cpu].book_id = cpu;
240 }
241 } 214 }
242 spin_unlock_irqrestore(&topology_lock, flags); 215 spin_unlock_irqrestore(&topology_lock, flags);
243} 216}
244 217
245void store_topology(struct sysinfo_15_1_x *info) 218void store_topology(struct sysinfo_15_1_x *info)
246{ 219{
247 if (topology_max_mnest >= 3) 220#ifdef CONFIG_SCHED_BOOK
248 stsi(info, 15, 1, 3); 221 int rc;
249 else 222
250 stsi(info, 15, 1, 2); 223 rc = stsi(info, 15, 1, 3);
224 if (rc != -ENOSYS)
225 return;
226#endif
227 stsi(info, 15, 1, 2);
251} 228}
252 229
253int arch_update_cpu_topology(void) 230int arch_update_cpu_topology(void)
254{ 231{
255 struct sysinfo_15_1_x *info = tl_info; 232 struct sysinfo_15_1_x *info = tl_info;
256 struct device *dev; 233 struct sys_device *sysdev;
257 int cpu; 234 int cpu;
258 235
259 if (!MACHINE_HAS_TOPOLOGY) { 236 if (!MACHINE_HAS_TOPOLOGY) {
260 update_cpu_masks(); 237 update_cpu_core_map();
261 topology_update_polarization_simple(); 238 topology_update_polarization_simple();
262 return 0; 239 return 0;
263 } 240 }
264 store_topology(info); 241 store_topology(info);
265 tl_to_masks(info); 242 tl_to_cores(info);
266 update_cpu_masks(); 243 update_cpu_core_map();
267 for_each_online_cpu(cpu) { 244 for_each_online_cpu(cpu) {
268 dev = get_cpu_device(cpu); 245 sysdev = get_cpu_sysdev(cpu);
269 kobject_uevent(&dev->kobj, KOBJ_CHANGE); 246 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
270 } 247 }
271 return 1; 248 return 1;
272} 249}
@@ -288,30 +265,12 @@ static void topology_timer_fn(unsigned long ignored)
288 set_topology_timer(); 265 set_topology_timer();
289} 266}
290 267
291static struct timer_list topology_timer =
292 TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
293
294static atomic_t topology_poll = ATOMIC_INIT(0);
295
296static void set_topology_timer(void) 268static void set_topology_timer(void)
297{ 269{
298 if (atomic_add_unless(&topology_poll, -1, 0)) 270 topology_timer.function = topology_timer_fn;
299 mod_timer(&topology_timer, jiffies + HZ / 10); 271 topology_timer.data = 0;
300 else 272 topology_timer.expires = jiffies + 60 * HZ;
301 mod_timer(&topology_timer, jiffies + HZ * 60); 273 add_timer(&topology_timer);
302}
303
304void topology_expect_change(void)
305{
306 if (!MACHINE_HAS_TOPOLOGY)
307 return;
308 /* This is racy, but it doesn't matter since it is just a heuristic.
309 * Worst case is that we poll in a higher frequency for a bit longer.
310 */
311 if (atomic_read(&topology_poll) > 60)
312 return;
313 atomic_add(60, &topology_poll);
314 set_topology_timer();
315} 274}
316 275
317static int __init early_parse_topology(char *p) 276static int __init early_parse_topology(char *p)
@@ -323,8 +282,25 @@ static int __init early_parse_topology(char *p)
323} 282}
324early_param("topology", early_parse_topology); 283early_param("topology", early_parse_topology);
325 284
326static void __init alloc_masks(struct sysinfo_15_1_x *info, 285static int __init init_topology_update(void)
327 struct mask_info *mask, int offset) 286{
287 int rc;
288
289 rc = 0;
290 if (!MACHINE_HAS_TOPOLOGY) {
291 topology_update_polarization_simple();
292 goto out;
293 }
294 init_timer_deferrable(&topology_timer);
295 set_topology_timer();
296out:
297 update_cpu_core_map();
298 return rc;
299}
300__initcall(init_topology_update);
301
302static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask,
303 int offset)
328{ 304{
329 int i, nr_masks; 305 int i, nr_masks;
330 306
@@ -350,108 +326,10 @@ void __init s390_init_cpu_topology(void)
350 store_topology(info); 326 store_topology(info);
351 pr_info("The CPU configuration topology of the machine is:"); 327 pr_info("The CPU configuration topology of the machine is:");
352 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 328 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
353 printk(KERN_CONT " %d", info->mag[i]); 329 printk(" %d", info->mag[i]);
354 printk(KERN_CONT " / %d\n", info->mnest); 330 printk(" / %d\n", info->mnest);
355 alloc_masks(info, &socket_info, 1); 331 alloc_masks(info, &core_info, 2);
356 alloc_masks(info, &book_info, 2); 332#ifdef CONFIG_SCHED_BOOK
357} 333 alloc_masks(info, &book_info, 3);
358 334#endif
359static int cpu_management;
360
361static ssize_t dispatching_show(struct device *dev,
362 struct device_attribute *attr,
363 char *buf)
364{
365 ssize_t count;
366
367 mutex_lock(&smp_cpu_state_mutex);
368 count = sprintf(buf, "%d\n", cpu_management);
369 mutex_unlock(&smp_cpu_state_mutex);
370 return count;
371}
372
373static ssize_t dispatching_store(struct device *dev,
374 struct device_attribute *attr,
375 const char *buf,
376 size_t count)
377{
378 int val, rc;
379 char delim;
380
381 if (sscanf(buf, "%d %c", &val, &delim) != 1)
382 return -EINVAL;
383 if (val != 0 && val != 1)
384 return -EINVAL;
385 rc = 0;
386 get_online_cpus();
387 mutex_lock(&smp_cpu_state_mutex);
388 if (cpu_management == val)
389 goto out;
390 rc = topology_set_cpu_management(val);
391 if (rc)
392 goto out;
393 cpu_management = val;
394 topology_expect_change();
395out:
396 mutex_unlock(&smp_cpu_state_mutex);
397 put_online_cpus();
398 return rc ? rc : count;
399}
400static DEVICE_ATTR(dispatching, 0644, dispatching_show,
401 dispatching_store);
402
403static ssize_t cpu_polarization_show(struct device *dev,
404 struct device_attribute *attr, char *buf)
405{
406 int cpu = dev->id;
407 ssize_t count;
408
409 mutex_lock(&smp_cpu_state_mutex);
410 switch (smp_cpu_get_polarization(cpu)) {
411 case POLARIZATION_HRZ:
412 count = sprintf(buf, "horizontal\n");
413 break;
414 case POLARIZATION_VL:
415 count = sprintf(buf, "vertical:low\n");
416 break;
417 case POLARIZATION_VM:
418 count = sprintf(buf, "vertical:medium\n");
419 break;
420 case POLARIZATION_VH:
421 count = sprintf(buf, "vertical:high\n");
422 break;
423 default:
424 count = sprintf(buf, "unknown\n");
425 break;
426 }
427 mutex_unlock(&smp_cpu_state_mutex);
428 return count;
429}
430static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
431
432static struct attribute *topology_cpu_attrs[] = {
433 &dev_attr_polarization.attr,
434 NULL,
435};
436
437static struct attribute_group topology_cpu_attr_group = {
438 .attrs = topology_cpu_attrs,
439};
440
441int topology_cpu_init(struct cpu *cpu)
442{
443 return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
444}
445
446static int __init topology_init(void)
447{
448 if (!MACHINE_HAS_TOPOLOGY) {
449 topology_update_polarization_simple();
450 goto out;
451 }
452 set_topology_timer();
453out:
454 update_cpu_masks();
455 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
456} 335}
457device_initcall(topology_init);
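
The removed side replaces the fixed 60-second topology poll with an adaptive one: topology_expect_change() credits up to 60 fast polls, and set_topology_timer() drains that credit with atomic_add_unless(), firing every HZ/10 while credit remains and once a minute otherwise. A user-space sketch of the same backoff using C11 atomics (the helper names are inventions for the illustration):

#include <stdatomic.h>

static atomic_int topology_poll;

/* Returns the delay until the next topology poll, in jiffies. */
static int next_poll_delay(int hz)
{
	int old = atomic_load(&topology_poll);

	/* Mimics atomic_add_unless(&topology_poll, -1, 0). */
	while (old > 0) {
		if (atomic_compare_exchange_weak(&topology_poll, &old, old - 1))
			return hz / 10;	/* fast polling: change expected */
	}
	return hz * 60;			/* steady state: once a minute */
}

static void expect_change(void)
{
	/* Racy on purpose, like the original: worst case we poll fast
	 * a little longer than necessary. */
	if (atomic_load(&topology_poll) <= 60)
		atomic_fetch_add(&topology_poll, 60);
}
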
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 70ecfc5fe8f..ffabcd9d336 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * arch/s390/kernel/traps.c
3 *
2 * S390 version 4 * S390 version
3 * Copyright IBM Corp. 1999, 2000 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
5 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
6 * 8 *
@@ -31,6 +33,7 @@
31#include <linux/kprobes.h> 33#include <linux/kprobes.h>
32#include <linux/bug.h> 34#include <linux/bug.h>
33#include <linux/utsname.h> 35#include <linux/utsname.h>
36#include <asm/system.h>
34#include <asm/uaccess.h> 37#include <asm/uaccess.h>
35#include <asm/io.h> 38#include <asm/io.h>
36#include <linux/atomic.h> 39#include <linux/atomic.h>
@@ -38,10 +41,11 @@
38#include <asm/cpcmd.h> 41#include <asm/cpcmd.h>
39#include <asm/lowcore.h> 42#include <asm/lowcore.h>
40#include <asm/debug.h> 43#include <asm/debug.h>
41#include <asm/ipl.h>
42#include "entry.h" 44#include "entry.h"
43 45
44int show_unhandled_signals = 1; 46void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
47
48int show_unhandled_signals;
45 49
46#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 50#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
47 51
@@ -55,23 +59,6 @@ static int kstack_depth_to_print = 12;
55static int kstack_depth_to_print = 20; 59static int kstack_depth_to_print = 20;
56#endif /* CONFIG_64BIT */ 60#endif /* CONFIG_64BIT */
57 61
58static inline void __user *get_trap_ip(struct pt_regs *regs)
59{
60#ifdef CONFIG_64BIT
61 unsigned long address;
62
63 if (regs->int_code & 0x200)
64 address = *(unsigned long *)(current->thread.trap_tdb + 24);
65 else
66 address = regs->psw.addr;
67 return (void __user *)
68 ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
69#else
70 return (void __user *)
71 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
72#endif
73}
74
75/* 62/*
 76 * For show_trace we have three different stacks to consider: 63 * For show_trace we have three different stacks to consider:
 77 * - the panic stack which is used if the kernel stack has overflowed 64 * - the panic stack which is used if the kernel stack has overflowed
@@ -157,8 +144,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
157 for (i = 0; i < kstack_depth_to_print; i++) { 144 for (i = 0; i < kstack_depth_to_print; i++) {
158 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 145 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
159 break; 146 break;
160 if ((i * sizeof(long) % 32) == 0) 147 if (i && ((i * sizeof (long) % 32) == 0))
161 printk("%s ", i == 0 ? "" : "\n"); 148 printk("\n ");
162 printk(LONG, *stack++); 149 printk(LONG, *stack++);
163 } 150 }
164 printk("\n"); 151 printk("\n");
@@ -200,7 +187,7 @@ void show_registers(struct pt_regs *regs)
200{ 187{
201 char *mode; 188 char *mode;
202 189
203 mode = user_mode(regs) ? "User" : "Krnl"; 190 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
204 printk("%s PSW : %p %p", 191 printk("%s PSW : %p %p",
205 mode, (void *) regs->psw.mask, 192 mode, (void *) regs->psw.mask,
206 (void *) regs->psw.addr); 193 (void *) regs->psw.addr);
@@ -213,7 +200,7 @@ void show_registers(struct pt_regs *regs)
213 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 200 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
214 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 201 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
215#ifdef CONFIG_64BIT 202#ifdef CONFIG_64BIT
216 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); 203 printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
217#endif 204#endif
218 printk("\n%s GPRS: " FOURLONG, mode, 205 printk("\n%s GPRS: " FOURLONG, mode,
219 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 206 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
@@ -229,6 +216,7 @@ void show_registers(struct pt_regs *regs)
229 216
230void show_regs(struct pt_regs *regs) 217void show_regs(struct pt_regs *regs)
231{ 218{
219 print_modules();
232 printk("CPU: %d %s %s %.*s\n", 220 printk("CPU: %d %s %s %.*s\n",
233 task_thread_info(current)->cpu, print_tainted(), 221 task_thread_info(current)->cpu, print_tainted(),
234 init_utsname()->release, 222 init_utsname()->release,
@@ -239,24 +227,23 @@ void show_regs(struct pt_regs *regs)
239 (void *) current->thread.ksp); 227 (void *) current->thread.ksp);
240 show_registers(regs); 228 show_registers(regs);
241 /* Show stack backtrace if pt_regs is from kernel mode */ 229 /* Show stack backtrace if pt_regs is from kernel mode */
242 if (!user_mode(regs)) 230 if (!(regs->psw.mask & PSW_MASK_PSTATE))
243 show_trace(NULL, (unsigned long *) regs->gprs[15]); 231 show_trace(NULL, (unsigned long *) regs->gprs[15]);
244 show_last_breaking_event(regs); 232 show_last_breaking_event(regs);
245} 233}
246 234
247static DEFINE_SPINLOCK(die_lock); 235static DEFINE_SPINLOCK(die_lock);
248 236
249void die(struct pt_regs *regs, const char *str) 237void die(const char * str, struct pt_regs * regs, long err)
250{ 238{
251 static int die_counter; 239 static int die_counter;
252 240
253 oops_enter(); 241 oops_enter();
254 lgr_info_log();
255 debug_stop_all(); 242 debug_stop_all();
256 console_verbose(); 243 console_verbose();
257 spin_lock_irq(&die_lock); 244 spin_lock_irq(&die_lock);
258 bust_spinlocks(1); 245 bust_spinlocks(1);
259 printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); 246 printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
260#ifdef CONFIG_PREEMPT 247#ifdef CONFIG_PREEMPT
261 printk("PREEMPT "); 248 printk("PREEMPT ");
262#endif 249#endif
@@ -267,8 +254,7 @@ void die(struct pt_regs *regs, const char *str)
267 printk("DEBUG_PAGEALLOC"); 254 printk("DEBUG_PAGEALLOC");
268#endif 255#endif
269 printk("\n"); 256 printk("\n");
270 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); 257 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
271 print_modules();
272 show_regs(regs); 258 show_regs(regs);
273 bust_spinlocks(0); 259 bust_spinlocks(0);
274 add_taint(TAINT_DIE); 260 add_taint(TAINT_DIE);
@@ -281,7 +267,8 @@ void die(struct pt_regs *regs, const char *str)
281 do_exit(SIGSEGV); 267 do_exit(SIGSEGV);
282} 268}
283 269
284static inline void report_user_fault(struct pt_regs *regs, int signr) 270static void inline report_user_fault(struct pt_regs *regs, long int_code,
271 int signr)
285{ 272{
286 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 273 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
287 return; 274 return;
@@ -289,7 +276,7 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
289 return; 276 return;
290 if (!printk_ratelimit()) 277 if (!printk_ratelimit())
291 return; 278 return;
292 printk("User process fault: interruption code 0x%X ", regs->int_code); 279 printk("User process fault: interruption code 0x%lX ", int_code);
293 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); 280 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
294 printk("\n"); 281 printk("\n");
295 show_regs(regs); 282 show_regs(regs);
@@ -300,38 +287,42 @@ int is_valid_bugaddr(unsigned long addr)
300 return 1; 287 return 1;
301} 288}
302 289
303static void __kprobes do_trap(struct pt_regs *regs, 290static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
304 int si_signo, int si_code, char *str) 291 struct pt_regs *regs, siginfo_t *info)
305{ 292{
306 siginfo_t info; 293 if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
307 294 pgm_int_code, signr) == NOTIFY_STOP)
308 if (notify_die(DIE_TRAP, str, regs, 0,
309 regs->int_code, si_signo) == NOTIFY_STOP)
310 return; 295 return;
311 296
312 if (user_mode(regs)) { 297 if (regs->psw.mask & PSW_MASK_PSTATE) {
313 info.si_signo = si_signo; 298 struct task_struct *tsk = current;
314 info.si_errno = 0; 299
315 info.si_code = si_code; 300 tsk->thread.trap_no = pgm_int_code & 0xffff;
316 info.si_addr = get_trap_ip(regs); 301 force_sig_info(signr, info, tsk);
317 force_sig_info(si_signo, &info, current); 302 report_user_fault(regs, pgm_int_code, signr);
318 report_user_fault(regs, si_signo);
319 } else { 303 } else {
320 const struct exception_table_entry *fixup; 304 const struct exception_table_entry *fixup;
321 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 305 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
322 if (fixup) 306 if (fixup)
323 regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE; 307 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
324 else { 308 else {
325 enum bug_trap_type btt; 309 enum bug_trap_type btt;
326 310
327 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); 311 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
328 if (btt == BUG_TRAP_TYPE_WARN) 312 if (btt == BUG_TRAP_TYPE_WARN)
329 return; 313 return;
330 die(regs, str); 314 die(str, regs, pgm_int_code);
331 } 315 }
332 } 316 }
333} 317}
334 318
319static inline void __user *get_psw_address(struct pt_regs *regs,
320 long pgm_int_code)
321{
322 return (void __user *)
323 ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
324}
325
335void __kprobes do_per_trap(struct pt_regs *regs) 326void __kprobes do_per_trap(struct pt_regs *regs)
336{ 327{
337 siginfo_t info; 328 siginfo_t info;
@@ -343,24 +334,30 @@ void __kprobes do_per_trap(struct pt_regs *regs)
343 info.si_signo = SIGTRAP; 334 info.si_signo = SIGTRAP;
344 info.si_errno = 0; 335 info.si_errno = 0;
345 info.si_code = TRAP_HWBKPT; 336 info.si_code = TRAP_HWBKPT;
346 info.si_addr = 337 info.si_addr = (void *) current->thread.per_event.address;
347 (void __force __user *) current->thread.per_event.address;
348 force_sig_info(SIGTRAP, &info, current); 338 force_sig_info(SIGTRAP, &info, current);
349} 339}
350 340
351void default_trap_handler(struct pt_regs *regs) 341static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
342 unsigned long trans_exc_code)
352{ 343{
353 if (user_mode(regs)) { 344 if (regs->psw.mask & PSW_MASK_PSTATE) {
354 report_user_fault(regs, SIGSEGV); 345 report_user_fault(regs, pgm_int_code, SIGSEGV);
355 do_exit(SIGSEGV); 346 do_exit(SIGSEGV);
356 } else 347 } else
357 die(regs, "Unknown program exception"); 348 die("Unknown program exception", regs, pgm_int_code);
358} 349}
359 350
360#define DO_ERROR_INFO(name, signr, sicode, str) \ 351#define DO_ERROR_INFO(name, signr, sicode, str) \
361void name(struct pt_regs *regs) \ 352static void name(struct pt_regs *regs, long pgm_int_code, \
362{ \ 353 unsigned long trans_exc_code) \
363 do_trap(regs, signr, sicode, str); \ 354{ \
355 siginfo_t info; \
356 info.si_signo = signr; \
357 info.si_errno = 0; \
358 info.si_code = sicode; \
359 info.si_addr = get_psw_address(regs, pgm_int_code); \
360 do_trap(pgm_int_code, signr, str, regs, &info); \
364} 361}
365 362
366DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, 363DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
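
Expanding the restored DO_ERROR_INFO() by hand for its first user shows the shape of every generated handler; this is a mechanical expansion for illustration, not an additional function in the patch:

static void addressing_exception(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	siginfo_t info;

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLADR;
	info.si_addr = get_psw_address(regs, pgm_int_code);
	do_trap(pgm_int_code, SIGILL, "addressing exception", regs, &info);
}
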
@@ -390,41 +387,44 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
390DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, 387DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
391 "translation exception") 388 "translation exception")
392 389
393#ifdef CONFIG_64BIT 390static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
394DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, 391 int fpc, long pgm_int_code)
395 "transaction constraint exception")
396#endif
397
398static inline void do_fp_trap(struct pt_regs *regs, int fpc)
399{ 392{
400 int si_code = 0; 393 siginfo_t si;
394
395 si.si_signo = SIGFPE;
396 si.si_errno = 0;
397 si.si_addr = location;
398 si.si_code = 0;
401 /* FPC[2] is Data Exception Code */ 399 /* FPC[2] is Data Exception Code */
402 if ((fpc & 0x00000300) == 0) { 400 if ((fpc & 0x00000300) == 0) {
403 /* bits 6 and 7 of DXC are 0 iff IEEE exception */ 401 /* bits 6 and 7 of DXC are 0 iff IEEE exception */
404 if (fpc & 0x8000) /* invalid fp operation */ 402 if (fpc & 0x8000) /* invalid fp operation */
405 si_code = FPE_FLTINV; 403 si.si_code = FPE_FLTINV;
406 else if (fpc & 0x4000) /* div by 0 */ 404 else if (fpc & 0x4000) /* div by 0 */
407 si_code = FPE_FLTDIV; 405 si.si_code = FPE_FLTDIV;
408 else if (fpc & 0x2000) /* overflow */ 406 else if (fpc & 0x2000) /* overflow */
409 si_code = FPE_FLTOVF; 407 si.si_code = FPE_FLTOVF;
410 else if (fpc & 0x1000) /* underflow */ 408 else if (fpc & 0x1000) /* underflow */
411 si_code = FPE_FLTUND; 409 si.si_code = FPE_FLTUND;
412 else if (fpc & 0x0800) /* inexact */ 410 else if (fpc & 0x0800) /* inexact */
413 si_code = FPE_FLTRES; 411 si.si_code = FPE_FLTRES;
414 } 412 }
415 do_trap(regs, SIGFPE, si_code, "floating point exception"); 413 do_trap(pgm_int_code, SIGFPE,
414 "floating point exception", regs, &si);
416} 415}
417 416
418void __kprobes illegal_op(struct pt_regs *regs) 417static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
418 unsigned long trans_exc_code)
419{ 419{
420 siginfo_t info; 420 siginfo_t info;
421 __u8 opcode[6]; 421 __u8 opcode[6];
422 __u16 __user *location; 422 __u16 __user *location;
423 int signal = 0; 423 int signal = 0;
424 424
425 location = get_trap_ip(regs); 425 location = get_psw_address(regs, pgm_int_code);
426 426
427 if (user_mode(regs)) { 427 if (regs->psw.mask & PSW_MASK_PSTATE) {
428 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 428 if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
429 return; 429 return;
430 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 430 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
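
The FPC decode in do_fp_trap() above first requires bits 6-7 of the Data Exception Code (FPC byte 2) to be zero, i.e. an IEEE exception, and then maps the highest-priority flag bit to a si_code. The same mapping as a standalone, testable function (userspace sketch; the constants are taken from the hunk above):

#include <signal.h>
#include <stdio.h>

/* FPC byte 2 is the Data Exception Code; bits 6-7 zero means IEEE */
static int dxc_to_sicode(unsigned int fpc)
{
	if (fpc & 0x00000300)
		return 0;		/* not an IEEE exception */
	if (fpc & 0x8000)
		return FPE_FLTINV;	/* invalid operation */
	if (fpc & 0x4000)
		return FPE_FLTDIV;	/* divide by zero */
	if (fpc & 0x2000)
		return FPE_FLTOVF;	/* overflow */
	if (fpc & 0x1000)
		return FPE_FLTUND;	/* underflow */
	if (fpc & 0x0800)
		return FPE_FLTRES;	/* inexact result */
	return 0;
}

int main(void)
{
	printf("%d %d\n", dxc_to_sicode(0x8000), dxc_to_sicode(0x0800));
	return 0;
}
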
@@ -466,33 +466,46 @@ void __kprobes illegal_op(struct pt_regs *regs)
466 * If we get an illegal op in kernel mode, send it through the 466 * If we get an illegal op in kernel mode, send it through the
467 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 467 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
468 */ 468 */
469 if (notify_die(DIE_BPT, "bpt", regs, 0, 469 if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
470 3, SIGTRAP) != NOTIFY_STOP) 470 3, SIGTRAP) != NOTIFY_STOP)
471 signal = SIGILL; 471 signal = SIGILL;
472 } 472 }
473 473
474#ifdef CONFIG_MATHEMU 474#ifdef CONFIG_MATHEMU
475 if (signal == SIGFPE) 475 if (signal == SIGFPE)
476 do_fp_trap(regs, current->thread.fp_regs.fpc); 476 do_fp_trap(regs, location,
477 else if (signal == SIGSEGV) 477 current->thread.fp_regs.fpc, pgm_int_code);
478 do_trap(regs, signal, SEGV_MAPERR, "user address fault"); 478 else if (signal == SIGSEGV) {
479 else 479 info.si_signo = signal;
480 info.si_errno = 0;
481 info.si_code = SEGV_MAPERR;
482 info.si_addr = (void __user *) location;
483 do_trap(pgm_int_code, signal,
484 "user address fault", regs, &info);
485 } else
480#endif 486#endif
481 if (signal) 487 if (signal) {
482 do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); 488 info.si_signo = signal;
489 info.si_errno = 0;
490 info.si_code = ILL_ILLOPC;
491 info.si_addr = (void __user *) location;
492 do_trap(pgm_int_code, signal,
493 "illegal operation", regs, &info);
494 }
483} 495}
484 496
485 497
486#ifdef CONFIG_MATHEMU 498#ifdef CONFIG_MATHEMU
487void specification_exception(struct pt_regs *regs) 499void specification_exception(struct pt_regs *regs, long pgm_int_code,
500 unsigned long trans_exc_code)
488{ 501{
489 __u8 opcode[6]; 502 __u8 opcode[6];
490 __u16 __user *location = NULL; 503 __u16 __user *location = NULL;
491 int signal = 0; 504 int signal = 0;
492 505
493 location = (__u16 __user *) get_trap_ip(regs); 506 location = (__u16 __user *) get_psw_address(regs, pgm_int_code);
494 507
495 if (user_mode(regs)) { 508 if (regs->psw.mask & PSW_MASK_PSTATE) {
496 get_user(*((__u16 *) opcode), location); 509 get_user(*((__u16 *) opcode), location);
497 switch (opcode[0]) { 510 switch (opcode[0]) {
498 case 0x28: /* LDR Rx,Ry */ 511 case 0x28: /* LDR Rx,Ry */
@@ -525,27 +538,36 @@ void specification_exception(struct pt_regs *regs)
525 signal = SIGILL; 538 signal = SIGILL;
526 539
527 if (signal == SIGFPE) 540 if (signal == SIGFPE)
528 do_fp_trap(regs, current->thread.fp_regs.fpc); 541 do_fp_trap(regs, location,
529 else if (signal) 542 current->thread.fp_regs.fpc, pgm_int_code);
530 do_trap(regs, signal, ILL_ILLOPN, "specification exception"); 543 else if (signal) {
544 siginfo_t info;
545 info.si_signo = signal;
546 info.si_errno = 0;
547 info.si_code = ILL_ILLOPN;
548 info.si_addr = location;
549 do_trap(pgm_int_code, signal,
550 "specification exception", regs, &info);
551 }
531} 552}
532#else 553#else
533DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 554DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
534 "specification exception"); 555 "specification exception");
535#endif 556#endif
536 557
537void data_exception(struct pt_regs *regs) 558static void data_exception(struct pt_regs *regs, long pgm_int_code,
559 unsigned long trans_exc_code)
538{ 560{
539 __u16 __user *location; 561 __u16 __user *location;
540 int signal = 0; 562 int signal = 0;
541 563
542 location = get_trap_ip(regs); 564 location = get_psw_address(regs, pgm_int_code);
543 565
544 if (MACHINE_HAS_IEEE) 566 if (MACHINE_HAS_IEEE)
545 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 567 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
546 568
547#ifdef CONFIG_MATHEMU 569#ifdef CONFIG_MATHEMU
548 else if (user_mode(regs)) { 570 else if (regs->psw.mask & PSW_MASK_PSTATE) {
549 __u8 opcode[6]; 571 __u8 opcode[6];
550 get_user(*((__u16 *) opcode), location); 572 get_user(*((__u16 *) opcode), location);
551 switch (opcode[0]) { 573 switch (opcode[0]) {
@@ -604,18 +626,32 @@ void data_exception(struct pt_regs *regs)
604 else 626 else
605 signal = SIGILL; 627 signal = SIGILL;
606 if (signal == SIGFPE) 628 if (signal == SIGFPE)
607 do_fp_trap(regs, current->thread.fp_regs.fpc); 629 do_fp_trap(regs, location,
608 else if (signal) 630 current->thread.fp_regs.fpc, pgm_int_code);
609 do_trap(regs, signal, ILL_ILLOPN, "data exception"); 631 else if (signal) {
632 siginfo_t info;
633 info.si_signo = signal;
634 info.si_errno = 0;
635 info.si_code = ILL_ILLOPN;
636 info.si_addr = location;
637 do_trap(pgm_int_code, signal, "data exception", regs, &info);
638 }
610} 639}
611 640
612void space_switch_exception(struct pt_regs *regs) 641static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
642 unsigned long trans_exc_code)
613{ 643{
644 siginfo_t info;
645
614 /* Set user psw back to home space mode. */ 646 /* Set user psw back to home space mode. */
615 if (user_mode(regs)) 647 if (regs->psw.mask & PSW_MASK_PSTATE)
616 regs->psw.mask |= PSW_ASC_HOME; 648 regs->psw.mask |= PSW_ASC_HOME;
617 /* Send SIGILL. */ 649 /* Send SIGILL. */
618 do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); 650 info.si_signo = SIGILL;
651 info.si_errno = 0;
652 info.si_code = ILL_PRVOPC;
653 info.si_addr = get_psw_address(regs, pgm_int_code);
654 do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
619} 655}
620 656
621void __kprobes kernel_stack_overflow(struct pt_regs * regs) 657void __kprobes kernel_stack_overflow(struct pt_regs * regs)
@@ -627,7 +663,42 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
627 panic("Corrupt kernel stack, can't continue."); 663 panic("Corrupt kernel stack, can't continue.");
628} 664}
629 665
666/* init is done in lowcore.S and head.S */
667
630void __init trap_init(void) 668void __init trap_init(void)
631{ 669{
670 int i;
671
672 for (i = 0; i < 128; i++)
673 pgm_check_table[i] = &default_trap_handler;
674 pgm_check_table[1] = &illegal_op;
675 pgm_check_table[2] = &privileged_op;
676 pgm_check_table[3] = &execute_exception;
677 pgm_check_table[4] = &do_protection_exception;
678 pgm_check_table[5] = &addressing_exception;
679 pgm_check_table[6] = &specification_exception;
680 pgm_check_table[7] = &data_exception;
681 pgm_check_table[8] = &overflow_exception;
682 pgm_check_table[9] = &divide_exception;
683 pgm_check_table[0x0A] = &overflow_exception;
684 pgm_check_table[0x0B] = &divide_exception;
685 pgm_check_table[0x0C] = &hfp_overflow_exception;
686 pgm_check_table[0x0D] = &hfp_underflow_exception;
687 pgm_check_table[0x0E] = &hfp_significance_exception;
688 pgm_check_table[0x0F] = &hfp_divide_exception;
689 pgm_check_table[0x10] = &do_dat_exception;
690 pgm_check_table[0x11] = &do_dat_exception;
691 pgm_check_table[0x12] = &translation_exception;
692 pgm_check_table[0x13] = &special_op_exception;
693#ifdef CONFIG_64BIT
694 pgm_check_table[0x38] = &do_asce_exception;
695 pgm_check_table[0x39] = &do_dat_exception;
696 pgm_check_table[0x3A] = &do_dat_exception;
697 pgm_check_table[0x3B] = &do_dat_exception;
698#endif /* CONFIG_64BIT */
699 pgm_check_table[0x15] = &operand_exception;
700 pgm_check_table[0x1C] = &space_switch_exception;
701 pgm_check_table[0x1D] = &hfp_sqrt_exception;
702 /* Enable machine checks early. */
632 local_mcck_enable(); 703 local_mcck_enable();
633} 704}
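
The restored trap_init() fills pgm_check_table[], a 128-entry array of handler pointers indexed by the program-interruption code; the low-level program-check entry path dispatches through the table rather than a switch. A minimal self-contained sketch of the pattern (the dispatch helper and the 0x7f mask are illustrative here; the real indexing happens in the assembler entry code):

struct pt_regs;

typedef void (*pgm_check_handler_t)(struct pt_regs *regs, long pgm_int_code,
				    unsigned long trans_exc_code);

static pgm_check_handler_t pgm_table[128];

static void default_handler(struct pt_regs *regs, long pgm_int_code,
			    unsigned long trans_exc_code)
{
	/* placeholder: the kernel's default_trap_handler() dies here */
}

static void pgm_dispatch(struct pt_regs *regs, long pgm_int_code,
			 unsigned long trans_exc_code)
{
	/* the low seven bits of the interruption code select the slot */
	pgm_table[pgm_int_code & 0x7f](regs, pgm_int_code, trans_exc_code);
}

static void pgm_table_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_table[i] = default_handler;
	/* specific codes then get specific handlers, as in trap_init() */
}
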
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index d7776281cb6..d73630b4fe1 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -25,12 +25,12 @@
25#include <linux/compat.h> 25#include <linux/compat.h>
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28#include <asm/system.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
29#include <asm/mmu.h> 30#include <asm/mmu.h>
30#include <asm/mmu_context.h> 31#include <asm/mmu_context.h>
31#include <asm/sections.h> 32#include <asm/sections.h>
32#include <asm/vdso.h> 33#include <asm/vdso.h>
33#include <asm/facility.h>
34 34
35#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) 35#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
36extern char vdso32_start, vdso32_end; 36extern char vdso32_start, vdso32_end;
@@ -84,17 +84,23 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
84 */ 84 */
85static void vdso_init_data(struct vdso_data *vd) 85static void vdso_init_data(struct vdso_data *vd)
86{ 86{
87 vd->ectg_available = 87 vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
88 s390_user_mode != HOME_SPACE_MODE && test_facility(31);
89} 88}
90 89
91#ifdef CONFIG_64BIT 90#ifdef CONFIG_64BIT
92/* 91/*
92 * Setup per cpu vdso data page.
93 */
94static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
95{
96}
97
98/*
93 * Allocate/free per cpu vdso data. 99 * Allocate/free per cpu vdso data.
94 */ 100 */
95#define SEGMENT_ORDER 2 101#define SEGMENT_ORDER 2
96 102
97int vdso_alloc_per_cpu(struct _lowcore *lowcore) 103int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
98{ 104{
99 unsigned long segment_table, page_table, page_frame; 105 unsigned long segment_table, page_table, page_frame;
100 u32 *psal, *aste; 106 u32 *psal, *aste;
@@ -102,7 +108,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
102 108
103 lowcore->vdso_per_cpu_data = __LC_PASTE; 109 lowcore->vdso_per_cpu_data = __LC_PASTE;
104 110
105 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 111 if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
106 return 0; 112 return 0;
107 113
108 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER); 114 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -133,6 +139,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
133 aste[4] = (u32)(addr_t) psal; 139 aste[4] = (u32)(addr_t) psal;
134 lowcore->vdso_per_cpu_data = page_frame; 140 lowcore->vdso_per_cpu_data = page_frame;
135 141
142 vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
136 return 0; 143 return 0;
137 144
138out: 145out:
@@ -142,12 +149,12 @@ out:
142 return -ENOMEM; 149 return -ENOMEM;
143} 150}
144 151
145void vdso_free_per_cpu(struct _lowcore *lowcore) 152void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
146{ 153{
147 unsigned long segment_table, page_table, page_frame; 154 unsigned long segment_table, page_table, page_frame;
148 u32 *psal, *aste; 155 u32 *psal, *aste;
149 156
150 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 157 if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
151 return; 158 return;
152 159
153 psal = (u32 *)(addr_t) lowcore->paste[4]; 160 psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -161,15 +168,19 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
161 free_pages(segment_table, SEGMENT_ORDER); 168 free_pages(segment_table, SEGMENT_ORDER);
162} 169}
163 170
164static void vdso_init_cr5(void) 171static void __vdso_init_cr5(void *dummy)
165{ 172{
166 unsigned long cr5; 173 unsigned long cr5;
167 174
168 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
169 return;
170 cr5 = offsetof(struct _lowcore, paste); 175 cr5 = offsetof(struct _lowcore, paste);
171 __ctl_load(cr5, 5, 5); 176 __ctl_load(cr5, 5, 5);
172} 177}
178
179static void vdso_init_cr5(void)
180{
181 if (user_mode != HOME_SPACE_MODE && vdso_enabled)
182 on_each_cpu(__vdso_init_cr5, NULL, 1);
183}
173#endif /* CONFIG_64BIT */ 184#endif /* CONFIG_64BIT */
174 185
175/* 186/*
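
The new-side split of vdso_init_cr5() is the usual pattern for state held in a control register: a small worker that only touches the current CPU, plus a wrapper that runs it everywhere through on_each_cpu() with wait set, so the caller knows every CPU has loaded the value before it returns. A hedged sketch of that shape (the payload is a placeholder for the real __ctl_load() of CR5):

#include <linux/smp.h>

static void load_cr_worker(void *unused)
{
	/* runs on every online CPU in turn, typically with IRQs disabled */
}

static void load_cr_everywhere(void)
{
	on_each_cpu(load_cr_worker, NULL, 1);	/* 1 == wait for completion */
}
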
@@ -242,11 +253,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
242 * on the "data" page of the vDSO or you'll stop getting kernel 253 * on the "data" page of the vDSO or you'll stop getting kernel
243 * updates and your nice userland gettimeofday will be totally dead. 254 * updates and your nice userland gettimeofday will be totally dead.
244 * It's fine to use that for setting breakpoints in the vDSO code 255 * It's fine to use that for setting breakpoints in the vDSO code
245 * pages though. 256 * pages though
257 *
258 * Make sure the vDSO gets into every core dump.
259 * Dumping its contents makes post-mortem fully interpretable later
260 * without matching up the same kernel and hardware config to see
261 * what PC values meant.
246 */ 262 */
247 rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, 263 rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
248 VM_READ|VM_EXEC| 264 VM_READ|VM_EXEC|
249 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 265 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
266 VM_ALWAYSDUMP,
250 vdso_pagelist); 267 vdso_pagelist);
251 if (rc) 268 if (rc)
252 current->mm->context.vdso_base = 0; 269 current->mm->context.vdso_base = 0;
@@ -305,8 +322,10 @@ static int __init vdso_init(void)
305 } 322 }
306 vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); 323 vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
307 vdso64_pagelist[vdso64_pages] = NULL; 324 vdso64_pagelist[vdso64_pages] = NULL;
308 if (vdso_alloc_per_cpu(&S390_lowcore)) 325#ifndef CONFIG_SMP
326 if (vdso_alloc_per_cpu(0, &S390_lowcore))
309 BUG(); 327 BUG();
328#endif
310 vdso_init_cr5(); 329 vdso_init_cr5();
311#endif /* CONFIG_64BIT */ 330#endif /* CONFIG_64BIT */
312 331
@@ -316,7 +335,7 @@ static int __init vdso_init(void)
316 335
317 return 0; 336 return 0;
318} 337}
319early_initcall(vdso_init); 338arch_initcall(vdso_init);
320 339
321int in_gate_area_no_mm(unsigned long addr) 340int in_gate_area_no_mm(unsigned long addr)
322{ 341{
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 79cb51adc74..56fe6bc81fe 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -8,13 +8,13 @@
8 8
9#ifndef CONFIG_64BIT 9#ifndef CONFIG_64BIT
10OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 10OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
11OUTPUT_ARCH(s390:31-bit) 11OUTPUT_ARCH(s390)
12ENTRY(startup) 12ENTRY(_start)
13jiffies = jiffies_64 + 4; 13jiffies = jiffies_64 + 4;
14#else 14#else
15OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 15OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
16OUTPUT_ARCH(s390:64-bit) 16OUTPUT_ARCH(s390:64-bit)
17ENTRY(startup) 17ENTRY(_start)
18jiffies = jiffies_64; 18jiffies = jiffies_64;
19#endif 19#endif
20 20
@@ -43,9 +43,7 @@ SECTIONS
43 43
44 NOTES :text :note 44 NOTES :text :note
45 45
46 .dummy : { *(.dummy) } :data 46 RODATA
47
48 RO_DATA_SECTION(PAGE_SIZE)
49 47
50#ifdef CONFIG_SHARED_KERNEL 48#ifdef CONFIG_SHARED_KERNEL
51 . = ALIGN(0x100000); /* VM shared segments are 1MB aligned */ 49 . = ALIGN(0x100000); /* VM shared segments are 1MB aligned */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e84b8b68444..2d6228f60cd 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -1,82 +1,70 @@
1/* 1/*
2 * arch/s390/kernel/vtime.c
2 * Virtual cpu timer based timer functions. 3 * Virtual cpu timer based timer functions.
3 * 4 *
4 * Copyright IBM Corp. 2004, 2012 5 * S390 version
6 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Jan Glauber <jan.glauber@de.ibm.com> 7 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
6 */ 8 */
7 9
8#include <linux/kernel_stat.h> 10#include <linux/module.h>
9#include <linux/notifier.h>
10#include <linux/kprobes.h>
11#include <linux/export.h>
12#include <linux/kernel.h> 11#include <linux/kernel.h>
13#include <linux/timex.h>
14#include <linux/types.h>
15#include <linux/time.h> 12#include <linux/time.h>
16#include <linux/cpu.h> 13#include <linux/delay.h>
14#include <linux/init.h>
17#include <linux/smp.h> 15#include <linux/smp.h>
16#include <linux/types.h>
17#include <linux/timex.h>
18#include <linux/notifier.h>
19#include <linux/kernel_stat.h>
20#include <linux/rcupdate.h>
21#include <linux/posix-timers.h>
22#include <linux/cpu.h>
23#include <linux/kprobes.h>
18 24
25#include <asm/timer.h>
19#include <asm/irq_regs.h> 26#include <asm/irq_regs.h>
20#include <asm/cputime.h> 27#include <asm/cputime.h>
21#include <asm/vtimer.h>
22#include <asm/irq.h> 28#include <asm/irq.h>
23#include "entry.h"
24 29
25static void virt_timer_expire(void); 30static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
26 31
27DEFINE_PER_CPU(struct s390_idle_data, s390_idle); 32DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
28 33
29static LIST_HEAD(virt_timer_list); 34static inline __u64 get_vtimer(void)
30static DEFINE_SPINLOCK(virt_timer_lock);
31static atomic64_t virt_timer_current;
32static atomic64_t virt_timer_elapsed;
33
34static inline u64 get_vtimer(void)
35{ 35{
36 u64 timer; 36 __u64 timer;
37 37
38 asm volatile("stpt %0" : "=m" (timer)); 38 asm volatile("STPT %0" : "=m" (timer));
39 return timer; 39 return timer;
40} 40}
41 41
42static inline void set_vtimer(u64 expires) 42static inline void set_vtimer(__u64 expires)
43{ 43{
44 u64 timer; 44 __u64 timer;
45 45
46 asm volatile( 46 asm volatile (" STPT %0\n" /* Store current cpu timer value */
47 " stpt %0\n" /* Store current cpu timer value */ 47 " SPT %1" /* Set new value immediately afterwards */
48 " spt %1" /* Set new value imm. afterwards */ 48 : "=m" (timer) : "m" (expires) );
49 : "=m" (timer) : "m" (expires));
50 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; 49 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
51 S390_lowcore.last_update_timer = expires; 50 S390_lowcore.last_update_timer = expires;
52} 51}
53 52
54static inline int virt_timer_forward(u64 elapsed)
55{
56 BUG_ON(!irqs_disabled());
57
58 if (list_empty(&virt_timer_list))
59 return 0;
60 elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
61 return elapsed >= atomic64_read(&virt_timer_current);
62}
63
64/* 53/*
65 * Update process times based on virtual cpu times stored by entry.S 54 * Update process times based on virtual cpu times stored by entry.S
66 * to the lowcore fields user_timer, system_timer & steal_clock. 55 * to the lowcore fields user_timer, system_timer & steal_clock.
67 */ 56 */
68static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) 57static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
69{ 58{
70 struct thread_info *ti = task_thread_info(tsk); 59 struct thread_info *ti = task_thread_info(tsk);
71 u64 timer, clock, user, system, steal; 60 __u64 timer, clock, user, system, steal;
72 61
73 timer = S390_lowcore.last_update_timer; 62 timer = S390_lowcore.last_update_timer;
74 clock = S390_lowcore.last_update_clock; 63 clock = S390_lowcore.last_update_clock;
75 asm volatile( 64 asm volatile (" STPT %0\n" /* Store current cpu timer value */
76 " stpt %0\n" /* Store current cpu timer value */ 65 " STCK %1" /* Store current tod clock value */
77 " stck %1" /* Store current tod clock value */ 66 : "=m" (S390_lowcore.last_update_timer),
78 : "=m" (S390_lowcore.last_update_timer), 67 "=m" (S390_lowcore.last_update_clock) );
79 "=m" (S390_lowcore.last_update_clock));
80 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 68 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
81 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; 69 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
82 70
@@ -95,11 +83,9 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
95 S390_lowcore.steal_timer = 0; 83 S390_lowcore.steal_timer = 0;
96 account_steal_time(steal); 84 account_steal_time(steal);
97 } 85 }
98
99 return virt_timer_forward(user + system);
100} 86}
101 87
102void vtime_task_switch(struct task_struct *prev) 88void account_vtime(struct task_struct *prev, struct task_struct *next)
103{ 89{
104 struct thread_info *ti; 90 struct thread_info *ti;
105 91
@@ -107,32 +93,24 @@ void vtime_task_switch(struct task_struct *prev)
107 ti = task_thread_info(prev); 93 ti = task_thread_info(prev);
108 ti->user_timer = S390_lowcore.user_timer; 94 ti->user_timer = S390_lowcore.user_timer;
109 ti->system_timer = S390_lowcore.system_timer; 95 ti->system_timer = S390_lowcore.system_timer;
110 ti = task_thread_info(current); 96 ti = task_thread_info(next);
111 S390_lowcore.user_timer = ti->user_timer; 97 S390_lowcore.user_timer = ti->user_timer;
112 S390_lowcore.system_timer = ti->system_timer; 98 S390_lowcore.system_timer = ti->system_timer;
113} 99}
114 100
115/* 101void account_process_tick(struct task_struct *tsk, int user_tick)
116 * In s390, accounting pending user time also implies
117 * accounting system time in order to correctly compute
118 * the stolen time accounting.
119 */
120void vtime_account_user(struct task_struct *tsk)
121{ 102{
122 if (do_account_vtime(tsk, HARDIRQ_OFFSET)) 103 do_account_vtime(tsk, HARDIRQ_OFFSET);
123 virt_timer_expire();
124} 104}
125 105
126/* 106/*
127 * Update process times based on virtual cpu times stored by entry.S 107 * Update process times based on virtual cpu times stored by entry.S
128 * to the lowcore fields user_timer, system_timer & steal_clock. 108 * to the lowcore fields user_timer, system_timer & steal_clock.
129 */ 109 */
130void vtime_account(struct task_struct *tsk) 110void account_system_vtime(struct task_struct *tsk)
131{ 111{
132 struct thread_info *ti = task_thread_info(tsk); 112 struct thread_info *ti = task_thread_info(tsk);
133 u64 timer, system; 113 __u64 timer, system;
134
135 WARN_ON_ONCE(!irqs_disabled());
136 114
137 timer = S390_lowcore.last_update_timer; 115 timer = S390_lowcore.last_update_timer;
138 S390_lowcore.last_update_timer = get_vtimer(); 116 S390_lowcore.last_update_timer = get_vtimer();
@@ -142,61 +120,153 @@ void vtime_account(struct task_struct *tsk)
142 S390_lowcore.steal_timer -= system; 120 S390_lowcore.steal_timer -= system;
143 ti->system_timer = S390_lowcore.system_timer; 121 ti->system_timer = S390_lowcore.system_timer;
144 account_system_time(tsk, 0, system, system); 122 account_system_time(tsk, 0, system, system);
145
146 virt_timer_forward(system);
147} 123}
148EXPORT_SYMBOL_GPL(vtime_account); 124EXPORT_SYMBOL_GPL(account_system_vtime);
149 125
150void vtime_account_system(struct task_struct *tsk) 126void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
151__attribute__((alias("vtime_account")));
152EXPORT_SYMBOL_GPL(vtime_account_system);
153
154void __kprobes vtime_stop_cpu(void)
155{ 127{
156 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 128 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
157 unsigned long long idle_time; 129 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
158 unsigned long psw_mask; 130 __u64 idle_time, expires;
159 131
160 trace_hardirqs_on(); 132 if (idle->idle_enter == 0ULL)
161 /* Don't trace preempt off for idle. */ 133 return;
162 stop_critical_timings();
163
164 /* Wait for external, I/O or machine check interrupt. */
165 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
166 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
167 idle->nohz_delay = 0;
168
169 /* Call the assembler magic in entry.S */
170 psw_idle(idle, psw_mask);
171
172 /* Reenable preemption tracer. */
173 start_critical_timings();
174 134
175 /* Account time spent with enabled wait psw loaded as idle time. */ 135 /* Account time spent with enabled wait psw loaded as idle time. */
136 idle_time = int_clock - idle->idle_enter;
137 account_idle_time(idle_time);
138 S390_lowcore.steal_timer +=
139 idle->idle_enter - S390_lowcore.last_update_clock;
140 S390_lowcore.last_update_clock = int_clock;
141
142 /* Account system time spent going idle. */
143 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
144 S390_lowcore.last_update_timer = enter_timer;
145
146 /* Restart vtime CPU timer */
147 if (vq->do_spt) {
148 /* Program old expire value but first save progress. */
149 expires = vq->idle - enter_timer;
150 expires += get_vtimer();
151 set_vtimer(expires);
152 } else {
153 /* Don't account the CPU timer delta while the cpu was idle. */
154 vq->elapsed -= vq->idle - enter_timer;
155 }
156
176 idle->sequence++; 157 idle->sequence++;
177 smp_wmb(); 158 smp_wmb();
178 idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
179 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
180 idle->idle_time += idle_time; 159 idle->idle_time += idle_time;
160 idle->idle_enter = 0ULL;
181 idle->idle_count++; 161 idle->idle_count++;
182 account_idle_time(idle_time);
183 smp_wmb(); 162 smp_wmb();
184 idle->sequence++; 163 idle->sequence++;
185} 164}
186 165
166void __kprobes vtime_stop_cpu(void)
167{
168 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
169 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
170 psw_t psw;
171
172 /* Wait for external, I/O or machine check interrupt. */
173 psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;
174
175 idle->nohz_delay = 0;
176
177 /* Check if the CPU timer needs to be reprogrammed. */
178 if (vq->do_spt) {
179 __u64 vmax = VTIMER_MAX_SLICE;
180 /*
181 * The inline assembly is equivalent to
182 * vq->idle = get_cpu_timer();
183 * set_cpu_timer(VTIMER_MAX_SLICE);
184 * idle->idle_enter = get_clock();
185 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
186 * PSW_MASK_IO | PSW_MASK_EXT);
187 * The difference is that the inline assembly makes sure that
188 * the last three instruction are stpt, stck and lpsw in that
189 * order. This is done to increase the precision.
190 */
191 asm volatile(
192#ifndef CONFIG_64BIT
193 " basr 1,0\n"
194 "0: ahi 1,1f-0b\n"
195 " st 1,4(%2)\n"
196#else /* CONFIG_64BIT */
197 " larl 1,1f\n"
198 " stg 1,8(%2)\n"
199#endif /* CONFIG_64BIT */
200 " stpt 0(%4)\n"
201 " spt 0(%5)\n"
202 " stck 0(%3)\n"
203#ifndef CONFIG_64BIT
204 " lpsw 0(%2)\n"
205#else /* CONFIG_64BIT */
206 " lpswe 0(%2)\n"
207#endif /* CONFIG_64BIT */
208 "1:"
209 : "=m" (idle->idle_enter), "=m" (vq->idle)
210 : "a" (&psw), "a" (&idle->idle_enter),
211 "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
212 : "memory", "cc", "1");
213 } else {
214 /*
215 * The inline assembly is equivalent to
216 * vq->idle = get_cpu_timer();
217 * idle->idle_enter = get_clock();
218 * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
219 * PSW_MASK_IO | PSW_MASK_EXT);
220 * The difference is that the inline assembly makes sure that
221 * the last three instruction are stpt, stck and lpsw in that
222 * order. This is done to increase the precision.
223 */
224 asm volatile(
225#ifndef CONFIG_64BIT
226 " basr 1,0\n"
227 "0: ahi 1,1f-0b\n"
228 " st 1,4(%2)\n"
229#else /* CONFIG_64BIT */
230 " larl 1,1f\n"
231 " stg 1,8(%2)\n"
232#endif /* CONFIG_64BIT */
233 " stpt 0(%4)\n"
234 " stck 0(%3)\n"
235#ifndef CONFIG_64BIT
236 " lpsw 0(%2)\n"
237#else /* CONFIG_64BIT */
238 " lpswe 0(%2)\n"
239#endif /* CONFIG_64BIT */
240 "1:"
241 : "=m" (idle->idle_enter), "=m" (vq->idle)
242 : "a" (&psw), "a" (&idle->idle_enter),
243 "a" (&vq->idle), "m" (psw)
244 : "memory", "cc", "1");
245 }
246}
247
187cputime64_t s390_get_idle_time(int cpu) 248cputime64_t s390_get_idle_time(int cpu)
188{ 249{
189 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); 250 struct s390_idle_data *idle;
190 unsigned long long now, idle_enter, idle_exit; 251 unsigned long long now, idle_time, idle_enter;
191 unsigned int sequence; 252 unsigned int sequence;
192 253
193 do { 254 idle = &per_cpu(s390_idle, cpu);
194 now = get_clock(); 255
195 sequence = ACCESS_ONCE(idle->sequence); 256 now = get_clock();
196 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 257repeat:
197 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 258 sequence = idle->sequence;
198 } while ((sequence & 1) || (idle->sequence != sequence)); 259 smp_rmb();
199 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0; 260 if (sequence & 1)
261 goto repeat;
262 idle_time = 0;
263 idle_enter = idle->idle_enter;
264 if (idle_enter != 0ULL && idle_enter < now)
265 idle_time = now - idle_enter;
266 smp_rmb();
267 if (idle->sequence != sequence)
268 goto repeat;
269 return idle_time;
200} 270}
201 271
202/* 272/*
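
Both versions of s390_get_idle_time() read the per-cpu idle data locklessly: the writer increments ->sequence before and after each update, so the counter is odd while an update is in flight, and the reader retries until it sees the same even value on both sides of its loads. A condensed sketch of the reader (field names follow the patch; barriers as on the old side):

static unsigned long long read_idle_enter(struct s390_idle_data *idle)
{
	unsigned long long idle_enter;
	unsigned int seq;

	do {
		seq = idle->sequence;
		smp_rmb();		/* order the seq load before the data */
		idle_enter = idle->idle_enter;
		smp_rmb();		/* order the data before the re-check */
	} while ((seq & 1) || idle->sequence != seq);
	return idle_enter;
}
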
@@ -205,11 +275,11 @@ cputime64_t s390_get_idle_time(int cpu)
205 */ 275 */
206static void list_add_sorted(struct vtimer_list *timer, struct list_head *head) 276static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
207{ 277{
208 struct vtimer_list *tmp; 278 struct vtimer_list *event;
209 279
210 list_for_each_entry(tmp, head, entry) { 280 list_for_each_entry(event, head, entry) {
211 if (tmp->expires > timer->expires) { 281 if (event->expires > timer->expires) {
212 list_add_tail(&timer->entry, &tmp->entry); 282 list_add_tail(&timer->entry, &event->entry);
213 return; 283 return;
214 } 284 }
215 } 285 }
@@ -217,47 +287,86 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
217} 287}
218 288
219/* 289/*
220 * Handler for expired virtual CPU timer. 290 * Do the callback functions of expired vtimer events.
291 * Called from within the interrupt handler.
221 */ 292 */
222static void virt_timer_expire(void) 293static void do_callbacks(struct list_head *cb_list)
223{ 294{
224 struct vtimer_list *timer, *tmp; 295 struct vtimer_queue *vq;
225 unsigned long elapsed; 296 struct vtimer_list *event, *tmp;
226 LIST_HEAD(cb_list); 297
227 298 if (list_empty(cb_list))
228 /* walk timer list, fire all expired timers */ 299 return;
229 spin_lock(&virt_timer_lock); 300
230 elapsed = atomic64_read(&virt_timer_elapsed); 301 vq = &__get_cpu_var(virt_cpu_timer);
231 list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) { 302
232 if (timer->expires < elapsed) 303 list_for_each_entry_safe(event, tmp, cb_list, entry) {
233 /* move expired timer to the callback queue */ 304 list_del_init(&event->entry);
234 list_move_tail(&timer->entry, &cb_list); 305 (event->function)(event->data);
235 else 306 if (event->interval) {
236 timer->expires -= elapsed;
237 }
238 if (!list_empty(&virt_timer_list)) {
239 timer = list_first_entry(&virt_timer_list,
240 struct vtimer_list, entry);
241 atomic64_set(&virt_timer_current, timer->expires);
242 }
243 atomic64_sub(elapsed, &virt_timer_elapsed);
244 spin_unlock(&virt_timer_lock);
245
246 /* Do callbacks and recharge periodic timers */
247 list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
248 list_del_init(&timer->entry);
249 timer->function(timer->data);
250 if (timer->interval) {
251 /* Recharge interval timer */ 307 /* Recharge interval timer */
252 timer->expires = timer->interval + 308 event->expires = event->interval + vq->elapsed;
253 atomic64_read(&virt_timer_elapsed); 309 spin_lock(&vq->lock);
254 spin_lock(&virt_timer_lock); 310 list_add_sorted(event, &vq->list);
255 list_add_sorted(timer, &virt_timer_list); 311 spin_unlock(&vq->lock);
256 spin_unlock(&virt_timer_lock);
257 } 312 }
258 } 313 }
259} 314}
260 315
316/*
317 * Handler for the virtual CPU timer.
318 */
319static void do_cpu_timer_interrupt(unsigned int ext_int_code,
320 unsigned int param32, unsigned long param64)
321{
322 struct vtimer_queue *vq;
323 struct vtimer_list *event, *tmp;
324 struct list_head cb_list; /* the callback queue */
325 __u64 elapsed, next;
326
327 kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
328 INIT_LIST_HEAD(&cb_list);
329 vq = &__get_cpu_var(virt_cpu_timer);
330
331 /* walk timer list, fire all expired events */
332 spin_lock(&vq->lock);
333
334 elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
335 BUG_ON((s64) elapsed < 0);
336 vq->elapsed = 0;
337 list_for_each_entry_safe(event, tmp, &vq->list, entry) {
338 if (event->expires < elapsed)
339 /* move expired timer to the callback queue */
340 list_move_tail(&event->entry, &cb_list);
341 else
342 event->expires -= elapsed;
343 }
344 spin_unlock(&vq->lock);
345
346 vq->do_spt = list_empty(&cb_list);
347 do_callbacks(&cb_list);
348
349 /* next event is first in list */
350 next = VTIMER_MAX_SLICE;
351 spin_lock(&vq->lock);
352 if (!list_empty(&vq->list)) {
353 event = list_first_entry(&vq->list, struct vtimer_list, entry);
354 next = event->expires;
355 } else
356 vq->do_spt = 0;
357 spin_unlock(&vq->lock);
358 /*
359 * To improve precision add the time spent by the
360 * interrupt handler to the elapsed time.
361 * Note: CPU timer counts down and we got an interrupt,
362 * the current content is negative
363 */
364 elapsed = S390_lowcore.async_enter_timer - get_vtimer();
365 set_vtimer(next - elapsed);
366 vq->timer = next - elapsed;
367 vq->elapsed = elapsed;
368}
369
261void init_virt_timer(struct vtimer_list *timer) 370void init_virt_timer(struct vtimer_list *timer)
262{ 371{
263 timer->function = NULL; 372 timer->function = NULL;
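
The bookkeeping in do_cpu_timer_interrupt() follows from the CPU timer counting down: vq->timer holds the value that was programmed, S390_lowcore.async_enter_timer the value sampled on interrupt entry, so their difference is the time actually consumed, on top of whatever vq->elapsed had already accumulated. For example, with vq->elapsed == 0, vq->timer == 1000 and an entry value of 200, elapsed comes to 800: an event with expires 500 moves to the callback list and fires, while one with expires 900 stays queued with 100 remaining.
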
@@ -267,108 +376,179 @@ EXPORT_SYMBOL(init_virt_timer);
267 376
268static inline int vtimer_pending(struct vtimer_list *timer) 377static inline int vtimer_pending(struct vtimer_list *timer)
269{ 378{
270 return !list_empty(&timer->entry); 379 return (!list_empty(&timer->entry));
271} 380}
272 381
382/*
383 * this function should only run on the specified CPU
384 */
273static void internal_add_vtimer(struct vtimer_list *timer) 385static void internal_add_vtimer(struct vtimer_list *timer)
274{ 386{
275 if (list_empty(&virt_timer_list)) { 387 struct vtimer_queue *vq;
276 /* First timer, just program it. */ 388 unsigned long flags;
277 atomic64_set(&virt_timer_current, timer->expires); 389 __u64 left, expires;
278 atomic64_set(&virt_timer_elapsed, 0); 390
279 list_add(&timer->entry, &virt_timer_list); 391 vq = &per_cpu(virt_cpu_timer, timer->cpu);
392 spin_lock_irqsave(&vq->lock, flags);
393
394 BUG_ON(timer->cpu != smp_processor_id());
395
396 if (list_empty(&vq->list)) {
397 /* First timer on this cpu, just program it. */
398 list_add(&timer->entry, &vq->list);
399 set_vtimer(timer->expires);
400 vq->timer = timer->expires;
401 vq->elapsed = 0;
280 } else { 402 } else {
281 /* Update timer against current base. */ 403 /* Check progress of old timers. */
282 timer->expires += atomic64_read(&virt_timer_elapsed); 404 expires = timer->expires;
283 if (likely((s64) timer->expires < 405 left = get_vtimer();
284 (s64) atomic64_read(&virt_timer_current))) 406 if (likely((s64) expires < (s64) left)) {
285 /* The new timer expires before the current timer. */ 407 /* The new timer expires before the current timer. */
286 atomic64_set(&virt_timer_current, timer->expires); 408 set_vtimer(expires);
287 /* Insert new timer into the list. */ 409 vq->elapsed += vq->timer - left;
288 list_add_sorted(timer, &virt_timer_list); 410 vq->timer = expires;
411 } else {
412 vq->elapsed += vq->timer - left;
413 vq->timer = left;
414 }
415 /* Insert new timer into per cpu list. */
416 timer->expires += vq->elapsed;
417 list_add_sorted(timer, &vq->list);
289 } 418 }
419
420 spin_unlock_irqrestore(&vq->lock, flags);
421 /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
422 put_cpu();
290} 423}
291 424
292static void __add_vtimer(struct vtimer_list *timer, int periodic) 425static inline void prepare_vtimer(struct vtimer_list *timer)
293{ 426{
294 unsigned long flags; 427 BUG_ON(!timer->function);
295 428 BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
296 timer->interval = periodic ? timer->expires : 0; 429 BUG_ON(vtimer_pending(timer));
297 spin_lock_irqsave(&virt_timer_lock, flags); 430 timer->cpu = get_cpu();
298 internal_add_vtimer(timer);
299 spin_unlock_irqrestore(&virt_timer_lock, flags);
300} 431}
301 432
302/* 433/*
303 * add_virt_timer - add an oneshot virtual CPU timer 434 * add_virt_timer - add an oneshot virtual CPU timer
304 */ 435 */
305void add_virt_timer(struct vtimer_list *timer) 436void add_virt_timer(void *new)
306{ 437{
307 __add_vtimer(timer, 0); 438 struct vtimer_list *timer;
439
440 timer = (struct vtimer_list *)new;
441 prepare_vtimer(timer);
442 timer->interval = 0;
443 internal_add_vtimer(timer);
308} 444}
309EXPORT_SYMBOL(add_virt_timer); 445EXPORT_SYMBOL(add_virt_timer);
310 446
311/* 447/*
312 * add_virt_timer_int - add an interval virtual CPU timer 448 * add_virt_timer_int - add an interval virtual CPU timer
313 */ 449 */
314void add_virt_timer_periodic(struct vtimer_list *timer) 450void add_virt_timer_periodic(void *new)
315{ 451{
316 __add_vtimer(timer, 1); 452 struct vtimer_list *timer;
453
454 timer = (struct vtimer_list *)new;
455 prepare_vtimer(timer);
456 timer->interval = timer->expires;
457 internal_add_vtimer(timer);
317} 458}
318EXPORT_SYMBOL(add_virt_timer_periodic); 459EXPORT_SYMBOL(add_virt_timer_periodic);
319 460
320static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic) 461int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
321{ 462{
463 struct vtimer_queue *vq;
322 unsigned long flags; 464 unsigned long flags;
323 int rc; 465 int cpu;
324 466
325 BUG_ON(!timer->function); 467 BUG_ON(!timer->function);
468 BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
326 469
327 if (timer->expires == expires && vtimer_pending(timer)) 470 if (timer->expires == expires && vtimer_pending(timer))
328 return 1; 471 return 1;
329 spin_lock_irqsave(&virt_timer_lock, flags); 472
330 rc = vtimer_pending(timer); 473 cpu = get_cpu();
331 if (rc) 474 vq = &per_cpu(virt_cpu_timer, cpu);
332 list_del_init(&timer->entry); 475
333 timer->interval = periodic ? expires : 0; 476 /* disable interrupts before test if timer is pending */
477 spin_lock_irqsave(&vq->lock, flags);
478
479 /* if timer isn't pending add it on the current CPU */
480 if (!vtimer_pending(timer)) {
481 spin_unlock_irqrestore(&vq->lock, flags);
482
483 if (periodic)
484 timer->interval = expires;
485 else
486 timer->interval = 0;
487 timer->expires = expires;
488 timer->cpu = cpu;
489 internal_add_vtimer(timer);
490 return 0;
491 }
492
493 /* check if we run on the right CPU */
494 BUG_ON(timer->cpu != cpu);
495
496 list_del_init(&timer->entry);
334 timer->expires = expires; 497 timer->expires = expires;
498 if (periodic)
499 timer->interval = expires;
500
501 /* the timer can't expire anymore so we can release the lock */
502 spin_unlock_irqrestore(&vq->lock, flags);
335 internal_add_vtimer(timer); 503 internal_add_vtimer(timer);
336 spin_unlock_irqrestore(&virt_timer_lock, flags); 504 return 1;
337 return rc;
338} 505}
339 506
340/* 507/*
508 * If we change a pending timer the function must be called on the CPU
509 * where the timer is running on.
510 *
341 * returns whether it has modified a pending timer (1) or not (0) 511 * returns whether it has modified a pending timer (1) or not (0)
342 */ 512 */
343int mod_virt_timer(struct vtimer_list *timer, u64 expires) 513int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
344{ 514{
345 return __mod_vtimer(timer, expires, 0); 515 return __mod_vtimer(timer, expires, 0);
346} 516}
347EXPORT_SYMBOL(mod_virt_timer); 517EXPORT_SYMBOL(mod_virt_timer);
348 518
349/* 519/*
520 * If we change a pending timer the function must be called on the CPU
521 * where the timer is running on.
522 *
350 * returns whether it has modified a pending timer (1) or not (0) 523 * returns whether it has modified a pending timer (1) or not (0)
351 */ 524 */
352int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires) 525int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
353{ 526{
354 return __mod_vtimer(timer, expires, 1); 527 return __mod_vtimer(timer, expires, 1);
355} 528}
356EXPORT_SYMBOL(mod_virt_timer_periodic); 529EXPORT_SYMBOL(mod_virt_timer_periodic);
357 530
358/* 531/*
359 * Delete a virtual timer. 532 * delete a virtual timer
360 * 533 *
361 * returns whether the deleted timer was pending (1) or not (0) 534 * returns whether the deleted timer was pending (1) or not (0)
362 */ 535 */
363int del_virt_timer(struct vtimer_list *timer) 536int del_virt_timer(struct vtimer_list *timer)
364{ 537{
365 unsigned long flags; 538 unsigned long flags;
539 struct vtimer_queue *vq;
366 540
541 /* check if timer is pending */
367 if (!vtimer_pending(timer)) 542 if (!vtimer_pending(timer))
368 return 0; 543 return 0;
369 spin_lock_irqsave(&virt_timer_lock, flags); 544
545 vq = &per_cpu(virt_cpu_timer, timer->cpu);
546 spin_lock_irqsave(&vq->lock, flags);
547
548 /* we don't interrupt a running timer, just let it expire! */
370 list_del_init(&timer->entry); 549 list_del_init(&timer->entry);
371 spin_unlock_irqrestore(&virt_timer_lock, flags); 550
551 spin_unlock_irqrestore(&vq->lock, flags);
372 return 1; 552 return 1;
373} 553}
374EXPORT_SYMBOL(del_virt_timer); 554EXPORT_SYMBOL(del_virt_timer);
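
A typical one-shot use of the restored API, for illustration only: initialize the vtimer, fill in function, data and expires, then arm it on the current CPU. The callback signature matches the (event->function)(event->data) call in do_callbacks(); expires is in CPU-timer units and, per prepare_vtimer(), must be non-zero and below VTIMER_MAX_SLICE. Field names follow struct vtimer_list as used in this file:

static void sample_fn(unsigned long data)
{
	/* runs from the CPU-timer external interrupt handler */
}

static struct vtimer_list sample_timer;

static void arm_sample_timer(void)
{
	init_virt_timer(&sample_timer);
	sample_timer.function = sample_fn;
	sample_timer.data = 0;
	sample_timer.expires = 10000;	/* CPU-timer units, illustrative */
	add_virt_timer(&sample_timer);
}
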
@@ -376,10 +556,17 @@ EXPORT_SYMBOL(del_virt_timer);
376/* 556/*
377 * Start the virtual CPU timer on the current CPU. 557 * Start the virtual CPU timer on the current CPU.
378 */ 558 */
379void __cpuinit init_cpu_vtimer(void) 559void init_cpu_vtimer(void)
380{ 560{
381 /* set initial cpu timer */ 561 struct vtimer_queue *vq;
382 set_vtimer(VTIMER_MAX_SLICE); 562
563 /* initialize per cpu vtimer structure */
564 vq = &__get_cpu_var(virt_cpu_timer);
565 INIT_LIST_HEAD(&vq->list);
566 spin_lock_init(&vq->lock);
567
568 /* enable cpu timer interrupts */
569 __ctl_set_bit(0,10);
383} 570}
384 571
385static int __cpuinit s390_nohz_notify(struct notifier_block *self, 572static int __cpuinit s390_nohz_notify(struct notifier_block *self,
@@ -389,8 +576,9 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
389 long cpu = (long) hcpu; 576 long cpu = (long) hcpu;
390 577
391 idle = &per_cpu(s390_idle, cpu); 578 idle = &per_cpu(s390_idle, cpu);
392 switch (action & ~CPU_TASKS_FROZEN) { 579 switch (action) {
393 case CPU_DYING: 580 case CPU_DYING:
581 case CPU_DYING_FROZEN:
394 idle->nohz_delay = 0; 582 idle->nohz_delay = 0;
395 default: 583 default:
396 break; 584 break;
@@ -400,7 +588,12 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
400 588
401void __init vtime_init(void) 589void __init vtime_init(void)
402{ 590{
591 /* request the cpu timer external interrupt */
592 if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
593 panic("Couldn't request external interrupt 0x1005");
594
403 /* Enable cpu timer interrupts on the boot cpu. */ 595 /* Enable cpu timer interrupts on the boot cpu. */
404 init_cpu_vtimer(); 596 init_cpu_vtimer();
405 cpu_notifier(s390_nohz_notify, 0); 597 cpu_notifier(s390_nohz_notify, 0);
406} 598}
599
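
Getting the vtimer running is thus a two-step enablement, both visible in the last hunks: vtime_init() registers do_cpu_timer_interrupt() for external-interrupt code 0x1005 and panics if that fails, since process-time accounting cannot work without it, and init_cpu_vtimer(), run here on the boot CPU and on each secondary as it is brought online, initializes the per-cpu queue and sets control-register bit (0,10) so the CPU actually presents CPU-timer interrupts.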