Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile                 |   3
-rw-r--r--  arch/s390/kernel/asm-offsets.c            | 104
-rw-r--r--  arch/s390/kernel/base.S                   |   2
-rw-r--r--  arch/s390/kernel/compat_linux.c           |   2
-rw-r--r--  arch/s390/kernel/compat_wrapper.S         |   2
-rw-r--r--  arch/s390/kernel/debug.c                  |   1
-rw-r--r--  arch/s390/kernel/dis.c                    | 369
-rw-r--r--  arch/s390/kernel/early.c                  |  29
-rw-r--r--  arch/s390/kernel/entry.S                  | 329
-rw-r--r--  arch/s390/kernel/entry.h                  |  10
-rw-r--r--  arch/s390/kernel/entry64.S                | 623
-rw-r--r--  arch/s390/kernel/ftrace.c                 |  12
-rw-r--r--  arch/s390/kernel/head.S                   |  67
-rw-r--r--  arch/s390/kernel/head31.S                 |  18
-rw-r--r--  arch/s390/kernel/head64.S                 |  96
-rw-r--r--  arch/s390/kernel/ipl.c                    |  56
-rw-r--r--  arch/s390/kernel/kprobes.c                |   4
-rw-r--r--  arch/s390/kernel/machine_kexec.c          |  10
-rw-r--r--  arch/s390/kernel/module.c                 |   6
-rw-r--r--  arch/s390/kernel/nmi.c                    |   3
-rw-r--r--  arch/s390/kernel/process.c                |   2
-rw-r--r--  arch/s390/kernel/processor.c              |  37
-rw-r--r--  arch/s390/kernel/ptrace.c                 | 131
-rw-r--r--  arch/s390/kernel/reipl.S                  |   2
-rw-r--r--  arch/s390/kernel/reipl64.S                |   2
-rw-r--r--  arch/s390/kernel/s390_ext.c               |   3
-rw-r--r--  arch/s390/kernel/sclp.S                   |  38
-rw-r--r--  arch/s390/kernel/setup.c                  |  41
-rw-r--r--  arch/s390/kernel/signal.c                 |   2
-rw-r--r--  arch/s390/kernel/smp.c                    | 131
-rw-r--r--  arch/s390/kernel/switch_cpu.S             |  58
-rw-r--r--  arch/s390/kernel/switch_cpu64.S           |  51
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S           |   5
-rw-r--r--  arch/s390/kernel/sys_s390.c               |  43
-rw-r--r--  arch/s390/kernel/syscalls.S               |   6
-rw-r--r--  arch/s390/kernel/sysinfo.c                |   1
-rw-r--r--  arch/s390/kernel/time.c                   |  88
-rw-r--r--  arch/s390/kernel/topology.c               |  10
-rw-r--r--  arch/s390/kernel/traps.c                  |  31
-rw-r--r--  arch/s390/kernel/vdso.c                   |   5
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S   |  12
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S    |   6
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S   |   4
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S    |   2
-rw-r--r--  arch/s390/kernel/vtime.c                  |  15
45 files changed, 1481 insertions(+), 991 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 683f6381cc59..64230bc392fa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -29,9 +29,12 @@ obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
 
 extra-y += head.o init_task.o vmlinux.lds
+extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
 
 obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
 obj-$(CONFIG_SMP) += smp.o topology.o
+obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \
+			switch_cpu.o)
 obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
 obj-$(CONFIG_AUDIT) += audit.o
 compat-obj-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 63e46433e81d..5232278d79ad 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -4,18 +4,27 @@
  * and format the required data.
  */
 
-#include <linux/sched.h>
+#define ASM_OFFSETS_C
+
 #include <linux/kbuild.h>
+#include <linux/sched.h>
 #include <asm/vdso.h>
 #include <asm/sigp.h>
 
+/*
+ * Make sure that the compiler is new enough. We want a compiler that
+ * is known to work with the "Q" assembler constraint.
+ */
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#error Your compiler is too old; please use version 3.3.3 or newer
+#endif
+
 int main(void)
 {
 	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
 	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
 	DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
-	DEFINE(__THREAD_mm_segment,
-	       offsetof(struct task_struct, thread.mm_segment));
+	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
 	BLANK();
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
@@ -30,6 +39,7 @@ int main(void)
 	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
 	DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
 	DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
+	DEFINE(__TI_last_break, offsetof(struct thread_info, last_break));
 	BLANK();
 	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
 	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
@@ -52,18 +62,98 @@ int main(void)
 	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
 	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
 	DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
-	DEFINE(__VDSO_ECTG_BASE,
-	       offsetof(struct vdso_per_cpu_data, ectg_timer_base));
-	DEFINE(__VDSO_ECTG_USER,
-	       offsetof(struct vdso_per_cpu_data, ectg_user_time));
+	DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
+	DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
+	DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
 	/* constants used by the vdso */
 	DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
 	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
 	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+	BLANK();
 	/* constants for SIGP */
 	DEFINE(__SIGP_STOP, sigp_stop);
 	DEFINE(__SIGP_RESTART, sigp_restart);
 	DEFINE(__SIGP_SENSE, sigp_sense);
 	DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
+	BLANK();
+	/* lowcore offsets */
+	DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
+	DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr));
+	DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
+	DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
+	DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
+	DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
+	DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
+	DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
+	DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
+	DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
+	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
+	DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
+	DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
+	DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
+	DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
+	DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
+	DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
+	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
+	BLANK();
+	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
+	DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
+	DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
+	DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
+	DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
+	DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
+	DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
+	DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
+	DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
+	DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
+	DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
+	DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
+	DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
+	DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
+	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
+	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
+	DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
+	DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer));
+	DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
+	DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
+	DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
+	DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer));
+	DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
+	DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
+	DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
+	DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
+	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
+	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
+	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+	DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
+	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
+	DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
+	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
+	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
+	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
+	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
+	DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
+	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
+	DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
+	DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
+	DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area));
+	DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area));
+	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
+	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
+	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
+#ifdef CONFIG_32BIT
+	DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params));
+	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
+#else /* CONFIG_32BIT */
+	DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2));
+	DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
+	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
+	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
+	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
+	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
+	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
+	DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
+	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
+#endif /* CONFIG_32BIT */
 	return 0;
 }
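
The asm-offsets.c change above is what lets the assembly files touched later in this patch (base.S, entry.S, entry64.S) pull their lowcore and thread_info offsets from the generated <asm/asm-offsets.h> instead of <asm/lowcore.h>. As a rough sketch of the mechanism, assuming the DEFINE()/BLANK() helpers behave like the ones in include/linux/kbuild.h and using a stand-in struct rather than the real struct _lowcore: compiling a file like the one below with gcc -S emits "->SYMBOL value" marker lines, which Kbuild's sed rule rewrites into #define lines in asm-offsets.h.

#include <stddef.h>

/* modelled on the helpers in include/linux/kbuild.h */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )

struct fake_lowcore {			/* illustrative stand-in */
	unsigned int ext_params;
	unsigned short cpu_addr;
};

int main(void)
{
	DEFINE(__LC_EXT_PARAMS, offsetof(struct fake_lowcore, ext_params));
	DEFINE(__LC_CPU_ADDRESS, offsetof(struct fake_lowcore, cpu_addr));
	BLANK();
	return 0;
}

Each emitted marker becomes a plain #define that the assembly entry code can use directly.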
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index dc7e5259770f..15e46ca94335 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -6,8 +6,8 @@
  * Michael Holzheu <holzheu@de.ibm.com>
  */
 
+#include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
-#include <asm/lowcore.h>
 
 #ifdef CONFIG_64BIT
 
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 11c3aba664ea..73b624ed9cd8 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -29,7 +29,6 @@
 #include <linux/sem.h>
 #include <linux/msg.h>
 #include <linux/shm.h>
-#include <linux/slab.h>
 #include <linux/uio.h>
 #include <linux/quota.h>
 #include <linux/module.h>
@@ -52,6 +51,7 @@
 #include <linux/ptrace.h>
 #include <linux/fadvise.h>
 #include <linux/ipc.h>
+#include <linux/slab.h>
 
 #include <asm/types.h>
 #include <asm/uaccess.h>
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 30de2d0e52bb..672ce52341b4 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -547,7 +547,7 @@ sys32_setdomainname_wrapper:
 	.globl	sys32_newuname_wrapper
 sys32_newuname_wrapper:
 	llgtr	%r2,%r2			# struct new_utsname *
-	jg	sys_s390_newuname	# branch to system call
+	jg	sys_newuname		# branch to system call
 
 	.globl	compat_sys_adjtimex_wrapper
 compat_sys_adjtimex_wrapper:
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 0168472b2fdf..98192261491d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -655,6 +655,7 @@ found:
 	p_info->act_entry_offset = 0;
 	file->private_data = p_info;
 	debug_info_get(debug_info);
+	nonseekable_open(inode, file);
 out:
 	mutex_unlock(&debug_mutex);
 	return rc;
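
The single added call in debug.c marks the debug-feature files as non-seekable at open time. In a driver's own open handler the same pattern typically looks like the sketch below; my_open and the private-data line are illustrative, while nonseekable_open() is the stock VFS helper that clears the seek-related file mode bits and returns 0.

#include <linux/fs.h>

static int my_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;	/* typical debugfs-style setup */
	return nonseekable_open(inode, file);	/* no llseek/pread/pwrite */
}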
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index db943a7ec513..b39b27d68b45 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -86,10 +86,17 @@ enum {
 	U4_12,	/* 4 bit unsigned value starting at 12 */
 	U4_16,	/* 4 bit unsigned value starting at 16 */
 	U4_20,	/* 4 bit unsigned value starting at 20 */
+	U4_32,	/* 4 bit unsigned value starting at 32 */
 	U8_8,	/* 8 bit unsigned value starting at 8 */
 	U8_16,	/* 8 bit unsigned value starting at 16 */
+	U8_24,	/* 8 bit unsigned value starting at 24 */
+	U8_32,	/* 8 bit unsigned value starting at 32 */
+	I8_8,	/* 8 bit signed value starting at 8 */
+	I8_32,	/* 8 bit signed value starting at 32 */
 	I16_16,	/* 16 bit signed value starting at 16 */
+	I16_32,	/* 32 bit signed value starting at 16 */
 	U16_16,	/* 16 bit unsigned value starting at 16 */
+	U16_32,	/* 32 bit unsigned value starting at 16 */
 	J16_16,	/* PC relative jump offset at 16 */
 	J32_16,	/* PC relative long offset at 16 */
 	I32_16,	/* 32 bit signed value starting at 16 */
@@ -104,21 +111,37 @@ enum {
  */
 enum {
 	INSTR_INVALID,
-	INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU,
-	INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
+	INSTR_E,
+	INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
+	INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU,
+	INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
+	INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU,
+	INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
 	INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
-	INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR,
-	INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR,
-	INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
+	INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
+	INSTR_RRE_RR, INSTR_RRE_RR_OPT,
+	INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
+	INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
+	INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR,
+	INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
 	INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
-	INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP,
-	INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD,
-	INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD,
-	INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD,
-	INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD,
-	INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD,
-	INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD,
-	INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
+	INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
+	INSTR_RSI_RRP,
+	INSTR_RSL_R0RD,
+	INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
+	INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
+	INSTR_RS_RURD,
+	INSTR_RXE_FRRD, INSTR_RXE_RRRD,
+	INSTR_RXF_FRRDF,
+	INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
+	INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
+	INSTR_SIL_RDI, INSTR_SIL_RDU,
+	INSTR_SIY_IRD, INSTR_SIY_URD,
+	INSTR_SI_URD,
+	INSTR_SSE_RDRD,
+	INSTR_SSF_RRDRD,
+	INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
+	INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
 	INSTR_S_00, INSTR_S_RD,
 };
 
@@ -129,7 +152,7 @@ struct operand {
 };
 
 struct insn {
-	const char name[5];
+	const char name[6];
 	unsigned char opfrag;
 	unsigned char format;
 };
@@ -170,11 +193,16 @@ static const struct operand operands[] =
 	[U4_12]  = {  4, 12, 0 },
 	[U4_16]  = {  4, 16, 0 },
 	[U4_20]  = {  4, 20, 0 },
+	[U4_32]  = {  4, 32, 0 },
 	[U8_8]   = {  8,  8, 0 },
 	[U8_16]  = {  8, 16, 0 },
+	[U8_24]  = {  8, 24, 0 },
+	[U8_32]  = {  8, 32, 0 },
 	[I16_16] = { 16, 16, OPERAND_SIGNED },
 	[U16_16] = { 16, 16, 0 },
+	[U16_32] = { 16, 32, 0 },
 	[J16_16] = { 16, 16, OPERAND_PCREL },
+	[I16_32] = { 16, 32, OPERAND_SIGNED },
 	[J32_16] = { 32, 16, OPERAND_PCREL },
 	[I32_16] = { 32, 16, OPERAND_SIGNED },
 	[U32_16] = { 32, 16, 0 },
@@ -183,82 +211,93 @@ static const struct operand operands[] =
 };
 
 static const unsigned char formats[][7] = {
-	[INSTR_E] = { 0xff, 0,0,0,0,0,0 },	/* e.g. pr */
-	[INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },	/* e.g. brxhg */
-	[INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },	/* e.g. brasl */
-	[INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 },	/* e.g. brcl */
-	[INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },	/* e.g. afi */
-	[INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },	/* e.g. alfi */
-	[INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 },	/* e.g. ahi */
-	[INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 },	/* e.g. brct */
-	[INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 },	/* e.g. tml */
-	[INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 },	/* e.g. brc */
-	[INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 },	/* e.g. palb */
-	[INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 },	/* e.g. tb */
-	[INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 },	/* e.g. cpya */
-	[INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 },	/* e.g. sar */
-	[INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 },	/* e.g. sqer */
-	[INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 },	/* e.g. debr */
-	[INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 },	/* e.g. ipm */
-	[INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 },	/* e.g. ear */
-	[INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 },	/* e.g. cefbr */
-	[INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 },	/* e.g. lura */
-	[INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 },	/* efpc, sfpc */
-	[INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 },	/* e.g. madbr */
-	[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },	/* e.g. didbr */
-	[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },	/* e.g. .insn */
-	[INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },	/* e.g. idte */
-	[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },	/* e.g. fixr */
-	[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },	/* e.g. cfebr */
-	[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },	/* e.g. sske */
-	[INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },	/* e.g. adr */
-	[INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 },	/* e.g. spm */
-	[INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 },	/* e.g. lr */
-	[INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 },	/* e.g. svc */
-	[INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 },	/* e.g. bcr */
-	[INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },	/* e.g. lmh */
-	[INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },	/* e.g. lmh */
-	[INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },	/* e.g. icmh */
-	[INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },	/* e.g. tp */
-	[INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },	/* e.g. brxh */
-	[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },	/* e.g. stmy */
+	[INSTR_E] = { 0xff, 0,0,0,0,0,0 },
+	[INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
+	[INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
+	[INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
+	[INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
+	[INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
+	[INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
+	[INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
+	[INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
+	[INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 },
+	[INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 },
+	[INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 },
+	[INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 },
+	[INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 },
+	[INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 },
+	[INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 },
+	[INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 },
+	[INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 },
+	[INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 },
+	[INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 },
+	[INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 },
+	[INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 },
+	[INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 },
+	[INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 },
+	[INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 },
+	[INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 },
+	[INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 },
+	[INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 },
+	[INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 },
+	[INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 },
+	[INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 },
+	[INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 },
+	[INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
+	[INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
+	[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
+	[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
+	[INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
+	[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
+	[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
+	[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
+	[INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
+	[INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
+	[INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
+	[INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
+	[INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
+	[INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 },
+	[INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 },
+	[INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 },
+	[INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 },
+	[INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
+	[INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
+	[INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
+	[INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
+	[INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
+	[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
+	[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
+	[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
-		/* e.g. icmh */
-	[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },	/* e.g. lamy */
-	[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },	/* e.g. lamy */
-	[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },	/* e.g. lam */
-	[INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },	/* e.g. lctl */
-	[INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },	/* e.g. sll */
-	[INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },	/* e.g. cs */
-	[INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },	/* e.g. icm */
-	[INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },	/* e.g. axbr */
-	[INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },	/* e.g. lg */
+	[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
+	[INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
+	[INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
+	[INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
+	[INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
+	[INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
+	[INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
 	[INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
-		/* e.g. madb */
-	[INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },	/* e.g. ly */
-	[INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },	/* e.g. ley */
-	[INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },	/* e.g. ae */
-	[INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },	/* e.g. l */
-	[INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 },	/* e.g. bc */
-	[INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },	/* e.g. cli */
-	[INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },	/* e.g. tmy */
-	[INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },	/* e.g. mvsdk */
+	[INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
+	[INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
+	[INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 },
+	[INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
+	[INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
+	[INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 },
+	[INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 },
+	[INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 },
+	[INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
+	[INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
+	[INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
+	[INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
+	[INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
 	[INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
-		/* e.g. mvc */
 	[INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
-		/* e.g. srp */
 	[INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
-		/* e.g. pack */
-	[INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
-		/* e.g. mvck */
 	[INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
-		/* e.g. plo */
 	[INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
-		/* e.g. lmd */
-	[INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },	/* e.g. hsch */
-	[INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },	/* e.g. lpsw */
-	[INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
-		/* e.g. mvcos */
+	[INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
+	[INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
+	[INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
 };
 
 static struct insn opcode[] = {
@@ -454,6 +493,8 @@ static struct insn opcode[] = {
 static struct insn opcode_01[] = {
 #ifdef CONFIG_64BIT
 	{ "sam64", 0x0e, INSTR_E },
+	{ "pfpo", 0x0a, INSTR_E },
+	{ "ptff", 0x04, INSTR_E },
 #endif
 	{ "pr", 0x01, INSTR_E },
 	{ "upt", 0x02, INSTR_E },
@@ -519,6 +560,8 @@ static struct insn opcode_b2[] = {
 	{ "cutfu", 0xa7, INSTR_RRF_M0RR },
 	{ "stfle", 0xb0, INSTR_S_RD },
 	{ "lpswe", 0xb2, INSTR_S_RD },
+	{ "srnmt", 0xb9, INSTR_S_RD },
+	{ "lfas", 0xbd, INSTR_S_RD },
 #endif
 	{ "stidp", 0x02, INSTR_S_RD },
 	{ "sck", 0x04, INSTR_S_RD },
@@ -589,7 +632,6 @@ static struct insn opcode_b2[] = {
 	{ "clst", 0x5d, INSTR_RRE_RR },
 	{ "srst", 0x5e, INSTR_RRE_RR },
 	{ "cmpsc", 0x63, INSTR_RRE_RR },
-	{ "cmpsc", 0x63, INSTR_RRE_RR },
 	{ "siga", 0x74, INSTR_S_RD },
 	{ "xsch", 0x76, INSTR_S_00 },
 	{ "rp", 0x77, INSTR_S_RD },
@@ -630,6 +672,57 @@ static struct insn opcode_b3[] = {
 	{ "cger", 0xc8, INSTR_RRF_U0RF },
 	{ "cgdr", 0xc9, INSTR_RRF_U0RF },
 	{ "cgxr", 0xca, INSTR_RRF_U0RF },
+	{ "lpdfr", 0x70, INSTR_RRE_FF },
+	{ "lndfr", 0x71, INSTR_RRE_FF },
+	{ "cpsdr", 0x72, INSTR_RRF_F0FF2 },
+	{ "lcdfr", 0x73, INSTR_RRE_FF },
+	{ "ldgr", 0xc1, INSTR_RRE_FR },
+	{ "lgdr", 0xcd, INSTR_RRE_RF },
+	{ "adtr", 0xd2, INSTR_RRR_F0FF },
+	{ "axtr", 0xda, INSTR_RRR_F0FF },
+	{ "cdtr", 0xe4, INSTR_RRE_FF },
+	{ "cxtr", 0xec, INSTR_RRE_FF },
+	{ "kdtr", 0xe0, INSTR_RRE_FF },
+	{ "kxtr", 0xe8, INSTR_RRE_FF },
+	{ "cedtr", 0xf4, INSTR_RRE_FF },
+	{ "cextr", 0xfc, INSTR_RRE_FF },
+	{ "cdgtr", 0xf1, INSTR_RRE_FR },
+	{ "cxgtr", 0xf9, INSTR_RRE_FR },
+	{ "cdstr", 0xf3, INSTR_RRE_FR },
+	{ "cxstr", 0xfb, INSTR_RRE_FR },
+	{ "cdutr", 0xf2, INSTR_RRE_FR },
+	{ "cxutr", 0xfa, INSTR_RRE_FR },
+	{ "cgdtr", 0xe1, INSTR_RRF_U0RF },
+	{ "cgxtr", 0xe9, INSTR_RRF_U0RF },
+	{ "csdtr", 0xe3, INSTR_RRE_RF },
+	{ "csxtr", 0xeb, INSTR_RRE_RF },
+	{ "cudtr", 0xe2, INSTR_RRE_RF },
+	{ "cuxtr", 0xea, INSTR_RRE_RF },
+	{ "ddtr", 0xd1, INSTR_RRR_F0FF },
+	{ "dxtr", 0xd9, INSTR_RRR_F0FF },
+	{ "eedtr", 0xe5, INSTR_RRE_RF },
+	{ "eextr", 0xed, INSTR_RRE_RF },
+	{ "esdtr", 0xe7, INSTR_RRE_RF },
+	{ "esxtr", 0xef, INSTR_RRE_RF },
+	{ "iedtr", 0xf6, INSTR_RRF_F0FR },
+	{ "iextr", 0xfe, INSTR_RRF_F0FR },
+	{ "ltdtr", 0xd6, INSTR_RRE_FF },
+	{ "ltxtr", 0xde, INSTR_RRE_FF },
+	{ "fidtr", 0xd7, INSTR_RRF_UUFF },
+	{ "fixtr", 0xdf, INSTR_RRF_UUFF },
+	{ "ldetr", 0xd4, INSTR_RRF_0UFF },
+	{ "lxdtr", 0xdc, INSTR_RRF_0UFF },
+	{ "ledtr", 0xd5, INSTR_RRF_UUFF },
+	{ "ldxtr", 0xdd, INSTR_RRF_UUFF },
+	{ "mdtr", 0xd0, INSTR_RRR_F0FF },
+	{ "mxtr", 0xd8, INSTR_RRR_F0FF },
+	{ "qadtr", 0xf5, INSTR_RRF_FUFF },
+	{ "qaxtr", 0xfd, INSTR_RRF_FUFF },
+	{ "rrdtr", 0xf7, INSTR_RRF_FFRU },
+	{ "rrxtr", 0xff, INSTR_RRF_FFRU },
+	{ "sfasr", 0x85, INSTR_RRE_R0 },
+	{ "sdtr", 0xd3, INSTR_RRR_F0FF },
+	{ "sxtr", 0xdb, INSTR_RRR_F0FF },
 #endif
 	{ "lpebr", 0x00, INSTR_RRE_FF },
 	{ "lnebr", 0x01, INSTR_RRE_FF },
@@ -780,6 +873,14 @@ static struct insn opcode_b9[] = {
 	{ "cu24", 0xb1, INSTR_RRF_M0RR },
 	{ "cu41", 0xb2, INSTR_RRF_M0RR },
 	{ "cu42", 0xb3, INSTR_RRF_M0RR },
+	{ "crt", 0x72, INSTR_RRF_U0RR },
+	{ "cgrt", 0x60, INSTR_RRF_U0RR },
+	{ "clrt", 0x73, INSTR_RRF_U0RR },
+	{ "clgrt", 0x61, INSTR_RRF_U0RR },
+	{ "ptf", 0xa2, INSTR_RRE_R0 },
+	{ "pfmf", 0xaf, INSTR_RRE_RR },
+	{ "trte", 0xbf, INSTR_RRF_M0RR },
+	{ "trtre", 0xbd, INSTR_RRF_M0RR },
 #endif
 	{ "kmac", 0x1e, INSTR_RRE_RR },
 	{ "lrvr", 0x1f, INSTR_RRE_RR },
@@ -835,6 +936,43 @@ static struct insn opcode_c2[] = {
 	{ "cfi", 0x0d, INSTR_RIL_RI },
 	{ "clgfi", 0x0e, INSTR_RIL_RU },
 	{ "clfi", 0x0f, INSTR_RIL_RU },
+	{ "msfi", 0x01, INSTR_RIL_RI },
+	{ "msgfi", 0x00, INSTR_RIL_RI },
+#endif
+	{ "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_c4[] = {
+#ifdef CONFIG_64BIT
+	{ "lrl", 0x0d, INSTR_RIL_RP },
+	{ "lgrl", 0x08, INSTR_RIL_RP },
+	{ "lgfrl", 0x0c, INSTR_RIL_RP },
+	{ "lhrl", 0x05, INSTR_RIL_RP },
+	{ "lghrl", 0x04, INSTR_RIL_RP },
+	{ "llgfrl", 0x0e, INSTR_RIL_RP },
+	{ "llhrl", 0x02, INSTR_RIL_RP },
+	{ "llghrl", 0x06, INSTR_RIL_RP },
+	{ "strl", 0x0f, INSTR_RIL_RP },
+	{ "stgrl", 0x0b, INSTR_RIL_RP },
+	{ "sthrl", 0x07, INSTR_RIL_RP },
+#endif
+	{ "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_c6[] = {
+#ifdef CONFIG_64BIT
+	{ "crl", 0x0d, INSTR_RIL_RP },
+	{ "cgrl", 0x08, INSTR_RIL_RP },
+	{ "cgfrl", 0x0c, INSTR_RIL_RP },
+	{ "chrl", 0x05, INSTR_RIL_RP },
+	{ "cghrl", 0x04, INSTR_RIL_RP },
+	{ "clrl", 0x0f, INSTR_RIL_RP },
+	{ "clgrl", 0x0a, INSTR_RIL_RP },
+	{ "clgfrl", 0x0e, INSTR_RIL_RP },
+	{ "clhrl", 0x07, INSTR_RIL_RP },
+	{ "clghrl", 0x06, INSTR_RIL_RP },
+	{ "pfdrl", 0x02, INSTR_RIL_UP },
+	{ "exrl", 0x00, INSTR_RIL_RP },
 #endif
 	{ "", 0, INSTR_INVALID }
 };
@@ -842,6 +980,8 @@ static struct insn opcode_c2[] = {
 static struct insn opcode_c8[] = {
 #ifdef CONFIG_64BIT
 	{ "mvcos", 0x00, INSTR_SSF_RRDRD },
+	{ "ectg", 0x01, INSTR_SSF_RRDRD },
+	{ "csst", 0x02, INSTR_SSF_RRDRD },
 #endif
 	{ "", 0, INSTR_INVALID }
 };
@@ -917,6 +1057,12 @@ static struct insn opcode_e3[] = {
 	{ "llgh", 0x91, INSTR_RXY_RRRD },
 	{ "llc", 0x94, INSTR_RXY_RRRD },
 	{ "llh", 0x95, INSTR_RXY_RRRD },
+	{ "cgh", 0x34, INSTR_RXY_RRRD },
+	{ "laey", 0x75, INSTR_RXY_RRRD },
+	{ "ltgf", 0x32, INSTR_RXY_RRRD },
+	{ "mfy", 0x5c, INSTR_RXY_RRRD },
+	{ "mhy", 0x7c, INSTR_RXY_RRRD },
+	{ "pfd", 0x36, INSTR_RXY_URRD },
 #endif
 	{ "lrv", 0x1e, INSTR_RXY_RRRD },
 	{ "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -931,6 +1077,15 @@ static struct insn opcode_e3[] = {
 static struct insn opcode_e5[] = {
 #ifdef CONFIG_64BIT
 	{ "strag", 0x02, INSTR_SSE_RDRD },
+	{ "chhsi", 0x54, INSTR_SIL_RDI },
+	{ "chsi", 0x5c, INSTR_SIL_RDI },
+	{ "cghsi", 0x58, INSTR_SIL_RDI },
+	{ "clhhsi", 0x55, INSTR_SIL_RDU },
+	{ "clfhsi", 0x5d, INSTR_SIL_RDU },
+	{ "clghsi", 0x59, INSTR_SIL_RDU },
+	{ "mvhhi", 0x44, INSTR_SIL_RDI },
+	{ "mvhi", 0x4c, INSTR_SIL_RDI },
+	{ "mvghi", 0x48, INSTR_SIL_RDI },
 #endif
 	{ "lasp", 0x00, INSTR_SSE_RDRD },
 	{ "tprot", 0x01, INSTR_SSE_RDRD },
@@ -977,6 +1132,11 @@ static struct insn opcode_eb[] = {
 	{ "lmy", 0x98, INSTR_RSY_RRRD },
 	{ "lamy", 0x9a, INSTR_RSY_AARD },
 	{ "stamy", 0x9b, INSTR_RSY_AARD },
+	{ "asi", 0x6a, INSTR_SIY_IRD },
+	{ "agsi", 0x7a, INSTR_SIY_IRD },
+	{ "alsi", 0x6e, INSTR_SIY_IRD },
+	{ "algsi", 0x7e, INSTR_SIY_IRD },
+	{ "ecag", 0x4c, INSTR_RSY_RRRD },
 #endif
 	{ "rll", 0x1d, INSTR_RSY_RRRD },
 	{ "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -988,6 +1148,30 @@ static struct insn opcode_ec[] = {
 #ifdef CONFIG_64BIT
 	{ "brxhg", 0x44, INSTR_RIE_RRP },
 	{ "brxlg", 0x45, INSTR_RIE_RRP },
+	{ "crb", 0xf6, INSTR_RRS_RRRDU },
+	{ "cgrb", 0xe4, INSTR_RRS_RRRDU },
+	{ "crj", 0x76, INSTR_RIE_RRPU },
+	{ "cgrj", 0x64, INSTR_RIE_RRPU },
+	{ "cib", 0xfe, INSTR_RIS_RURDI },
+	{ "cgib", 0xfc, INSTR_RIS_RURDI },
+	{ "cij", 0x7e, INSTR_RIE_RUPI },
+	{ "cgij", 0x7c, INSTR_RIE_RUPI },
+	{ "cit", 0x72, INSTR_RIE_R0IU },
+	{ "cgit", 0x70, INSTR_RIE_R0IU },
+	{ "clrb", 0xf7, INSTR_RRS_RRRDU },
+	{ "clgrb", 0xe5, INSTR_RRS_RRRDU },
+	{ "clrj", 0x77, INSTR_RIE_RRPU },
+	{ "clgrj", 0x65, INSTR_RIE_RRPU },
+	{ "clib", 0xff, INSTR_RIS_RURDU },
+	{ "clgib", 0xfd, INSTR_RIS_RURDU },
+	{ "clij", 0x7f, INSTR_RIE_RUPU },
+	{ "clgij", 0x7d, INSTR_RIE_RUPU },
+	{ "clfit", 0x73, INSTR_RIE_R0UU },
+	{ "clgit", 0x71, INSTR_RIE_R0UU },
+	{ "rnsbg", 0x54, INSTR_RIE_RRUUU },
+	{ "rxsbg", 0x57, INSTR_RIE_RRUUU },
+	{ "rosbg", 0x56, INSTR_RIE_RRUUU },
+	{ "risbg", 0x55, INSTR_RIE_RRUUU },
 #endif
 	{ "", 0, INSTR_INVALID }
 };
@@ -1004,6 +1188,16 @@ static struct insn opcode_ed[] = {
 	{ "ldy", 0x65, INSTR_RXY_FRRD },
 	{ "stey", 0x66, INSTR_RXY_FRRD },
 	{ "stdy", 0x67, INSTR_RXY_FRRD },
+	{ "sldt", 0x40, INSTR_RXF_FRRDF },
+	{ "slxt", 0x48, INSTR_RXF_FRRDF },
+	{ "srdt", 0x41, INSTR_RXF_FRRDF },
+	{ "srxt", 0x49, INSTR_RXF_FRRDF },
+	{ "tdcet", 0x50, INSTR_RXE_FRRD },
+	{ "tdcdt", 0x54, INSTR_RXE_FRRD },
+	{ "tdcxt", 0x58, INSTR_RXE_FRRD },
+	{ "tdget", 0x51, INSTR_RXE_FRRD },
+	{ "tdgdt", 0x55, INSTR_RXE_FRRD },
+	{ "tdgxt", 0x59, INSTR_RXE_FRRD },
 #endif
 	{ "ldeb", 0x04, INSTR_RXE_FRRD },
 	{ "lxdb", 0x05, INSTR_RXE_FRRD },
@@ -1037,6 +1231,7 @@ static struct insn opcode_ed[] = {
 	{ "mae", 0x2e, INSTR_RXF_FRRDF },
 	{ "mse", 0x2f, INSTR_RXF_FRRDF },
 	{ "sqe", 0x34, INSTR_RXE_FRRD },
+	{ "sqd", 0x35, INSTR_RXE_FRRD },
 	{ "mee", 0x37, INSTR_RXE_FRRD },
 	{ "mad", 0x3e, INSTR_RXF_FRRDF },
 	{ "msd", 0x3f, INSTR_RXF_FRRDF },
@@ -1117,6 +1312,12 @@ static struct insn *find_insn(unsigned char *code)
 	case 0xc2:
 		table = opcode_c2;
 		break;
+	case 0xc4:
+		table = opcode_c4;
+		break;
+	case 0xc6:
+		table = opcode_c6;
+		break;
 	case 0xc8:
 		table = opcode_c8;
 		break;
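
All of the new operand types (U4_32, U8_24, I8_32, ...) and format entries added to dis.c describe where a field lives inside an instruction: struct operand records the field width in bits and its starting bit position counted from the leftmost bit, and each formats[] row lists the operands of one INSTR_* layout. The userspace sketch below only illustrates that decoding model; extract_operand() and the sample bytes are illustrative, not the kernel's actual helper. It pulls the 4-bit field at bit 32 (a U4_32 operand) out of a 6-byte RIE-format instruction.

#include <stdio.h>

struct operand {
	unsigned char bits;	/* field width in bits */
	unsigned char shift;	/* starting bit, counted from the left */
	unsigned char flags;
};

static unsigned int extract_operand(const unsigned char *code,
				    const struct operand *op)
{
	unsigned int val = 0;
	int bits;

	/* collect whole bytes until the field is fully covered */
	for (bits = 0; bits < op->shift + op->bits; bits += 8)
		val = (val << 8) | code[bits / 8];
	val >>= (bits - op->shift - op->bits) & 7;	/* drop trailing bits */
	val &= (1u << op->bits) - 1;			/* keep op->bits bits */
	return val;
}

int main(void)
{
	/* 6-byte instruction with a 4-bit field at bit 32 set to 0xa */
	unsigned char insn[6] = { 0xec, 0x10, 0x00, 0x08, 0xa0, 0x72 };
	struct operand u4_32 = { 4, 32, 0 };

	printf("U4_32 operand = %#x\n", extract_operand(insn, &u4_32));
	return 0;
}

The disassembler applies the same kind of walk to every operand listed in the format entry that matches an opcode.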
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index e49e9e0c69fd..c00856ad4e5a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -82,7 +82,8 @@ asm(
 "	lm	6,15,24(15)\n"
 #endif
 "	br	14\n"
-"	.size savesys_ipl_nss, .-savesys_ipl_nss\n");
+"	.size savesys_ipl_nss, .-savesys_ipl_nss\n"
+"	.previous\n");
 
 static __initdata char upper_command_line[COMMAND_LINE_SIZE];
 
@@ -214,10 +215,13 @@ static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
 
 static noinline __init void detect_machine_type(void)
 {
-	/* No VM information? Looks like LPAR */
-	if (stsi(&vmms, 3, 2, 2) == -ENOSYS)
+	/* Check current-configuration-level */
+	if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) {
+		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
 		return;
-	if (!vmms.count)
+	}
+	/* Get virtual-machine cpu information. */
+	if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count)
 		return;
 
 	/* Running under KVM? If not we assume z/VM */
@@ -352,6 +356,7 @@ static __init void detect_machine_facilities(void)
 {
 #ifdef CONFIG_64BIT
 	unsigned int facilities;
+	unsigned long long facility_bits;
 
 	facilities = stfl();
 	if (facilities & (1 << 28))
@@ -360,6 +365,9 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
 	if (facilities & (1 << 4))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
+	if ((stfle(&facility_bits, 1) > 0) &&
+	    (facility_bits & (1ULL << (63 - 40))))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
 #endif
 }
 
@@ -402,8 +410,19 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
 
 static void __init setup_boot_command_line(void)
 {
+	int i;
+
+	/* convert arch command line to ascii */
+	for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++)
+		if (COMMAND_LINE[i] & 0x80)
+			break;
+	if (i < ARCH_COMMAND_LINE_SIZE)
+		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0;
+
 	/* copy arch command line */
-	strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
+		ARCH_COMMAND_LINE_SIZE);
 
 	/* append IPL PARM data to the boot command line */
 	if (MACHINE_IS_VM)
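
The new stfle() check in detect_machine_facilities() depends on how facility bits are numbered: the store-facility-list-extended result counts bits from the most significant bit of each doubleword, which is why facility 40 is tested with 1ULL << (63 - 40) in the first doubleword. A small hedged helper spelling that rule out (test_facility_bit() is illustrative, not a kernel API):

/* fac_list points at the doublewords filled in by stfle() */
static inline int test_facility_bit(const unsigned long long *fac_list,
				    unsigned int nr)
{
	return (fac_list[nr / 64] >> (63 - (nr % 64))) & 1;
}

/* equivalent to the check added above:
 *	if (test_facility_bit(&facility_bits, 40))
 *		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
 */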
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e8ef21c51bbe..bea9ee37ac9d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -13,7 +13,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/cache.h>
-#include <asm/lowcore.h>
 #include <asm/errno.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
@@ -74,21 +73,24 @@ STACK_SIZE = 1 << STACK_SHIFT
 	basr	%r14,%r1
 	.endm
 
-	.macro	TRACE_IRQS_CHECK
-	basr	%r2,%r0
+	.macro	TRACE_IRQS_CHECK_ON
 	tm	SP_PSW(%r15),0x03	# irqs enabled?
-	jz	0f
-	l	%r1,BASED(.Ltrace_irq_on_caller)
-	basr	%r14,%r1
-	j	1f
-0:	l	%r1,BASED(.Ltrace_irq_off_caller)
-	basr	%r14,%r1
-1:
+	bz	BASED(0f)
+	TRACE_IRQS_ON
+0:
+	.endm
+
+	.macro	TRACE_IRQS_CHECK_OFF
+	tm	SP_PSW(%r15),0x03	# irqs enabled?
+	bz	BASED(0f)
+	TRACE_IRQS_OFF
+0:
 	.endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK
+#define TRACE_IRQS_CHECK_ON
+#define TRACE_IRQS_CHECK_OFF
 #endif
 
 #ifdef CONFIG_LOCKDEP
@@ -178,9 +180,9 @@ STACK_SIZE = 1 << STACK_SHIFT
 	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
 	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
-	icm	%r12,3,__LC_SVC_ILC
+	icm	%r12,12,__LC_SVC_ILC
 	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
-	st	%r12,SP_SVCNR(%r15)
+	st	%r12,SP_ILC(%r15)
 	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
 	la	%r12,0
 	st	%r12,__SF_BACKCHAIN(%r15) # clear back chain
@@ -274,66 +276,45 @@ sysc_do_restart:
 	st	%r2,SP_R2(%r15)	# store return value (change R2 on stack)
 
 sysc_return:
+	LOCKDEP_SYS_EXIT
+sysc_tif:
 	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
 	bnz	BASED(sysc_work)	# there is work to do (signals etc.)
 sysc_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
-	la	%r1,BASED(sysc_restore_trace_psw_addr)
-	l	%r1,0(%r1)
-	lpsw	0(%r1)
-sysc_restore_trace:
-	TRACE_IRQS_CHECK
-	LOCKDEP_SYS_EXIT
-#endif
-sysc_leave:
 	RESTORE_ALL __LC_RETURN_PSW,1
 sysc_done:
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-sysc_restore_trace_psw_addr:
-	.long sysc_restore_trace_psw
-
-	.section .data,"aw",@progbits
-	.align	8
-	.globl	sysc_restore_trace_psw
-sysc_restore_trace_psw:
-	.long	0, sysc_restore_trace + 0x80000000
-	.previous
-#endif
-
 #
-# recheck if there is more work to do
-#
-sysc_work_loop:
-	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
-	bz	BASED(sysc_restore)	# there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
 #
 sysc_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 	bno	BASED(sysc_restore)
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_tif:
 	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
 	bo	BASED(sysc_mcck_pending)
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
 	bo	BASED(sysc_reschedule)
 	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
-	bnz	BASED(sysc_sigpending)
+	bo	BASED(sysc_sigpending)
 	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
-	bnz	BASED(sysc_notify_resume)
+	bo	BASED(sysc_notify_resume)
 	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
 	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
 	bo	BASED(sysc_singlestep)
-	b	BASED(sysc_restore)
-sysc_work_done:
+	b	BASED(sysc_return)	# beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
 sysc_reschedule:
 	l	%r1,BASED(.Lschedule)
-	la	%r14,BASED(sysc_work_loop)
+	la	%r14,BASED(sysc_return)
 	br	%r1			# call scheduler
 
 #
@@ -341,7 +322,7 @@ sysc_reschedule:
 #
 sysc_mcck_pending:
 	l	%r1,BASED(.Ls390_handle_mcck)
-	la	%r14,BASED(sysc_work_loop)
+	la	%r14,BASED(sysc_return)
 	br	%r1			# TIF bit will be cleared by handler
 
 #
@@ -356,7 +337,7 @@ sysc_sigpending:
 	bo	BASED(sysc_restart)
 	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
 	bo	BASED(sysc_singlestep)
-	b	BASED(sysc_work_loop)
+	b	BASED(sysc_return)
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
@@ -364,7 +345,7 @@ sysc_sigpending:
 sysc_notify_resume:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_notify_resume)
-	la	%r14,BASED(sysc_work_loop)
+	la	%r14,BASED(sysc_return)
 	br	%r1			# call do_notify_resume
 
 
@@ -459,11 +440,13 @@ kernel_execve:
 	br	%r14
 	# execve succeeded.
 0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
+	TRACE_IRQS_OFF
 	l	%r15,__LC_KERNEL_STACK	# load ksp
 	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	l	%r9,__LC_THREAD_INFO
 	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	TRACE_IRQS_ON
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	l	%r1,BASED(.Lexecve_tail)
 	basr	%r14,%r1
@@ -500,8 +483,8 @@ pgm_check_handler:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime:
+	TRACE_IRQS_CHECK_OFF
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	TRACE_IRQS_OFF
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3
@@ -510,8 +493,10 @@ pgm_do_call:
 	sll	%r8,2
 	l	%r7,0(%r8,%r7)		# load address of handler routine
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	la	%r14,BASED(sysc_return)
-	br	%r7			# branch to interrupt-handler
+	basr	%r14,%r7		# branch to interrupt-handler
+pgm_exit:
+	TRACE_IRQS_CHECK_ON
+	b	BASED(sysc_return)
 
 #
 # handle per exception
@@ -538,20 +523,28 @@ pgm_per_std:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime2:
+	TRACE_IRQS_CHECK_OFF
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	TRACE_IRQS_OFF
 	l	%r1,__TI_task(%r9)
+	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
+	bz	BASED(kernel_per)
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
-	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
-	bz	BASED(kernel_per)
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3			# clear per-event-bit and ilc
-	be	BASED(sysc_return)	# only per or per+check ?
-	b	BASED(pgm_do_call)
+	be	BASED(pgm_exit2)	# only per or per+check ?
+	l	%r7,BASED(.Ljump_table)
+	sll	%r8,2
+	l	%r7,0(%r8,%r7)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	basr	%r14,%r7		# branch to interrupt-handler
+pgm_exit2:
+	TRACE_IRQS_ON
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	b	BASED(sysc_return)
 
 #
 # it was a single stepped SVC that is causing all the trouble
@@ -571,8 +564,8 @@ pgm_svcper:
 	mvc	__THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
 	TRACE_IRQS_ON
-	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
 	b	BASED(sysc_do_svc)
 
 #
@@ -583,8 +576,8 @@ kernel_per:
 	mvi	SP_SVCNR+1(%r15),0xff
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
-	la	%r14,BASED(sysc_restore)# load adr. of system return
-	br	%r1			# branch to do_single_step
+	basr	%r14,%r1		# branch to do_single_step
+	b	BASED(pgm_exit)
 
 /*
  * IO interrupt handler routine
@@ -603,134 +596,126 @@ io_int_handler:
603 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 596 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
604 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 597 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
605io_no_vtime: 598io_no_vtime:
606 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
607 TRACE_IRQS_OFF 599 TRACE_IRQS_OFF
600 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
608 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ 601 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
609 la %r2,SP_PTREGS(%r15) # address of register-save area 602 la %r2,SP_PTREGS(%r15) # address of register-save area
610 basr %r14,%r1 # branch to standard irq handler 603 basr %r14,%r1 # branch to standard irq handler
611io_return: 604io_return:
605 LOCKDEP_SYS_EXIT
606 TRACE_IRQS_ON
607io_tif:
612 tm __TI_flags+3(%r9),_TIF_WORK_INT 608 tm __TI_flags+3(%r9),_TIF_WORK_INT
613 bnz BASED(io_work) # there is work to do (signals etc.) 609 bnz BASED(io_work) # there is work to do (signals etc.)
614io_restore: 610io_restore:
615#ifdef CONFIG_TRACE_IRQFLAGS
616 la %r1,BASED(io_restore_trace_psw_addr)
617 l %r1,0(%r1)
618 lpsw 0(%r1)
619io_restore_trace:
620 TRACE_IRQS_CHECK
621 LOCKDEP_SYS_EXIT
622#endif
623io_leave:
624 RESTORE_ALL __LC_RETURN_PSW,0 611 RESTORE_ALL __LC_RETURN_PSW,0
625io_done: 612io_done:
626 613
627#ifdef CONFIG_TRACE_IRQFLAGS
628io_restore_trace_psw_addr:
629 .long io_restore_trace_psw
630
631 .section .data,"aw",@progbits
632 .align 8
633 .globl io_restore_trace_psw
634io_restore_trace_psw:
635 .long 0, io_restore_trace + 0x80000000
636 .previous
637#endif
638
639# 614#
640# switch to kernel stack, then check the TIF bits 615# There is work to do, find out in which context we have been interrupted:
616# 1) if we return to user space we can do all _TIF_WORK_INT work
617# 2) if we return to kernel code and preemptive scheduling is enabled check
618# the preemption counter and if it is zero call preempt_schedule_irq
619# Before any work can be done, a switch to the kernel stack is required.
641# 620#
642io_work: 621io_work:
643 tm SP_PSW+1(%r15),0x01 # returning to user ? 622 tm SP_PSW+1(%r15),0x01 # returning to user ?
644#ifndef CONFIG_PREEMPT 623 bo BASED(io_work_user) # yes -> do resched & signal
645 bno BASED(io_restore) # no-> skip resched & signal 624#ifdef CONFIG_PREEMPT
646#else
647 bnz BASED(io_work_user) # no -> check for preemptive scheduling
648 # check for preemptive scheduling 625 # check for preemptive scheduling
649 icm %r0,15,__TI_precount(%r9) 626 icm %r0,15,__TI_precount(%r9)
650 bnz BASED(io_restore) # preemption disabled 627 bnz BASED(io_restore) # preemption disabled
628 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
629 bno BASED(io_restore)
630 # switch to kernel stack
651 l %r1,SP_R15(%r15) 631 l %r1,SP_R15(%r15)
652 s %r1,BASED(.Lc_spsize) 632 s %r1,BASED(.Lc_spsize)
653 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 633 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
654 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 634 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
655 lr %r15,%r1 635 lr %r15,%r1
656io_resume_loop: 636 # TRACE_IRQS_ON already done at io_return, call
657 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 637 # TRACE_IRQS_OFF to keep things symmetrical
658 bno BASED(io_restore) 638 TRACE_IRQS_OFF
659 l %r1,BASED(.Lpreempt_schedule_irq) 639 l %r1,BASED(.Lpreempt_schedule_irq)
660 la %r14,BASED(io_resume_loop) 640 basr %r14,%r1 # call preempt_schedule_irq
661 br %r1 # call schedule 641 b BASED(io_return)
642#else
643 b BASED(io_restore)
662#endif 644#endif
663 645
646#
647# Need to do work before returning to userspace, switch to kernel stack
648#
664io_work_user: 649io_work_user:
665 l %r1,__LC_KERNEL_STACK 650 l %r1,__LC_KERNEL_STACK
666 s %r1,BASED(.Lc_spsize) 651 s %r1,BASED(.Lc_spsize)
667 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 652 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
668 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 653 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
669 lr %r15,%r1 654 lr %r15,%r1
655
670# 656#
671# One of the work bits is on. Find out which one. 657# One of the work bits is on. Find out which one.
672# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED 658# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
673# and _TIF_MCCK_PENDING 659# and _TIF_MCCK_PENDING
674# 660#
675io_work_loop: 661io_work_tif:
676 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 662 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
677 bo BASED(io_mcck_pending) 663 bo BASED(io_mcck_pending)
678 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 664 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
679 bo BASED(io_reschedule) 665 bo BASED(io_reschedule)
680 tm __TI_flags+3(%r9),_TIF_SIGPENDING 666 tm __TI_flags+3(%r9),_TIF_SIGPENDING
681 bnz BASED(io_sigpending) 667 bo BASED(io_sigpending)
682 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 668 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
683 bnz BASED(io_notify_resume) 669 bo BASED(io_notify_resume)
684 b BASED(io_restore) 670 b BASED(io_return) # beware of critical section cleanup
685io_work_done:
686 671
687# 672#
688# _TIF_MCCK_PENDING is set, call handler 673# _TIF_MCCK_PENDING is set, call handler
689# 674#
690io_mcck_pending: 675io_mcck_pending:
676 # TRACE_IRQS_ON already done at io_return
691 l %r1,BASED(.Ls390_handle_mcck) 677 l %r1,BASED(.Ls390_handle_mcck)
692 basr %r14,%r1 # TIF bit will be cleared by handler 678 basr %r14,%r1 # TIF bit will be cleared by handler
693 b BASED(io_work_loop) 679 TRACE_IRQS_OFF
680 b BASED(io_return)
694 681
695# 682#
696# _TIF_NEED_RESCHED is set, call schedule 683# _TIF_NEED_RESCHED is set, call schedule
697# 684#
698io_reschedule: 685io_reschedule:
699 TRACE_IRQS_ON 686 # TRACE_IRQS_ON already done at io_return
700 l %r1,BASED(.Lschedule) 687 l %r1,BASED(.Lschedule)
701 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 688 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
702 basr %r14,%r1 # call scheduler 689 basr %r14,%r1 # call scheduler
703 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 690 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
704 TRACE_IRQS_OFF 691 TRACE_IRQS_OFF
705 tm __TI_flags+3(%r9),_TIF_WORK_INT 692 b BASED(io_return)
706 bz BASED(io_restore) # there is no work to do
707 b BASED(io_work_loop)
708 693
709# 694#
710# _TIF_SIGPENDING is set, call do_signal 695# _TIF_SIGPENDING is set, call do_signal
711# 696#
712io_sigpending: 697io_sigpending:
713 TRACE_IRQS_ON 698 # TRACE_IRQS_ON already done at io_return
714 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 699 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
715 la %r2,SP_PTREGS(%r15) # load pt_regs 700 la %r2,SP_PTREGS(%r15) # load pt_regs
716 l %r1,BASED(.Ldo_signal) 701 l %r1,BASED(.Ldo_signal)
717 basr %r14,%r1 # call do_signal 702 basr %r14,%r1 # call do_signal
718 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 703 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
719 TRACE_IRQS_OFF 704 TRACE_IRQS_OFF
720 b BASED(io_work_loop) 705 b BASED(io_return)
721 706
722# 707#
723# _TIF_NOTIFY_RESUME is set, call do_notify_resume 708# _TIF_NOTIFY_RESUME is set, call do_notify_resume
724# 709#
725io_notify_resume: 710io_notify_resume:
726 TRACE_IRQS_ON 711 # TRACE_IRQS_ON already done at io_return
727 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 712 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
728 la %r2,SP_PTREGS(%r15) # load pt_regs 713 la %r2,SP_PTREGS(%r15) # load pt_regs
729 l %r1,BASED(.Ldo_notify_resume) 714 l %r1,BASED(.Ldo_notify_resume)
730 basr %r14,%r1 # call do_notify_resume 715 basr %r14,%r1 # call do_notify_resume
731 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 716 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
732 TRACE_IRQS_OFF 717 TRACE_IRQS_OFF
733 b BASED(io_work_loop) 718 b BASED(io_return)
734 719
735/* 720/*
736 * External interrupt handler routine 721 * External interrupt handler routine
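The io_work comment block above (31-bit entry.S) describes the new decision flow: user-space returns go through io_work_user, and kernel-mode returns only reach preempt_schedule_irq when preemption is enabled, the preempt count is zero and TIF_NEED_RESCHED is set. A minimal C sketch of that decision; user_mode, test_ti_thread_flag, trace_hardirqs_off and preempt_schedule_irq are real kernel interfaces, the wrapper and its explicit thread_info argument are illustrative.

        /* sketch of the io_work branch above */
        static void io_work_sketch(struct pt_regs *regs, struct thread_info *ti)
        {
                if (user_mode(regs)) {
                        /* io_work_user: switch to the kernel stack, then
                         * handle the _TIF_WORK_INT bits (io_work_tif) */
                        return;
                }
        #ifdef CONFIG_PREEMPT
                if (ti->preempt_count == 0 &&
                    test_ti_thread_flag(ti, TIF_NEED_RESCHED)) {
                        /* switch to the kernel stack of the interrupted code,
                         * keep irq tracing symmetrical with io_return */
                        trace_hardirqs_off();
                        preempt_schedule_irq();
                        return;         /* back to io_return, bits re-checked */
                }
        #endif
                /* io_restore: return straight to the interrupted context */
        }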
@@ -765,15 +750,14 @@ __critical_end:
765 750
766 .globl mcck_int_handler 751 .globl mcck_int_handler
767mcck_int_handler: 752mcck_int_handler:
768 stck __LC_INT_CLOCK 753 stck __LC_MCCK_CLOCK
769 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer 754 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
770 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs 755 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
771 SAVE_ALL_BASE __LC_SAVE_AREA+32 756 SAVE_ALL_BASE __LC_SAVE_AREA+32
772 la %r12,__LC_MCK_OLD_PSW 757 la %r12,__LC_MCK_OLD_PSW
773 tm __LC_MCCK_CODE,0x80 # system damage? 758 tm __LC_MCCK_CODE,0x80 # system damage?
774 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid 759 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
775 mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER 760 mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
776 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
777 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 761 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
778 bo BASED(1f) 762 bo BASED(1f)
779 la %r14,__LC_SYNC_ENTER_TIMER 763 la %r14,__LC_SYNC_ENTER_TIMER
@@ -787,7 +771,7 @@ mcck_int_handler:
787 bl BASED(0f) 771 bl BASED(0f)
788 la %r14,__LC_LAST_UPDATE_TIMER 772 la %r14,__LC_LAST_UPDATE_TIMER
7890: spt 0(%r14) 7730: spt 0(%r14)
790 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) 774 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
7911: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7751: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
792 bno BASED(mcck_int_main) # no -> skip cleanup critical 776 bno BASED(mcck_int_main) # no -> skip cleanup critical
793 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 777 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
@@ -809,9 +793,9 @@ mcck_int_main:
809 bno BASED(mcck_no_vtime) # no -> skip cleanup critical 793 bno BASED(mcck_no_vtime) # no -> skip cleanup critical
810 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 794 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
811 bz BASED(mcck_no_vtime) 795 bz BASED(mcck_no_vtime)
812 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 796 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
813 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 797 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
814 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 798 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
815mcck_no_vtime: 799mcck_no_vtime:
816 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 800 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
817 la %r2,SP_PTREGS(%r15) # load pt_regs 801 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -834,7 +818,6 @@ mcck_no_vtime:
834mcck_return: 818mcck_return:
835 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW 819 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
836 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 820 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
837 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
838 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 821 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
839 bno BASED(0f) 822 bno BASED(0f)
840 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 823 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
@@ -918,18 +901,14 @@ stack_overflow:
918 901
919cleanup_table_system_call: 902cleanup_table_system_call:
920 .long system_call + 0x80000000, sysc_do_svc + 0x80000000 903 .long system_call + 0x80000000, sysc_do_svc + 0x80000000
921cleanup_table_sysc_return: 904cleanup_table_sysc_tif:
922 .long sysc_return + 0x80000000, sysc_leave + 0x80000000 905 .long sysc_tif + 0x80000000, sysc_restore + 0x80000000
923cleanup_table_sysc_leave: 906cleanup_table_sysc_restore:
924 .long sysc_leave + 0x80000000, sysc_done + 0x80000000 907 .long sysc_restore + 0x80000000, sysc_done + 0x80000000
925cleanup_table_sysc_work_loop: 908cleanup_table_io_tif:
926 .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000 909 .long io_tif + 0x80000000, io_restore + 0x80000000
927cleanup_table_io_return: 910cleanup_table_io_restore:
928 .long io_return + 0x80000000, io_leave + 0x80000000 911 .long io_restore + 0x80000000, io_done + 0x80000000
929cleanup_table_io_leave:
930 .long io_leave + 0x80000000, io_done + 0x80000000
931cleanup_table_io_work_loop:
932 .long io_work_loop + 0x80000000, io_work_done + 0x80000000
933 912
934cleanup_critical: 913cleanup_critical:
935 clc 4(4,%r12),BASED(cleanup_table_system_call) 914 clc 4(4,%r12),BASED(cleanup_table_system_call)
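The cleanup tables above now pair each critical range with exactly one cleanup routine (sysc_tif..sysc_restore, sysc_restore..sysc_done, io_tif..io_restore, io_restore..io_done), and cleanup_critical, shown in the next hunk, compares the interrupted address against these ranges. A hedged C illustration of that lookup; the struct, loop and function are invented for clarity, only the range and routine names come from the table above.

        struct cleanup_range {
                unsigned long start;            /* e.g. io_tif         */
                unsigned long end;              /* e.g. io_restore     */
                void (*cleanup)(void);          /* e.g. cleanup_io_tif */
        };

        /* sketch of cleanup_critical: run the fixup for the range that
         * contains the interrupted instruction address */
        static void cleanup_critical_sketch(unsigned long addr,
                                            const struct cleanup_range *tab,
                                            unsigned int entries)
        {
                unsigned int i;

                for (i = 0; i < entries; i++)
                        if (addr >= tab[i].start && addr < tab[i].end) {
                                tab[i].cleanup();
                                break;
                        }
        }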
@@ -937,49 +916,40 @@ cleanup_critical:
937 clc 4(4,%r12),BASED(cleanup_table_system_call+4) 916 clc 4(4,%r12),BASED(cleanup_table_system_call+4)
938 bl BASED(cleanup_system_call) 917 bl BASED(cleanup_system_call)
9390: 9180:
940 clc 4(4,%r12),BASED(cleanup_table_sysc_return) 919 clc 4(4,%r12),BASED(cleanup_table_sysc_tif)
941 bl BASED(0f)
942 clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
943 bl BASED(cleanup_sysc_return)
9440:
945 clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
946 bl BASED(0f) 920 bl BASED(0f)
947 clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4) 921 clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4)
948 bl BASED(cleanup_sysc_leave) 922 bl BASED(cleanup_sysc_tif)
9490: 9230:
950 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop) 924 clc 4(4,%r12),BASED(cleanup_table_sysc_restore)
951 bl BASED(0f) 925 bl BASED(0f)
952 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4) 926 clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4)
953 bl BASED(cleanup_sysc_return) 927 bl BASED(cleanup_sysc_restore)
9540: 9280:
955 clc 4(4,%r12),BASED(cleanup_table_io_return) 929 clc 4(4,%r12),BASED(cleanup_table_io_tif)
956 bl BASED(0f) 930 bl BASED(0f)
957 clc 4(4,%r12),BASED(cleanup_table_io_return+4) 931 clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
958 bl BASED(cleanup_io_return) 932 bl BASED(cleanup_io_tif)
9590: 9330:
960 clc 4(4,%r12),BASED(cleanup_table_io_leave) 934 clc 4(4,%r12),BASED(cleanup_table_io_restore)
961 bl BASED(0f) 935 bl BASED(0f)
962 clc 4(4,%r12),BASED(cleanup_table_io_leave+4) 936 clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
963 bl BASED(cleanup_io_leave) 937 bl BASED(cleanup_io_restore)
9640:
965 clc 4(4,%r12),BASED(cleanup_table_io_work_loop)
966 bl BASED(0f)
967 clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4)
968 bl BASED(cleanup_io_return)
9690: 9380:
970 br %r14 939 br %r14
971 940
972cleanup_system_call: 941cleanup_system_call:
973 mvc __LC_RETURN_PSW(8),0(%r12) 942 mvc __LC_RETURN_PSW(8),0(%r12)
974 c %r12,BASED(.Lmck_old_psw)
975 be BASED(0f)
976 la %r12,__LC_SAVE_AREA+16
977 b BASED(1f)
9780: la %r12,__LC_SAVE_AREA+32
9791:
980 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) 943 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
981 bh BASED(0f) 944 bh BASED(0f)
945 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
946 c %r12,BASED(.Lmck_old_psw)
947 be BASED(0f)
982 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 948 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
9490: c %r12,BASED(.Lmck_old_psw)
950 la %r12,__LC_SAVE_AREA+32
951 be BASED(0f)
952 la %r12,__LC_SAVE_AREA+16
9830: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) 9530: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
984 bhe BASED(cleanup_vtime) 954 bhe BASED(cleanup_vtime)
985 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) 955 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
@@ -1012,55 +982,54 @@ cleanup_system_call_insn:
1012 .long sysc_stime + 0x80000000 982 .long sysc_stime + 0x80000000
1013 .long sysc_update + 0x80000000 983 .long sysc_update + 0x80000000
1014 984
1015cleanup_sysc_return: 985cleanup_sysc_tif:
1016 mvc __LC_RETURN_PSW(4),0(%r12) 986 mvc __LC_RETURN_PSW(4),0(%r12)
1017 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return) 987 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
1018 la %r12,__LC_RETURN_PSW 988 la %r12,__LC_RETURN_PSW
1019 br %r14 989 br %r14
1020 990
1021cleanup_sysc_leave: 991cleanup_sysc_restore:
1022 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) 992 clc 4(4,%r12),BASED(cleanup_sysc_restore_insn)
1023 be BASED(2f) 993 be BASED(2f)
994 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
995 c %r12,BASED(.Lmck_old_psw)
996 be BASED(0f)
1024 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 997 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
1025 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) 9980: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
1026 be BASED(2f) 999 be BASED(2f)
1027 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) 1000 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
1028 c %r12,BASED(.Lmck_old_psw) 1001 c %r12,BASED(.Lmck_old_psw)
1029 bne BASED(0f) 1002 la %r12,__LC_SAVE_AREA+32
1030 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) 1003 be BASED(1f)
1031 b BASED(1f) 1004 la %r12,__LC_SAVE_AREA+16
10320: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15) 10051: mvc 0(16,%r12),SP_R12(%r15)
10331: lm %r0,%r11,SP_R0(%r15) 1006 lm %r0,%r11,SP_R0(%r15)
1034 l %r15,SP_R15(%r15) 1007 l %r15,SP_R15(%r15)
10352: la %r12,__LC_RETURN_PSW 10082: la %r12,__LC_RETURN_PSW
1036 br %r14 1009 br %r14
1037cleanup_sysc_leave_insn: 1010cleanup_sysc_restore_insn:
1038 .long sysc_done - 4 + 0x80000000 1011 .long sysc_done - 4 + 0x80000000
1039 .long sysc_done - 8 + 0x80000000 1012 .long sysc_done - 8 + 0x80000000
1040 1013
1041cleanup_io_return: 1014cleanup_io_tif:
1042 mvc __LC_RETURN_PSW(4),0(%r12) 1015 mvc __LC_RETURN_PSW(4),0(%r12)
1043 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) 1016 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
1044 la %r12,__LC_RETURN_PSW 1017 la %r12,__LC_RETURN_PSW
1045 br %r14 1018 br %r14
1046 1019
1047cleanup_io_leave: 1020cleanup_io_restore:
1048 clc 4(4,%r12),BASED(cleanup_io_leave_insn) 1021 clc 4(4,%r12),BASED(cleanup_io_restore_insn)
1049 be BASED(2f) 1022 be BASED(1f)
1050 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1023 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1051 clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) 1024 clc 4(4,%r12),BASED(cleanup_io_restore_insn+4)
1052 be BASED(2f) 1025 be BASED(1f)
1053 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) 1026 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
1054 c %r12,BASED(.Lmck_old_psw)
1055 bne BASED(0f)
1056 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) 1027 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
1057 b BASED(1f) 1028 lm %r0,%r11,SP_R0(%r15)
10580: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
10591: lm %r0,%r11,SP_R0(%r15)
1060 l %r15,SP_R15(%r15) 1029 l %r15,SP_R15(%r15)
10612: la %r12,__LC_RETURN_PSW 10301: la %r12,__LC_RETURN_PSW
1062 br %r14 1031 br %r14
1063cleanup_io_leave_insn: 1032cleanup_io_restore_insn:
1064 .long io_done - 4 + 0x80000000 1033 .long io_done - 4 + 0x80000000
1065 .long io_done - 8 + 0x80000000 1034 .long io_done - 8 + 0x80000000
1066 1035
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index e1e5e767ab56..eb15c12ec158 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -24,17 +24,13 @@ int __cpuinit start_secondary(void *cpuvoid);
24void __init startup_init(void); 24void __init startup_init(void);
25void die(const char * str, struct pt_regs * regs, long err); 25void die(const char * str, struct pt_regs * regs, long err);
26 26
27struct new_utsname; 27struct s390_mmap_arg_struct;
28struct mmap_arg_struct;
29struct fadvise64_64_args; 28struct fadvise64_64_args;
30struct old_sigaction; 29struct old_sigaction;
31struct sel_arg_struct;
32 30
33long sys_mmap2(struct mmap_arg_struct __user *arg); 31long sys_mmap2(struct s390_mmap_arg_struct __user *arg);
34long sys_s390_old_mmap(struct mmap_arg_struct __user *arg); 32long sys_s390_ipc(uint call, int first, unsigned long second,
35long sys_ipc(uint call, int first, unsigned long second,
36 unsigned long third, void __user *ptr); 33 unsigned long third, void __user *ptr);
37long sys_s390_newuname(struct new_utsname __user *name);
38long sys_s390_personality(unsigned long personality); 34long sys_s390_personality(unsigned long personality);
39long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low, 35long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
40 size_t len, int advice); 36 size_t len, int advice);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f33658f09dd7..8bccec15ea90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -2,18 +2,16 @@
2 * arch/s390/kernel/entry64.S 2 * arch/s390/kernel/entry64.S
3 * S390 low-level entry points. 3 * S390 low-level entry points.
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2006 5 * Copyright (C) IBM Corp. 1999,2010
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 * Heiko Carstens <heiko.carstens@de.ibm.com> 9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 */ 10 */
11 11
12#include <linux/sys.h>
13#include <linux/linkage.h> 12#include <linux/linkage.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <asm/cache.h> 14#include <asm/cache.h>
16#include <asm/lowcore.h>
17#include <asm/errno.h> 15#include <asm/errno.h>
18#include <asm/ptrace.h> 16#include <asm/ptrace.h>
19#include <asm/thread_info.h> 17#include <asm/thread_info.h>
@@ -61,30 +59,45 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
61 59
62#define BASED(name) name-system_call(%r13) 60#define BASED(name) name-system_call(%r13)
63 61
62 .macro HANDLE_SIE_INTERCEPT
63#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
64 lg %r3,__LC_SIE_HOOK
65 ltgr %r3,%r3
66 jz 0f
67 basr %r14,%r3
680:
69#endif
70 .endm
71
64#ifdef CONFIG_TRACE_IRQFLAGS 72#ifdef CONFIG_TRACE_IRQFLAGS
65 .macro TRACE_IRQS_ON 73 .macro TRACE_IRQS_ON
66 basr %r2,%r0 74 basr %r2,%r0
67 brasl %r14,trace_hardirqs_on_caller 75 brasl %r14,trace_hardirqs_on_caller
68 .endm 76 .endm
69 77
70 .macro TRACE_IRQS_OFF 78 .macro TRACE_IRQS_OFF
71 basr %r2,%r0 79 basr %r2,%r0
72 brasl %r14,trace_hardirqs_off_caller 80 brasl %r14,trace_hardirqs_off_caller
73 .endm 81 .endm
74 82
75 .macro TRACE_IRQS_CHECK 83 .macro TRACE_IRQS_CHECK_ON
76 basr %r2,%r0
77 tm SP_PSW(%r15),0x03 # irqs enabled? 84 tm SP_PSW(%r15),0x03 # irqs enabled?
78 jz 0f 85 jz 0f
79 brasl %r14,trace_hardirqs_on_caller 86 TRACE_IRQS_ON
80 j 1f 870:
810: brasl %r14,trace_hardirqs_off_caller 88 .endm
821: 89
90 .macro TRACE_IRQS_CHECK_OFF
91 tm SP_PSW(%r15),0x03 # irqs enabled?
92 jz 0f
93 TRACE_IRQS_OFF
940:
83 .endm 95 .endm
84#else 96#else
85#define TRACE_IRQS_ON 97#define TRACE_IRQS_ON
86#define TRACE_IRQS_OFF 98#define TRACE_IRQS_OFF
87#define TRACE_IRQS_CHECK 99#define TRACE_IRQS_CHECK_ON
100#define TRACE_IRQS_CHECK_OFF
88#endif 101#endif
89 102
90#ifdef CONFIG_LOCKDEP 103#ifdef CONFIG_LOCKDEP
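Two of the new macros above are easy to misread in assembler form. HANDLE_SIE_INTERCEPT calls the function stored at __LC_SIE_HOOK if it is non-NULL, and TRACE_IRQS_CHECK_ON/CHECK_OFF only invoke the irq-trace hooks when the interrupted PSW had I/O or external interrupts enabled (the `tm SP_PSW(%r15),0x03` test). A minimal C sketch of the latter pair, assuming the saved PSW is reachable through pt_regs; trace_hardirqs_on/off and the PSW_MASK_* bits are real, the wrapper functions are illustrative.

        static inline int irqs_were_enabled(struct pt_regs *regs)
        {
                /* tm SP_PSW(%r15),0x03 - I/O or external mask set? */
                return (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT)) != 0;
        }

        static void trace_irqs_check_on_sketch(struct pt_regs *regs)
        {
                if (irqs_were_enabled(regs))    /* TRACE_IRQS_CHECK_ON  */
                        trace_hardirqs_on();
        }

        static void trace_irqs_check_off_sketch(struct pt_regs *regs)
        {
                if (irqs_were_enabled(regs))    /* TRACE_IRQS_CHECK_OFF */
                        trace_hardirqs_off();
        }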
@@ -113,31 +126,35 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
113 * R15 - kernel stack pointer 126 * R15 - kernel stack pointer
114 */ 127 */
115 128
116 .macro SAVE_ALL_BASE savearea
117 stmg %r12,%r15,\savearea
118 larl %r13,system_call
119 .endm
120
121 .macro SAVE_ALL_SVC psworg,savearea 129 .macro SAVE_ALL_SVC psworg,savearea
122 la %r12,\psworg 130 stmg %r11,%r15,\savearea
123 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp 131 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
132 aghi %r15,-SP_SIZE # make room for registers & psw
133 lg %r11,__LC_LAST_BREAK
124 .endm 134 .endm
125 135
126 .macro SAVE_ALL_SYNC psworg,savearea 136 .macro SAVE_ALL_PGM psworg,savearea
127 la %r12,\psworg 137 stmg %r11,%r15,\savearea
128 tm \psworg+1,0x01 # test problem state bit 138 tm \psworg+1,0x01 # test problem state bit
129 jz 2f # skip stack setup save
130 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
131#ifdef CONFIG_CHECK_STACK 139#ifdef CONFIG_CHECK_STACK
132 j 3f 140 jnz 1f
1332: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 141 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
134 jz stack_overflow 142 jnz 2f
1353: 143 la %r12,\psworg
144 j stack_overflow
145#else
146 jz 2f
136#endif 147#endif
1372: 1481: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
1492: aghi %r15,-SP_SIZE # make room for registers & psw
150 larl %r13,system_call
151 lg %r11,__LC_LAST_BREAK
138 .endm 152 .endm
139 153
140 .macro SAVE_ALL_ASYNC psworg,savearea 154 .macro SAVE_ALL_ASYNC psworg,savearea
155 stmg %r11,%r15,\savearea
156 larl %r13,system_call
157 lg %r11,__LC_LAST_BREAK
141 la %r12,\psworg 158 la %r12,\psworg
142 tm \psworg+1,0x01 # test problem state bit 159 tm \psworg+1,0x01 # test problem state bit
143 jnz 1f # from user -> load kernel stack 160 jnz 1f # from user -> load kernel stack
@@ -151,27 +168,23 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
1510: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? 1680: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
152 slgr %r14,%r15 169 slgr %r14,%r15
153 srag %r14,%r14,STACK_SHIFT 170 srag %r14,%r14,STACK_SHIFT
154 jz 2f
1551: lg %r15,__LC_ASYNC_STACK # load async stack
156#ifdef CONFIG_CHECK_STACK 171#ifdef CONFIG_CHECK_STACK
157 j 3f 172 jnz 1f
1582: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 173 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
159 jz stack_overflow 174 jnz 2f
1603: 175 j stack_overflow
176#else
177 jz 2f
161#endif 178#endif
1622: 1791: lg %r15,__LC_ASYNC_STACK # load async stack
1802: aghi %r15,-SP_SIZE # make room for registers & psw
163 .endm 181 .endm
164 182
165 .macro CREATE_STACK_FRAME psworg,savearea 183 .macro CREATE_STACK_FRAME savearea
166 aghi %r15,-SP_SIZE # make room for registers & psw 184 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
167 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
168 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 185 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
169 icm %r12,3,__LC_SVC_ILC 186 mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
170 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack 187 stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
171 st %r12,SP_SVCNR(%r15)
172 mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
173 la %r12,0
174 stg %r12,__SF_BACKCHAIN(%r15)
175 .endm 188 .endm
176 189
177 .macro RESTORE_ALL psworg,sync 190 .macro RESTORE_ALL psworg,sync
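The reworked SAVE_ALL_* and CREATE_STACK_FRAME macros above build the same register save area as before, they only split the work differently: the SAVE_ALL_* macros store %r11-%r15 and switch stacks, CREATE_STACK_FRAME clears the back chain and saves %r0-%r10 plus the original %r2, and the PSW, ILC and svc number are now copied by the individual entry points. Read through the SP_* offsets used in this file, the saved area corresponds roughly to the C view below; the field names mirror those offsets, but the exact layout comes from asm-offsets.c and is only sketched here.

        /* rough C view of the pt_regs area addressed via SP_* above */
        struct pt_regs_sketch {
                unsigned long args[1];          /* SP_ARGS                    */
                psw_t psw;                      /* SP_PSW, interrupted PSW    */
                unsigned long gprs[16];         /* SP_R0 .. SP_R15            */
                unsigned long orig_gpr2;        /* SP_ORIG_R2, original %r2   */
                unsigned short ilc;             /* SP_ILC, instruction length */
                unsigned short svcnr;           /* SP_SVCNR, system call nr   */
        };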
@@ -187,6 +200,13 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
187 lpswe \psworg # back to caller 200 lpswe \psworg # back to caller
188 .endm 201 .endm
189 202
203 .macro LAST_BREAK
204 srag %r10,%r11,23
205 jz 0f
206 stg %r11,__TI_last_break(%r12)
2070:
208 .endm
209
190/* 210/*
191 * Scheduler resume function, called by switch_to 211 * Scheduler resume function, called by switch_to
192 * gpr2 = (task_struct *) prev 212 * gpr2 = (task_struct *) prev
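LAST_BREAK records the last-breaking-event address (loaded from __LC_LAST_BREAK into %r11 by the SAVE_ALL_* macros) in thread_info, skipping small addresses: the `srag %r10,%r11,23` test drops everything below 8 MB. A one-line C equivalent, assuming thread_info carries a last_break member as the __TI_last_break offset suggests:

        /* sketch of the LAST_BREAK macro */
        static void last_break_sketch(struct thread_info *ti, unsigned long brk)
        {
                if (brk >> 23)                  /* srag %r10,%r11,23 ; jz 0f */
                        ti->last_break = brk;   /* stg %r11,__TI_last_break  */
        }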
@@ -232,143 +252,129 @@ __critical_start:
232system_call: 252system_call:
233 stpt __LC_SYNC_ENTER_TIMER 253 stpt __LC_SYNC_ENTER_TIMER
234sysc_saveall: 254sysc_saveall:
235 SAVE_ALL_BASE __LC_SAVE_AREA
236 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 255 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
237 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 256 CREATE_STACK_FRAME __LC_SAVE_AREA
238 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 257 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
258 mvc SP_ILC(4,%r15),__LC_SVC_ILC
259 stg %r7,SP_ARGS(%r15)
260 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
239sysc_vtime: 261sysc_vtime:
240 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 262 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
241sysc_stime: 263sysc_stime:
242 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 264 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
243sysc_update: 265sysc_update:
244 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 266 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
267 LAST_BREAK
245sysc_do_svc: 268sysc_do_svc:
246 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 269 llgh %r7,SP_SVCNR(%r15)
247 ltgr %r7,%r7 # test for svc 0 270 slag %r7,%r7,2 # shift and test for svc 0
248 jnz sysc_nr_ok 271 jnz sysc_nr_ok
249 # svc 0: system call number in %r1 272 # svc 0: system call number in %r1
250 cl %r1,BASED(.Lnr_syscalls) 273 llgfr %r1,%r1 # clear high word in r1
274 cghi %r1,NR_syscalls
251 jnl sysc_nr_ok 275 jnl sysc_nr_ok
252 lgfr %r7,%r1 # clear high word in r1 276 sth %r1,SP_SVCNR(%r15)
277 slag %r7,%r1,2 # shift and test for svc 0
253sysc_nr_ok: 278sysc_nr_ok:
254 mvc SP_ARGS(8,%r15),SP_R7(%r15)
255sysc_do_restart:
256 sth %r7,SP_SVCNR(%r15)
257 sllg %r7,%r7,2 # svc number * 4
258 larl %r10,sys_call_table 279 larl %r10,sys_call_table
259#ifdef CONFIG_COMPAT 280#ifdef CONFIG_COMPAT
260 tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ? 281 tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ?
261 jno sysc_noemu 282 jno sysc_noemu
262 larl %r10,sys_call_table_emu # use 31 bit emulation system calls 283 larl %r10,sys_call_table_emu # use 31 bit emulation system calls
263sysc_noemu: 284sysc_noemu:
264#endif 285#endif
265 tm __TI_flags+6(%r9),_TIF_SYSCALL 286 tm __TI_flags+6(%r12),_TIF_SYSCALL
266 lgf %r8,0(%r7,%r10) # load address of system call routine 287 lgf %r8,0(%r7,%r10) # load address of system call routine
267 jnz sysc_tracesys 288 jnz sysc_tracesys
268 basr %r14,%r8 # call sys_xxxx 289 basr %r14,%r8 # call sys_xxxx
269 stg %r2,SP_R2(%r15) # store return value (change R2 on stack) 290 stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
270 291
271sysc_return: 292sysc_return:
272 tm __TI_flags+7(%r9),_TIF_WORK_SVC 293 LOCKDEP_SYS_EXIT
294sysc_tif:
295 tm __TI_flags+7(%r12),_TIF_WORK_SVC
273 jnz sysc_work # there is work to do (signals etc.) 296 jnz sysc_work # there is work to do (signals etc.)
274sysc_restore: 297sysc_restore:
275#ifdef CONFIG_TRACE_IRQFLAGS
276 larl %r1,sysc_restore_trace_psw
277 lpswe 0(%r1)
278sysc_restore_trace:
279 TRACE_IRQS_CHECK
280 LOCKDEP_SYS_EXIT
281#endif
282sysc_leave:
283 RESTORE_ALL __LC_RETURN_PSW,1 298 RESTORE_ALL __LC_RETURN_PSW,1
284sysc_done: 299sysc_done:
285 300
286#ifdef CONFIG_TRACE_IRQFLAGS
287 .section .data,"aw",@progbits
288 .align 8
289 .globl sysc_restore_trace_psw
290sysc_restore_trace_psw:
291 .quad 0, sysc_restore_trace
292 .previous
293#endif
294
295#
296# recheck if there is more work to do
297# 301#
298sysc_work_loop: 302# There is work to do, but first we need to check if we return to userspace.
299 tm __TI_flags+7(%r9),_TIF_WORK_SVC
300 jz sysc_restore # there is no work to do
301#
302# One of the work bits is on. Find out which one.
303# 303#
304sysc_work: 304sysc_work:
305 tm SP_PSW+1(%r15),0x01 # returning to user ? 305 tm SP_PSW+1(%r15),0x01 # returning to user ?
306 jno sysc_restore 306 jno sysc_restore
307 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 307
308#
309# One of the work bits is on. Find out which one.
310#
311sysc_work_tif:
312 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
308 jo sysc_mcck_pending 313 jo sysc_mcck_pending
309 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 314 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
310 jo sysc_reschedule 315 jo sysc_reschedule
311 tm __TI_flags+7(%r9),_TIF_SIGPENDING 316 tm __TI_flags+7(%r12),_TIF_SIGPENDING
312 jnz sysc_sigpending 317 jo sysc_sigpending
313 tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME 318 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
314 jnz sysc_notify_resume 319 jo sysc_notify_resume
315 tm __TI_flags+7(%r9),_TIF_RESTART_SVC 320 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
316 jo sysc_restart 321 jo sysc_restart
317 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP 322 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
318 jo sysc_singlestep 323 jo sysc_singlestep
319 j sysc_restore 324 j sysc_return # beware of critical section cleanup
320sysc_work_done:
321 325
322# 326#
323# _TIF_NEED_RESCHED is set, call schedule 327# _TIF_NEED_RESCHED is set, call schedule
324# 328#
325sysc_reschedule: 329sysc_reschedule:
326 larl %r14,sysc_work_loop 330 larl %r14,sysc_return
327 jg schedule # return point is sysc_return 331 jg schedule # return point is sysc_return
328 332
329# 333#
330# _TIF_MCCK_PENDING is set, call handler 334# _TIF_MCCK_PENDING is set, call handler
331# 335#
332sysc_mcck_pending: 336sysc_mcck_pending:
333 larl %r14,sysc_work_loop 337 larl %r14,sysc_return
334 jg s390_handle_mcck # TIF bit will be cleared by handler 338 jg s390_handle_mcck # TIF bit will be cleared by handler
335 339
336# 340#
337# _TIF_SIGPENDING is set, call do_signal 341# _TIF_SIGPENDING is set, call do_signal
338# 342#
339sysc_sigpending: 343sysc_sigpending:
340 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 344 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
341 la %r2,SP_PTREGS(%r15) # load pt_regs 345 la %r2,SP_PTREGS(%r15) # load pt_regs
342 brasl %r14,do_signal # call do_signal 346 brasl %r14,do_signal # call do_signal
343 tm __TI_flags+7(%r9),_TIF_RESTART_SVC 347 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
344 jo sysc_restart 348 jo sysc_restart
345 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP 349 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
346 jo sysc_singlestep 350 jo sysc_singlestep
347 j sysc_work_loop 351 j sysc_return
348 352
349# 353#
350# _TIF_NOTIFY_RESUME is set, call do_notify_resume 354# _TIF_NOTIFY_RESUME is set, call do_notify_resume
351# 355#
352sysc_notify_resume: 356sysc_notify_resume:
353 la %r2,SP_PTREGS(%r15) # load pt_regs 357 la %r2,SP_PTREGS(%r15) # load pt_regs
354 larl %r14,sysc_work_loop 358 larl %r14,sysc_return
355 jg do_notify_resume # call do_notify_resume 359 jg do_notify_resume # call do_notify_resume
356 360
357# 361#
358# _TIF_RESTART_SVC is set, set up registers and restart svc 362# _TIF_RESTART_SVC is set, set up registers and restart svc
359# 363#
360sysc_restart: 364sysc_restart:
361 ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC 365 ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
362 lg %r7,SP_R2(%r15) # load new svc number 366 lg %r7,SP_R2(%r15) # load new svc number
363 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument 367 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
364 lmg %r2,%r6,SP_R2(%r15) # load svc arguments 368 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
365 j sysc_do_restart # restart svc 369 sth %r7,SP_SVCNR(%r15)
370 slag %r7,%r7,2
371 j sysc_nr_ok # restart svc
366 372
367# 373#
368# _TIF_SINGLE_STEP is set, call do_single_step 374# _TIF_SINGLE_STEP is set, call do_single_step
369# 375#
370sysc_singlestep: 376sysc_singlestep:
371 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 377 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
372 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 378 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
373 la %r2,SP_PTREGS(%r15) # address of register-save area 379 la %r2,SP_PTREGS(%r15) # address of register-save area
374 larl %r14,sysc_return # load adr. of system return 380 larl %r14,sysc_return # load adr. of system return
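In the reworked sysc_do_svc above the svc number is taken from the saved SP_SVCNR halfword; for svc 0 the number is passed in %r1 and only accepted when it is below NR_syscalls, and the `slag ...,2` turns the number into a byte offset into the 4-byte sys_call_table entries. A minimal C sketch of that selection, assuming the saved svc number and %r1 are reachable through pt_regs; NR_syscalls is real, the helper function is illustrative.

        /* sketch of the svc number selection in sysc_do_svc */
        static unsigned int svc_number_sketch(struct pt_regs *regs)
        {
                unsigned int nr = regs->svcnr;          /* llgh %r7,SP_SVCNR */

                if (nr == 0) {                          /* svc 0: nr in %r1  */
                        unsigned int r1 = (u32)regs->gprs[1];

                        if (r1 < NR_syscalls)
                                nr = r1;                /* sth %r1,SP_SVCNR  */
                }
                return nr;      /* table offset: nr << 2 (slag %r7,...,2) */
        }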
@@ -381,8 +387,8 @@ sysc_singlestep:
381sysc_tracesys: 387sysc_tracesys:
382 la %r2,SP_PTREGS(%r15) # load pt_regs 388 la %r2,SP_PTREGS(%r15) # load pt_regs
383 la %r3,0 389 la %r3,0
384 srl %r7,2 390 llgh %r0,SP_SVCNR(%r15)
385 stg %r7,SP_R2(%r15) 391 stg %r0,SP_R2(%r15)
386 brasl %r14,do_syscall_trace_enter 392 brasl %r14,do_syscall_trace_enter
387 lghi %r0,NR_syscalls 393 lghi %r0,NR_syscalls
388 clgr %r0,%r2 394 clgr %r0,%r2
@@ -395,7 +401,7 @@ sysc_tracego:
395 basr %r14,%r8 # call sys_xxx 401 basr %r14,%r8 # call sys_xxx
396 stg %r2,SP_R2(%r15) # store return value 402 stg %r2,SP_R2(%r15) # store return value
397sysc_tracenogo: 403sysc_tracenogo:
398 tm __TI_flags+6(%r9),_TIF_SYSCALL 404 tm __TI_flags+6(%r12),_TIF_SYSCALL
399 jz sysc_return 405 jz sysc_return
400 la %r2,SP_PTREGS(%r15) # load pt_regs 406 la %r2,SP_PTREGS(%r15) # load pt_regs
401 larl %r14,sysc_return # return point is sysc_return 407 larl %r14,sysc_return # return point is sysc_return
@@ -407,7 +413,7 @@ sysc_tracenogo:
407 .globl ret_from_fork 413 .globl ret_from_fork
408ret_from_fork: 414ret_from_fork:
409 lg %r13,__LC_SVC_NEW_PSW+8 415 lg %r13,__LC_SVC_NEW_PSW+8
410 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 416 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
411 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 417 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
412 jo 0f 418 jo 0f
413 stg %r15,SP_R15(%r15) # store stack pointer for new kthread 419 stg %r15,SP_R15(%r15) # store stack pointer for new kthread
@@ -437,12 +443,14 @@ kernel_execve:
437 br %r14 443 br %r14
438 # execve succeeded. 444 # execve succeeded.
4390: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 4450: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
446# TRACE_IRQS_OFF
440 lg %r15,__LC_KERNEL_STACK # load ksp 447 lg %r15,__LC_KERNEL_STACK # load ksp
441 aghi %r15,-SP_SIZE # make room for registers & psw 448 aghi %r15,-SP_SIZE # make room for registers & psw
442 lg %r13,__LC_SVC_NEW_PSW+8 449 lg %r13,__LC_SVC_NEW_PSW+8
443 lg %r9,__LC_THREAD_INFO
444 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs 450 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
451 lg %r12,__LC_THREAD_INFO
445 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 452 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
453# TRACE_IRQS_ON
446 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 454 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
447 brasl %r14,execve_tail 455 brasl %r14,execve_tail
448 j sysc_return 456 j sysc_return
@@ -467,20 +475,23 @@ pgm_check_handler:
467 * for LPSW?). 475 * for LPSW?).
468 */ 476 */
469 stpt __LC_SYNC_ENTER_TIMER 477 stpt __LC_SYNC_ENTER_TIMER
470 SAVE_ALL_BASE __LC_SAVE_AREA
471 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 478 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
472 jnz pgm_per # got per exception -> special case 479 jnz pgm_per # got per exception -> special case
473 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 480 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
474 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 481 CREATE_STACK_FRAME __LC_SAVE_AREA
482 xc SP_ILC(4,%r15),SP_ILC(%r15)
483 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
484 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
475 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 485 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
476 jz pgm_no_vtime 486 jz pgm_no_vtime
477 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 487 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
478 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 488 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
479 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 489 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
490 LAST_BREAK
480pgm_no_vtime: 491pgm_no_vtime:
481 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 492 HANDLE_SIE_INTERCEPT
482 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK 493 TRACE_IRQS_CHECK_OFF
483 TRACE_IRQS_OFF 494 stg %r11,SP_ARGS(%r15)
484 lgf %r3,__LC_PGM_ILC # load program interruption code 495 lgf %r3,__LC_PGM_ILC # load program interruption code
485 lghi %r8,0x7f 496 lghi %r8,0x7f
486 ngr %r8,%r3 497 ngr %r8,%r3
@@ -489,8 +500,10 @@ pgm_do_call:
489 larl %r1,pgm_check_table 500 larl %r1,pgm_check_table
490 lg %r1,0(%r8,%r1) # load address of handler routine 501 lg %r1,0(%r8,%r1) # load address of handler routine
491 la %r2,SP_PTREGS(%r15) # address of register-save area 502 la %r2,SP_PTREGS(%r15) # address of register-save area
492 larl %r14,sysc_return 503 basr %r14,%r1 # branch to interrupt-handler
493 br %r1 # branch to interrupt-handler 504pgm_exit:
505 TRACE_IRQS_CHECK_ON
506 j sysc_return
494 507
495# 508#
496# handle per exception 509# handle per exception
@@ -502,55 +515,68 @@ pgm_per:
502 clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW 515 clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
503 je pgm_svcper 516 je pgm_svcper
504# no interesting special case, ignore PER event 517# no interesting special case, ignore PER event
505 lmg %r12,%r15,__LC_SAVE_AREA
506 lpswe __LC_PGM_OLD_PSW 518 lpswe __LC_PGM_OLD_PSW
507 519
508# 520#
509# Normal per exception 521# Normal per exception
510# 522#
511pgm_per_std: 523pgm_per_std:
512 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 524 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
513 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 525 CREATE_STACK_FRAME __LC_SAVE_AREA
526 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
527 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
514 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 528 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
515 jz pgm_no_vtime2 529 jz pgm_no_vtime2
516 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 530 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
517 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 531 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
518 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 532 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
533 LAST_BREAK
519pgm_no_vtime2: 534pgm_no_vtime2:
520 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 535 HANDLE_SIE_INTERCEPT
521 TRACE_IRQS_OFF 536 TRACE_IRQS_CHECK_OFF
522 lg %r1,__TI_task(%r9) 537 lg %r1,__TI_task(%r12)
523 tm SP_PSW+1(%r15),0x01 # kernel per event ? 538 tm SP_PSW+1(%r15),0x01 # kernel per event ?
524 jz kernel_per 539 jz kernel_per
525 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 540 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
526 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 541 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
527 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 542 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
528 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 543 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
529 lgf %r3,__LC_PGM_ILC # load program interruption code 544 lgf %r3,__LC_PGM_ILC # load program interruption code
530 lghi %r8,0x7f 545 lghi %r8,0x7f
531 ngr %r8,%r3 # clear per-event-bit and ilc 546 ngr %r8,%r3 # clear per-event-bit and ilc
532 je sysc_return 547 je pgm_exit2
533 j pgm_do_call 548 sll %r8,3
549 larl %r1,pgm_check_table
550 lg %r1,0(%r8,%r1) # load address of handler routine
551 la %r2,SP_PTREGS(%r15) # address of register-save area
552 basr %r14,%r1 # branch to interrupt-handler
553pgm_exit2:
554 TRACE_IRQS_ON
555 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
556 j sysc_return
534 557
535# 558#
536# it was a single stepped SVC that is causing all the trouble 559# it was a single stepped SVC that is causing all the trouble
537# 560#
538pgm_svcper: 561pgm_svcper:
539 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 562 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
540 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 563 CREATE_STACK_FRAME __LC_SAVE_AREA
564 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
565 mvc SP_ILC(4,%r15),__LC_SVC_ILC
566 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
541 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 567 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
542 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 568 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
543 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 569 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
544 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 570 LAST_BREAK
545 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 571 TRACE_IRQS_OFF
546 lg %r8,__TI_task(%r9) 572 lg %r8,__TI_task(%r12)
547 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 573 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
548 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS 574 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
549 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 575 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
550 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 576 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
551 TRACE_IRQS_ON 577 TRACE_IRQS_ON
552 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
553 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 578 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
579 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
554 j sysc_do_svc 580 j sysc_do_svc
555 581
556# 582#
@@ -559,8 +585,8 @@ pgm_svcper:
559kernel_per: 585kernel_per:
560 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 586 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
561 la %r2,SP_PTREGS(%r15) # address of register-save area 587 la %r2,SP_PTREGS(%r15) # address of register-save area
562 larl %r14,sysc_restore # load adr. of system ret, no work 588 brasl %r14,do_single_step
563 jg do_single_step # branch to do_single_step 589 j pgm_exit
564 590
565/* 591/*
566 * IO interrupt handler routine 592 * IO interrupt handler routine
@@ -569,162 +595,133 @@ kernel_per:
569io_int_handler: 595io_int_handler:
570 stck __LC_INT_CLOCK 596 stck __LC_INT_CLOCK
571 stpt __LC_ASYNC_ENTER_TIMER 597 stpt __LC_ASYNC_ENTER_TIMER
572 SAVE_ALL_BASE __LC_SAVE_AREA+32 598 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
573 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 599 CREATE_STACK_FRAME __LC_SAVE_AREA+40
574 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 600 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
601 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
575 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 602 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
576 jz io_no_vtime 603 jz io_no_vtime
577 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 604 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
578 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 605 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
579 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 606 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
607 LAST_BREAK
580io_no_vtime: 608io_no_vtime:
581 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 609 HANDLE_SIE_INTERCEPT
582 TRACE_IRQS_OFF 610 TRACE_IRQS_OFF
583 la %r2,SP_PTREGS(%r15) # address of register-save area 611 la %r2,SP_PTREGS(%r15) # address of register-save area
584 brasl %r14,do_IRQ # call standard irq handler 612 brasl %r14,do_IRQ # call standard irq handler
585io_return: 613io_return:
586 tm __TI_flags+7(%r9),_TIF_WORK_INT 614 LOCKDEP_SYS_EXIT
615 TRACE_IRQS_ON
616io_tif:
617 tm __TI_flags+7(%r12),_TIF_WORK_INT
587 jnz io_work # there is work to do (signals etc.) 618 jnz io_work # there is work to do (signals etc.)
588io_restore: 619io_restore:
589#ifdef CONFIG_TRACE_IRQFLAGS
590 larl %r1,io_restore_trace_psw
591 lpswe 0(%r1)
592io_restore_trace:
593 TRACE_IRQS_CHECK
594 LOCKDEP_SYS_EXIT
595#endif
596io_leave:
597 RESTORE_ALL __LC_RETURN_PSW,0 620 RESTORE_ALL __LC_RETURN_PSW,0
598io_done: 621io_done:
599 622
600#ifdef CONFIG_TRACE_IRQFLAGS
601 .section .data,"aw",@progbits
602 .align 8
603 .globl io_restore_trace_psw
604io_restore_trace_psw:
605 .quad 0, io_restore_trace
606 .previous
607#endif
608
609# 623#
610# There is work to do, we need to check if we return to userspace, then 624# There is work to do, find out in which context we have been interrupted:
611# check, if we are in SIE, if yes leave it 625# 1) if we return to user space we can do all _TIF_WORK_INT work
626# 2) if we return to kernel code and kvm is enabled check if we need to
627# modify the psw to leave SIE
628# 3) if we return to kernel code and preemptive scheduling is enabled check
629# the preemption counter and if it is zero call preempt_schedule_irq
630# Before any work can be done, a switch to the kernel stack is required.
612# 631#
613io_work: 632io_work:
614 tm SP_PSW+1(%r15),0x01 # returning to user ? 633 tm SP_PSW+1(%r15),0x01 # returning to user ?
615#ifndef CONFIG_PREEMPT 634 jo io_work_user # yes -> do resched & signal
616#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 635#ifdef CONFIG_PREEMPT
617 jnz io_work_user # yes -> no need to check for SIE
618 la %r1, BASED(sie_opcode) # we return to kernel here
619 lg %r2, SP_PSW+8(%r15)
620 clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
621 jne io_restore # no-> return to kernel
622 lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
623 aghi %r1, 4
624 stg %r1, SP_PSW+8(%r15)
625 j io_restore # return to kernel
626#else
627 jno io_restore # no-> skip resched & signal
628#endif
629#else
630 jnz io_work_user # yes -> do resched & signal
631#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
632 la %r1, BASED(sie_opcode)
633 lg %r2, SP_PSW+8(%r15)
634 clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
635 jne 0f # no -> leave PSW alone
636 lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
637 aghi %r1, 4
638 stg %r1, SP_PSW+8(%r15)
6390:
640#endif
641 # check for preemptive scheduling 636 # check for preemptive scheduling
642 icm %r0,15,__TI_precount(%r9) 637 icm %r0,15,__TI_precount(%r12)
643 jnz io_restore # preemption is disabled 638 jnz io_restore # preemption is disabled
639 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
640 jno io_restore
644 # switch to kernel stack 641 # switch to kernel stack
645 lg %r1,SP_R15(%r15) 642 lg %r1,SP_R15(%r15)
646 aghi %r1,-SP_SIZE 643 aghi %r1,-SP_SIZE
647 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 644 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
648 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 645 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
649 lgr %r15,%r1 646 lgr %r15,%r1
650io_resume_loop: 647 # TRACE_IRQS_ON already done at io_return, call
651 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 648 # TRACE_IRQS_OFF to keep things symmetrical
652 jno io_restore 649 TRACE_IRQS_OFF
653 larl %r14,io_resume_loop 650 brasl %r14,preempt_schedule_irq
654 jg preempt_schedule_irq 651 j io_return
652#else
653 j io_restore
655#endif 654#endif
656 655
656#
657# Need to do work before returning to userspace, switch to kernel stack
658#
657io_work_user: 659io_work_user:
658 lg %r1,__LC_KERNEL_STACK 660 lg %r1,__LC_KERNEL_STACK
659 aghi %r1,-SP_SIZE 661 aghi %r1,-SP_SIZE
660 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 662 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
661 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 663 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
662 lgr %r15,%r1 664 lgr %r15,%r1
665
663# 666#
664# One of the work bits is on. Find out which one. 667# One of the work bits is on. Find out which one.
665# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED 668# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
666# and _TIF_MCCK_PENDING 669# and _TIF_MCCK_PENDING
667# 670#
668io_work_loop: 671io_work_tif:
669 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 672 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
670 jo io_mcck_pending 673 jo io_mcck_pending
671 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 674 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
672 jo io_reschedule 675 jo io_reschedule
673 tm __TI_flags+7(%r9),_TIF_SIGPENDING 676 tm __TI_flags+7(%r12),_TIF_SIGPENDING
674 jnz io_sigpending 677 jo io_sigpending
675 tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME 678 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
676 jnz io_notify_resume 679 jo io_notify_resume
677 j io_restore 680 j io_return # beware of critical section cleanup
678io_work_done:
679
680#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
681sie_opcode:
682 .long 0xb2140000
683#endif
684 681
685# 682#
686# _TIF_MCCK_PENDING is set, call handler 683# _TIF_MCCK_PENDING is set, call handler
687# 684#
688io_mcck_pending: 685io_mcck_pending:
686 # TRACE_IRQS_ON already done at io_return
689 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler 687 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
690 j io_work_loop 688 TRACE_IRQS_OFF
689 j io_return
691 690
692# 691#
693# _TIF_NEED_RESCHED is set, call schedule 692# _TIF_NEED_RESCHED is set, call schedule
694# 693#
695io_reschedule: 694io_reschedule:
696 TRACE_IRQS_ON 695 # TRACE_IRQS_ON already done at io_return
697 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 696 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
698 brasl %r14,schedule # call scheduler 697 brasl %r14,schedule # call scheduler
699 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 698 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
700 TRACE_IRQS_OFF 699 TRACE_IRQS_OFF
701 tm __TI_flags+7(%r9),_TIF_WORK_INT 700 j io_return
702 jz io_restore # there is no work to do
703 j io_work_loop
704 701
705# 702#
706# _TIF_SIGPENDING is set, call do_signal 703# _TIF_SIGPENDING is set, call do_signal
707# 704#
708io_sigpending: 705io_sigpending:
709 TRACE_IRQS_ON 706 # TRACE_IRQS_ON already done at io_return
710 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 707 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
711 la %r2,SP_PTREGS(%r15) # load pt_regs 708 la %r2,SP_PTREGS(%r15) # load pt_regs
712 brasl %r14,do_signal # call do_signal 709 brasl %r14,do_signal # call do_signal
713 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 710 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
714 TRACE_IRQS_OFF 711 TRACE_IRQS_OFF
715 j io_work_loop 712 j io_return
716 713
717# 714#
718# _TIF_NOTIFY_RESUME is set, call do_notify_resume 715# _TIF_NOTIFY_RESUME is set, call do_notify_resume
719# 716#
720io_notify_resume: 717io_notify_resume:
721 TRACE_IRQS_ON 718 # TRACE_IRQS_ON already done at io_return
722 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 719 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
723 la %r2,SP_PTREGS(%r15) # load pt_regs 720 la %r2,SP_PTREGS(%r15) # load pt_regs
724 brasl %r14,do_notify_resume # call do_notify_resume 721 brasl %r14,do_notify_resume # call do_notify_resume
725 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 722 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
726 TRACE_IRQS_OFF 723 TRACE_IRQS_OFF
727 j io_work_loop 724 j io_return
728 725
729/* 726/*
730 * External interrupt handler routine 727 * External interrupt handler routine
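The 64-bit io_work comment block above describes the same structure as the 31-bit variant, with the extra SIE case now handled by HANDLE_SIE_INTERCEPT at interrupt entry instead of the removed inline sie_opcode check. The common pattern is that every work handler jumps back to io_return, where LOCKDEP_SYS_EXIT and TRACE_IRQS_ON run and the TIF bits are evaluated again. A rough C rendering of that loop; the TIF masks and called functions are the kernel's, the loop itself and the explicit arguments are illustrative, and the kernel-mode preemption branch is omitted (see the 31-bit sketch earlier).

        /* sketch of io_return / io_work_tif after the rework */
        static void io_return_sketch(struct pt_regs *regs, struct thread_info *ti)
        {
                while (user_mode(regs) && (ti->flags & _TIF_WORK_INT)) {
                        if (ti->flags & _TIF_MCCK_PENDING) {
                                s390_handle_mcck();     /* clears the TIF bit */
                        } else if (ti->flags & _TIF_NEED_RESCHED) {
                                local_irq_enable();     /* stosm ...,0x03 */
                                schedule();
                                local_irq_disable();    /* stnsm ...,0xfc */
                        } else if (ti->flags & _TIF_SIGPENDING) {
                                local_irq_enable();
                                do_signal(regs);
                                local_irq_disable();
                        } else if (ti->flags & _TIF_NOTIFY_RESUME) {
                                local_irq_enable();
                                do_notify_resume(regs);
                                local_irq_disable();
                        } else {
                                break;  /* beware of critical section cleanup */
                        }
                }
                /* io_restore: RESTORE_ALL back to the interrupted context */
        }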
@@ -733,16 +730,18 @@ io_notify_resume:
733ext_int_handler: 730ext_int_handler:
734 stck __LC_INT_CLOCK 731 stck __LC_INT_CLOCK
735 stpt __LC_ASYNC_ENTER_TIMER 732 stpt __LC_ASYNC_ENTER_TIMER
736 SAVE_ALL_BASE __LC_SAVE_AREA+32 733 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
737 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 734 CREATE_STACK_FRAME __LC_SAVE_AREA+40
738 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 735 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
736 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
739 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 737 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
740 jz ext_no_vtime 738 jz ext_no_vtime
741 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 739 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
742 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 740 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
743 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 741 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
742 LAST_BREAK
744ext_no_vtime: 743ext_no_vtime:
745 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 744 HANDLE_SIE_INTERCEPT
746 TRACE_IRQS_OFF 745 TRACE_IRQS_OFF
747 la %r2,SP_PTREGS(%r15) # address of register-save area 746 la %r2,SP_PTREGS(%r15) # address of register-save area
748 llgh %r3,__LC_EXT_INT_CODE # get interruption code 747 llgh %r3,__LC_EXT_INT_CODE # get interruption code
@@ -756,17 +755,18 @@ __critical_end:
756 */ 755 */
757 .globl mcck_int_handler 756 .globl mcck_int_handler
758mcck_int_handler: 757mcck_int_handler:
759 stck __LC_INT_CLOCK 758 stck __LC_MCCK_CLOCK
760 la %r1,4095 # revalidate r1 759 la %r1,4095 # revalidate r1
761 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer 760 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
762 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs 761 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
763 SAVE_ALL_BASE __LC_SAVE_AREA+64 762 stmg %r11,%r15,__LC_SAVE_AREA+80
763 larl %r13,system_call
764 lg %r11,__LC_LAST_BREAK
764 la %r12,__LC_MCK_OLD_PSW 765 la %r12,__LC_MCK_OLD_PSW
765 tm __LC_MCCK_CODE,0x80 # system damage? 766 tm __LC_MCCK_CODE,0x80 # system damage?
766 jo mcck_int_main # yes -> rest of mcck code invalid 767 jo mcck_int_main # yes -> rest of mcck code invalid
767 la %r14,4095 768 la %r14,4095
768 mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER 769 mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
769 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
770 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 770 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
771 jo 1f 771 jo 1f
772 la %r14,__LC_SYNC_ENTER_TIMER 772 la %r14,__LC_SYNC_ENTER_TIMER
@@ -780,7 +780,7 @@ mcck_int_handler:
780 jl 0f 780 jl 0f
781 la %r14,__LC_LAST_UPDATE_TIMER 781 la %r14,__LC_LAST_UPDATE_TIMER
7820: spt 0(%r14) 7820: spt 0(%r14)
783 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) 783 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
7841: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7841: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
785 jno mcck_int_main # no -> skip cleanup critical 785 jno mcck_int_main # no -> skip cleanup critical
786 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 786 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
@@ -796,16 +796,19 @@ mcck_int_main:
796 srag %r14,%r14,PAGE_SHIFT 796 srag %r14,%r14,PAGE_SHIFT
797 jz 0f 797 jz 0f
798 lg %r15,__LC_PANIC_STACK # load panic stack 798 lg %r15,__LC_PANIC_STACK # load panic stack
7990: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 7990: aghi %r15,-SP_SIZE # make room for registers & psw
800 CREATE_STACK_FRAME __LC_SAVE_AREA+80
801 mvc SP_PSW(16,%r15),0(%r12)
802 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
800 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 803 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
801 jno mcck_no_vtime # no -> no timer update 804 jno mcck_no_vtime # no -> no timer update
802 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 805 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
803 jz mcck_no_vtime 806 jz mcck_no_vtime
804 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 807 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
805 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 808 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
806 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 809 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
810 LAST_BREAK
807mcck_no_vtime: 811mcck_no_vtime:
808 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
809 la %r2,SP_PTREGS(%r15) # load pt_regs 812 la %r2,SP_PTREGS(%r15) # load pt_regs
810 brasl %r14,s390_do_machine_check 813 brasl %r14,s390_do_machine_check
811 tm SP_PSW+1(%r15),0x01 # returning to user ? 814 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -816,8 +819,9 @@ mcck_no_vtime:
816 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 819 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
817 lgr %r15,%r1 820 lgr %r15,%r1
818 stosm __SF_EMPTY(%r15),0x04 # turn dat on 821 stosm __SF_EMPTY(%r15),0x04 # turn dat on
819 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 822 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
820 jno mcck_return 823 jno mcck_return
824 HANDLE_SIE_INTERCEPT
821 TRACE_IRQS_OFF 825 TRACE_IRQS_OFF
822 brasl %r14,s390_handle_mcck 826 brasl %r14,s390_handle_mcck
823 TRACE_IRQS_ON 827 TRACE_IRQS_ON
@@ -825,11 +829,11 @@ mcck_return:
825 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW 829 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
826 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 830 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
827 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 831 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
828 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
829 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 832 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
830 jno 0f 833 jno 0f
831 stpt __LC_EXIT_TIMER 834 stpt __LC_EXIT_TIMER
8320: lpswe __LC_RETURN_MCCK_PSW # back to caller 8350: lpswe __LC_RETURN_MCCK_PSW # back to caller
836mcck_done:
833 837
834/* 838/*
835 * Restart interruption handler, kick starter for additional CPUs 839 * Restart interruption handler, kick starter for additional CPUs
@@ -885,14 +889,14 @@ stack_overflow:
885 lg %r15,__LC_PANIC_STACK # change to panic stack 889 lg %r15,__LC_PANIC_STACK # change to panic stack
886 aghi %r15,-SP_SIZE 890 aghi %r15,-SP_SIZE
887 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack 891 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
888 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack 892 stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
889 la %r1,__LC_SAVE_AREA 893 la %r1,__LC_SAVE_AREA
890 chi %r12,__LC_SVC_OLD_PSW 894 chi %r12,__LC_SVC_OLD_PSW
891 je 0f 895 je 0f
892 chi %r12,__LC_PGM_OLD_PSW 896 chi %r12,__LC_PGM_OLD_PSW
893 je 0f 897 je 0f
894 la %r1,__LC_SAVE_AREA+32 898 la %r1,__LC_SAVE_AREA+40
8950: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack 8990: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack
896 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK 900 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
897 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain 901 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
898 la %r2,SP_PTREGS(%r15) # load pt_regs 902 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -901,18 +905,14 @@ stack_overflow:
901 905
902cleanup_table_system_call: 906cleanup_table_system_call:
903 .quad system_call, sysc_do_svc 907 .quad system_call, sysc_do_svc
904cleanup_table_sysc_return: 908cleanup_table_sysc_tif:
905 .quad sysc_return, sysc_leave 909 .quad sysc_tif, sysc_restore
906cleanup_table_sysc_leave: 910cleanup_table_sysc_restore:
907 .quad sysc_leave, sysc_done 911 .quad sysc_restore, sysc_done
908cleanup_table_sysc_work_loop: 912cleanup_table_io_tif:
909 .quad sysc_work_loop, sysc_work_done 913 .quad io_tif, io_restore
910cleanup_table_io_return: 914cleanup_table_io_restore:
911 .quad io_return, io_leave 915 .quad io_restore, io_done
912cleanup_table_io_leave:
913 .quad io_leave, io_done
914cleanup_table_io_work_loop:
915 .quad io_work_loop, io_work_done
916 916
917cleanup_critical: 917cleanup_critical:
918 clc 8(8,%r12),BASED(cleanup_table_system_call) 918 clc 8(8,%r12),BASED(cleanup_table_system_call)
@@ -920,61 +920,54 @@ cleanup_critical:
920 clc 8(8,%r12),BASED(cleanup_table_system_call+8) 920 clc 8(8,%r12),BASED(cleanup_table_system_call+8)
921 jl cleanup_system_call 921 jl cleanup_system_call
9220: 9220:
923 clc 8(8,%r12),BASED(cleanup_table_sysc_return) 923 clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
924 jl 0f 924 jl 0f
925 clc 8(8,%r12),BASED(cleanup_table_sysc_return+8) 925 clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8)
926 jl cleanup_sysc_return 926 jl cleanup_sysc_tif
9270: 9270:
928 clc 8(8,%r12),BASED(cleanup_table_sysc_leave) 928 clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
929 jl 0f 929 jl 0f
930 clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) 930 clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
931 jl cleanup_sysc_leave 931 jl cleanup_sysc_restore
9320: 9320:
933 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) 933 clc 8(8,%r12),BASED(cleanup_table_io_tif)
934 jl 0f 934 jl 0f
935 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) 935 clc 8(8,%r12),BASED(cleanup_table_io_tif+8)
936 jl cleanup_sysc_return 936 jl cleanup_io_tif
9370: 9370:
938 clc 8(8,%r12),BASED(cleanup_table_io_return) 938 clc 8(8,%r12),BASED(cleanup_table_io_restore)
939 jl 0f 939 jl 0f
940 clc 8(8,%r12),BASED(cleanup_table_io_return+8) 940 clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
941 jl cleanup_io_return 941 jl cleanup_io_restore
9420:
943 clc 8(8,%r12),BASED(cleanup_table_io_leave)
944 jl 0f
945 clc 8(8,%r12),BASED(cleanup_table_io_leave+8)
946 jl cleanup_io_leave
9470:
948 clc 8(8,%r12),BASED(cleanup_table_io_work_loop)
949 jl 0f
950 clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8)
951 jl cleanup_io_return
9520: 9420:
953 br %r14 943 br %r14
954 944
955cleanup_system_call: 945cleanup_system_call:
956 mvc __LC_RETURN_PSW(16),0(%r12) 946 mvc __LC_RETURN_PSW(16),0(%r12)
957 cghi %r12,__LC_MCK_OLD_PSW
958 je 0f
959 la %r12,__LC_SAVE_AREA+32
960 j 1f
9610: la %r12,__LC_SAVE_AREA+64
9621:
963 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) 947 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
964 jh 0f 948 jh 0f
949 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
950 cghi %r12,__LC_MCK_OLD_PSW
951 je 0f
965 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 952 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
9530: cghi %r12,__LC_MCK_OLD_PSW
954 la %r12,__LC_SAVE_AREA+80
955 je 0f
956 la %r12,__LC_SAVE_AREA+40
9660: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) 9570: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
967 jhe cleanup_vtime 958 jhe cleanup_vtime
968 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) 959 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
969 jh 0f 960 jh 0f
970 mvc __LC_SAVE_AREA(32),0(%r12) 961 mvc __LC_SAVE_AREA(40),0(%r12)
9710: stg %r13,8(%r12) 9620: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
972 stg %r12,__LC_SAVE_AREA+96 # argh 963 aghi %r15,-SP_SIZE # make room for registers & psw
973 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 964 stg %r15,32(%r12)
974 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 965 stg %r11,0(%r12)
975 lg %r12,__LC_SAVE_AREA+96 # argh 966 CREATE_STACK_FRAME __LC_SAVE_AREA
976 stg %r15,24(%r12) 967 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
977 llgh %r7,__LC_SVC_INT_CODE 968 mvc SP_ILC(4,%r15),__LC_SVC_ILC
969 stg %r7,SP_ARGS(%r15)
970 mvc 8(8,%r12),__LC_THREAD_INFO
978cleanup_vtime: 971cleanup_vtime:
979 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) 972 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
980 jhe cleanup_stime 973 jhe cleanup_stime
@@ -985,7 +978,11 @@ cleanup_stime:
985 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 978 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
986cleanup_update: 979cleanup_update:
987 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 980 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
988 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) 981 srag %r12,%r11,23
982 lg %r12,__LC_THREAD_INFO
983 jz 0f
984 stg %r11,__TI_last_break(%r12)
9850: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
989 la %r12,__LC_RETURN_PSW 986 la %r12,__LC_RETURN_PSW
990 br %r14 987 br %r14
991cleanup_system_call_insn: 988cleanup_system_call_insn:
@@ -995,55 +992,54 @@ cleanup_system_call_insn:
995 .quad sysc_stime 992 .quad sysc_stime
996 .quad sysc_update 993 .quad sysc_update
997 994
998cleanup_sysc_return: 995cleanup_sysc_tif:
999 mvc __LC_RETURN_PSW(8),0(%r12) 996 mvc __LC_RETURN_PSW(8),0(%r12)
1000 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return) 997 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
1001 la %r12,__LC_RETURN_PSW 998 la %r12,__LC_RETURN_PSW
1002 br %r14 999 br %r14
1003 1000
1004cleanup_sysc_leave: 1001cleanup_sysc_restore:
1005 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) 1002 clc 8(8,%r12),BASED(cleanup_sysc_restore_insn)
1006 je 3f 1003 je 2f
1007 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) 1004 clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
1008 jhe 0f 1005 jhe 0f
1006 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1007 cghi %r12,__LC_MCK_OLD_PSW
1008 je 0f
1009 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1009 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
10100: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) 10100: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
1011 cghi %r12,__LC_MCK_OLD_PSW 1011 cghi %r12,__LC_MCK_OLD_PSW
1012 jne 1f 1012 la %r12,__LC_SAVE_AREA+80
1013 mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) 1013 je 1f
1014 j 2f 1014 la %r12,__LC_SAVE_AREA+40
10151: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) 10151: mvc 0(40,%r12),SP_R11(%r15)
10162: lmg %r0,%r11,SP_R0(%r15) 1016 lmg %r0,%r10,SP_R0(%r15)
1017 lg %r15,SP_R15(%r15) 1017 lg %r15,SP_R15(%r15)
10183: la %r12,__LC_RETURN_PSW 10182: la %r12,__LC_RETURN_PSW
1019 br %r14 1019 br %r14
1020cleanup_sysc_leave_insn: 1020cleanup_sysc_restore_insn:
1021 .quad sysc_done - 4 1021 .quad sysc_done - 4
1022 .quad sysc_done - 16 1022 .quad sysc_done - 16
1023 1023
1024cleanup_io_return: 1024cleanup_io_tif:
1025 mvc __LC_RETURN_PSW(8),0(%r12) 1025 mvc __LC_RETURN_PSW(8),0(%r12)
1026 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) 1026 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
1027 la %r12,__LC_RETURN_PSW 1027 la %r12,__LC_RETURN_PSW
1028 br %r14 1028 br %r14
1029 1029
1030cleanup_io_leave: 1030cleanup_io_restore:
1031 clc 8(8,%r12),BASED(cleanup_io_leave_insn) 1031 clc 8(8,%r12),BASED(cleanup_io_restore_insn)
1032 je 3f 1032 je 1f
1033 clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) 1033 clc 8(8,%r12),BASED(cleanup_io_restore_insn+8)
1034 jhe 0f 1034 jhe 0f
1035 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1035 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
10360: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) 10360: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
1037 cghi %r12,__LC_MCK_OLD_PSW 1037 mvc __LC_SAVE_AREA+80(40),SP_R11(%r15)
1038 jne 1f 1038 lmg %r0,%r10,SP_R0(%r15)
1039 mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
1040 j 2f
10411: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
10422: lmg %r0,%r11,SP_R0(%r15)
1043 lg %r15,SP_R15(%r15) 1039 lg %r15,SP_R15(%r15)
10443: la %r12,__LC_RETURN_PSW 10401: la %r12,__LC_RETURN_PSW
1045 br %r14 1041 br %r14
1046cleanup_io_leave_insn: 1042cleanup_io_restore_insn:
1047 .quad io_done - 4 1043 .quad io_done - 4
1048 .quad io_done - 16 1044 .quad io_done - 16
1049 1045
@@ -1051,13 +1047,6 @@ cleanup_io_leave_insn:
1051 * Integer constants 1047 * Integer constants
1052 */ 1048 */
1053 .align 4 1049 .align 4
1054.Lconst:
1055.Lnr_syscalls: .long NR_syscalls
1056.L0x0130: .short 0x130
1057.L0x0140: .short 0x140
1058.L0x0150: .short 0x150
1059.L0x0160: .short 0x160
1060.L0x0170: .short 0x170
1061.Lcritical_start: 1050.Lcritical_start:
1062 .quad __critical_start 1051 .quad __critical_start
1063.Lcritical_end: 1052.Lcritical_end:
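
The renamed cleanup tables above pair a start and an end label for each critical range (sysc_tif..sysc_restore, sysc_restore..sysc_done, io_tif..io_restore, io_restore..io_done), and cleanup_critical walks those pairs: if the interrupted PSW address falls inside a range, it branches to the matching fixup routine. The following is only a userspace C sketch of that table lookup, with invented addresses standing in for the label pairs; it is not the kernel code itself.

#include <stdio.h>

struct cleanup_range {
	unsigned long start;	/* first address covered */
	unsigned long end;	/* first address past the range */
	const char *fixup;	/* stand-in for the cleanup routine */
};

/* Hypothetical addresses; the real table holds label pairs such as
 * { sysc_tif, sysc_restore } and { io_restore, io_done }. */
static const struct cleanup_range table[] = {
	{ 0x1000, 0x1040, "cleanup_system_call" },
	{ 0x1040, 0x1080, "cleanup_sysc_tif" },
	{ 0x1080, 0x10c0, "cleanup_sysc_restore" },
	{ 0x10c0, 0x1100, "cleanup_io_tif" },
	{ 0x1100, 0x1140, "cleanup_io_restore" },
};

static const char *cleanup_critical(unsigned long psw_addr)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (psw_addr >= table[i].start && psw_addr < table[i].end)
			return table[i].fixup;
	return NULL;	/* interrupted outside the critical section */
}

int main(void)
{
	printf("%s\n", cleanup_critical(0x10d0));	/* -> cleanup_io_tif */
	return 0;
}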
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 5a82bc68193e..6a83d0581317 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -13,7 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <trace/syscall.h> 15#include <trace/syscall.h>
16#include <asm/lowcore.h> 16#include <asm/asm-offsets.h>
17 17
18#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
19 19
@@ -200,13 +200,3 @@ out:
200 return parent; 200 return parent;
201} 201}
202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
203
204#ifdef CONFIG_FTRACE_SYSCALLS
205
206extern unsigned int sys_call_table[];
207
208unsigned long __init arch_syscall_addr(int nr)
209{
210 return (unsigned long)sys_call_table[nr];
211}
212#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index c52b4f7742fa..51838ad42d56 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 1999,2009 2 * Copyright IBM Corp. 1999,2010
3 * 3 *
4 * Author(s): Hartmut Penner <hp@de.ibm.com> 4 * Author(s): Hartmut Penner <hp@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -22,12 +22,9 @@
22 */ 22 */
23 23
24#include <linux/init.h> 24#include <linux/init.h>
25#include <asm/setup.h>
26#include <asm/lowcore.h>
27#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
28#include <asm/thread_info.h> 26#include <asm/thread_info.h>
29#include <asm/page.h> 27#include <asm/page.h>
30#include <asm/cpu.h>
31 28
32#ifdef CONFIG_64BIT 29#ifdef CONFIG_64BIT
33#define ARCH_OFFSET 4 30#define ARCH_OFFSET 4
@@ -288,19 +285,7 @@ iplstart:
288 bz .Lagain1 # skip dataset trailer 285 bz .Lagain1 # skip dataset trailer
289 la %r5,0(%r4,%r2) 286 la %r5,0(%r4,%r2)
290 lr %r3,%r2 287 lr %r3,%r2
291.Lidebc: 288 la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
292 tm 0(%r5),0x80 # high order bit set ?
293 bo .Ldocv # yes -> convert from EBCDIC
294 ahi %r5,-1
295 bct %r3,.Lidebc
296 b .Lnocv
297.Ldocv:
298 l %r3,.Lcvtab
299 tr 0(256,%r4),0(%r3) # convert parameters to ascii
300 tr 256(256,%r4),0(%r3)
301 tr 512(256,%r4),0(%r3)
302 tr 768(122,%r4),0(%r3)
303.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
304 mvc 0(256,%r3),0(%r4) 289 mvc 0(256,%r3),0(%r4)
305 mvc 256(256,%r3),256(%r4) 290 mvc 256(256,%r3),256(%r4)
306 mvc 512(256,%r3),512(%r4) 291 mvc 512(256,%r3),512(%r4)
@@ -343,8 +328,8 @@ iplstart:
343# 328#
344# reset files in VM reader 329# reset files in VM reader
345# 330#
346 stidp __LC_CPUID # store cpuid 331 stidp __LC_SAVE_AREA # store cpuid
347 tm __LC_CPUID,0xff # running VM ? 332 tm __LC_SAVE_AREA,0xff # running VM ?
348 bno .Lnoreset 333 bno .Lnoreset
349 la %r2,.Lreset 334 la %r2,.Lreset
350 lhi %r3,26 335 lhi %r3,26
@@ -384,7 +369,6 @@ iplstart:
384.Linitrd:.long _end + 0x400000 # default address of initrd 369.Linitrd:.long _end + 0x400000 # default address of initrd
385.Lparm: .long PARMAREA 370.Lparm: .long PARMAREA
386.Lstartup: .long startup 371.Lstartup: .long startup
387.Lcvtab:.long _ebcasc # ebcdic to ascii table
388.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 372.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
389 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 373 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
390 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" 374 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
@@ -417,13 +401,10 @@ start:
417.sk8x8: 401.sk8x8:
418 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer 402 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
419.gotr: 403.gotr:
420 l %r10,.tbl # EBCDIC to ASCII table
421 tr 0(240,%r8),0(%r10)
422 slr %r0,%r0 404 slr %r0,%r0
423 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) 405 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
424 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) 406 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
425 j startup # continue with startup 407 j startup # continue with startup
426.tbl: .long _ebcasc # translate table
427.cmd: .long COMMAND_LINE # address of command line buffer 408.cmd: .long COMMAND_LINE # address of command line buffer
428.parm: .long PARMAREA 409.parm: .long PARMAREA
429.lowcase: 410.lowcase:
@@ -467,16 +448,15 @@ start:
467# or linload or SALIPL 448# or linload or SALIPL
468# 449#
469 .org 0x10000 450 .org 0x10000
470startup:basr %r13,0 # get base 451 .globl startup
452startup:
453 basr %r13,0 # get base
471.LPG0: 454.LPG0:
472 xc 0x200(256),0x200 # partially clear lowcore 455 xc 0x200(256),0x200 # partially clear lowcore
473 xc 0x300(256),0x300 456 xc 0x300(256),0x300
474 l %r1,5f-.LPG0(%r13) 457 stck __LC_LAST_UPDATE_CLOCK
475 stck 0(%r1) 458 spt 5f-.LPG0(%r13)
476 spt 6f-.LPG0(%r13) 459 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
477 mvc __LC_LAST_UPDATE_CLOCK(8),0(%r1)
478 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
479 mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13)
480#ifndef CONFIG_MARCH_G5 460#ifndef CONFIG_MARCH_G5
481 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 461 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
482 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 462 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
@@ -494,7 +474,6 @@ startup:basr %r13,0 # get base
494 cl %r0,2f+12-.LPG0(%r13) 474 cl %r0,2f+12-.LPG0(%r13)
495 je 3f 475 je 3f
4961: l %r15,.Lstack-.LPG0(%r13) 4761: l %r15,.Lstack-.LPG0(%r13)
497 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
498 ahi %r15,-96 477 ahi %r15,-96
499 la %r2,.Lals_string-.LPG0(%r13) 478 la %r2,.Lals_string-.LPG0(%r13)
500 l %r3,.Lsclp_print-.LPG0(%r13) 479 l %r3,.Lsclp_print-.LPG0(%r13)
@@ -505,7 +484,7 @@ startup:basr %r13,0 # get base
505.Lsclp_print: 484.Lsclp_print:
506 .long _sclp_print_early 485 .long _sclp_print_early
507.Lstack: 486.Lstack:
508 .long init_thread_union 487 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
509 .align 16 488 .align 16
5102: .long 0x000a0000,0x8badcccc 4892: .long 0x000a0000,0x8badcccc
511#if defined(CONFIG_64BIT) 490#if defined(CONFIG_64BIT)
@@ -532,13 +511,25 @@ startup:basr %r13,0 # get base
5323: 5113:
533#endif 512#endif
534 513
514#ifdef CONFIG_64BIT
515 mvi __LC_AR_MODE_ID,1 # set esame flag
516 slr %r0,%r0 # set cpuid to zero
517 lhi %r1,2 # mode 2 = esame (dump)
518 sigp %r1,%r0,0x12 # switch to esame mode
519 sam64 # switch to 64 bit mode
520 larl %r13,4f
521 lmh %r0,%r15,0(%r13) # clear high-order half
522 jg startup_continue
5234: .fill 16,4,0x0
524#else
525 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
535 l %r13,4f-.LPG0(%r13) 526 l %r13,4f-.LPG0(%r13)
536 b 0(%r13) 527 b 0(%r13)
537 .align 4 528 .align 8
5384: .long startup_continue 5294: .long startup_continue
5395: .long sched_clock_base_cc 530#endif
540 .align 8 531 .align 8
5416: .long 0x7fffffff,0xffffffff 5325: .long 0x7fffffff,0xffffffff
542 533
543# 534#
544# params at 10400 (setup.h) 535# params at 10400 (setup.h)
@@ -552,8 +543,4 @@ startup:basr %r13,0 # get base
552 .byte "root=/dev/ram0 ro" 543 .byte "root=/dev/ram0 ro"
553 .byte 0 544 .byte 0
554 545
555#ifdef CONFIG_64BIT 546 .org 0x11000
556#include "head64.S"
557#else
558#include "head31.S"
559#endif
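
The deleted .Lidebc/.Ldocv block used the TR instruction with the _ebcasc table to convert the VM reader parameters from EBCDIC to ASCII before copying them to COMMAND_LINE; head.S now copies the raw bytes, presumably leaving the conversion to later C code (early.c is also touched by this series). For readers unfamiliar with TR: it replaces every byte of a buffer with the corresponding entry of a 256-byte translate table. A minimal standalone C illustration follows; the table entries are filled in only for the few EBCDIC codes used in the example and are not the real _ebcasc table.

#include <stdio.h>

/* TR semantics: buf[i] = table[buf[i]] for every byte of the operand. */
static void tr(unsigned char *buf, size_t len, const unsigned char *table)
{
	for (size_t i = 0; i < len; i++)
		buf[i] = table[buf[i]];
}

int main(void)
{
	unsigned char ebcasc[256] = { 0 };

	/* a handful of EBCDIC lowercase letters mapped to ASCII */
	ebcasc[0x81] = 'a';
	ebcasc[0x83] = 'c';
	ebcasc[0x84] = 'd';
	ebcasc[0x85] = 'e';
	ebcasc[0xa2] = 's';

	unsigned char parm[] = { 0x84, 0x81, 0xa2, 0x84 };	/* "dasd" in EBCDIC */
	tr(parm, sizeof(parm), ebcasc);
	printf("%.4s\n", (char *)parm);	/* -> dasd */
	return 0;
}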
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 602b508cd4c4..b8f8dc126102 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/s390/kernel/head31.S 2 * arch/s390/kernel/head31.S
3 * 3 *
4 * Copyright (C) IBM Corp. 2005,2006 4 * Copyright (C) IBM Corp. 2005,2010
5 * 5 *
6 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -10,13 +10,19 @@
10 * 10 *
11 */ 11 */
12 12
13 .org 0x11000 13#include <linux/init.h>
14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h>
16#include <asm/page.h>
14 17
18__HEAD
19 .globl startup_continue
15startup_continue: 20startup_continue:
16 basr %r13,0 # get base 21 basr %r13,0 # get base
17.LPG1: 22.LPG1:
18 23
19 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) 24 l %r1,.Lbase_cc-.LPG1(%r13)
25 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
20 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 26 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
21 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 27 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
22 # move IPL device to lowcore 28 # move IPL device to lowcore
@@ -69,12 +75,14 @@ startup_continue:
69.Lduald:.rept 8 75.Lduald:.rept 8
70 .long 0x80000000,0,0,0 # invalid access-list entries 76 .long 0x80000000,0,0,0 # invalid access-list entries
71 .endr 77 .endr
78.Lbase_cc:
79 .long sched_clock_base_cc
72 80
73 .org 0x12000
74 .globl _ehead 81 .globl _ehead
75_ehead: 82_ehead:
83
76#ifdef CONFIG_SHARED_KERNEL 84#ifdef CONFIG_SHARED_KERNEL
77 .org 0x100000 85 .org 0x100000 - 0x11000 # head.o ends at 0x11000
78#endif 86#endif
79 87
80# 88#
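
A short note on the changed .org: now that head31.S is assembled as its own object and placed behind head.o, which per the comment ends at 0x11000, the CONFIG_SHARED_KERNEL padding has to be expressed relative to the start of this object. Assuming the two objects stay contiguous in the linked image, an offset of 0x100000 - 0x11000 = 0xef000 inside head31.o still places the code that follows at the absolute address 0x100000, exactly as the old absolute .org 0x100000 did.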
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index d984a2a380c3..cdef68717416 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/s390/kernel/head64.S 2 * arch/s390/kernel/head64.S
3 * 3 *
4 * Copyright (C) IBM Corp. 1999,2006 4 * Copyright (C) IBM Corp. 1999,2010
5 * 5 *
6 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 * Author(s): Hartmut Penner <hp@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -10,81 +10,17 @@
10 * 10 *
11 */ 11 */
12 12
13 .org 0x11000 13#include <linux/init.h>
14#include <asm/asm-offsets.h>
15#include <asm/thread_info.h>
16#include <asm/page.h>
14 17
18__HEAD
19 .globl startup_continue
15startup_continue: 20startup_continue:
16 basr %r13,0 # get base 21 larl %r1,sched_clock_base_cc
17.LPG1: sll %r13,1 # remove high order bit 22 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
18 srl %r13,1 23 larl %r13,.LPG1 # get base
19
20#ifdef CONFIG_ZFCPDUMP
21
22 # check if we have been ipled using zfcp dump:
23
24 tm 0xb9,0x01 # test if subchannel is enabled
25 jno .nodump # subchannel disabled
26 l %r1,0xb8
27 la %r5,.Lipl_schib-.LPG1(%r13)
28 stsch 0(%r5) # get schib of subchannel
29 jne .nodump # schib not available
30 tm 5(%r5),0x01 # devno valid?
31 jno .nodump
32 tm 4(%r5),0x80 # qdio capable device?
33 jno .nodump
34 l %r2,20(%r0) # address of ipl parameter block
35 lhi %r3,0
36 ic %r3,0x148(%r2) # get opt field
37 chi %r3,0x20 # load with dump?
38 jne .nodump
39
40 # store all prefix registers in case of load with dump:
41
42 la %r7,0 # base register for 0 page
43 la %r8,0 # first cpu
44 l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array
45 ahi %r11,4 # skip boot cpu
46 lr %r12,%r11
47 ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array
48 stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr
491:
50 cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ?
51 je 4f # if yes get next cpu
522:
53 lr %r9,%r7
54 sigp %r9,%r8,0x9 # stop & store status of cpu
55 brc 8,3f # accepted
56 brc 4,4f # status stored: next cpu
57 brc 2,2b # busy: try again
58 brc 1,4f # not op: next cpu
593:
60 mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array
61 ahi %r11,4 # next element in prefix array
62 clr %r11,%r12
63 je 5f # no more space in prefix array
644:
65 ahi %r8,1 # next cpu (r8 += 1)
66 chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
67 jle 1b # jump if not last cpu
685:
69 lhi %r1,2 # mode 2 = esame (dump)
70 j 6f
71 .align 4
72.Lipl_schib:
73 .rept 13
74 .long 0
75 .endr
76.nodump:
77 lhi %r1,1 # mode 1 = esame (normal ipl)
786:
79#else
80 lhi %r1,1 # mode 1 = esame (normal ipl)
81#endif /* CONFIG_ZFCPDUMP */
82 mvi __LC_AR_MODE_ID,1 # set esame flag
83 slr %r0,%r0 # set cpuid to zero
84 sigp %r1,%r0,0x12 # switch to esame mode
85 sam64 # switch to 64 bit mode
86 llgfr %r13,%r13 # clear high-order half of base reg
87 lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half
88 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 24 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
89 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 25 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
90 # move IPL device to lowcore 26 # move IPL device to lowcore
@@ -108,6 +44,7 @@ startup_continue:
108 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, 44 lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
109 # virtual and never return ... 45 # virtual and never return ...
110 .align 16 46 .align 16
47.LPG1:
111.Lentry:.quad 0x0000000180000000,_stext 48.Lentry:.quad 0x0000000180000000,_stext
112.Lctl: .quad 0x04350002 # cr0: various things 49.Lctl: .quad 0x04350002 # cr0: various things
113 .quad 0 # cr1: primary space segment table 50 .quad 0 # cr1: primary space segment table
@@ -129,13 +66,6 @@ startup_continue:
129.L4malign:.quad 0xffffffffffc00000 66.L4malign:.quad 0xffffffffffc00000
130.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 67.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
131.Lnop: .long 0x07000700 68.Lnop: .long 0x07000700
132.Lzero64:.fill 16,4,0x0
133#ifdef CONFIG_ZFCPDUMP
134.Lcurrent_cpu:
135 .long 0x0
136.Lpref_arr_ptr:
137 .long zfcpdump_prefix_array
138#endif /* CONFIG_ZFCPDUMP */
139.Lparmaddr: 69.Lparmaddr:
140 .quad PARMAREA 70 .quad PARMAREA
141 .align 64 71 .align 64
@@ -146,11 +76,11 @@ startup_continue:
146 .long 0x80000000,0,0,0 # invalid access-list entries 76 .long 0x80000000,0,0,0 # invalid access-list entries
147 .endr 77 .endr
148 78
149 .org 0x12000
150 .globl _ehead 79 .globl _ehead
151_ehead: 80_ehead:
81
152#ifdef CONFIG_SHARED_KERNEL 82#ifdef CONFIG_SHARED_KERNEL
153 .org 0x100000 83 .org 0x100000 - 0x11000 # head.o ends at 0x11000
154#endif 84#endif
155 85
156# 86#
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 4d73296fed74..a689070be287 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -15,6 +15,7 @@
15#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/gfp.h>
18#include <asm/ipl.h> 19#include <asm/ipl.h>
19#include <asm/smp.h> 20#include <asm/smp.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
@@ -402,8 +403,9 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
402static struct kobj_attribute sys_ipl_device_attr = 403static struct kobj_attribute sys_ipl_device_attr =
403 __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL); 404 __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
404 405
405static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr, 406static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
406 char *buf, loff_t off, size_t count) 407 struct bin_attribute *attr, char *buf,
408 loff_t off, size_t count)
407{ 409{
408 return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, 410 return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
409 IPL_PARMBLOCK_SIZE); 411 IPL_PARMBLOCK_SIZE);
@@ -418,8 +420,9 @@ static struct bin_attribute ipl_parameter_attr = {
418 .read = &ipl_parameter_read, 420 .read = &ipl_parameter_read,
419}; 421};
420 422
421static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *attr, 423static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
422 char *buf, loff_t off, size_t count) 424 struct bin_attribute *attr, char *buf,
425 loff_t off, size_t count)
423{ 426{
424 unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len; 427 unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
425 void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data; 428 void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
@@ -553,7 +556,7 @@ out:
553 return rc; 556 return rc;
554} 557}
555 558
556static void ipl_run(struct shutdown_trigger *trigger) 559static void __ipl_run(void *unused)
557{ 560{
558 diag308(DIAG308_IPL, NULL); 561 diag308(DIAG308_IPL, NULL);
559 if (MACHINE_IS_VM) 562 if (MACHINE_IS_VM)
@@ -562,6 +565,11 @@ static void ipl_run(struct shutdown_trigger *trigger)
562 reipl_ccw_dev(&ipl_info.data.ccw.dev_id); 565 reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
563} 566}
564 567
568static void ipl_run(struct shutdown_trigger *trigger)
569{
570 smp_switch_to_ipl_cpu(__ipl_run, NULL);
571}
572
565static int __init ipl_init(void) 573static int __init ipl_init(void)
566{ 574{
567 int rc; 575 int rc;
@@ -688,7 +696,7 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
688 696
689/* FCP reipl device attributes */ 697/* FCP reipl device attributes */
690 698
691static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj, 699static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
692 struct bin_attribute *attr, 700 struct bin_attribute *attr,
693 char *buf, loff_t off, size_t count) 701 char *buf, loff_t off, size_t count)
694{ 702{
@@ -698,7 +706,7 @@ static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
698 return memory_read_from_buffer(buf, count, &off, scp_data, size); 706 return memory_read_from_buffer(buf, count, &off, scp_data, size);
699} 707}
700 708
701static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj, 709static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
702 struct bin_attribute *attr, 710 struct bin_attribute *attr,
703 char *buf, loff_t off, size_t count) 711 char *buf, loff_t off, size_t count)
704{ 712{
@@ -1039,7 +1047,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
1039 sprintf(dst + pos, " PARM %s", vmparm); 1047 sprintf(dst + pos, " PARM %s", vmparm);
1040} 1048}
1041 1049
1042static void reipl_run(struct shutdown_trigger *trigger) 1050static void __reipl_run(void *unused)
1043{ 1051{
1044 struct ccw_dev_id devid; 1052 struct ccw_dev_id devid;
1045 static char buf[128]; 1053 static char buf[128];
@@ -1087,6 +1095,11 @@ static void reipl_run(struct shutdown_trigger *trigger)
1087 disabled_wait((unsigned long) __builtin_return_address(0)); 1095 disabled_wait((unsigned long) __builtin_return_address(0));
1088} 1096}
1089 1097
1098static void reipl_run(struct shutdown_trigger *trigger)
1099{
1100 smp_switch_to_ipl_cpu(__reipl_run, NULL);
1101}
1102
1090static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) 1103static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
1091{ 1104{
1092 ipb->hdr.len = IPL_PARM_BLK_CCW_LEN; 1105 ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
@@ -1369,20 +1382,18 @@ static struct kobj_attribute dump_type_attr =
1369 1382
1370static struct kset *dump_kset; 1383static struct kset *dump_kset;
1371 1384
1372static void dump_run(struct shutdown_trigger *trigger) 1385static void __dump_run(void *unused)
1373{ 1386{
1374 struct ccw_dev_id devid; 1387 struct ccw_dev_id devid;
1375 static char buf[100]; 1388 static char buf[100];
1376 1389
1377 switch (dump_method) { 1390 switch (dump_method) {
1378 case DUMP_METHOD_CCW_CIO: 1391 case DUMP_METHOD_CCW_CIO:
1379 smp_send_stop();
1380 devid.devno = dump_block_ccw->ipl_info.ccw.devno; 1392 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
1381 devid.ssid = 0; 1393 devid.ssid = 0;
1382 reipl_ccw_dev(&devid); 1394 reipl_ccw_dev(&devid);
1383 break; 1395 break;
1384 case DUMP_METHOD_CCW_VM: 1396 case DUMP_METHOD_CCW_VM:
1385 smp_send_stop();
1386 sprintf(buf, "STORE STATUS"); 1397 sprintf(buf, "STORE STATUS");
1387 __cpcmd(buf, NULL, 0, NULL); 1398 __cpcmd(buf, NULL, 0, NULL);
1388 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); 1399 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
@@ -1396,10 +1407,17 @@ static void dump_run(struct shutdown_trigger *trigger)
1396 diag308(DIAG308_SET, dump_block_fcp); 1407 diag308(DIAG308_SET, dump_block_fcp);
1397 diag308(DIAG308_DUMP, NULL); 1408 diag308(DIAG308_DUMP, NULL);
1398 break; 1409 break;
1399 case DUMP_METHOD_NONE: 1410 default:
1400 return; 1411 break;
1401 } 1412 }
1402 printk(KERN_EMERG "Dump failed!\n"); 1413}
1414
1415static void dump_run(struct shutdown_trigger *trigger)
1416{
1417 if (dump_method == DUMP_METHOD_NONE)
1418 return;
1419 smp_send_stop();
1420 smp_switch_to_ipl_cpu(__dump_run, NULL);
1403} 1421}
1404 1422
1405static int __init dump_ccw_init(void) 1423static int __init dump_ccw_init(void)
@@ -1577,7 +1595,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
1577static int vmcmd_init(void) 1595static int vmcmd_init(void)
1578{ 1596{
1579 if (!MACHINE_IS_VM) 1597 if (!MACHINE_IS_VM)
1580 return -ENOTSUPP; 1598 return -EOPNOTSUPP;
1581 vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); 1599 vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
1582 if (!vmcmd_kset) 1600 if (!vmcmd_kset)
1583 return -ENOMEM; 1601 return -ENOMEM;
@@ -1595,7 +1613,7 @@ static void stop_run(struct shutdown_trigger *trigger)
1595{ 1613{
1596 if (strcmp(trigger->name, ON_PANIC_STR) == 0) 1614 if (strcmp(trigger->name, ON_PANIC_STR) == 0)
1597 disabled_wait((unsigned long) __builtin_return_address(0)); 1615 disabled_wait((unsigned long) __builtin_return_address(0));
1598 while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) 1616 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
1599 cpu_relax(); 1617 cpu_relax();
1600 for (;;); 1618 for (;;);
1601} 1619}
@@ -1902,7 +1920,6 @@ void __init ipl_update_parameters(void)
1902void __init ipl_save_parameters(void) 1920void __init ipl_save_parameters(void)
1903{ 1921{
1904 struct cio_iplinfo iplinfo; 1922 struct cio_iplinfo iplinfo;
1905 unsigned int *ipl_ptr;
1906 void *src, *dst; 1923 void *src, *dst;
1907 1924
1908 if (cio_get_iplinfo(&iplinfo)) 1925 if (cio_get_iplinfo(&iplinfo))
@@ -1913,11 +1930,10 @@ void __init ipl_save_parameters(void)
1913 if (!iplinfo.is_qdio) 1930 if (!iplinfo.is_qdio)
1914 return; 1931 return;
1915 ipl_flags |= IPL_PARMBLOCK_VALID; 1932 ipl_flags |= IPL_PARMBLOCK_VALID;
1916 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; 1933 src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
1917 src = (void *)(unsigned long)*ipl_ptr;
1918 dst = (void *)IPL_PARMBLOCK_ORIGIN; 1934 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1919 memmove(dst, src, PAGE_SIZE); 1935 memmove(dst, src, PAGE_SIZE);
1920 *ipl_ptr = IPL_PARMBLOCK_ORIGIN; 1936 S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
1921} 1937}
1922 1938
1923static LIST_HEAD(rcall); 1939static LIST_HEAD(rcall);
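
ipl_run(), reipl_run() and dump_run() now share one pattern: the actual work moves into a __xxx_run() callback and the shutdown trigger merely hands that callback to smp_switch_to_ipl_cpu() (implemented elsewhere in this series), so the re-IPL path always executes on the IPL CPU; dump_run() additionally stops the other CPUs first. What follows is only a hedged userspace sketch of the wrapper pattern with invented names; the stand-in switch function simply calls the callback instead of migrating to another CPU.

#include <stdio.h>

/* Stand-in for smp_switch_to_ipl_cpu(): in the kernel this moves execution
 * to the CPU the system was IPLed from; here it just invokes the callback. */
static void switch_to_ipl_cpu(void (*func)(void *), void *data)
{
	/* ...select the IPL CPU and switch stacks, then: */
	func(data);
}

static void __ipl_run(void *unused)
{
	(void)unused;
	printf("re-IPL work running on the IPL CPU\n");
}

static void ipl_run(void)
{
	switch_to_ipl_cpu(__ipl_run, NULL);
}

int main(void)
{
	ipl_run();
	return 0;
}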
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 86783efa24ee..2a3d2bf6f083 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -29,6 +29,7 @@
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30#include <asm/sections.h> 30#include <asm/sections.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h>
32 33
33DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 34DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
34DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 35DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -62,6 +63,8 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
62 case 0x0b: /* bsm */ 63 case 0x0b: /* bsm */
63 case 0x83: /* diag */ 64 case 0x83: /* diag */
64 case 0x44: /* ex */ 65 case 0x44: /* ex */
66 case 0xac: /* stnsm */
67 case 0xad: /* stosm */
65 return -EINVAL; 68 return -EINVAL;
66 } 69 }
67 switch (*(__u16 *) instruction) { 70 switch (*(__u16 *) instruction) {
@@ -71,6 +74,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
71 case 0xb258: /* bsg */ 74 case 0xb258: /* bsg */
72 case 0xb218: /* pc */ 75 case 0xb218: /* pc */
73 case 0xb228: /* pt */ 76 case 0xb228: /* pt */
77 case 0xb98d: /* epsw */
74 return -EINVAL; 78 return -EINVAL;
75 } 79 }
76 return 0; 80 return 0;
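
The kprobes hunk extends the opcode blacklist: stnsm (0xac), stosm (0xad) and epsw (0xb98d) cannot safely be single-stepped out of line, so is_prohibited_opcode() now rejects them as probe targets. Below is a standalone sketch of the same two-level check (first byte, then first halfword), abbreviated to the opcodes visible in this hunk and returning -1 instead of -EINVAL.

#include <stdio.h>
#include <stdint.h>

/* Returns -1 for instructions that must not be kprobed, 0 otherwise. */
static int is_prohibited_opcode(const uint8_t *insn)
{
	switch (insn[0]) {
	case 0x0b:	/* bsm */
	case 0x83:	/* diag */
	case 0x44:	/* ex */
	case 0xac:	/* stnsm */
	case 0xad:	/* stosm */
		return -1;
	}
	switch ((uint16_t)((insn[0] << 8) | insn[1])) {
	case 0xb258:	/* bsg */
	case 0xb218:	/* pc */
	case 0xb228:	/* pt */
	case 0xb98d:	/* epsw */
		return -1;
	}
	return 0;
}

int main(void)
{
	uint8_t epsw[2] = { 0xb9, 0x8d };

	printf("%d\n", is_prohibited_opcode(epsw));	/* -> -1 */
	return 0;
}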
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 131d7ee8b416..a922d51df6bf 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -54,11 +54,11 @@ void machine_shutdown(void)
54{ 54{
55} 55}
56 56
57void machine_kexec(struct kimage *image) 57static void __machine_kexec(void *data)
58{ 58{
59 relocate_kernel_t data_mover; 59 relocate_kernel_t data_mover;
60 struct kimage *image = data;
60 61
61 smp_send_stop();
62 pfault_fini(); 62 pfault_fini();
63 s390_reset_system(); 63 s390_reset_system();
64 64
@@ -68,3 +68,9 @@ void machine_kexec(struct kimage *image)
68 (*data_mover)(&image->head, image->start); 68 (*data_mover)(&image->head, image->start);
69 for (;;); 69 for (;;);
70} 70}
71
72void machine_kexec(struct kimage *image)
73{
74 smp_send_stop();
75 smp_switch_to_ipl_cpu(__machine_kexec, image);
76}
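
machine_kexec() adopts the same split as ipl_run() and reipl_run() above: the body moves into __machine_kexec(), while the public entry point stops the other CPUs and lets smp_switch_to_ipl_cpu() run the callback on the IPL CPU, so the wrapper sketch shown after the ipl.c hunk applies here unchanged.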
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 639380a0c45c..22cfd634c355 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -55,8 +55,10 @@ void *module_alloc(unsigned long size)
55/* Free memory returned from module_alloc */ 55/* Free memory returned from module_alloc */
56void module_free(struct module *mod, void *module_region) 56void module_free(struct module *mod, void *module_region)
57{ 57{
58 vfree(mod->arch.syminfo); 58 if (mod) {
59 mod->arch.syminfo = NULL; 59 vfree(mod->arch.syminfo);
60 mod->arch.syminfo = NULL;
61 }
60 vfree(module_region); 62 vfree(module_region);
61} 63}
62 64
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 015e27da40eb..ac151399ef34 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -255,7 +255,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
255 int umode; 255 int umode;
256 256
257 nmi_enter(); 257 nmi_enter();
258 s390_idle_check(); 258 s390_idle_check(regs, S390_lowcore.mcck_clock,
259 S390_lowcore.mcck_enter_timer);
259 260
260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 261 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
261 mcck = &__get_cpu_var(cpu_mcck); 262 mcck = &__get_cpu_var(cpu_mcck);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 00b6d1d292f2..1039fdea15b5 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -16,9 +16,9 @@
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/stddef.h> 18#include <linux/stddef.h>
19#include <linux/slab.h>
19#include <linux/unistd.h> 20#include <linux/unistd.h>
20#include <linux/ptrace.h> 21#include <linux/ptrace.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/user.h> 23#include <linux/user.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 0729f36c2fe3..ecb2d02b02e4 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,24 +18,42 @@
18#include <asm/lowcore.h> 18#include <asm/lowcore.h>
19#include <asm/param.h> 19#include <asm/param.h>
20 20
21static DEFINE_PER_CPU(struct cpuid, cpu_id);
22
23/*
24 * cpu_init - initializes state that is per-CPU.
25 */
26void __cpuinit cpu_init(void)
27{
28 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
29
30 get_cpu_id(id);
31 atomic_inc(&init_mm.mm_count);
32 current->active_mm = &init_mm;
33 BUG_ON(current->mm);
34 enter_lazy_tlb(&init_mm, current);
35}
36
37/*
38 * print_cpu_info - print basic information about a cpu
39 */
21void __cpuinit print_cpu_info(void) 40void __cpuinit print_cpu_info(void)
22{ 41{
42 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
43
23 pr_info("Processor %d started, address %d, identification %06X\n", 44 pr_info("Processor %d started, address %d, identification %06X\n",
24 S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, 45 S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident);
25 S390_lowcore.cpu_id.ident);
26} 46}
27 47
28/* 48/*
29 * show_cpuinfo - Get information on one CPU for use by procfs. 49 * show_cpuinfo - Get information on one CPU for use by procfs.
30 */ 50 */
31
32static int show_cpuinfo(struct seq_file *m, void *v) 51static int show_cpuinfo(struct seq_file *m, void *v)
33{ 52{
34 static const char *hwcap_str[10] = { 53 static const char *hwcap_str[10] = {
35 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 54 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
36 "edat", "etf3eh", "highgprs" 55 "edat", "etf3eh", "highgprs"
37 }; 56 };
38 struct _lowcore *lc;
39 unsigned long n = (unsigned long) v - 1; 57 unsigned long n = (unsigned long) v - 1;
40 int i; 58 int i;
41 59
@@ -55,19 +73,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
55 } 73 }
56 74
57 if (cpu_online(n)) { 75 if (cpu_online(n)) {
58#ifdef CONFIG_SMP 76 struct cpuid *id = &per_cpu(cpu_id, n);
59 lc = (smp_processor_id() == n) ?
60 &S390_lowcore : lowcore_ptr[n];
61#else
62 lc = &S390_lowcore;
63#endif
64 seq_printf(m, "processor %li: " 77 seq_printf(m, "processor %li: "
65 "version = %02X, " 78 "version = %02X, "
66 "identification = %06X, " 79 "identification = %06X, "
67 "machine = %04X\n", 80 "machine = %04X\n",
68 n, lc->cpu_id.version, 81 n, id->version, id->ident, id->machine);
69 lc->cpu_id.ident,
70 lc->cpu_id.machine);
71 } 82 }
72 preempt_enable(); 83 preempt_enable();
73 return 0; 84 return 0;
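
Instead of dereferencing a remote CPU's lowcore, the CPU identification is now captured once in cpu_init() and cached in a per-CPU variable that show_cpuinfo() can read for any online CPU. The following is a rough userspace analogue of that cache, with per-CPU storage modelled as a plain array and made-up identification values; it only illustrates the fill-once/read-anywhere idea, not the real DEFINE_PER_CPU machinery.

#include <stdio.h>

#define NR_CPUS 4

struct cpuid {
	unsigned int version;
	unsigned int ident;
	unsigned int machine;
};

/* per-CPU cache, filled once when each CPU is initialized */
static struct cpuid cpu_id[NR_CPUS];

/* stand-in for the STIDP-based get_cpu_id() */
static void get_cpu_id(int cpu, struct cpuid *id)
{
	id->version = 0x00;
	id->ident = 0x123456 + cpu;	/* made-up values */
	id->machine = 0x2097;
}

static void cpu_init(int cpu)
{
	get_cpu_id(cpu, &cpu_id[cpu]);
}

static void show_cpuinfo(int cpu)
{
	struct cpuid *id = &cpu_id[cpu];

	printf("processor %d: version = %02X, identification = %06X, machine = %04X\n",
	       cpu, id->version, id->ident, id->machine);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_init(cpu);
	show_cpuinfo(2);
	return 0;
}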
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7cf464234419..83339d33c4b1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -57,6 +57,7 @@
57enum s390_regset { 57enum s390_regset {
58 REGSET_GENERAL, 58 REGSET_GENERAL,
59 REGSET_FP, 59 REGSET_FP,
60 REGSET_LAST_BREAK,
60 REGSET_GENERAL_EXTENDED, 61 REGSET_GENERAL_EXTENDED,
61}; 62};
62 63
@@ -381,6 +382,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
381 copied += sizeof(unsigned long); 382 copied += sizeof(unsigned long);
382 } 383 }
383 return 0; 384 return 0;
385 case PTRACE_GET_LAST_BREAK:
386 put_user(task_thread_info(child)->last_break,
387 (unsigned long __user *) data);
388 return 0;
384 default: 389 default:
385 /* Removing high order bit from addr (only for 31 bit). */ 390 /* Removing high order bit from addr (only for 31 bit). */
386 addr &= PSW_ADDR_INSN; 391 addr &= PSW_ADDR_INSN;
@@ -633,6 +638,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
633 copied += sizeof(unsigned int); 638 copied += sizeof(unsigned int);
634 } 639 }
635 return 0; 640 return 0;
641 case PTRACE_GET_LAST_BREAK:
642 put_user(task_thread_info(child)->last_break,
643 (unsigned int __user *) data);
644 return 0;
636 } 645 }
637 return compat_ptrace_request(child, request, addr, data); 646 return compat_ptrace_request(child, request, addr, data);
638} 647}
@@ -640,7 +649,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
640 649
641asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) 650asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
642{ 651{
643 long ret; 652 long ret = 0;
644 653
645 /* Do the secure computing check first. */ 654 /* Do the secure computing check first. */
646 secure_computing(regs->gprs[2]); 655 secure_computing(regs->gprs[2]);
@@ -649,7 +658,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
649 * The sysc_tracesys code in entry.S stored the system 658 * The sysc_tracesys code in entry.S stored the system
650 * call number to gprs[2]. 659 * call number to gprs[2].
651 */ 660 */
652 ret = regs->gprs[2];
653 if (test_thread_flag(TIF_SYSCALL_TRACE) && 661 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
654 (tracehook_report_syscall_entry(regs) || 662 (tracehook_report_syscall_entry(regs) ||
655 regs->gprs[2] >= NR_syscalls)) { 663 regs->gprs[2] >= NR_syscalls)) {
@@ -671,7 +679,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
671 regs->gprs[2], regs->orig_gpr2, 679 regs->gprs[2], regs->orig_gpr2,
672 regs->gprs[3], regs->gprs[4], 680 regs->gprs[3], regs->gprs[4],
673 regs->gprs[5]); 681 regs->gprs[5]);
674 return ret; 682 return ret ?: regs->gprs[2];
675} 683}
676 684
677asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) 685asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
@@ -798,6 +806,28 @@ static int s390_fpregs_set(struct task_struct *target,
798 return rc; 806 return rc;
799} 807}
800 808
809#ifdef CONFIG_64BIT
810
811static int s390_last_break_get(struct task_struct *target,
812 const struct user_regset *regset,
813 unsigned int pos, unsigned int count,
814 void *kbuf, void __user *ubuf)
815{
816 if (count > 0) {
817 if (kbuf) {
818 unsigned long *k = kbuf;
819 *k = task_thread_info(target)->last_break;
820 } else {
821 unsigned long __user *u = ubuf;
822 if (__put_user(task_thread_info(target)->last_break, u))
823 return -EFAULT;
824 }
825 }
826 return 0;
827}
828
829#endif
830
801static const struct user_regset s390_regsets[] = { 831static const struct user_regset s390_regsets[] = {
802 [REGSET_GENERAL] = { 832 [REGSET_GENERAL] = {
803 .core_note_type = NT_PRSTATUS, 833 .core_note_type = NT_PRSTATUS,
@@ -815,6 +845,15 @@ static const struct user_regset s390_regsets[] = {
815 .get = s390_fpregs_get, 845 .get = s390_fpregs_get,
816 .set = s390_fpregs_set, 846 .set = s390_fpregs_set,
817 }, 847 },
848#ifdef CONFIG_64BIT
849 [REGSET_LAST_BREAK] = {
850 .core_note_type = NT_S390_LAST_BREAK,
851 .n = 1,
852 .size = sizeof(long),
853 .align = sizeof(long),
854 .get = s390_last_break_get,
855 },
856#endif
818}; 857};
819 858
820static const struct user_regset_view user_s390_view = { 859static const struct user_regset_view user_s390_view = {
@@ -949,6 +988,27 @@ static int s390_compat_regs_high_set(struct task_struct *target,
949 return rc; 988 return rc;
950} 989}
951 990
991static int s390_compat_last_break_get(struct task_struct *target,
992 const struct user_regset *regset,
993 unsigned int pos, unsigned int count,
994 void *kbuf, void __user *ubuf)
995{
996 compat_ulong_t last_break;
997
998 if (count > 0) {
999 last_break = task_thread_info(target)->last_break;
1000 if (kbuf) {
1001 unsigned long *k = kbuf;
1002 *k = last_break;
1003 } else {
1004 unsigned long __user *u = ubuf;
1005 if (__put_user(last_break, u))
1006 return -EFAULT;
1007 }
1008 }
1009 return 0;
1010}
1011
952static const struct user_regset s390_compat_regsets[] = { 1012static const struct user_regset s390_compat_regsets[] = {
953 [REGSET_GENERAL] = { 1013 [REGSET_GENERAL] = {
954 .core_note_type = NT_PRSTATUS, 1014 .core_note_type = NT_PRSTATUS,
@@ -966,6 +1026,13 @@ static const struct user_regset s390_compat_regsets[] = {
966 .get = s390_fpregs_get, 1026 .get = s390_fpregs_get,
967 .set = s390_fpregs_set, 1027 .set = s390_fpregs_set,
968 }, 1028 },
1029 [REGSET_LAST_BREAK] = {
1030 .core_note_type = NT_S390_LAST_BREAK,
1031 .n = 1,
1032 .size = sizeof(long),
1033 .align = sizeof(long),
1034 .get = s390_compat_last_break_get,
1035 },
969 [REGSET_GENERAL_EXTENDED] = { 1036 [REGSET_GENERAL_EXTENDED] = {
970 .core_note_type = NT_S390_HIGH_GPRS, 1037 .core_note_type = NT_S390_HIGH_GPRS,
971 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1038 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
@@ -992,3 +1059,61 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
992#endif 1059#endif
993 return &user_s390_view; 1060 return &user_s390_view;
994} 1061}
1062
1063static const char *gpr_names[NUM_GPRS] = {
1064 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
1065 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
1066};
1067
1068unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1069{
1070 if (offset >= NUM_GPRS)
1071 return 0;
1072 return regs->gprs[offset];
1073}
1074
1075int regs_query_register_offset(const char *name)
1076{
1077 unsigned long offset;
1078
1079 if (!name || *name != 'r')
1080 return -EINVAL;
1081 if (strict_strtoul(name + 1, 10, &offset))
1082 return -EINVAL;
1083 if (offset >= NUM_GPRS)
1084 return -EINVAL;
1085 return offset;
1086}
1087
1088const char *regs_query_register_name(unsigned int offset)
1089{
1090 if (offset >= NUM_GPRS)
1091 return NULL;
1092 return gpr_names[offset];
1093}
1094
1095static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1096{
1097 unsigned long ksp = kernel_stack_pointer(regs);
1098
1099 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1100}
1101
1102/**
1103 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1104 * @regs:pt_regs which contains kernel stack pointer.
1105 * @n:stack entry number.
1106 *
1107 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
1108 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
1109 * this returns 0.
1110 */
1111unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
1112{
1113 unsigned long addr;
1114
1115 addr = kernel_stack_pointer(regs) + n * sizeof(long);
1116 if (!regs_within_kernel_stack(regs, addr))
1117 return 0;
1118 return *(unsigned long *)addr;
1119}
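
The new regs_query_register_offset()/regs_get_register()/regs_get_kernel_stack_nth() helpers give kprobes-style tracers a name-based way to fetch general registers and a bounds-checked way to peek at the kernel stack. Below is a standalone sketch of the same logic under simplifying assumptions: strtoul replaces strict_strtoul, THREAD_SIZE is an invented constant, and the stack is passed in explicitly instead of being dereferenced through the saved kernel stack pointer.

#include <stdio.h>
#include <stdlib.h>

#define NUM_GPRS	16
#define THREAD_SIZE	(4UL * 4096)	/* assumption for the sketch */

struct pt_regs {
	unsigned long gprs[NUM_GPRS];
};

/* "r0".."r15" -> 0..15, -1 on anything else */
static int regs_query_register_offset(const char *name)
{
	char *end;
	unsigned long offset;

	if (!name || *name != 'r')
		return -1;
	offset = strtoul(name + 1, &end, 10);
	if (*end != '\0' || offset >= NUM_GPRS)
		return -1;
	return (int)offset;
}

static unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	return offset < NUM_GPRS ? regs->gprs[offset] : 0;
}

/* nth slot above the saved kernel stack pointer, 0 if it leaves the stack */
static unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
					       unsigned long *stack, unsigned int n)
{
	unsigned long ksp = regs->gprs[15];	/* r15 is the stack pointer */
	unsigned long addr = ksp + n * sizeof(long);

	if ((addr & ~(THREAD_SIZE - 1)) != (ksp & ~(THREAD_SIZE - 1)))
		return 0;
	return stack[n];	/* the kernel reads *(unsigned long *)addr */
}

int main(void)
{
	struct pt_regs regs = { .gprs = { [2] = 42, [15] = 0x8000 } };
	unsigned long stack[4] = { 1, 2, 3, 4 };

	printf("r2 = %lu\n", regs_get_register(&regs, regs_query_register_offset("r2")));
	printf("stack[1] = %lu\n", regs_get_kernel_stack_nth(&regs, stack, 1));
	return 0;
}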
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 2f481cc3d1c9..cb899d9f8505 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,7 +6,7 @@
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) 6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
7 */ 7 */
8 8
9#include <asm/lowcore.h> 9#include <asm/asm-offsets.h>
10 10
11# 11#
12# do_reipl_asm 12# do_reipl_asm
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 774147824c3d..5e73dee63baa 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -4,7 +4,7 @@
4 * Denis Joseph Barrow, 4 * Denis Joseph Barrow,
5 */ 5 */
6 6
7#include <asm/lowcore.h> 7#include <asm/asm-offsets.h>
8 8
9# 9#
10# do_reipl_asm 10# do_reipl_asm
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 59618bcd99b7..9ce641b5291f 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -120,7 +120,8 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
120 struct pt_regs *old_regs; 120 struct pt_regs *old_regs;
121 121
122 old_regs = set_irq_regs(regs); 122 old_regs = set_irq_regs(regs);
123 s390_idle_check(); 123 s390_idle_check(regs, S390_lowcore.int_clock,
124 S390_lowcore.async_enter_timer);
124 irq_enter(); 125 irq_enter();
125 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 126 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
126 /* Serve timer interrupts first. */ 127 /* Serve timer interrupts first. */
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index e27ca63076d1..2e82fdd89320 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -9,8 +9,10 @@
9 */ 9 */
10 10
11LC_EXT_NEW_PSW = 0x58 # addr of ext int handler 11LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
12LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
12LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter 13LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter
13LC_EXT_INT_CODE = 0x86 # addr of ext int code 14LC_EXT_INT_CODE = 0x86 # addr of ext int code
15LC_AR_MODE_ID = 0xa3
14 16
15# 17#
16# Subroutine which waits synchronously until either an external interruption 18# Subroutine which waits synchronously until either an external interruption
@@ -30,8 +32,16 @@ _sclp_wait_int:
30.LbaseS1: 32.LbaseS1:
31 ahi %r15,-96 # create stack frame 33 ahi %r15,-96 # create stack frame
32 la %r8,LC_EXT_NEW_PSW # register int handler 34 la %r8,LC_EXT_NEW_PSW # register int handler
33 mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8) 35 la %r9,.LextpswS1-.LbaseS1(%r13)
34 mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13) 36#ifdef CONFIG_64BIT
37 tm LC_AR_MODE_ID,1
38 jno .Lesa1
39 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
40 la %r9,.LextpswS1_64-.LbaseS1(%r13)
41.Lesa1:
42#endif
43 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
44 mvc 0(16,%r8),0(%r9)
35 lhi %r6,0x0200 # cr mask for ext int (cr0.54) 45 lhi %r6,0x0200 # cr mask for ext int (cr0.54)
36 ltr %r2,%r2 46 ltr %r2,%r2
37 jz .LsetctS1 47 jz .LsetctS1
@@ -64,15 +74,19 @@ _sclp_wait_int:
64.LtimeoutS1: 74.LtimeoutS1:
65 lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting 75 lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting
66 # restore old handler 76 # restore old handler
67 mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13) 77 mvc 0(16,%r8),.LoldpswS1-.LbaseS1(%r13)
68 lm %r6,%r15,120(%r15) # restore registers 78 lm %r6,%r15,120(%r15) # restore registers
69 br %r14 # return to caller 79 br %r14 # return to caller
70 80
71 .align 8 81 .align 8
72.LoldpswS1: 82.LoldpswS1:
73 .long 0, 0 # old ext int PSW 83 .long 0, 0, 0, 0 # old ext int PSW
74.LextpswS1: 84.LextpswS1:
75 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 85 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
86#ifdef CONFIG_64BIT
87.LextpswS1_64:
88 .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit
89#endif
76.LwaitpswS1: 90.LwaitpswS1:
77 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int 91 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
78.LtimeS1: 92.LtimeS1:
@@ -221,7 +235,7 @@ _sclp_print:
221 lh %r9,0(%r8) # update sccb length 235 lh %r9,0(%r8) # update sccb length
222 ar %r9,%r6 236 ar %r9,%r6
223 sth %r9,0(%r8) 237 sth %r9,0(%r8)
224 ar %r7,%r6 # update current mto adress 238 ar %r7,%r6 # update current mto address
225 ltr %r0,%r0 # more characters? 239 ltr %r0,%r0 # more characters?
226 jnz .LinitmtoS4 240 jnz .LinitmtoS4
227 l %r2,.LwritedataS4-.LbaseS4(%r13)# write data 241 l %r2,.LwritedataS4-.LbaseS4(%r13)# write data
@@ -250,6 +264,13 @@ _sclp_print:
250_sclp_print_early: 264_sclp_print_early:
251 stm %r6,%r15,24(%r15) # save registers 265 stm %r6,%r15,24(%r15) # save registers
252 ahi %r15,-96 # create stack frame 266 ahi %r15,-96 # create stack frame
267#ifdef CONFIG_64BIT
268 tm LC_AR_MODE_ID,1
269 jno .Lesa2
270 ahi %r15,-80
271 stmh %r6,%r15,96(%r15) # store upper register halves
272.Lesa2:
273#endif
253 lr %r10,%r2 # save string pointer 274 lr %r10,%r2 # save string pointer
254 lhi %r2,0 275 lhi %r2,0
255 bras %r14,_sclp_setup # enable console 276 bras %r14,_sclp_setup # enable console
@@ -262,6 +283,13 @@ _sclp_print_early:
262 lhi %r2,1 283 lhi %r2,1
263 bras %r14,_sclp_setup # disable console 284 bras %r14,_sclp_setup # disable console
264.LendS5: 285.LendS5:
286#ifdef CONFIG_64BIT
287 tm LC_AR_MODE_ID,1
288 jno .Lesa3
289 lmh %r6,%r15,96(%r15) # restore upper register halves
290 ahi %r15,80
291.Lesa3:
292#endif
265 lm %r6,%r15,120(%r15) # restore registers 293 lm %r6,%r15,120(%r15) # restore registers
266 br %r14 294 br %r14
267 295
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8d8957b38ab3..c8e8e1354e1d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/setup.c 2 * arch/s390/kernel/setup.c
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Copyright (C) IBM Corp. 1999,2010
6 * Author(s): Hartmut Penner (hp@de.ibm.com), 6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * 8 *
@@ -25,7 +25,6 @@
25#include <linux/stddef.h> 25#include <linux/stddef.h>
26#include <linux/unistd.h> 26#include <linux/unistd.h>
27#include <linux/ptrace.h> 27#include <linux/ptrace.h>
28#include <linux/slab.h>
29#include <linux/user.h> 28#include <linux/user.h>
30#include <linux/tty.h> 29#include <linux/tty.h>
31#include <linux/ioport.h> 30#include <linux/ioport.h>
@@ -114,22 +113,6 @@ static struct resource data_resource = {
114}; 113};
115 114
116/* 115/*
117 * cpu_init() initializes state that is per-CPU.
118 */
119void __cpuinit cpu_init(void)
120{
121 /*
122 * Store processor id in lowcore (used e.g. in timer_interrupt)
123 */
124 get_cpu_id(&S390_lowcore.cpu_id);
125
126 atomic_inc(&init_mm.mm_count);
127 current->active_mm = &init_mm;
128 BUG_ON(current->mm);
129 enter_lazy_tlb(&init_mm, current);
130}
131
132/*
133 * condev= and conmode= setup parameter. 116 * condev= and conmode= setup parameter.
134 */ 117 */
135 118
@@ -386,25 +369,18 @@ static void setup_addressing_mode(void)
386 pr_info("Address spaces switched, " 369 pr_info("Address spaces switched, "
387 "mvcos not available\n"); 370 "mvcos not available\n");
388 } 371 }
389#ifdef CONFIG_TRACE_IRQFLAGS
390 sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
391 io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
392#endif
393} 372}
394 373
395static void __init 374static void __init
396setup_lowcore(void) 375setup_lowcore(void)
397{ 376{
398 struct _lowcore *lc; 377 struct _lowcore *lc;
399 int lc_pages;
400 378
401 /* 379 /*
402 * Setup lowcore for boot cpu 380 * Setup lowcore for boot cpu
403 */ 381 */
404 lc_pages = sizeof(void *) == 8 ? 2 : 1; 382 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
405 lc = (struct _lowcore *) 383 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
406 __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
407 memset(lc, 0, lc_pages * PAGE_SIZE);
408 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 384 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
409 lc->restart_psw.addr = 385 lc->restart_psw.addr =
410 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 386 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
@@ -436,11 +412,12 @@ setup_lowcore(void)
436#ifndef CONFIG_64BIT 412#ifndef CONFIG_64BIT
437 if (MACHINE_HAS_IEEE) { 413 if (MACHINE_HAS_IEEE) {
438 lc->extended_save_area_addr = (__u32) 414 lc->extended_save_area_addr = (__u32)
439 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); 415 __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
440 /* enable extended save area */ 416 /* enable extended save area */
441 __ctl_set_bit(14, 29); 417 __ctl_set_bit(14, 29);
442 } 418 }
443#else 419#else
420 lc->cmf_hpp = -1ULL;
444 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 421 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
445#endif 422#endif
446 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 423 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -699,6 +676,7 @@ static void __init setup_hwcaps(void)
699 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; 676 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
700 unsigned long long facility_list_extended; 677 unsigned long long facility_list_extended;
701 unsigned int facility_list; 678 unsigned int facility_list;
679 struct cpuid cpu_id;
702 int i; 680 int i;
703 681
704 facility_list = stfl(); 682 facility_list = stfl();
@@ -760,7 +738,8 @@ static void __init setup_hwcaps(void)
760 */ 738 */
761 elf_hwcap |= HWCAP_S390_HIGH_GPRS; 739 elf_hwcap |= HWCAP_S390_HIGH_GPRS;
762 740
763 switch (S390_lowcore.cpu_id.machine) { 741 get_cpu_id(&cpu_id);
742 switch (cpu_id.machine) {
764 case 0x9672: 743 case 0x9672:
765#if !defined(CONFIG_64BIT) 744#if !defined(CONFIG_64BIT)
766 default: /* Use "g5" as default for 31 bit kernels. */ 745 default: /* Use "g5" as default for 31 bit kernels. */
@@ -804,7 +783,7 @@ setup_arch(char **cmdline_p)
804 if (MACHINE_IS_VM) 783 if (MACHINE_IS_VM)
805 pr_info("Linux is running as a z/VM " 784 pr_info("Linux is running as a z/VM "
806 "guest operating system in 31-bit mode\n"); 785 "guest operating system in 31-bit mode\n");
807 else 786 else if (MACHINE_IS_LPAR)
808 pr_info("Linux is running natively in 31-bit mode\n"); 787 pr_info("Linux is running natively in 31-bit mode\n");
809 if (MACHINE_HAS_IEEE) 788 if (MACHINE_HAS_IEEE)
810 pr_info("The hardware system has IEEE compatible " 789 pr_info("The hardware system has IEEE compatible "
@@ -818,7 +797,7 @@ setup_arch(char **cmdline_p)
818 "guest operating system in 64-bit mode\n"); 797 "guest operating system in 64-bit mode\n");
819 else if (MACHINE_IS_KVM) 798 else if (MACHINE_IS_KVM)
820 pr_info("Linux is running under KVM in 64-bit mode\n"); 799 pr_info("Linux is running under KVM in 64-bit mode\n");
821 else 800 else if (MACHINE_IS_LPAR)
822 pr_info("Linux is running natively in 64-bit mode\n"); 801 pr_info("Linux is running natively in 64-bit mode\n");
823#endif /* CONFIG_64BIT */ 802#endif /* CONFIG_64BIT */
824 803
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6289945562b0..ee7ac8b11782 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -313,6 +313,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
313 To avoid breaking binary compatibility, they are passed as args. */ 313 To avoid breaking binary compatibility, they are passed as args. */
314 regs->gprs[4] = current->thread.trap_no; 314 regs->gprs[4] = current->thread.trap_no;
315 regs->gprs[5] = current->thread.prot_addr; 315 regs->gprs[5] = current->thread.prot_addr;
316 regs->gprs[6] = task_thread_info(current)->last_break;
316 317
317 /* Place signal number on stack to allow backtrace from handler. */ 318 /* Place signal number on stack to allow backtrace from handler. */
318 if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) 319 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
@@ -376,6 +377,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
376 regs->gprs[2] = map_signal(sig); 377 regs->gprs[2] = map_signal(sig);
377 regs->gprs[3] = (unsigned long) &frame->info; 378 regs->gprs[3] = (unsigned long) &frame->info;
378 regs->gprs[4] = (unsigned long) &frame->uc; 379 regs->gprs[4] = (unsigned long) &frame->uc;
380 regs->gprs[5] = task_thread_info(current)->last_break;
379 return 0; 381 return 0;
380 382
381give_sigsegv: 383give_sigsegv:
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 76a6fdd46c45..541053ed234e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -36,6 +36,8 @@
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/timex.h> 37#include <linux/timex.h>
38#include <linux/bootmem.h> 38#include <linux/bootmem.h>
39#include <linux/slab.h>
40#include <asm/asm-offsets.h>
39#include <asm/ipl.h> 41#include <asm/ipl.h>
40#include <asm/setup.h> 42#include <asm/setup.h>
41#include <asm/sigp.h> 43#include <asm/sigp.h>
@@ -53,7 +55,7 @@
53#include "entry.h" 55#include "entry.h"
54 56
55/* logical cpu to cpu address */ 57/* logical cpu to cpu address */
56int __cpu_logical_map[NR_CPUS]; 58unsigned short __cpu_logical_map[NR_CPUS];
57 59
58static struct task_struct *current_set[NR_CPUS]; 60static struct task_struct *current_set[NR_CPUS];
59 61
@@ -72,13 +74,13 @@ static int cpu_management;
72 74
73static DEFINE_PER_CPU(struct cpu, cpu_devices); 75static DEFINE_PER_CPU(struct cpu, cpu_devices);
74 76
75static void smp_ext_bitcall(int, ec_bit_sig); 77static void smp_ext_bitcall(int, int);
76 78
77static int cpu_stopped(int cpu) 79static int raw_cpu_stopped(int cpu)
78{ 80{
79 __u32 status; 81 u32 status;
80 82
81 switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { 83 switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
82 case sigp_status_stored: 84 case sigp_status_stored:
83 /* Check for stopped and check stop state */ 85 /* Check for stopped and check stop state */
84 if (status & 0x50) 86 if (status & 0x50)
@@ -90,6 +92,44 @@ static int cpu_stopped(int cpu)
90 return 0; 92 return 0;
91} 93}
92 94
95static inline int cpu_stopped(int cpu)
96{
97 return raw_cpu_stopped(cpu_logical_map(cpu));
98}
99
100void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
101{
102 struct _lowcore *lc, *current_lc;
103 struct stack_frame *sf;
104 struct pt_regs *regs;
105 unsigned long sp;
106
107 if (smp_processor_id() == 0)
108 func(data);
109 __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
110 /* Disable lowcore protection */
111 __ctl_clear_bit(0, 28);
112 current_lc = lowcore_ptr[smp_processor_id()];
113 lc = lowcore_ptr[0];
114 if (!lc)
115 lc = current_lc;
116 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
117 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
118 if (!cpu_online(0))
119 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
120 while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
121 cpu_relax();
122 sp = lc->panic_stack;
123 sp -= sizeof(struct pt_regs);
124 regs = (struct pt_regs *) sp;
125 memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
126 regs->psw = lc->psw_save_area;
127 sp -= STACK_FRAME_OVERHEAD;
128 sf = (struct stack_frame *) sp;
129 sf->back_chain = regs->gprs[15];
130 smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
131}
132
93void smp_send_stop(void) 133void smp_send_stop(void)
94{ 134{
95 int cpu, rc; 135 int cpu, rc;
@@ -103,7 +143,7 @@ void smp_send_stop(void)
103 if (cpu == smp_processor_id()) 143 if (cpu == smp_processor_id())
104 continue; 144 continue;
105 do { 145 do {
106 rc = signal_processor(cpu, sigp_stop); 146 rc = sigp(cpu, sigp_stop);
107 } while (rc == sigp_busy); 147 } while (rc == sigp_busy);
108 148
109 while (!cpu_stopped(cpu)) 149 while (!cpu_stopped(cpu))
@@ -139,13 +179,13 @@ static void do_ext_call_interrupt(__u16 code)
139 * Send an external call sigp to another cpu and return without waiting 179 * Send an external call sigp to another cpu and return without waiting
140 * for its completion. 180 * for its completion.
141 */ 181 */
142static void smp_ext_bitcall(int cpu, ec_bit_sig sig) 182static void smp_ext_bitcall(int cpu, int sig)
143{ 183{
144 /* 184 /*
145 * Set signaling bit in lowcore of target cpu and kick it 185 * Set signaling bit in lowcore of target cpu and kick it
146 */ 186 */
147 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 187 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
148 while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) 188 while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
149 udelay(10); 189 udelay(10);
150} 190}
151 191
@@ -239,24 +279,8 @@ void smp_ctl_clear_bit(int cr, int bit)
239} 279}
240EXPORT_SYMBOL(smp_ctl_clear_bit); 280EXPORT_SYMBOL(smp_ctl_clear_bit);
241 281
242/*
243 * In early ipl state a temp. logically cpu number is needed, so the sigp
244 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
245 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
246 */
247#define CPU_INIT_NO 1
248
249#ifdef CONFIG_ZFCPDUMP 282#ifdef CONFIG_ZFCPDUMP
250 283
251/*
252 * zfcpdump_prefix_array holds prefix registers for the following scenario:
253 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
254 * save its prefix registers, since they get lost, when switching from 31 bit
255 * to 64 bit.
256 */
257unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
258 __attribute__((__section__(".data")));
259
260static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) 284static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
261{ 285{
262 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 286 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
@@ -266,21 +290,15 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
266 "the dump\n", cpu, NR_CPUS - 1); 290 "the dump\n", cpu, NR_CPUS - 1);
267 return; 291 return;
268 } 292 }
269 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); 293 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
270 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; 294 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
271 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
272 sigp_busy)
273 cpu_relax(); 295 cpu_relax();
274 memcpy(zfcpdump_save_areas[cpu], 296 memcpy_real(zfcpdump_save_areas[cpu],
275 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 297 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
276 SAVE_AREA_SIZE); 298 sizeof(struct save_area));
277#ifdef CONFIG_64BIT
278 /* copy original prefix register */
279 zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
280#endif
281} 299}
282 300
283union save_area *zfcpdump_save_areas[NR_CPUS + 1]; 301struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
284EXPORT_SYMBOL_GPL(zfcpdump_save_areas); 302EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
285 303
286#else 304#else
@@ -389,8 +407,7 @@ static void __init smp_detect_cpus(void)
389 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { 407 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
390 if (cpu == boot_cpu_addr) 408 if (cpu == boot_cpu_addr)
391 continue; 409 continue;
392 __cpu_logical_map[CPU_INIT_NO] = cpu; 410 if (!raw_cpu_stopped(cpu))
393 if (!cpu_stopped(CPU_INIT_NO))
394 continue; 411 continue;
395 smp_get_save_area(c_cpus, cpu); 412 smp_get_save_area(c_cpus, cpu);
396 c_cpus++; 413 c_cpus++;
@@ -413,8 +430,7 @@ static void __init smp_detect_cpus(void)
413 cpu_addr = info->cpu[cpu].address; 430 cpu_addr = info->cpu[cpu].address;
414 if (cpu_addr == boot_cpu_addr) 431 if (cpu_addr == boot_cpu_addr)
415 continue; 432 continue;
416 __cpu_logical_map[CPU_INIT_NO] = cpu_addr; 433 if (!raw_cpu_stopped(cpu_addr)) {
417 if (!cpu_stopped(CPU_INIT_NO)) {
418 s_cpus++; 434 s_cpus++;
419 continue; 435 continue;
420 } 436 }
@@ -533,18 +549,18 @@ static void smp_free_lowcore(int cpu)
533/* Upping and downing of CPUs */ 549/* Upping and downing of CPUs */
534int __cpuinit __cpu_up(unsigned int cpu) 550int __cpuinit __cpu_up(unsigned int cpu)
535{ 551{
536 struct task_struct *idle;
537 struct _lowcore *cpu_lowcore; 552 struct _lowcore *cpu_lowcore;
553 struct task_struct *idle;
538 struct stack_frame *sf; 554 struct stack_frame *sf;
539 sigp_ccode ccode;
540 u32 lowcore; 555 u32 lowcore;
556 int ccode;
541 557
542 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 558 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
543 return -EIO; 559 return -EIO;
544 if (smp_alloc_lowcore(cpu)) 560 if (smp_alloc_lowcore(cpu))
545 return -ENOMEM; 561 return -ENOMEM;
546 do { 562 do {
547 ccode = signal_processor(cpu, sigp_initial_cpu_reset); 563 ccode = sigp(cpu, sigp_initial_cpu_reset);
548 if (ccode == sigp_busy) 564 if (ccode == sigp_busy)
549 udelay(10); 565 udelay(10);
550 if (ccode == sigp_not_operational) 566 if (ccode == sigp_not_operational)
@@ -552,7 +568,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
552 } while (ccode == sigp_busy); 568 } while (ccode == sigp_busy);
553 569
554 lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; 570 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
555 while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) 571 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
556 udelay(10); 572 udelay(10);
557 573
558 idle = current_set[cpu]; 574 idle = current_set[cpu];
@@ -578,7 +594,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
578 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; 594 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
579 eieio(); 595 eieio();
580 596
581 while (signal_processor(cpu, sigp_restart) == sigp_busy) 597 while (sigp(cpu, sigp_restart) == sigp_busy)
582 udelay(10); 598 udelay(10);
583 599
584 while (!cpu_online(cpu)) 600 while (!cpu_online(cpu))
@@ -640,7 +656,7 @@ void __cpu_die(unsigned int cpu)
640 /* Wait until target cpu is down */ 656 /* Wait until target cpu is down */
641 while (!cpu_stopped(cpu)) 657 while (!cpu_stopped(cpu))
642 cpu_relax(); 658 cpu_relax();
643 while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) 659 while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
644 udelay(10); 660 udelay(10);
645 smp_free_lowcore(cpu); 661 smp_free_lowcore(cpu);
646 pr_info("Processor %d stopped\n", cpu); 662 pr_info("Processor %d stopped\n", cpu);
@@ -649,7 +665,7 @@ void __cpu_die(unsigned int cpu)
649void cpu_die(void) 665void cpu_die(void)
650{ 666{
651 idle_task_exit(); 667 idle_task_exit();
652 while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) 668 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
653 cpu_relax(); 669 cpu_relax();
654 for (;;); 670 for (;;);
655} 671}
@@ -765,7 +781,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
765 get_online_cpus(); 781 get_online_cpus();
766 mutex_lock(&smp_cpu_state_mutex); 782 mutex_lock(&smp_cpu_state_mutex);
767 rc = -EBUSY; 783 rc = -EBUSY;
768 if (cpu_online(cpu)) 784 /* disallow configuration changes of online cpus and cpu 0 */
785 if (cpu_online(cpu) || cpu == 0)
769 goto out; 786 goto out;
770 rc = 0; 787 rc = 0;
771 switch (val) { 788 switch (val) {
@@ -927,21 +944,21 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
927 struct cpu *c = &per_cpu(cpu_devices, cpu); 944 struct cpu *c = &per_cpu(cpu_devices, cpu);
928 struct sys_device *s = &c->sysdev; 945 struct sys_device *s = &c->sysdev;
929 struct s390_idle_data *idle; 946 struct s390_idle_data *idle;
947 int err = 0;
930 948
931 switch (action) { 949 switch (action) {
932 case CPU_ONLINE: 950 case CPU_ONLINE:
933 case CPU_ONLINE_FROZEN: 951 case CPU_ONLINE_FROZEN:
934 idle = &per_cpu(s390_idle, cpu); 952 idle = &per_cpu(s390_idle, cpu);
935 memset(idle, 0, sizeof(struct s390_idle_data)); 953 memset(idle, 0, sizeof(struct s390_idle_data));
936 if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) 954 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
937 return NOTIFY_BAD;
938 break; 955 break;
939 case CPU_DEAD: 956 case CPU_DEAD:
940 case CPU_DEAD_FROZEN: 957 case CPU_DEAD_FROZEN:
941 sysfs_remove_group(&s->kobj, &cpu_online_attr_group); 958 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
942 break; 959 break;
943 } 960 }
944 return NOTIFY_OK; 961 return notifier_from_errno(err);
945} 962}
946 963
947static struct notifier_block __cpuinitdata smp_cpu_nb = { 964static struct notifier_block __cpuinitdata smp_cpu_nb = {
@@ -1004,7 +1021,9 @@ out:
1004 return rc; 1021 return rc;
1005} 1022}
1006 1023
1007static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf, 1024static ssize_t __ref rescan_store(struct sysdev_class *class,
1025 struct sysdev_class_attribute *attr,
1026 const char *buf,
1008 size_t count) 1027 size_t count)
1009{ 1028{
1010 int rc; 1029 int rc;
@@ -1015,7 +1034,9 @@ static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
1015static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); 1034static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
1016#endif /* CONFIG_HOTPLUG_CPU */ 1035#endif /* CONFIG_HOTPLUG_CPU */
1017 1036
1018static ssize_t dispatching_show(struct sysdev_class *class, char *buf) 1037static ssize_t dispatching_show(struct sysdev_class *class,
1038 struct sysdev_class_attribute *attr,
1039 char *buf)
1019{ 1040{
1020 ssize_t count; 1041 ssize_t count;
1021 1042
@@ -1025,7 +1046,9 @@ static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
1025 return count; 1046 return count;
1026} 1047}
1027 1048
1028static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf, 1049static ssize_t dispatching_store(struct sysdev_class *dev,
1050 struct sysdev_class_attribute *attr,
1051 const char *buf,
1029 size_t count) 1052 size_t count)
1030{ 1053{
1031 int val, rc; 1054 int val, rc;
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
new file mode 100644
index 000000000000..469f11b574fa
--- /dev/null
+++ b/arch/s390/kernel/switch_cpu.S
@@ -0,0 +1,58 @@
1/*
2 * 31-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/ptrace.h>
10
11# smp_switch_to_cpu switches to destination cpu and executes the passed function
12# Parameter: %r2 - function to call
13# %r3 - function parameter
14#	      %r4 - stack pointer
15# %r5 - current cpu
16# %r6 - destination cpu
17
18 .section .text
19 .align 4
20 .globl smp_switch_to_cpu
21smp_switch_to_cpu:
22 stm %r6,%r15,__SF_GPRS(%r15)
23 lr %r1,%r15
24 ahi %r15,-STACK_FRAME_OVERHEAD
25 st %r1,__SF_BACKCHAIN(%r15)
26 basr %r13,0
270: la %r1,.gprregs_addr-0b(%r13)
28 l %r1,0(%r1)
29 stm %r0,%r15,0(%r1)
301: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
31 brc 2,1b /* busy, try again */
322: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
33 brc 2,2b /* busy, try again */
343: j 3b
35
36 .globl smp_restart_cpu
37smp_restart_cpu:
38 basr %r13,0
390: la %r1,.gprregs_addr-0b(%r13)
40 l %r1,0(%r1)
41 lm %r0,%r15,0(%r1)
421: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
43 brc 10,1b /* busy, accepted (status 0), running */
44 tmll %r0,0x40 /* Test if calling CPU is stopped */
45 jz 1b
46 ltr %r4,%r4 /* New stack ? */
47 jz 1f
48 lr %r15,%r4
491: basr %r14,%r2
50
51.gprregs_addr:
52 .long .gprregs
53
54 .section .data,"aw",@progbits
55.gprregs:
56 .rept 16
57 .long 0
58 .endr
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
new file mode 100644
index 000000000000..d94aacc898cb
--- /dev/null
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -0,0 +1,51 @@
1/*
2 * 64-bit switch cpu code
3 *
4 * Copyright IBM Corp. 2009
5 *
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/ptrace.h>
10
11# smp_switch_to_cpu switches to destination cpu and executes the passed function
12# Parameter: %r2 - function to call
13# %r3 - function parameter
14#	      %r4 - stack pointer
15# %r5 - current cpu
16# %r6 - destination cpu
17
18 .section .text
19 .align 4
20 .globl smp_switch_to_cpu
21smp_switch_to_cpu:
22 stmg %r6,%r15,__SF_GPRS(%r15)
23 lgr %r1,%r15
24 aghi %r15,-STACK_FRAME_OVERHEAD
25 stg %r1,__SF_BACKCHAIN(%r15)
26 larl %r1,.gprregs
27 stmg %r0,%r15,0(%r1)
281: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */
29 brc 2,1b /* busy, try again */
302: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */
31 brc 2,2b /* busy, try again */
323: j 3b
33
34 .globl smp_restart_cpu
35smp_restart_cpu:
36 larl %r1,.gprregs
37 lmg %r0,%r15,0(%r1)
381: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
39 brc 10,1b /* busy, accepted (status 0), running */
40 tmll %r0,0x40 /* Test if calling CPU is stopped */
41 jz 1b
42 ltgr %r4,%r4 /* New stack ? */
43 jz 1f
44 lgr %r15,%r4
451: basr %r14,%r2
46
47 .section .data,"aw",@progbits
48.gprregs:
49 .rept 16
50 .quad 0
51 .endr
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index e5cd623cb025..1f066e46e83e 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -176,7 +176,7 @@ pgm_check_entry:
176 cgr %r1,%r2 176 cgr %r1,%r2
177 je restore_registers /* r1 = r2 -> nothing to do */ 177 je restore_registers /* r1 = r2 -> nothing to do */
178 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ 178 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
179 mvc __LC_RESTART_PSW(16,%r0),0(%r4) 179 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
1803: 1803:
181 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET 181 sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET
182 brc 8,4f /* accepted */ 182 brc 8,4f /* accepted */
@@ -256,6 +256,9 @@ restore_registers:
256 lghi %r2,0 256 lghi %r2,0
257 brasl %r14,arch_set_page_states 257 brasl %r14,arch_set_page_states
258 258
259 /* Reinitialize the channel subsystem */
260 brasl %r14,channel_subsystem_reinit
261
259 /* Return 0 */ 262 /* Return 0 */
260 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 263 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
261 lghi %r2,0 264 lghi %r2,0
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 86a74c9c9e63..7b6b0f81a283 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -33,13 +33,12 @@
33#include "entry.h" 33#include "entry.h"
34 34
35/* 35/*
36 * Perform the select(nd, in, out, ex, tv) and mmap() system 36 * Perform the mmap() system call. Linux for S/390 isn't able to handle more
37 * calls. Linux for S/390 isn't able to handle more than 5 37 * than 5 system call parameters, so this system call uses a memory block
38 * system call parameters, so these system calls used a memory 38 * for parameter passing.
39 * block for parameter passing..
40 */ 39 */
41 40
42struct mmap_arg_struct { 41struct s390_mmap_arg_struct {
43 unsigned long addr; 42 unsigned long addr;
44 unsigned long len; 43 unsigned long len;
45 unsigned long prot; 44 unsigned long prot;
@@ -48,9 +47,9 @@ struct mmap_arg_struct {
48 unsigned long offset; 47 unsigned long offset;
49}; 48};
50 49
51SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg) 50SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
52{ 51{
53 struct mmap_arg_struct a; 52 struct s390_mmap_arg_struct a;
54 int error = -EFAULT; 53 int error = -EFAULT;
55 54
56 if (copy_from_user(&a, arg, sizeof(a))) 55 if (copy_from_user(&a, arg, sizeof(a)))
@@ -60,29 +59,12 @@ out:
60 return error; 59 return error;
61} 60}
62 61
63SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg)
64{
65 struct mmap_arg_struct a;
66 long error = -EFAULT;
67
68 if (copy_from_user(&a, arg, sizeof(a)))
69 goto out;
70
71 error = -EINVAL;
72 if (a.offset & ~PAGE_MASK)
73 goto out;
74
75 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
76out:
77 return error;
78}
79
80/* 62/*
81 * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 63 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
82 * 64 *
83 * This is really horribly ugly. 65 * This is really horribly ugly.
84 */ 66 */
85SYSCALL_DEFINE5(ipc, uint, call, int, first, unsigned long, second, 67SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
86 unsigned long, third, void __user *, ptr) 68 unsigned long, third, void __user *, ptr)
87{ 69{
88 struct ipc_kludge tmp; 70 struct ipc_kludge tmp;
@@ -149,17 +131,6 @@ SYSCALL_DEFINE5(ipc, uint, call, int, first, unsigned long, second,
149} 131}
150 132
151#ifdef CONFIG_64BIT 133#ifdef CONFIG_64BIT
152SYSCALL_DEFINE1(s390_newuname, struct new_utsname __user *, name)
153{
154 int ret = sys_newuname(name);
155
156 if (personality(current->personality) == PER_LINUX32 && !ret) {
157 ret = copy_to_user(name->machine, "s390\0\0\0\0", 8);
158 if (ret) ret = -EFAULT;
159 }
160 return ret;
161}
162
163SYSCALL_DEFINE1(s390_personality, unsigned long, personality) 134SYSCALL_DEFINE1(s390_personality, unsigned long, personality)
164{ 135{
165 int ret; 136 int ret;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 30eca070d426..201ce6bed34e 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -98,7 +98,7 @@ SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
98SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper) 98SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
99SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper) 99SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
100SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */ 100SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */
101SYSCALL(sys_s390_old_mmap,sys_s390_old_mmap,old32_mmap_wrapper) /* 90 */ 101SYSCALL(sys_old_mmap,sys_old_mmap,old32_mmap_wrapper) /* 90 */
102SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper) 102SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
103SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper) 103SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
104SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper) 104SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
@@ -125,12 +125,12 @@ NI_SYSCALL /* vm86old for i386 */
125SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper) 125SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
126SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */ 126SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) 127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
128SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper) 128SYSCALL(sys_s390_ipc,sys_s390_ipc,sys32_ipc_wrapper)
129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) 129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
130SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn) 130SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
131SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */ 131SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */
132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) 132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
133SYSCALL(sys_newuname,sys_s390_newuname,sys32_newuname_wrapper) 133SYSCALL(sys_newuname,sys_newuname,sys32_newuname_wrapper)
134NI_SYSCALL /* modify_ldt for i386 */ 134NI_SYSCALL /* modify_ldt for i386 */
135SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper) 135SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
136SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */ 136SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index b5e75e1061c8..a0ffc7717ed6 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <asm/ebcdic.h> 15#include <asm/ebcdic.h>
15#include <asm/sysinfo.h> 16#include <asm/sysinfo.h>
16#include <asm/cpcmd.h> 17#include <asm/cpcmd.h>
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 65065ac48ed3..15a7536452d5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -36,6 +36,7 @@
36#include <linux/notifier.h> 36#include <linux/notifier.h>
37#include <linux/clocksource.h> 37#include <linux/clocksource.h>
38#include <linux/clockchips.h> 38#include <linux/clockchips.h>
39#include <linux/gfp.h>
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40#include <asm/delay.h> 41#include <asm/delay.h>
41#include <asm/s390_ext.h> 42#include <asm/s390_ext.h>
@@ -51,14 +52,6 @@
51#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 52#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
52#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) 53#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
53 54
54/*
55 * Create a small time difference between the timer interrupts
56 * on the different cpus to avoid lock contention.
57 */
58#define CPU_DEVIATION (smp_processor_id() << 12)
59
60#define TICK_SIZE tick
61
62u64 sched_clock_base_cc = -1; /* Force to data section. */ 55u64 sched_clock_base_cc = -1; /* Force to data section. */
63EXPORT_SYMBOL_GPL(sched_clock_base_cc); 56EXPORT_SYMBOL_GPL(sched_clock_base_cc);
64 57
@@ -81,15 +74,15 @@ unsigned long long monotonic_clock(void)
81} 74}
82EXPORT_SYMBOL(monotonic_clock); 75EXPORT_SYMBOL(monotonic_clock);
83 76
84void tod_to_timeval(__u64 todval, struct timespec *xtime) 77void tod_to_timeval(__u64 todval, struct timespec *xt)
85{ 78{
86 unsigned long long sec; 79 unsigned long long sec;
87 80
88 sec = todval >> 12; 81 sec = todval >> 12;
89 do_div(sec, 1000000); 82 do_div(sec, 1000000);
90 xtime->tv_sec = sec; 83 xt->tv_sec = sec;
91 todval -= (sec * 1000000) << 12; 84 todval -= (sec * 1000000) << 12;
92 xtime->tv_nsec = ((todval * 1000) >> 12); 85 xt->tv_nsec = ((todval * 1000) >> 12);
93} 86}
94EXPORT_SYMBOL(tod_to_timeval); 87EXPORT_SYMBOL(tod_to_timeval);
95 88
@@ -224,10 +217,11 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
224 ++vdso_data->tb_update_count; 217 ++vdso_data->tb_update_count;
225 smp_wmb(); 218 smp_wmb();
226 vdso_data->xtime_tod_stamp = clock->cycle_last; 219 vdso_data->xtime_tod_stamp = clock->cycle_last;
227 vdso_data->xtime_clock_sec = xtime.tv_sec; 220 vdso_data->xtime_clock_sec = wall_time->tv_sec;
228 vdso_data->xtime_clock_nsec = xtime.tv_nsec; 221 vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
229 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; 222 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
230 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; 223 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
224 vdso_data->ntp_mult = mult;
231 smp_wmb(); 225 smp_wmb();
232 ++vdso_data->tb_update_count; 226 ++vdso_data->tb_update_count;
233} 227}
@@ -397,7 +391,6 @@ static void __init time_init_wq(void)
397 if (time_sync_wq) 391 if (time_sync_wq)
398 return; 392 return;
399 time_sync_wq = create_singlethread_workqueue("timesync"); 393 time_sync_wq = create_singlethread_workqueue("timesync");
400 stop_machine_create();
401} 394}
402 395
403/* 396/*
@@ -531,8 +524,11 @@ void etr_switch_to_local(void)
531 if (!etr_eacr.sl) 524 if (!etr_eacr.sl)
532 return; 525 return;
533 disable_sync_clock(NULL); 526 disable_sync_clock(NULL);
534 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 527 if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
535 queue_work(time_sync_wq, &etr_work); 528 etr_eacr.es = etr_eacr.sl = 0;
529 etr_setr(&etr_eacr);
530 queue_work(time_sync_wq, &etr_work);
531 }
536} 532}
537 533
538/* 534/*
@@ -546,8 +542,11 @@ void etr_sync_check(void)
546 if (!etr_eacr.es) 542 if (!etr_eacr.es)
547 return; 543 return;
548 disable_sync_clock(NULL); 544 disable_sync_clock(NULL);
549 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 545 if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
550 queue_work(time_sync_wq, &etr_work); 546 etr_eacr.es = 0;
547 etr_setr(&etr_eacr);
548 queue_work(time_sync_wq, &etr_work);
549 }
551} 550}
552 551
553/* 552/*
@@ -909,7 +908,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
909 * Do not try to get the alternate port aib if the clock 908 * Do not try to get the alternate port aib if the clock
910 * is not in sync yet. 909 * is not in sync yet.
911 */ 910 */
912 if (!check_sync_clock()) 911 if (!eacr.es || !check_sync_clock())
913 return eacr; 912 return eacr;
914 913
915 /* 914 /*
@@ -1071,7 +1070,7 @@ static void etr_work_fn(struct work_struct *work)
1071 * If the clock is in sync just update the eacr and return. 1070 * If the clock is in sync just update the eacr and return.
1072 * If there is no valid sync port wait for a port update. 1071 * If there is no valid sync port wait for a port update.
1073 */ 1072 */
1074 if (check_sync_clock() || sync_port < 0) { 1073 if ((eacr.es && check_sync_clock()) || sync_port < 0) {
1075 etr_update_eacr(eacr); 1074 etr_update_eacr(eacr);
1076 etr_set_tolec_timeout(now); 1075 etr_set_tolec_timeout(now);
1077 goto out_unlock; 1076 goto out_unlock;
@@ -1124,14 +1123,18 @@ static struct sys_device etr_port1_dev = {
1124/* 1123/*
1125 * ETR class attributes 1124 * ETR class attributes
1126 */ 1125 */
1127static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf) 1126static ssize_t etr_stepping_port_show(struct sysdev_class *class,
1127 struct sysdev_class_attribute *attr,
1128 char *buf)
1128{ 1129{
1129 return sprintf(buf, "%i\n", etr_port0.esw.p); 1130 return sprintf(buf, "%i\n", etr_port0.esw.p);
1130} 1131}
1131 1132
1132static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL); 1133static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1133 1134
1134static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf) 1135static ssize_t etr_stepping_mode_show(struct sysdev_class *class,
1136 struct sysdev_class_attribute *attr,
1137 char *buf)
1135{ 1138{
1136 char *mode_str; 1139 char *mode_str;
1137 1140
@@ -1592,7 +1595,9 @@ static struct sysdev_class stp_sysclass = {
1592 .name = "stp", 1595 .name = "stp",
1593}; 1596};
1594 1597
1595static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf) 1598static ssize_t stp_ctn_id_show(struct sysdev_class *class,
1599 struct sysdev_class_attribute *attr,
1600 char *buf)
1596{ 1601{
1597 if (!stp_online) 1602 if (!stp_online)
1598 return -ENODATA; 1603 return -ENODATA;
@@ -1602,7 +1607,9 @@ static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf)
1602 1607
1603static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); 1608static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
1604 1609
1605static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf) 1610static ssize_t stp_ctn_type_show(struct sysdev_class *class,
1611 struct sysdev_class_attribute *attr,
1612 char *buf)
1606{ 1613{
1607 if (!stp_online) 1614 if (!stp_online)
1608 return -ENODATA; 1615 return -ENODATA;
@@ -1611,7 +1618,9 @@ static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf)
1611 1618
1612static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); 1619static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
1613 1620
1614static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf) 1621static ssize_t stp_dst_offset_show(struct sysdev_class *class,
1622 struct sysdev_class_attribute *attr,
1623 char *buf)
1615{ 1624{
1616 if (!stp_online || !(stp_info.vbits & 0x2000)) 1625 if (!stp_online || !(stp_info.vbits & 0x2000))
1617 return -ENODATA; 1626 return -ENODATA;
@@ -1620,7 +1629,9 @@ static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf)
1620 1629
1621static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); 1630static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
1622 1631
1623static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf) 1632static ssize_t stp_leap_seconds_show(struct sysdev_class *class,
1633 struct sysdev_class_attribute *attr,
1634 char *buf)
1624{ 1635{
1625 if (!stp_online || !(stp_info.vbits & 0x8000)) 1636 if (!stp_online || !(stp_info.vbits & 0x8000))
1626 return -ENODATA; 1637 return -ENODATA;
@@ -1629,7 +1640,9 @@ static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf)
1629 1640
1630static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); 1641static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
1631 1642
1632static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf) 1643static ssize_t stp_stratum_show(struct sysdev_class *class,
1644 struct sysdev_class_attribute *attr,
1645 char *buf)
1633{ 1646{
1634 if (!stp_online) 1647 if (!stp_online)
1635 return -ENODATA; 1648 return -ENODATA;
@@ -1638,7 +1651,9 @@ static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf)
1638 1651
1639static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL); 1652static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
1640 1653
1641static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf) 1654static ssize_t stp_time_offset_show(struct sysdev_class *class,
1655 struct sysdev_class_attribute *attr,
1656 char *buf)
1642{ 1657{
1643 if (!stp_online || !(stp_info.vbits & 0x0800)) 1658 if (!stp_online || !(stp_info.vbits & 0x0800))
1644 return -ENODATA; 1659 return -ENODATA;
@@ -1647,7 +1662,9 @@ static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf)
1647 1662
1648static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL); 1663static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
1649 1664
1650static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf) 1665static ssize_t stp_time_zone_offset_show(struct sysdev_class *class,
1666 struct sysdev_class_attribute *attr,
1667 char *buf)
1651{ 1668{
1652 if (!stp_online || !(stp_info.vbits & 0x4000)) 1669 if (!stp_online || !(stp_info.vbits & 0x4000))
1653 return -ENODATA; 1670 return -ENODATA;
@@ -1657,7 +1674,9 @@ static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf)
1657static SYSDEV_CLASS_ATTR(time_zone_offset, 0400, 1674static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
1658 stp_time_zone_offset_show, NULL); 1675 stp_time_zone_offset_show, NULL);
1659 1676
1660static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf) 1677static ssize_t stp_timing_mode_show(struct sysdev_class *class,
1678 struct sysdev_class_attribute *attr,
1679 char *buf)
1661{ 1680{
1662 if (!stp_online) 1681 if (!stp_online)
1663 return -ENODATA; 1682 return -ENODATA;
@@ -1666,7 +1685,9 @@ static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf)
1666 1685
1667static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); 1686static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
1668 1687
1669static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf) 1688static ssize_t stp_timing_state_show(struct sysdev_class *class,
1689 struct sysdev_class_attribute *attr,
1690 char *buf)
1670{ 1691{
1671 if (!stp_online) 1692 if (!stp_online)
1672 return -ENODATA; 1693 return -ENODATA;
@@ -1675,12 +1696,15 @@ static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf)
1675 1696
1676static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL); 1697static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
1677 1698
1678static ssize_t stp_online_show(struct sysdev_class *class, char *buf) 1699static ssize_t stp_online_show(struct sysdev_class *class,
1700 struct sysdev_class_attribute *attr,
1701 char *buf)
1679{ 1702{
1680 return sprintf(buf, "%i\n", stp_online); 1703 return sprintf(buf, "%i\n", stp_online);
1681} 1704}
1682 1705
1683static ssize_t stp_online_store(struct sysdev_class *class, 1706static ssize_t stp_online_store(struct sysdev_class *class,
1707 struct sysdev_class_attribute *attr,
1684 const char *buf, size_t count) 1708 const char *buf, size_t count)
1685{ 1709{
1686 unsigned int value; 1710 unsigned int value;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 14ef6f05e432..bcef00766a64 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,8 @@ struct tl_cpu {
37}; 37};
38 38
39struct tl_container { 39struct tl_container {
40 unsigned char reserved[8]; 40 unsigned char reserved[7];
41 unsigned char id;
41}; 42};
42 43
43union tl_entry { 44union tl_entry {
@@ -58,6 +59,7 @@ struct tl_info {
58 59
59struct core_info { 60struct core_info {
60 struct core_info *next; 61 struct core_info *next;
62 unsigned char id;
61 cpumask_t mask; 63 cpumask_t mask;
62}; 64};
63 65
@@ -73,6 +75,7 @@ static DECLARE_WORK(topology_work, topology_work_fn);
73static DEFINE_SPINLOCK(topology_lock); 75static DEFINE_SPINLOCK(topology_lock);
74 76
75cpumask_t cpu_core_map[NR_CPUS]; 77cpumask_t cpu_core_map[NR_CPUS];
78unsigned char cpu_core_id[NR_CPUS];
76 79
77static cpumask_t cpu_coregroup_map(unsigned int cpu) 80static cpumask_t cpu_coregroup_map(unsigned int cpu)
78{ 81{
@@ -116,6 +119,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
116 for_each_present_cpu(lcpu) { 119 for_each_present_cpu(lcpu) {
117 if (cpu_logical_map(lcpu) == rcpu) { 120 if (cpu_logical_map(lcpu) == rcpu) {
118 cpu_set(lcpu, core->mask); 121 cpu_set(lcpu, core->mask);
122 cpu_core_id[lcpu] = core->id;
119 smp_cpu_polarization[lcpu] = tl_cpu->pp; 123 smp_cpu_polarization[lcpu] = tl_cpu->pp;
120 } 124 }
121 } 125 }
@@ -158,6 +162,7 @@ static void tl_to_cores(struct tl_info *info)
158 break; 162 break;
159 case 1: 163 case 1:
160 core = core->next; 164 core = core->next;
165 core->id = tle->container.id;
161 break; 166 break;
162 case 0: 167 case 0:
163 add_cpus_to_core(&tle->cpu, core); 168 add_cpus_to_core(&tle->cpu, core);
@@ -165,10 +170,11 @@ static void tl_to_cores(struct tl_info *info)
165 default: 170 default:
166 clear_cores(); 171 clear_cores();
167 machine_has_topology = 0; 172 machine_has_topology = 0;
168 return; 173 goto out;
169 } 174 }
170 tle = next_tle(tle); 175 tle = next_tle(tle);
171 } 176 }
177out:
172 spin_unlock_irq(&topology_lock); 178 spin_unlock_irq(&topology_lock);
173} 179}
174 180
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 6e7ad63854c0..5d8f0f3d0250 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -46,13 +46,7 @@
46 46
47pgm_check_handler_t *pgm_check_table[128]; 47pgm_check_handler_t *pgm_check_table[128];
48 48
49#ifdef CONFIG_SYSCTL 49int show_unhandled_signals;
50#ifdef CONFIG_PROCESS_DEBUG
51int sysctl_userprocess_debug = 1;
52#else
53int sysctl_userprocess_debug = 0;
54#endif
55#endif
56 50
57extern pgm_check_handler_t do_protection_exception; 51extern pgm_check_handler_t do_protection_exception;
58extern pgm_check_handler_t do_dat_exception; 52extern pgm_check_handler_t do_dat_exception;
@@ -315,18 +309,19 @@ void die(const char * str, struct pt_regs * regs, long err)
315 do_exit(SIGSEGV); 309 do_exit(SIGSEGV);
316} 310}
317 311
318static void inline 312static void inline report_user_fault(struct pt_regs *regs, long int_code,
319report_user_fault(long interruption_code, struct pt_regs *regs) 313 int signr)
320{ 314{
321#if defined(CONFIG_SYSCTL) 315 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
322 if (!sysctl_userprocess_debug)
323 return; 316 return;
324#endif 317 if (!unhandled_signal(current, signr))
325#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 318 return;
326 printk("User process fault: interruption code 0x%lX\n", 319 if (!printk_ratelimit())
327 interruption_code); 320 return;
321 printk("User process fault: interruption code 0x%lX ", int_code);
322 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
323 printk("\n");
328 show_regs(regs); 324 show_regs(regs);
329#endif
330} 325}
331 326
332int is_valid_bugaddr(unsigned long addr) 327int is_valid_bugaddr(unsigned long addr)
@@ -354,7 +349,7 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
354 349
355 tsk->thread.trap_no = interruption_code & 0xffff; 350 tsk->thread.trap_no = interruption_code & 0xffff;
356 force_sig_info(signr, info, tsk); 351 force_sig_info(signr, info, tsk);
357 report_user_fault(interruption_code, regs); 352 report_user_fault(regs, interruption_code, signr);
358 } else { 353 } else {
359 const struct exception_table_entry *fixup; 354 const struct exception_table_entry *fixup;
360 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 355 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -390,8 +385,8 @@ static void default_trap_handler(struct pt_regs * regs, long interruption_code)
390{ 385{
391 if (regs->psw.mask & PSW_MASK_PSTATE) { 386 if (regs->psw.mask & PSW_MASK_PSTATE) {
392 local_irq_enable(); 387 local_irq_enable();
388 report_user_fault(regs, interruption_code, SIGSEGV);
393 do_exit(SIGSEGV); 389 do_exit(SIGSEGV);
394 report_user_fault(interruption_code, regs);
395 } else 390 } else
396 die("Unknown program exception", regs, interruption_code); 391 die("Unknown program exception", regs, interruption_code);
397} 392}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 5f99e66c51c3..6b83870507d5 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -23,6 +23,7 @@
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/bootmem.h> 24#include <linux/bootmem.h>
25#include <linux/compat.h> 25#include <linux/compat.h>
26#include <asm/asm-offsets.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
27#include <asm/system.h> 28#include <asm/system.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
@@ -101,11 +102,7 @@ static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
101/* 102/*
102 * Allocate/free per cpu vdso data. 103 * Allocate/free per cpu vdso data.
103 */ 104 */
104#ifdef CONFIG_64BIT
105#define SEGMENT_ORDER 2 105#define SEGMENT_ORDER 2
106#else
107#define SEGMENT_ORDER 1
108#endif
109 106
110int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) 107int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
111{ 108{
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 4a98909a8310..969643954273 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,13 +38,13 @@ __kernel_clock_gettime:
38 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 38 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
39 brc 3,2f 39 brc 3,2f
40 ahi %r0,-1 40 ahi %r0,-1
412: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 412: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
42 lr %r2,%r0 42 lr %r2,%r0
43 lhi %r0,1000 43 l %r0,__VDSO_NTP_MULT(%r5)
44 ltr %r1,%r1 44 ltr %r1,%r1
45 mr %r0,%r0 45 mr %r0,%r0
46 jnm 3f 46 jnm 3f
47 ahi %r0,1000 47 a %r0,__VDSO_NTP_MULT(%r5)
483: alr %r0,%r2 483: alr %r0,%r2
49 srdl %r0,12 49 srdl %r0,12
50 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 50 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
@@ -86,13 +86,13 @@ __kernel_clock_gettime:
86 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 86 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
87 brc 3,12f 87 brc 3,12f
88 ahi %r0,-1 88 ahi %r0,-1
8912: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 8912: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
90 lr %r2,%r0 90 lr %r2,%r0
91 lhi %r0,1000 91 l %r0,__VDSO_NTP_MULT(%r5)
92 ltr %r1,%r1 92 ltr %r1,%r1
93 mr %r0,%r0 93 mr %r0,%r0
94 jnm 13f 94 jnm 13f
95 ahi %r0,1000 95 a %r0,__VDSO_NTP_MULT(%r5)
9613: alr %r0,%r2 9613: alr %r0,%r2
97 srdl %r0,12 97 srdl %r0,12
98 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 98 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index ad8acfc949fb..2d3633175e3b 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,13 +35,13 @@ __kernel_gettimeofday:
35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 35 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
36 brc 3,3f 36 brc 3,3f
37 ahi %r0,-1 37 ahi %r0,-1
383: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 383: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
39 st %r0,24(%r15) 39 st %r0,24(%r15)
40 lhi %r0,1000 40 l %r0,__VDSO_NTP_MULT(%r5)
41 ltr %r1,%r1 41 ltr %r1,%r1
42 mr %r0,%r0 42 mr %r0,%r0
43 jnm 4f 43 jnm 4f
44 ahi %r0,1000 44 a %r0,__VDSO_NTP_MULT(%r5)
454: al %r0,24(%r15) 454: al %r0,24(%r15)
46 srdl %r0,12 46 srdl %r0,12
47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 49106c6e6f88..f40467884a03 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -36,7 +36,7 @@ __kernel_clock_gettime:
36 stck 48(%r15) /* Store TOD clock */ 36 stck 48(%r15) /* Store TOD clock */
37 lg %r1,48(%r15) 37 lg %r1,48(%r15)
38 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 38 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
39 mghi %r1,1000 39 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
40 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 40 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
41 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 41 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
42 lg %r0,__VDSO_XTIME_SEC(%r5) 42 lg %r0,__VDSO_XTIME_SEC(%r5)
@@ -64,7 +64,7 @@ __kernel_clock_gettime:
64 stck 48(%r15) /* Store TOD clock */ 64 stck 48(%r15) /* Store TOD clock */
65 lg %r1,48(%r15) 65 lg %r1,48(%r15)
66 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 66 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
67 mghi %r1,1000 67 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
68 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 68 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
69 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 69 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
70 lg %r0,__VDSO_XTIME_SEC(%r5) 70 lg %r0,__VDSO_XTIME_SEC(%r5)
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index f873e75634e1..36ee674722ec 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,7 @@ __kernel_gettimeofday:
31 stck 48(%r15) /* Store TOD clock */ 31 stck 48(%r15) /* Store TOD clock */
32 lg %r1,48(%r15) 32 lg %r1,48(%r15)
33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
34 mghi %r1,1000 34 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
35 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 35 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ 36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
37 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ 37 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b59a812a010e..3479f1b0d4e0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -121,32 +121,35 @@ void account_system_vtime(struct task_struct *tsk)
121} 121}
122EXPORT_SYMBOL_GPL(account_system_vtime); 122EXPORT_SYMBOL_GPL(account_system_vtime);
123 123
124void vtime_start_cpu(void) 124void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
125{ 125{
126 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 126 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
127 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 127 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
128 __u64 idle_time, expires; 128 __u64 idle_time, expires;
129 129
130 if (idle->idle_enter == 0ULL)
131 return;
132
130 /* Account time spent with enabled wait psw loaded as idle time. */ 133 /* Account time spent with enabled wait psw loaded as idle time. */
131 idle_time = S390_lowcore.int_clock - idle->idle_enter; 134 idle_time = int_clock - idle->idle_enter;
132 account_idle_time(idle_time); 135 account_idle_time(idle_time);
133 S390_lowcore.steal_timer += 136 S390_lowcore.steal_timer +=
134 idle->idle_enter - S390_lowcore.last_update_clock; 137 idle->idle_enter - S390_lowcore.last_update_clock;
135 S390_lowcore.last_update_clock = S390_lowcore.int_clock; 138 S390_lowcore.last_update_clock = int_clock;
136 139
137 /* Account system time spent going idle. */ 140 /* Account system time spent going idle. */
138 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; 141 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
139 S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer; 142 S390_lowcore.last_update_timer = enter_timer;
140 143
141 /* Restart vtime CPU timer */ 144 /* Restart vtime CPU timer */
142 if (vq->do_spt) { 145 if (vq->do_spt) {
143 /* Program old expire value but first save progress. */ 146 /* Program old expire value but first save progress. */
144 expires = vq->idle - S390_lowcore.async_enter_timer; 147 expires = vq->idle - enter_timer;
145 expires += get_vtimer(); 148 expires += get_vtimer();
146 set_vtimer(expires); 149 set_vtimer(expires);
147 } else { 150 } else {
148 /* Don't account the CPU timer delta while the cpu was idle. */ 151 /* Don't account the CPU timer delta while the cpu was idle. */
149 vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer; 152 vq->elapsed -= vq->idle - enter_timer;
150 } 153 }
151 154
152 idle->sequence++; 155 idle->sequence++;