Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile                 |    8
-rw-r--r--  arch/s390/kernel/asm-offsets.c            |   27
-rw-r--r--  arch/s390/kernel/compat_linux.c           |    1
-rw-r--r--  arch/s390/kernel/compat_ptrace.h          |   56
-rw-r--r--  arch/s390/kernel/compat_wrapper.S         |   33
-rw-r--r--  arch/s390/kernel/debug.c                  |    1
-rw-r--r--  arch/s390/kernel/diag.c                   |   21
-rw-r--r--  arch/s390/kernel/dis.c                    |  148
-rw-r--r--  arch/s390/kernel/early.c                  |   69
-rw-r--r--  arch/s390/kernel/entry.S                  |  322
-rw-r--r--  arch/s390/kernel/entry.h                  |    6
-rw-r--r--  arch/s390/kernel/entry64.S                |  122
-rw-r--r--  arch/s390/kernel/ftrace.c                 |  238
-rw-r--r--  arch/s390/kernel/head.S                   |   10
-rw-r--r--  arch/s390/kernel/irq.c                    |  177
-rw-r--r--  arch/s390/kernel/jump_label.c             |   59
-rw-r--r--  arch/s390/kernel/kprobes.c                |  523
-rw-r--r--  arch/s390/kernel/machine_kexec.c          |    2
-rw-r--r--  arch/s390/kernel/mcount.S                 |   32
-rw-r--r--  arch/s390/kernel/mcount64.S               |   29
-rw-r--r--  arch/s390/kernel/mem_detect.c             |    4
-rw-r--r--  arch/s390/kernel/nmi.c                    |   13
-rw-r--r--  arch/s390/kernel/process.c                |   80
-rw-r--r--  arch/s390/kernel/processor.c              |   20
-rw-r--r--  arch/s390/kernel/ptrace.c                 |  309
-rw-r--r--  arch/s390/kernel/reipl64.S                |    2
-rw-r--r--  arch/s390/kernel/s390_ext.c               |  142
-rw-r--r--  arch/s390/kernel/setup.c                  |  146
-rw-r--r--  arch/s390/kernel/signal.c                 |    2
-rw-r--r--  arch/s390/kernel/smp.c                    |   93
-rw-r--r--  arch/s390/kernel/switch_cpu.S             |    4
-rw-r--r--  arch/s390/kernel/switch_cpu64.S           |    4
-rw-r--r--  arch/s390/kernel/syscalls.S               |    5
-rw-r--r--  arch/s390/kernel/sysinfo.c                |   43
-rw-r--r--  arch/s390/kernel/time.c                   |   34
-rw-r--r--  arch/s390/kernel/topology.c               |  250
-rw-r--r--  arch/s390/kernel/traps.c                  |  222
-rw-r--r--  arch/s390/kernel/vdso.c                   |   16
-rw-r--r--  arch/s390/kernel/vdso32/Makefile          |    3
-rw-r--r--  arch/s390/kernel/vdso32/clock_getres.S    |    6
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S   |    4
-rw-r--r--  arch/s390/kernel/vdso64/Makefile          |    3
-rw-r--r--  arch/s390/kernel/vdso64/clock_getres.S    |    6
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S   |    4
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S            |    2
-rw-r--r--  arch/s390/kernel/vtime.c                  |   32
46 files changed, 1789 insertions(+), 1544 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 64230bc392fa..df3732249baa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -20,10 +20,10 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 
-obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o setup.o \
-	    processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-	    s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
-	    vdso.o vtime.o sysinfo.o nmi.o sclp.o
+obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
+	    processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
+	    debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
+	    sysinfo.o jump_label.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 5232278d79ad..edfbd17d7082 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,14 +23,16 @@ int main(void)
 {
 	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
 	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-	DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
 	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
 	BLANK();
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid));
-	DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address));
-	DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id));
+	DEFINE(__THREAD_per_cause,
+	       offsetof(struct task_struct, thread.per_event.cause));
+	DEFINE(__THREAD_per_address,
+	       offsetof(struct task_struct, thread.per_event.address));
+	DEFINE(__THREAD_per_paid,
+	       offsetof(struct task_struct, thread.per_event.paid));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -66,9 +68,9 @@ int main(void)
 	DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
 	DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
 	/* constants used by the vdso */
-	DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
-	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
-	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+	DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
+	DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 	BLANK();
 	/* constants for SIGP */
 	DEFINE(__SIGP_STOP, sigp_stop);
@@ -84,9 +86,10 @@ int main(void)
 	DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
 	DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
 	DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
-	DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
+	DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
+	DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
 	DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
-	DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
+	DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
 	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
 	DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
 	DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
@@ -121,13 +124,11 @@ int main(void)
 	DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
 	DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
 	DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
+	DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid));
 	DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
 	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
-	DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
-	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
-	DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@@ -142,10 +143,8 @@ int main(void)
 	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
 	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
 #ifdef CONFIG_32BIT
-	DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params));
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
 #else /* CONFIG_32BIT */
-	DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2));
 	DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 1e6449c79ab6..53acaa86dd94 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -25,7 +25,6 @@
 #include <linux/resource.h>
 #include <linux/times.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/sem.h>
 #include <linux/msg.h>
 #include <linux/shm.h>
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 123dd660d7fb..12b823833510 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -4,40 +4,19 @@
 #include <asm/ptrace.h>	/* needed for NUM_CR_WORDS */
 #include "compat_linux.h"  /* needed for psw_compat_t */
 
-typedef struct {
-	__u32 cr[NUM_CR_WORDS];
-} per_cr_words32;
-
-typedef struct {
-	__u16 perc_atmid;	/* 0x096 */
-	__u32 address;		/* 0x098 */
-	__u8  access_id;	/* 0x0a1 */
-} per_lowcore_words32;
-
-typedef struct {
-	union {
-		per_cr_words32 words;
-	} control_regs;
-	/*
-	 * Use these flags instead of setting em_instruction_fetch
-	 * directly they are used so that single stepping can be
-	 * switched on & off while not affecting other tracing
-	 */
-	unsigned single_step       : 1;
-	unsigned instruction_fetch : 1;
-	unsigned                   : 30;
-	/*
-	 * These addresses are copied into cr10 & cr11 if single
-	 * stepping is switched off
-	 */
-	__u32 starting_addr;
-	__u32 ending_addr;
-	union {
-		per_lowcore_words32 words;
-	} lowcore;
-} per_struct32;
+struct compat_per_struct_kernel {
+	__u32 cr9;		/* PER control bits */
+	__u32 cr10;		/* PER starting address */
+	__u32 cr11;		/* PER ending address */
+	__u32 bits;		/* Obsolete software bits */
+	__u32 starting_addr;	/* User specified start address */
+	__u32 ending_addr;	/* User specified end address */
+	__u16 perc_atmid;	/* PER trap ATMID */
+	__u32 address;		/* PER trap instruction address */
+	__u8  access_id;	/* PER trap access identification */
+};
 
-struct user_regs_struct32
+struct compat_user_regs_struct
 {
 	psw_compat_t psw;
 	u32 gprs[NUM_GPRS];
@@ -50,15 +29,14 @@ struct user_regs_struct32
 	 * itself as there is no "official" ptrace interface for hardware
 	 * watchpoints. This is the way intel does it.
 	 */
-	per_struct32 per_info;
-	u32  ieee_instruction_pointer;
-	/* Used to give failing instruction back to user for ieee exceptions */
+	struct compat_per_struct_kernel per_info;
+	u32  ieee_instruction_pointer;	/* obsolete, always 0 */
 };
 
-struct user32 {
+struct compat_user {
 	/* We start with the registers, to mimic the way that "memory"
 	   is returned from the ptrace(3,...) function.  */
-	struct user_regs_struct32 regs; /* Where the registers are actually stored */
+	struct compat_user_regs_struct regs;
 	/* The rest of this junk is to help gdb figure out what goes where */
 	u32 u_tsize;		/* Text segment size (pages). */
 	u32 u_dsize;		/* Data segment size (pages). */
@@ -80,6 +58,6 @@ typedef struct
 	__u32	len;
 	__u32	kernel_addr;
 	__u32	process_addr;
-} ptrace_area_emu31;
+} compat_ptrace_area;
 
 #endif /* _PTRACE32_H */
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 8e60fb23b90d..1f5eb789c3a7 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1877,3 +1877,36 @@ sys_prlimit64_wrapper:
 	llgtr	%r4,%r4			# const struct rlimit64 __user *
 	llgtr	%r5,%r5			# struct rlimit64 __user *
 	jg	sys_prlimit64		# branch to system call
+
+	.globl	sys_name_to_handle_at_wrapper
+sys_name_to_handle_at_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# const char __user *
+	llgtr	%r4,%r4			# struct file_handle __user *
+	llgtr	%r5,%r5			# int __user *
+	lgfr	%r6,%r6			# int
+	jg	sys_name_to_handle_at
+
+	.globl	compat_sys_open_by_handle_at_wrapper
+compat_sys_open_by_handle_at_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# struct file_handle __user *
+	lgfr	%r4,%r4			# int
+	jg	compat_sys_open_by_handle_at
+
+	.globl	compat_sys_clock_adjtime_wrapper
+compat_sys_clock_adjtime_wrapper:
+	lgfr	%r2,%r2			# clockid_t (int)
+	llgtr	%r3,%r3			# struct compat_timex __user *
+	jg	compat_sys_clock_adjtime
+
+	.globl	sys_syncfs_wrapper
+sys_syncfs_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_syncfs
+
+	.globl	sys_setns_wrapper
+sys_setns_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	jg	sys_setns
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 98192261491d..5ad6bc078bfd 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -174,6 +174,7 @@ static const struct file_operations debug_file_ops = {
 	.write	 = debug_input,
 	.open	 = debug_open,
 	.release = debug_close,
+	.llseek  = no_llseek,
 };
 
 static struct dentry *debug_debugfs_root_entry;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index c032d11da8a1..8237fc07ac79 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -9,27 +9,6 @@
 #include <asm/diag.h>
 
 /*
- * Diagnose 10: Release pages
- */
-void diag10(unsigned long addr)
-{
-	if (addr >= 0x7ff00000)
-		return;
-	asm volatile(
-#ifdef CONFIG_64BIT
-		"	sam31\n"
-		"	diag	%0,%0,0x10\n"
-		"0:	sam64\n"
-#else
-		"	diag	%0,%0,0x10\n"
-		"0:\n"
-#endif
-		EX_TABLE(0b, 0b)
-		: : "a" (addr));
-}
-EXPORT_SYMBOL(diag10);
-
-/*
  * Diagnose 14: Input spool file manipulation
  */
 int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index b39b27d68b45..1ca3d1d6a86c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -30,9 +30,9 @@
 #include <asm/atomic.h>
 #include <asm/mathemu.h>
 #include <asm/cpcmd.h>
-#include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include <asm/irq.h>
 
 #ifndef CONFIG_64BIT
 #define ONELONG "%08lx: "
@@ -113,7 +113,7 @@ enum {
 	INSTR_INVALID,
 	INSTR_E,
 	INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
-	INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU,
+	INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0,
 	INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
 	INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU,
 	INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
@@ -122,13 +122,14 @@ enum {
 	INSTR_RRE_RR, INSTR_RRE_RR_OPT,
 	INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
 	INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
-	INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR,
-	INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
+	INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
+	INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
 	INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
 	INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
 	INSTR_RSI_RRP,
 	INSTR_RSL_R0RD,
 	INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
+	INSTR_RSY_RDRM,
 	INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
 	INSTR_RS_RURD,
 	INSTR_RXE_FRRD, INSTR_RXE_RRRD,
@@ -139,7 +140,7 @@ enum {
 	INSTR_SIY_IRD, INSTR_SIY_URD,
 	INSTR_SI_URD,
 	INSTR_SSE_RDRD,
-	INSTR_SSF_RRDRD,
+	INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2,
 	INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
 	INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
 	INSTR_S_00, INSTR_S_RD,
@@ -152,7 +153,7 @@ struct operand {
 };
 
 struct insn {
-	const char name[6];
+	const char name[5];
 	unsigned char opfrag;
 	unsigned char format;
 };
@@ -217,6 +218,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RIE_RRP]	  = { 0xff, R_8,R_12,J16_16,0,0,0 },
 	[INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
 	[INSTR_RIE_RUPI]  = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
+	[INSTR_RIE_RRI0]  = { 0xff, R_8,R_12,I16_16,0,0,0 },
 	[INSTR_RIL_RI]	  = { 0x0f, R_8,I32_16,0,0,0,0 },
 	[INSTR_RIL_RP]	  = { 0x0f, R_8,J32_16,0,0,0,0 },
 	[INSTR_RIL_RU]	  = { 0x0f, R_8,U32_16,0,0,0,0 },
@@ -248,6 +250,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RRF_FUFF]  = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
 	[INSTR_RRF_M0RR]  = { 0xff, R_24,R_28,M_16,0,0,0 },
 	[INSTR_RRF_R0RR]  = { 0xff, R_24,R_16,R_28,0,0,0 },
+	[INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 },
 	[INSTR_RRF_RURR]  = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
 	[INSTR_RRF_U0FF]  = { 0xff, F_24,U4_16,F_28,0,0,0 },
 	[INSTR_RRF_U0RF]  = { 0xff, R_24,U4_16,F_28,0,0,0 },
@@ -269,6 +272,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_RSY_CCRD]  = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RRRD]  = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
 	[INSTR_RSY_RURD]  = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
+	[INSTR_RSY_RDRM]  = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
 	[INSTR_RS_AARD]	  = { 0xff, A_8,A_12,D_20,B_16,0,0 },
 	[INSTR_RS_CCRD]	  = { 0xff, C_8,C_12,D_20,B_16,0,0 },
 	[INSTR_RS_R0RD]	  = { 0xff, R_8,D_20,B_16,0,0,0 },
@@ -290,6 +294,7 @@ static const unsigned char formats[][7] = {
 	[INSTR_SI_URD]	  = { 0xff, D_20,B_16,U8_8,0,0,0 },
 	[INSTR_SSE_RDRD]  = { 0xff, D_20,B_16,D_36,B_32,0,0 },
 	[INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
+	[INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 },
 	[INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
 	[INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
 	[INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
@@ -300,6 +305,36 @@ static const unsigned char formats[][7] = {
 	[INSTR_S_RD]	  = { 0xff, D_20,B_16,0,0,0,0 },
 };
 
+enum {
+	LONG_INSN_ALGHSIK,
+	LONG_INSN_ALHSIK,
+	LONG_INSN_CLFHSI,
+	LONG_INSN_CLGFRL,
+	LONG_INSN_CLGHRL,
+	LONG_INSN_CLGHSI,
+	LONG_INSN_CLHHSI,
+	LONG_INSN_LLGFRL,
+	LONG_INSN_LLGHRL,
+	LONG_INSN_POPCNT,
+	LONG_INSN_RISBHG,
+	LONG_INSN_RISBLG,
+};
+
+static char *long_insn_name[] = {
+	[LONG_INSN_ALGHSIK] = "alghsik",
+	[LONG_INSN_ALHSIK] = "alhsik",
+	[LONG_INSN_CLFHSI] = "clfhsi",
+	[LONG_INSN_CLGFRL] = "clgfrl",
+	[LONG_INSN_CLGHRL] = "clghrl",
+	[LONG_INSN_CLGHSI] = "clghsi",
+	[LONG_INSN_CLHHSI] = "clhhsi",
+	[LONG_INSN_LLGFRL] = "llgfrl",
+	[LONG_INSN_LLGHRL] = "llghrl",
+	[LONG_INSN_POPCNT] = "popcnt",
+	[LONG_INSN_RISBHG] = "risbhg",
+	[LONG_INSN_RISBLG] = "risblk",
+};
+
 static struct insn opcode[] = {
 #ifdef CONFIG_64BIT
 	{ "lmd", 0xef, INSTR_SS_RRRDRD3 },
@@ -637,6 +672,7 @@ static struct insn opcode_b2[] = {
 	{ "rp", 0x77, INSTR_S_RD },
 	{ "stcke", 0x78, INSTR_S_RD },
 	{ "sacf", 0x79, INSTR_S_RD },
+	{ "spp", 0x80, INSTR_S_RD },
 	{ "stsi", 0x7d, INSTR_S_RD },
 	{ "srnm", 0x99, INSTR_S_RD },
 	{ "stfpc", 0x9c, INSTR_S_RD },
@@ -881,6 +917,35 @@ static struct insn opcode_b9[] = {
 	{ "pfmf", 0xaf, INSTR_RRE_RR },
 	{ "trte", 0xbf, INSTR_RRF_M0RR },
 	{ "trtre", 0xbd, INSTR_RRF_M0RR },
+	{ "ahhhr", 0xc8, INSTR_RRF_R0RR2 },
+	{ "shhhr", 0xc9, INSTR_RRF_R0RR2 },
+	{ "alhhh", 0xca, INSTR_RRF_R0RR2 },
+	{ "alhhl", 0xca, INSTR_RRF_R0RR2 },
+	{ "slhhh", 0xcb, INSTR_RRF_R0RR2 },
+	{ "chhr ", 0xcd, INSTR_RRE_RR },
+	{ "clhhr", 0xcf, INSTR_RRE_RR },
+	{ "ahhlr", 0xd8, INSTR_RRF_R0RR2 },
+	{ "shhlr", 0xd9, INSTR_RRF_R0RR2 },
+	{ "slhhl", 0xdb, INSTR_RRF_R0RR2 },
+	{ "chlr", 0xdd, INSTR_RRE_RR },
+	{ "clhlr", 0xdf, INSTR_RRE_RR },
+	{ { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR },
+	{ "locgr", 0xe2, INSTR_RRF_M0RR },
+	{ "ngrk", 0xe4, INSTR_RRF_R0RR2 },
+	{ "ogrk", 0xe6, INSTR_RRF_R0RR2 },
+	{ "xgrk", 0xe7, INSTR_RRF_R0RR2 },
+	{ "agrk", 0xe8, INSTR_RRF_R0RR2 },
+	{ "sgrk", 0xe9, INSTR_RRF_R0RR2 },
+	{ "algrk", 0xea, INSTR_RRF_R0RR2 },
+	{ "slgrk", 0xeb, INSTR_RRF_R0RR2 },
+	{ "locr", 0xf2, INSTR_RRF_M0RR },
+	{ "nrk", 0xf4, INSTR_RRF_R0RR2 },
+	{ "ork", 0xf6, INSTR_RRF_R0RR2 },
+	{ "xrk", 0xf7, INSTR_RRF_R0RR2 },
+	{ "ark", 0xf8, INSTR_RRF_R0RR2 },
+	{ "srk", 0xf9, INSTR_RRF_R0RR2 },
+	{ "alrk", 0xfa, INSTR_RRF_R0RR2 },
+	{ "slrk", 0xfb, INSTR_RRF_R0RR2 },
 #endif
 	{ "kmac", 0x1e, INSTR_RRE_RR },
 	{ "lrvr", 0x1f, INSTR_RRE_RR },
@@ -949,9 +1014,9 @@ static struct insn opcode_c4[] = {
 	{ "lgfrl", 0x0c, INSTR_RIL_RP },
 	{ "lhrl", 0x05, INSTR_RIL_RP },
 	{ "lghrl", 0x04, INSTR_RIL_RP },
-	{ "llgfrl", 0x0e, INSTR_RIL_RP },
+	{ { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
 	{ "llhrl", 0x02, INSTR_RIL_RP },
-	{ "llghrl", 0x06, INSTR_RIL_RP },
+	{ { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
 	{ "strl", 0x0f, INSTR_RIL_RP },
 	{ "stgrl", 0x0b, INSTR_RIL_RP },
 	{ "sthrl", 0x07, INSTR_RIL_RP },
@@ -968,9 +1033,9 @@ static struct insn opcode_c6[] = {
 	{ "cghrl", 0x04, INSTR_RIL_RP },
 	{ "clrl", 0x0f, INSTR_RIL_RP },
 	{ "clgrl", 0x0a, INSTR_RIL_RP },
-	{ "clgfrl", 0x0e, INSTR_RIL_RP },
+	{ { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
 	{ "clhrl", 0x07, INSTR_RIL_RP },
-	{ "clghrl", 0x06, INSTR_RIL_RP },
+	{ { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
 	{ "pfdrl", 0x02, INSTR_RIL_UP },
 	{ "exrl", 0x00, INSTR_RIL_RP },
 #endif
@@ -982,6 +1047,20 @@ static struct insn opcode_c8[] = {
 	{ "mvcos", 0x00, INSTR_SSF_RRDRD },
 	{ "ectg", 0x01, INSTR_SSF_RRDRD },
 	{ "csst", 0x02, INSTR_SSF_RRDRD },
+	{ "lpd", 0x04, INSTR_SSF_RRDRD2 },
+	{ "lpdg ", 0x05, INSTR_SSF_RRDRD2 },
+#endif
+	{ "", 0, INSTR_INVALID }
+};
+
+static struct insn opcode_cc[] = {
+#ifdef CONFIG_64BIT
+	{ "brcth", 0x06, INSTR_RIL_RP },
+	{ "aih", 0x08, INSTR_RIL_RI },
+	{ "alsih", 0x0a, INSTR_RIL_RI },
+	{ "alsih", 0x0b, INSTR_RIL_RI },
+	{ "cih", 0x0d, INSTR_RIL_RI },
+	{ "clih ", 0x0f, INSTR_RIL_RI },
 #endif
 	{ "", 0, INSTR_INVALID }
 };
@@ -1063,6 +1142,16 @@ static struct insn opcode_e3[] = {
 	{ "mfy", 0x5c, INSTR_RXY_RRRD },
 	{ "mhy", 0x7c, INSTR_RXY_RRRD },
 	{ "pfd", 0x36, INSTR_RXY_URRD },
+	{ "lbh", 0xc0, INSTR_RXY_RRRD },
+	{ "llch", 0xc2, INSTR_RXY_RRRD },
+	{ "stch", 0xc3, INSTR_RXY_RRRD },
+	{ "lhh", 0xc4, INSTR_RXY_RRRD },
+	{ "llhh", 0xc6, INSTR_RXY_RRRD },
+	{ "sthh", 0xc7, INSTR_RXY_RRRD },
+	{ "lfh", 0xca, INSTR_RXY_RRRD },
+	{ "stfh", 0xcb, INSTR_RXY_RRRD },
+	{ "chf", 0xcd, INSTR_RXY_RRRD },
+	{ "clhf", 0xcf, INSTR_RXY_RRRD },
 #endif
 	{ "lrv", 0x1e, INSTR_RXY_RRRD },
 	{ "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -1080,9 +1169,9 @@ static struct insn opcode_e5[] = {
 	{ "chhsi", 0x54, INSTR_SIL_RDI },
 	{ "chsi", 0x5c, INSTR_SIL_RDI },
 	{ "cghsi", 0x58, INSTR_SIL_RDI },
-	{ "clhhsi", 0x55, INSTR_SIL_RDU },
-	{ "clfhsi", 0x5d, INSTR_SIL_RDU },
-	{ "clghsi", 0x59, INSTR_SIL_RDU },
+	{ { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
+	{ { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
+	{ { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
 	{ "mvhhi", 0x44, INSTR_SIL_RDI },
 	{ "mvhi", 0x4c, INSTR_SIL_RDI },
 	{ "mvghi", 0x48, INSTR_SIL_RDI },
@@ -1137,6 +1226,24 @@ static struct insn opcode_eb[] = {
 	{ "alsi", 0x6e, INSTR_SIY_IRD },
 	{ "algsi", 0x7e, INSTR_SIY_IRD },
 	{ "ecag", 0x4c, INSTR_RSY_RRRD },
+	{ "srak", 0xdc, INSTR_RSY_RRRD },
+	{ "slak", 0xdd, INSTR_RSY_RRRD },
+	{ "srlk", 0xde, INSTR_RSY_RRRD },
+	{ "sllk", 0xdf, INSTR_RSY_RRRD },
+	{ "locg", 0xe2, INSTR_RSY_RDRM },
+	{ "stocg", 0xe3, INSTR_RSY_RDRM },
+	{ "lang", 0xe4, INSTR_RSY_RRRD },
+	{ "laog", 0xe6, INSTR_RSY_RRRD },
+	{ "laxg", 0xe7, INSTR_RSY_RRRD },
+	{ "laag", 0xe8, INSTR_RSY_RRRD },
+	{ "laalg", 0xea, INSTR_RSY_RRRD },
+	{ "loc", 0xf2, INSTR_RSY_RDRM },
+	{ "stoc", 0xf3, INSTR_RSY_RDRM },
+	{ "lan", 0xf4, INSTR_RSY_RRRD },
+	{ "lao", 0xf6, INSTR_RSY_RRRD },
+	{ "lax", 0xf7, INSTR_RSY_RRRD },
+	{ "laa", 0xf8, INSTR_RSY_RRRD },
+	{ "laal", 0xfa, INSTR_RSY_RRRD },
 #endif
 	{ "rll", 0x1d, INSTR_RSY_RRRD },
 	{ "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -1172,6 +1279,12 @@ static struct insn opcode_ec[] = {
 	{ "rxsbg", 0x57, INSTR_RIE_RRUUU },
 	{ "rosbg", 0x56, INSTR_RIE_RRUUU },
 	{ "risbg", 0x55, INSTR_RIE_RRUUU },
+	{ { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
+	{ { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
+	{ "ahik", 0xd8, INSTR_RIE_RRI0 },
+	{ "aghik", 0xd9, INSTR_RIE_RRI0 },
+	{ { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 },
+	{ { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 },
 #endif
 	{ "", 0, INSTR_INVALID }
 };
@@ -1321,6 +1434,9 @@ static struct insn *find_insn(unsigned char *code)
 	case 0xc8:
 		table = opcode_c8;
 		break;
+	case 0xcc:
+		table = opcode_cc;
+		break;
 	case 0xe3:
 		table = opcode_e3;
 		opfrag = code[5];
@@ -1367,7 +1483,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
 	ptr = buffer;
 	insn = find_insn(code);
 	if (insn) {
-		ptr += sprintf(ptr, "%.5s\t", insn->name);
+		if (insn->name[0] == '\0')
+			ptr += sprintf(ptr, "%s\t",
+				       long_insn_name[(int) insn->name[1]]);
+		else
+			ptr += sprintf(ptr, "%.5s\t", insn->name);
 		/* Extract the operands. */
 		separator = 0;
 		for (ops = formats[insn->format] + 1, i = 0;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c00856ad4e5a..068f8465c4ee 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -94,6 +94,7 @@ static noinline __init void create_kernel_nss(void)
 	unsigned int sinitrd_pfn, einitrd_pfn;
 #endif
 	int response;
+	int hlen;
 	size_t len;
 	char *savesys_ptr;
 	char defsys_cmd[DEFSYS_CMD_SIZE];
@@ -124,24 +125,27 @@ static noinline __init void create_kernel_nss(void)
 	end_pfn = PFN_UP(__pa(&_end));
 	min_size = end_pfn << 2;
 
-	sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
-		kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
-		eshared_pfn, end_pfn);
+	hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
+			"DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
+			kernel_nss_name, stext_pfn - 1, stext_pfn,
+			eshared_pfn - 1, eshared_pfn, end_pfn);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (INITRD_START && INITRD_SIZE) {
 		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
 		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
 		min_size = einitrd_pfn << 2;
-		sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
-		sinitrd_pfn, einitrd_pfn);
+		hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
+				 " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
 	}
 #endif
 
-	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13",
-		defsys_cmd, min_size);
-	sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
-		kernel_nss_name, kernel_nss_name);
+	snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
+		 " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
+	defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
+	snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
+		 kernel_nss_name, kernel_nss_name);
+	savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';
 
 	__cpcmd(defsys_cmd, NULL, 0, &response);
@@ -208,7 +212,8 @@ static noinline __init void init_kernel_storage_key(void)
 	end_pfn = PFN_UP(__pa(&_end));
 
 	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
-		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
+		page_set_storage_key(init_pfn << PAGE_SHIFT,
+				     PAGE_DEFAULT_KEY, 0);
 }
 
 static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
@@ -255,13 +260,33 @@ static noinline __init void setup_lowcore_early(void)
 	s390_base_pgm_handler_fn = early_pgm_check_handler;
 }
 
+static noinline __init void setup_facility_list(void)
+{
+	unsigned long nr;
+
+	S390_lowcore.stfl_fac_list = 0;
+	asm volatile(
+		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
+		"0:\n"
+		EX_TABLE(0b,0b) : "=m" (S390_lowcore.stfl_fac_list));
+	memcpy(&S390_lowcore.stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+	nr = 4;				/* # bytes stored by stfl */
+	if (test_facility(7)) {
+		/* More facility bits available with stfle */
+		register unsigned long reg0 asm("0") = MAX_FACILITY_BIT/64 - 1;
+		asm volatile(".insn s,0xb2b00000,%0" /* stfle */
+			     : "=m" (S390_lowcore.stfle_fac_list), "+d" (reg0)
+			     : : "cc");
+		nr = (reg0 + 1) * 8;	/* # bytes stored by stfle */
+	}
+	memset((char *) S390_lowcore.stfle_fac_list + nr, 0,
+	       MAX_FACILITY_BIT/8 - nr);
+}
+
 static noinline __init void setup_hpage(void)
 {
 #ifndef CONFIG_DEBUG_PAGEALLOC
-	unsigned int facilities;
-
-	facilities = stfl();
-	if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29)))
+	if (!test_facility(2) || !test_facility(8))
 		return;
 	S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
 	__ctl_set_bit(0, 23);
@@ -355,18 +380,15 @@ static __init void detect_diag44(void)
 static __init void detect_machine_facilities(void)
 {
 #ifdef CONFIG_64BIT
-	unsigned int facilities;
-	unsigned long long facility_bits;
-
-	facilities = stfl();
-	if (facilities & (1 << 28))
+	if (test_facility(3))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
-	if (facilities & (1 << 23))
+	if (test_facility(8))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
-	if (facilities & (1 << 4))
+	if (test_facility(11))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
+	if (test_facility(27))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
-	if ((stfle(&facility_bits, 1) > 0) &&
-	    (facility_bits & (1ULL << (63 - 40))))
+	if (test_facility(40))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
 #endif
 }
@@ -447,6 +469,7 @@ void __init startup_init(void)
 	lockdep_off();
 	sort_main_extable();
 	setup_lowcore_early();
+	setup_facility_list();
 	detect_machine_type();
 	ipl_update_parameters();
 	setup_boot_command_line();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index bea9ee37ac9d..0476174dfff5 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -9,7 +9,6 @@
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/cache.h>
@@ -49,7 +48,7 @@ SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
 SP_SIZE       = STACK_FRAME_OVERHEAD + __PT_SIZE
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -72,25 +71,9 @@ STACK_SIZE = 1 << STACK_SHIFT
 	l	%r1,BASED(.Ltrace_irq_off_caller)
 	basr	%r14,%r1
 	.endm
-
-	.macro	TRACE_IRQS_CHECK_ON
-	tm	SP_PSW(%r15),0x03	# irqs enabled?
-	bz	BASED(0f)
-	TRACE_IRQS_ON
-0:
-	.endm
-
-	.macro	TRACE_IRQS_CHECK_OFF
-	tm	SP_PSW(%r15),0x03	# irqs enabled?
-	bz	BASED(0f)
-	TRACE_IRQS_OFF
-0:
-	.endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK_ON
-#define TRACE_IRQS_CHECK_OFF
 #endif
 
 #ifdef CONFIG_LOCKDEP
@@ -126,31 +109,36 @@ STACK_SIZE = 1 << STACK_SHIFT
 1:	stm	%r10,%r11,\lc_sum
 	.endm
 
-	.macro	SAVE_ALL_BASE savearea
+	.macro	SAVE_ALL_SVC psworg,savearea
 	stm	%r12,%r15,\savearea
 	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
+	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	.endm
 
-	.macro	SAVE_ALL_SVC psworg,savearea
-	la	%r12,\psworg
-	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+	.macro	SAVE_ALL_BASE savearea
+	stm	%r12,%r15,\savearea
+	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 	.endm
 
-	.macro	SAVE_ALL_SYNC psworg,savearea
-	la	%r12,\psworg
+	.macro	SAVE_ALL_PGM psworg,savearea
 	tm	\psworg+1,0x01		# test problem state bit
-	bz	BASED(2f)		# skip stack setup save
-	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
 #ifdef CONFIG_CHECK_STACK
-	b	BASED(3f)
-2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	bz	BASED(stack_overflow)
-3:
+	bnz	BASED(1f)
+	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	bnz	BASED(2f)
+	la	%r12,\psworg
+	b	BASED(stack_overflow)
+#else
+	bz	BASED(2f)
 #endif
-2:
+1:	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	.endm
 
 	.macro	SAVE_ALL_ASYNC psworg,savearea
+	stm	%r12,%r15,\savearea
+	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 	la	%r12,\psworg
 	tm	\psworg+1,0x01		# test problem state bit
 	bnz	BASED(1f)		# from user -> load async stack
@@ -165,27 +153,23 @@ STACK_SIZE = 1 << STACK_SHIFT
 0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
 	slr	%r14,%r15
 	sra	%r14,STACK_SHIFT
-	be	BASED(2f)
-1:	l	%r15,__LC_ASYNC_STACK
 #ifdef CONFIG_CHECK_STACK
-	b	BASED(3f)
-2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	bz	BASED(stack_overflow)
-3:
+	bnz	BASED(1f)
+	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	bnz	BASED(2f)
+	b	BASED(stack_overflow)
+#else
+	bz	BASED(2f)
 #endif
-2:
+1:	l	%r15,__LC_ASYNC_STACK
+2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	.endm
 
-	.macro	CREATE_STACK_FRAME psworg,savearea
-	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
+	.macro	CREATE_STACK_FRAME savearea
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
-	icm	%r12,12,__LC_SVC_ILC
-	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
-	st	%r12,SP_ILC(%r15)
 	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
-	la	%r12,0
-	st	%r12,__SF_BACKCHAIN(%r15)	# clear back chain
+	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
 	.endm
 
 	.macro	RESTORE_ALL psworg,sync
@@ -198,6 +182,14 @@ STACK_SIZE = 1 << STACK_SHIFT
 	lpsw	\psworg			# back to caller
 	.endm
 
+	.macro REENABLE_IRQS
+	mvc	__SF_EMPTY(1,%r15),SP_PSW(%r15)
+	ni	__SF_EMPTY(%r15),0xbf
+	ssm	__SF_EMPTY(%r15)
+	.endm
+
+	.section .kprobes.text, "ax"
+
 /*
  * Scheduler resume function, called by switch_to
  *  gpr2 = (task_struct *) prev
@@ -208,31 +200,22 @@ STACK_SIZE = 1 << STACK_SHIFT
 	.globl	__switch_to
 __switch_to:
 	basr	%r1,0
-__switch_to_base:
-	tm	__THREAD_per(%r3),0xe8		# new process is using per ?
-	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
-	stctl	%c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
-	clc	__THREAD_per(12,%r3),__SF_EMPTY(%r15)
-	be	__switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
-	lctl	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
-__switch_to_noper:
-	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
+0:	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
+	l	%r5,__THREAD_info(%r3)		# get thread_info of next
 	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
-	bz	__switch_to_no_mcck-__switch_to_base(%r1)
+	bz	1f-0b(%r1)
 	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-	l	%r4,__THREAD_info(%r3)		# get thread_info of next
-	oi	__TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
-__switch_to_no_mcck:
-	stm	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
-	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
-	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
-	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
-	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
-	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
-	l	%r3,__THREAD_info(%r3)	# load thread_info from task struct
-	st	%r3,__LC_THREAD_INFO
-	ahi	%r3,STACK_SIZE
-	st	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
+	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
+1:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
+	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
+	st	%r3,__LC_CURRENT		# store task struct of next
+	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
+	st	%r5,__LC_THREAD_INFO		# store thread info of next
+	ahi	%r5,STACK_SIZE			# end of kernel stack of next
+	st	%r5,__LC_KERNEL_STACK		# store end of kernel stack
 	br	%r14
 
 __critical_start:
@@ -245,10 +228,11 @@ __critical_start:
 system_call:
 	stpt	__LC_SYNC_ENTER_TIMER
 sysc_saveall:
-	SAVE_ALL_BASE __LC_SAVE_AREA
 	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	lh	%r7,0x8a	  # get svc number from lowcore
+	CREATE_STACK_FRAME __LC_SAVE_AREA
+	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
+	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 sysc_vtime:
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 sysc_stime:
@@ -256,21 +240,20 @@ sysc_stime:
 sysc_update:
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 sysc_do_svc:
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	ltr	%r7,%r7			# test for svc 0
+	xr	%r7,%r7
+	icm	%r7,3,SP_SVCNR(%r15)	# load svc number and test for svc 0
 	bnz	BASED(sysc_nr_ok)	# svc number > 0
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_nr_ok)
+	sth	%r1,SP_SVCNR(%r15)
 	lr	%r7,%r1		  # copy svc number to %r7
 sysc_nr_ok:
-	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
-sysc_do_restart:
-	sth	%r7,SP_SVCNR(%r15)
 	sll	%r7,2		  # svc number *4
-	l	%r8,BASED(.Lsysc_table)
-	tm	__TI_flags+2(%r9),_TIF_SYSCALL
-	l	%r8,0(%r7,%r8)	  # get system call addr.
+	l	%r10,BASED(.Lsysc_table)
+	tm	__TI_flags+2(%r12),_TIF_SYSCALL
+	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
+	l	%r8,0(%r7,%r10)	  # get system call addr.
 	bnz	BASED(sysc_tracesys)
 	basr	%r14,%r8	  # call sys_xxxx
 	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)
@@ -278,7 +261,7 @@ sysc_do_restart:
 sysc_return:
 	LOCKDEP_SYS_EXIT
 sysc_tif:
-	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
+	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
 	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
 sysc_restore:
 	RESTORE_ALL __LC_RETURN_PSW,1
@@ -295,17 +278,17 @@ sysc_work:
 # One of the work bits is on. Find out which one.
 #
 sysc_work_tif:
-	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
+	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
 	bo	BASED(sysc_mcck_pending)
-	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 	bo	BASED(sysc_reschedule)
-	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
+	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
 	bo	BASED(sysc_sigpending)
-	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
+	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	bo	BASED(sysc_notify_resume)
-	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
+	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
-	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
+	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
 	bo	BASED(sysc_singlestep)
 	b	BASED(sysc_return)	# beware of critical section cleanup
 
@@ -329,13 +312,13 @@ sysc_mcck_pending:
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
-	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_signal)
 	basr	%r14,%r1		# call do_signal
-	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
+	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
-	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
+	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
 	bo	BASED(sysc_singlestep)
 	b	BASED(sysc_return)
 
@@ -353,23 +336,23 @@ sysc_notify_resume:
 # _TIF_RESTART_SVC is set, set up registers and restart svc
 #
 sysc_restart:
-	ni	__TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+	ni	__TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
 	l	%r7,SP_R2(%r15)		# load new svc number
 	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
 	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
-	b	BASED(sysc_do_restart)	# restart svc
+	sth	%r7,SP_SVCNR(%r15)
+	b	BASED(sysc_nr_ok)	# restart svc
 
 #
-# _TIF_SINGLE_STEP is set, call do_single_step
+# _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
-	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
-	mvi	SP_SVCNR(%r15),0xff	# set trap indication to pgm check
-	mvi	SP_SVCNR+1(%r15),0xff
+	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
+	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)	# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
 	la	%r14,BASED(sysc_return)	# load adr. of system return
-	br	%r1			# branch to do_single_step
+	br	%r1			# branch to do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -379,22 +362,23 @@ sysc_tracesys:
 	l	%r1,BASED(.Ltrace_entry)
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	la	%r3,0
-	srl	%r7,2
-	st	%r7,SP_R2(%r15)
+	xr	%r0,%r0
+	icm	%r0,3,SP_SVCNR(%r15)
+	st	%r0,SP_R2(%r15)
 	basr	%r14,%r1
 	cl	%r2,BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_tracenogo)
-	l	%r8,BASED(.Lsysc_table)
 	lr	%r7,%r2
 	sll	%r7,2			# svc number *4
-	l	%r8,0(%r7,%r8)
+	l	%r8,0(%r7,%r10)
 sysc_tracego:
 	lm	%r3,%r6,SP_R3(%r15)
+	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
 	l	%r2,SP_ORIG_R2(%r15)
 	basr	%r14,%r8		# call sys_xxx
 	st	%r2,SP_R2(%r15)		# store return value
 sysc_tracenogo:
-	tm	__TI_flags+2(%r9),_TIF_SYSCALL
+	tm	__TI_flags+2(%r12),_TIF_SYSCALL
 	bz	BASED(sysc_return)
 	l	%r1,BASED(.Ltrace_exit)
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
@@ -407,7 +391,7 @@ sysc_tracenogo:
 	.globl	ret_from_fork
 ret_from_fork:
 	l	%r13,__LC_SVC_NEW_PSW+4
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
 	bo	BASED(0f)
 	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
@@ -440,13 +424,11 @@ kernel_execve:
 	br	%r14
 	# execve succeeded.
 0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
-	TRACE_IRQS_OFF
 	l	%r15,__LC_KERNEL_STACK	# load ksp
 	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	l	%r9,__LC_THREAD_INFO
 	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
+	l	%r12,__LC_THREAD_INFO
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	TRACE_IRQS_ON
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	l	%r1,BASED(.Lexecve_tail)
 	basr	%r14,%r1
@@ -475,27 +457,28 @@ pgm_check_handler:
475 SAVE_ALL_BASE __LC_SAVE_AREA 457 SAVE_ALL_BASE __LC_SAVE_AREA
476 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 458 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
477 bnz BASED(pgm_per) # got per exception -> special case 459 bnz BASED(pgm_per) # got per exception -> special case
478 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 460 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
479 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 461 CREATE_STACK_FRAME __LC_SAVE_AREA
462 xc SP_ILC(4,%r15),SP_ILC(%r15)
463 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
464 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
480 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 465 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
481 bz BASED(pgm_no_vtime) 466 bz BASED(pgm_no_vtime)
482 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 467 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
483 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 468 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
484 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 469 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
485pgm_no_vtime: 470pgm_no_vtime:
486 TRACE_IRQS_CHECK_OFF
487 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
488 l %r3,__LC_PGM_ILC # load program interruption code 471 l %r3,__LC_PGM_ILC # load program interruption code
472 l %r4,__LC_TRANS_EXC_CODE
473 REENABLE_IRQS
489 la %r8,0x7f 474 la %r8,0x7f
490 nr %r8,%r3 475 nr %r8,%r3
491pgm_do_call:
492 l %r7,BASED(.Ljump_table)
493 sll %r8,2 476 sll %r8,2
494 l %r7,0(%r8,%r7) # load address of handler routine 477 l %r1,BASED(.Ljump_table)
478 l %r1,0(%r8,%r1) # load address of handler routine
495 la %r2,SP_PTREGS(%r15) # address of register-save area 479 la %r2,SP_PTREGS(%r15) # address of register-save area
496 basr %r14,%r7 # branch to interrupt-handler 480 basr %r14,%r1 # branch to interrupt-handler
497pgm_exit: 481pgm_exit:
498 TRACE_IRQS_CHECK_ON
499 b BASED(sysc_return) 482 b BASED(sysc_return)
500 483
501# 484#
@@ -515,55 +498,54 @@ pgm_per:
515# Normal per exception 498# Normal per exception
516# 499#
517pgm_per_std: 500pgm_per_std:
518 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 501 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
519 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 502 CREATE_STACK_FRAME __LC_SAVE_AREA
503 mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
504 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
520 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 505 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
521 bz BASED(pgm_no_vtime2) 506 bz BASED(pgm_no_vtime2)
522 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 507 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
523 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 508 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
524 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 509 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
525pgm_no_vtime2: 510pgm_no_vtime2:
526 TRACE_IRQS_CHECK_OFF 511 l %r1,__TI_task(%r12)
527 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
528 l %r1,__TI_task(%r9)
529 tm SP_PSW+1(%r15),0x01 # kernel per event ? 512 tm SP_PSW+1(%r15),0x01 # kernel per event ?
530 bz BASED(kernel_per) 513 bz BASED(kernel_per)
531 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 514 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
532 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS 515 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
533 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 516 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
534 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 517 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
535 l %r3,__LC_PGM_ILC # load program interruption code 518 l %r3,__LC_PGM_ILC # load program interruption code
519 l %r4,__LC_TRANS_EXC_CODE
520 REENABLE_IRQS
536 la %r8,0x7f 521 la %r8,0x7f
537 nr %r8,%r3 # clear per-event-bit and ilc 522 nr %r8,%r3 # clear per-event-bit and ilc
538 be BASED(pgm_exit2) # only per or per+check ? 523 be BASED(pgm_exit2) # only per or per+check ?
539 l %r7,BASED(.Ljump_table)
540 sll %r8,2 524 sll %r8,2
541 l %r7,0(%r8,%r7) # load address of handler routine 525 l %r1,BASED(.Ljump_table)
526 l %r1,0(%r8,%r1) # load address of handler routine
542 la %r2,SP_PTREGS(%r15) # address of register-save area 527 la %r2,SP_PTREGS(%r15) # address of register-save area
543 basr %r14,%r7 # branch to interrupt-handler 528 basr %r14,%r1 # branch to interrupt-handler
544pgm_exit2: 529pgm_exit2:
545 TRACE_IRQS_ON
546 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
547 b BASED(sysc_return) 530 b BASED(sysc_return)
548 531
549# 532#
550# it was a single stepped SVC that is causing all the trouble 533# it was a single stepped SVC that is causing all the trouble
551# 534#
552pgm_svcper: 535pgm_svcper:
553 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 536 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
554 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 537 CREATE_STACK_FRAME __LC_SAVE_AREA
538 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
539 mvc SP_ILC(4,%r15),__LC_SVC_ILC
540 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
555 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 541 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
556 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 542 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
557 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 543 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
558 lh %r7,0x8a # get svc number from lowcore 544 l %r8,__TI_task(%r12)
559 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 545 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
560 TRACE_IRQS_OFF 546 mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
561 l %r8,__TI_task(%r9) 547 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
562 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 548 oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
563 mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
564 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
565 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
566 TRACE_IRQS_ON
567 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 549 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
568 lm %r2,%r6,SP_R2(%r15) # load svc arguments 550 lm %r2,%r6,SP_R2(%r15) # load svc arguments
569 b BASED(sysc_do_svc) 551 b BASED(sysc_do_svc)
@@ -572,8 +554,8 @@ pgm_svcper:
572# per was called from kernel, must be kprobes 554# per was called from kernel, must be kprobes
573# 555#
574kernel_per: 556kernel_per:
575 mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check 557 REENABLE_IRQS
576 mvi SP_SVCNR+1(%r15),0xff 558 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15)
577 la %r2,SP_PTREGS(%r15) # address of register-save area 559 la %r2,SP_PTREGS(%r15) # address of register-save area
578 l %r1,BASED(.Lhandle_per) # load adr. of per handler 560 l %r1,BASED(.Lhandle_per) # load adr. of per handler
579 basr %r14,%r1 # branch to do_single_step 561 basr %r14,%r1 # branch to do_single_step
@@ -587,9 +569,10 @@ kernel_per:
587io_int_handler: 569io_int_handler:
588 stck __LC_INT_CLOCK 570 stck __LC_INT_CLOCK
589 stpt __LC_ASYNC_ENTER_TIMER 571 stpt __LC_ASYNC_ENTER_TIMER
590 SAVE_ALL_BASE __LC_SAVE_AREA+16
591 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 572 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
592 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 573 CREATE_STACK_FRAME __LC_SAVE_AREA+16
574 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
575 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
593 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 576 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
594 bz BASED(io_no_vtime) 577 bz BASED(io_no_vtime)
595 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 578 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -597,7 +580,6 @@ io_int_handler:
597 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 580 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
598io_no_vtime: 581io_no_vtime:
599 TRACE_IRQS_OFF 582 TRACE_IRQS_OFF
600 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
601 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ 583 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
602 la %r2,SP_PTREGS(%r15) # address of register-save area 584 la %r2,SP_PTREGS(%r15) # address of register-save area
603 basr %r14,%r1 # branch to standard irq handler 585 basr %r14,%r1 # branch to standard irq handler
@@ -605,7 +587,7 @@ io_return:
605 LOCKDEP_SYS_EXIT 587 LOCKDEP_SYS_EXIT
606 TRACE_IRQS_ON 588 TRACE_IRQS_ON
607io_tif: 589io_tif:
608 tm __TI_flags+3(%r9),_TIF_WORK_INT 590 tm __TI_flags+3(%r12),_TIF_WORK_INT
609 bnz BASED(io_work) # there is work to do (signals etc.) 591 bnz BASED(io_work) # there is work to do (signals etc.)
610io_restore: 592io_restore:
611 RESTORE_ALL __LC_RETURN_PSW,0 593 RESTORE_ALL __LC_RETURN_PSW,0
@@ -623,9 +605,9 @@ io_work:
623 bo BASED(io_work_user) # yes -> do resched & signal 605 bo BASED(io_work_user) # yes -> do resched & signal
624#ifdef CONFIG_PREEMPT 606#ifdef CONFIG_PREEMPT
625 # check for preemptive scheduling 607 # check for preemptive scheduling
626 icm %r0,15,__TI_precount(%r9) 608 icm %r0,15,__TI_precount(%r12)
627 bnz BASED(io_restore) # preemption disabled 609 bnz BASED(io_restore) # preemption disabled
628 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 610 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
629 bno BASED(io_restore) 611 bno BASED(io_restore)
630 # switch to kernel stack 612 # switch to kernel stack
631 l %r1,SP_R15(%r15) 613 l %r1,SP_R15(%r15)
@@ -659,13 +641,13 @@ io_work_user:
659# and _TIF_MCCK_PENDING 641# and _TIF_MCCK_PENDING
660# 642#
661io_work_tif: 643io_work_tif:
662 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 644 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
663 bo BASED(io_mcck_pending) 645 bo BASED(io_mcck_pending)
664 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 646 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
665 bo BASED(io_reschedule) 647 bo BASED(io_reschedule)
666 tm __TI_flags+3(%r9),_TIF_SIGPENDING 648 tm __TI_flags+3(%r12),_TIF_SIGPENDING
667 bo BASED(io_sigpending) 649 bo BASED(io_sigpending)
668 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 650 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
669 bo BASED(io_notify_resume) 651 bo BASED(io_notify_resume)
670 b BASED(io_return) # beware of critical section cleanup 652 b BASED(io_return) # beware of critical section cleanup
671 653
@@ -725,19 +707,20 @@ io_notify_resume:
725ext_int_handler: 707ext_int_handler:
726 stck __LC_INT_CLOCK 708 stck __LC_INT_CLOCK
727 stpt __LC_ASYNC_ENTER_TIMER 709 stpt __LC_ASYNC_ENTER_TIMER
728 SAVE_ALL_BASE __LC_SAVE_AREA+16
729 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 710 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
730 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 711 CREATE_STACK_FRAME __LC_SAVE_AREA+16
712 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
713 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
731 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 714 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
732 bz BASED(ext_no_vtime) 715 bz BASED(ext_no_vtime)
733 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 716 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
734 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 717 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
735 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 718 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
736ext_no_vtime: 719ext_no_vtime:
737 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
738 TRACE_IRQS_OFF 720 TRACE_IRQS_OFF
739 la %r2,SP_PTREGS(%r15) # address of register-save area 721 la %r2,SP_PTREGS(%r15) # address of register-save area
740 lh %r3,__LC_EXT_INT_CODE # get interruption code 722 l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
723 l %r4,__LC_EXT_PARAMS # get external parameters
741 l %r1,BASED(.Ldo_extint) 724 l %r1,BASED(.Ldo_extint)
742 basr %r14,%r1 725 basr %r14,%r1
743 b BASED(io_return) 726 b BASED(io_return)
@@ -788,7 +771,10 @@ mcck_int_main:
788 sra %r14,PAGE_SHIFT 771 sra %r14,PAGE_SHIFT
789 be BASED(0f) 772 be BASED(0f)
790 l %r15,__LC_PANIC_STACK # load panic stack 773 l %r15,__LC_PANIC_STACK # load panic stack
7910: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 7740: s %r15,BASED(.Lc_spsize) # make room for registers & psw
775 CREATE_STACK_FRAME __LC_SAVE_AREA+32
776 mvc SP_PSW(8,%r15),0(%r12)
777 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
792 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 778 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
793 bno BASED(mcck_no_vtime) # no -> skip cleanup critical 779 bno BASED(mcck_no_vtime) # no -> skip cleanup critical
794 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 780 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -797,7 +783,6 @@ mcck_int_main:
797 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 783 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
798 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER 784 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
799mcck_no_vtime: 785mcck_no_vtime:
800 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
801 la %r2,SP_PTREGS(%r15) # load pt_regs 786 la %r2,SP_PTREGS(%r15) # load pt_regs
802 l %r1,BASED(.Ls390_mcck) 787 l %r1,BASED(.Ls390_mcck)
803 basr %r14,%r1 # call machine check handler 788 basr %r14,%r1 # call machine check handler
@@ -809,7 +794,7 @@ mcck_no_vtime:
809 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 794 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
810 lr %r15,%r1 795 lr %r15,%r1
811 stosm __SF_EMPTY(%r15),0x04 # turn dat on 796 stosm __SF_EMPTY(%r15),0x04 # turn dat on
812 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 797 tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
813 bno BASED(mcck_return) 798 bno BASED(mcck_return)
814 TRACE_IRQS_OFF 799 TRACE_IRQS_OFF
815 l %r1,BASED(.Ls390_handle_mcck) 800 l %r1,BASED(.Ls390_handle_mcck)
@@ -852,7 +837,7 @@ restart_base:
852 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 837 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
853 basr %r14,0 838 basr %r14,0
854 l %r14,restart_addr-.(%r14) 839 l %r14,restart_addr-.(%r14)
855 br %r14 # branch to start_secondary 840 basr %r14,%r14 # branch to start_secondary
856restart_addr: 841restart_addr:
857 .long start_secondary 842 .long start_secondary
858 .align 8 843 .align 8
@@ -874,6 +859,8 @@ restart_crash:
874restart_go: 859restart_go:
875#endif 860#endif
876 861
862 .section .kprobes.text, "ax"
863
877#ifdef CONFIG_CHECK_STACK 864#ifdef CONFIG_CHECK_STACK
878/* 865/*
879 * The synchronous or the asynchronous stack overflowed. We are dead. 866 * The synchronous or the asynchronous stack overflowed. We are dead.
@@ -956,12 +943,13 @@ cleanup_system_call:
956 bh BASED(0f) 943 bh BASED(0f)
957 mvc __LC_SAVE_AREA(16),0(%r12) 944 mvc __LC_SAVE_AREA(16),0(%r12)
9580: st %r13,4(%r12) 9450: st %r13,4(%r12)
959 st %r12,__LC_SAVE_AREA+48 # argh 946 l %r15,__LC_KERNEL_STACK # problem state -> load ksp
960 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 947 s %r15,BASED(.Lc_spsize) # make room for registers & psw
961 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
962 l %r12,__LC_SAVE_AREA+48 # argh
963 st %r15,12(%r12) 948 st %r15,12(%r12)
964 lh %r7,0x8a 949 CREATE_STACK_FRAME __LC_SAVE_AREA
950 mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
951 mvc SP_ILC(4,%r15),__LC_SVC_ILC
952 mvc 0(4,%r12),__LC_THREAD_INFO
965cleanup_vtime: 953cleanup_vtime:
966 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) 954 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
967 bhe BASED(cleanup_stime) 955 bhe BASED(cleanup_stime)
@@ -1059,7 +1047,7 @@ cleanup_io_restore_insn:
1059.Ldo_signal: .long do_signal 1047.Ldo_signal: .long do_signal
1060.Ldo_notify_resume: 1048.Ldo_notify_resume:
1061 .long do_notify_resume 1049 .long do_notify_resume
1062.Lhandle_per: .long do_single_step 1050.Lhandle_per: .long do_per_trap
1063.Ldo_execve: .long do_execve 1051.Ldo_execve: .long do_execve
1064.Lexecve_tail: .long execve_tail 1052.Lexecve_tail: .long execve_tail
1065.Ljump_table: .long pgm_check_table 1053.Ljump_table: .long pgm_check_table
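
Both versions of the entry code now copy the PER (program-event-recording) state into the current task's thread structure through the new __THREAD_per_cause, __THREAD_per_address and __THREAD_per_paid offsets. A minimal C sketch of the record those offsets imply; the struct and field names are assumptions read off the mvc lengths (2 bytes for the cause, a pointer-sized address, 1 byte for the access id), not a quote of the patch:

	/* assumed per-thread PER record; sizes match the mvc operands above */
	struct per_event {
		unsigned short cause;	/* copied from __LC_PER_CAUSE */
		unsigned long address;	/* copied from __LC_PER_ADDRESS, 4 or 8 bytes */
		unsigned char paid;	/* copied from __LC_PER_PAID */
	};
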
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index ff579b6bde06..17a6f83a2d67 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,21 +5,21 @@
5#include <linux/signal.h> 5#include <linux/signal.h>
6#include <asm/ptrace.h> 6#include <asm/ptrace.h>
7 7
8typedef void pgm_check_handler_t(struct pt_regs *, long); 8typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long);
9extern pgm_check_handler_t *pgm_check_table[128]; 9extern pgm_check_handler_t *pgm_check_table[128];
10pgm_check_handler_t do_protection_exception; 10pgm_check_handler_t do_protection_exception;
11pgm_check_handler_t do_dat_exception; 11pgm_check_handler_t do_dat_exception;
12 12
13extern int sysctl_userprocess_debug; 13extern int sysctl_userprocess_debug;
14 14
15void do_single_step(struct pt_regs *regs); 15void do_per_trap(struct pt_regs *regs);
16void syscall_trace(struct pt_regs *regs, int entryexit); 16void syscall_trace(struct pt_regs *regs, int entryexit);
17void kernel_stack_overflow(struct pt_regs * regs); 17void kernel_stack_overflow(struct pt_regs * regs);
18void do_signal(struct pt_regs *regs); 18void do_signal(struct pt_regs *regs);
19int handle_signal32(unsigned long sig, struct k_sigaction *ka, 19int handle_signal32(unsigned long sig, struct k_sigaction *ka,
20 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); 20 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
21 21
22void do_extint(struct pt_regs *regs, unsigned short code); 22void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
23int __cpuinit start_secondary(void *cpuvoid); 23int __cpuinit start_secondary(void *cpuvoid);
24void __init startup_init(void); 24void __init startup_init(void);
25void die(const char * str, struct pt_regs * regs, long err); 25void die(const char * str, struct pt_regs * regs, long err);
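
With the widened typedef above, every program-check handler now receives the program-interruption code and the translation-exception code, which the entry code loads into %r3 and %r4 from __LC_PGM_ILC and __LC_TRANS_EXC_CODE before branching to the handler. A hedged sketch of a handler matching the new pgm_check_handler_t; example_pgm_handler and its message are illustrative only, not part of the patch:

	#include <linux/kernel.h>
	#include <asm/ptrace.h>

	/* illustrative handler matching the new pgm_check_handler_t */
	static void example_pgm_handler(struct pt_regs *regs, long pgm_int_code,
					unsigned long trans_exc_code)
	{
		/* entry code: %r3 <- __LC_PGM_ILC, %r4 <- __LC_TRANS_EXC_CODE */
		pr_debug("pgm %lx at %lx, trans exc %lx\n", pgm_int_code,
			 regs->psw.addr, trans_exc_code);
	}
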
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 8bccec15ea90..d61967e2eab0 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
51STACK_SIZE = 1 << STACK_SHIFT 51STACK_SIZE = 1 << STACK_SHIFT
52 52
53_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 53_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 54 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
55_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
56 _TIF_MCCK_PENDING) 56 _TIF_MCCK_PENDING)
57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -79,25 +79,9 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
79 basr %r2,%r0 79 basr %r2,%r0
80 brasl %r14,trace_hardirqs_off_caller 80 brasl %r14,trace_hardirqs_off_caller
81 .endm 81 .endm
82
83 .macro TRACE_IRQS_CHECK_ON
84 tm SP_PSW(%r15),0x03 # irqs enabled?
85 jz 0f
86 TRACE_IRQS_ON
870:
88 .endm
89
90 .macro TRACE_IRQS_CHECK_OFF
91 tm SP_PSW(%r15),0x03 # irqs enabled?
92 jz 0f
93 TRACE_IRQS_OFF
940:
95 .endm
96#else 82#else
97#define TRACE_IRQS_ON 83#define TRACE_IRQS_ON
98#define TRACE_IRQS_OFF 84#define TRACE_IRQS_OFF
99#define TRACE_IRQS_CHECK_ON
100#define TRACE_IRQS_CHECK_OFF
101#endif 85#endif
102 86
103#ifdef CONFIG_LOCKDEP 87#ifdef CONFIG_LOCKDEP
@@ -207,6 +191,14 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
2070: 1910:
208 .endm 192 .endm
209 193
194 .macro REENABLE_IRQS
195 mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
196 ni __SF_EMPTY(%r15),0xbf
197 ssm __SF_EMPTY(%r15)
198 .endm
199
200 .section .kprobes.text, "ax"
201
210/* 202/*
211 * Scheduler resume function, called by switch_to 203 * Scheduler resume function, called by switch_to
212 * gpr2 = (task_struct *) prev 204 * gpr2 = (task_struct *) prev
@@ -216,30 +208,22 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
216 */ 208 */
217 .globl __switch_to 209 .globl __switch_to
218__switch_to: 210__switch_to:
219 tm __THREAD_per+4(%r3),0xe8 # is the new process using per ? 211 lg %r4,__THREAD_info(%r2) # get thread_info of prev
220 jz __switch_to_noper # if not we're fine 212 lg %r5,__THREAD_info(%r3) # get thread_info of next
221 stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
222 clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
223 je __switch_to_noper # we got away without bashing TLB's
224 lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
225__switch_to_noper:
226 lg %r4,__THREAD_info(%r2) # get thread_info of prev
227 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? 213 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
228 jz __switch_to_no_mcck 214 jz 0f
229 ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 215 ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
230 lg %r4,__THREAD_info(%r3) # get thread_info of next 216 oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next
231 oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next 2170: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
232__switch_to_no_mcck: 218 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
233 stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 219 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
234 stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 220 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
235 lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp 221 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
236 lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task 222 stg %r3,__LC_CURRENT # store task struct of next
237 stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct 223 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
238 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 224 stg %r5,__LC_THREAD_INFO # store thread info of next
239 lg %r3,__THREAD_info(%r3) # load thread_info from task struct 225 aghi %r5,STACK_SIZE # end of kernel stack of next
240 stg %r3,__LC_THREAD_INFO 226 stg %r5,__LC_KERNEL_STACK # store end of kernel stack
241 aghi %r3,STACK_SIZE
242 stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
243 br %r14 227 br %r14
244 228
245__critical_start: 229__critical_start:
@@ -256,7 +240,6 @@ sysc_saveall:
256 CREATE_STACK_FRAME __LC_SAVE_AREA 240 CREATE_STACK_FRAME __LC_SAVE_AREA
257 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW 241 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
258 mvc SP_ILC(4,%r15),__LC_SVC_ILC 242 mvc SP_ILC(4,%r15),__LC_SVC_ILC
259 stg %r7,SP_ARGS(%r15)
260 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 243 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
261sysc_vtime: 244sysc_vtime:
262 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 245 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -284,6 +267,7 @@ sysc_nr_ok:
284sysc_noemu: 267sysc_noemu:
285#endif 268#endif
286 tm __TI_flags+6(%r12),_TIF_SYSCALL 269 tm __TI_flags+6(%r12),_TIF_SYSCALL
270 mvc SP_ARGS(8,%r15),SP_R7(%r15)
287 lgf %r8,0(%r7,%r10) # load address of system call routine 271 lgf %r8,0(%r7,%r10) # load address of system call routine
288 jnz sysc_tracesys 272 jnz sysc_tracesys
289 basr %r14,%r8 # call sys_xxxx 273 basr %r14,%r8 # call sys_xxxx
@@ -319,7 +303,7 @@ sysc_work_tif:
319 jo sysc_notify_resume 303 jo sysc_notify_resume
320 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 304 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
321 jo sysc_restart 305 jo sysc_restart
322 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP 306 tm __TI_flags+7(%r12),_TIF_PER_TRAP
323 jo sysc_singlestep 307 jo sysc_singlestep
324 j sysc_return # beware of critical section cleanup 308 j sysc_return # beware of critical section cleanup
325 309
@@ -341,12 +325,12 @@ sysc_mcck_pending:
341# _TIF_SIGPENDING is set, call do_signal 325# _TIF_SIGPENDING is set, call do_signal
342# 326#
343sysc_sigpending: 327sysc_sigpending:
344 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 328 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
345 la %r2,SP_PTREGS(%r15) # load pt_regs 329 la %r2,SP_PTREGS(%r15) # load pt_regs
346 brasl %r14,do_signal # call do_signal 330 brasl %r14,do_signal # call do_signal
347 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 331 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
348 jo sysc_restart 332 jo sysc_restart
349 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP 333 tm __TI_flags+7(%r12),_TIF_PER_TRAP
350 jo sysc_singlestep 334 jo sysc_singlestep
351 j sysc_return 335 j sysc_return
352 336
@@ -371,14 +355,14 @@ sysc_restart:
371 j sysc_nr_ok # restart svc 355 j sysc_nr_ok # restart svc
372 356
373# 357#
374# _TIF_SINGLE_STEP is set, call do_single_step 358# _TIF_PER_TRAP is set, call do_per_trap
375# 359#
376sysc_singlestep: 360sysc_singlestep:
377 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 361 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
378 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 362 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
379 la %r2,SP_PTREGS(%r15) # address of register-save area 363 la %r2,SP_PTREGS(%r15) # address of register-save area
380 larl %r14,sysc_return # load adr. of system return 364 larl %r14,sysc_return # load adr. of system return
381 jg do_single_step # branch to do_sigtrap 365 jg do_per_trap
382 366
383# 367#
384# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 368# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -397,6 +381,7 @@ sysc_tracesys:
397 lgf %r8,0(%r7,%r10) 381 lgf %r8,0(%r7,%r10)
398sysc_tracego: 382sysc_tracego:
399 lmg %r3,%r6,SP_R3(%r15) 383 lmg %r3,%r6,SP_R3(%r15)
384 mvc SP_ARGS(8,%r15),SP_R7(%r15)
400 lg %r2,SP_ORIG_R2(%r15) 385 lg %r2,SP_ORIG_R2(%r15)
401 basr %r14,%r8 # call sys_xxx 386 basr %r14,%r8 # call sys_xxx
402 stg %r2,SP_R2(%r15) # store return value 387 stg %r2,SP_R2(%r15) # store return value
@@ -443,14 +428,12 @@ kernel_execve:
443 br %r14 428 br %r14
444 # execve succeeded. 429 # execve succeeded.
4450: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 4300: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
446# TRACE_IRQS_OFF
447 lg %r15,__LC_KERNEL_STACK # load ksp 431 lg %r15,__LC_KERNEL_STACK # load ksp
448 aghi %r15,-SP_SIZE # make room for registers & psw 432 aghi %r15,-SP_SIZE # make room for registers & psw
449 lg %r13,__LC_SVC_NEW_PSW+8 433 lg %r13,__LC_SVC_NEW_PSW+8
450 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs 434 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
451 lg %r12,__LC_THREAD_INFO 435 lg %r12,__LC_THREAD_INFO
452 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 436 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
453# TRACE_IRQS_ON
454 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 437 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
455 brasl %r14,execve_tail 438 brasl %r14,execve_tail
456 j sysc_return 439 j sysc_return
@@ -490,19 +473,18 @@ pgm_check_handler:
490 LAST_BREAK 473 LAST_BREAK
491pgm_no_vtime: 474pgm_no_vtime:
492 HANDLE_SIE_INTERCEPT 475 HANDLE_SIE_INTERCEPT
493 TRACE_IRQS_CHECK_OFF
494 stg %r11,SP_ARGS(%r15) 476 stg %r11,SP_ARGS(%r15)
495 lgf %r3,__LC_PGM_ILC # load program interruption code 477 lgf %r3,__LC_PGM_ILC # load program interruption code
478 lg %r4,__LC_TRANS_EXC_CODE
479 REENABLE_IRQS
496 lghi %r8,0x7f 480 lghi %r8,0x7f
497 ngr %r8,%r3 481 ngr %r8,%r3
498pgm_do_call:
499 sll %r8,3 482 sll %r8,3
500 larl %r1,pgm_check_table 483 larl %r1,pgm_check_table
501 lg %r1,0(%r8,%r1) # load address of handler routine 484 lg %r1,0(%r8,%r1) # load address of handler routine
502 la %r2,SP_PTREGS(%r15) # address of register-save area 485 la %r2,SP_PTREGS(%r15) # address of register-save area
503 basr %r14,%r1 # branch to interrupt-handler 486 basr %r14,%r1 # branch to interrupt-handler
504pgm_exit: 487pgm_exit:
505 TRACE_IRQS_CHECK_ON
506 j sysc_return 488 j sysc_return
507 489
508# 490#
@@ -533,15 +515,16 @@ pgm_per_std:
533 LAST_BREAK 515 LAST_BREAK
534pgm_no_vtime2: 516pgm_no_vtime2:
535 HANDLE_SIE_INTERCEPT 517 HANDLE_SIE_INTERCEPT
536 TRACE_IRQS_CHECK_OFF
537 lg %r1,__TI_task(%r12) 518 lg %r1,__TI_task(%r12)
538 tm SP_PSW+1(%r15),0x01 # kernel per event ? 519 tm SP_PSW+1(%r15),0x01 # kernel per event ?
539 jz kernel_per 520 jz kernel_per
540 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 521 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
541 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 522 mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
542 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 523 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
543 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 524 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
544 lgf %r3,__LC_PGM_ILC # load program interruption code 525 lgf %r3,__LC_PGM_ILC # load program interruption code
526 lg %r4,__LC_TRANS_EXC_CODE
527 REENABLE_IRQS
545 lghi %r8,0x7f 528 lghi %r8,0x7f
546 ngr %r8,%r3 # clear per-event-bit and ilc 529 ngr %r8,%r3 # clear per-event-bit and ilc
547 je pgm_exit2 530 je pgm_exit2
@@ -551,8 +534,6 @@ pgm_no_vtime2:
551 la %r2,SP_PTREGS(%r15) # address of register-save area 534 la %r2,SP_PTREGS(%r15) # address of register-save area
552 basr %r14,%r1 # branch to interrupt-handler 535 basr %r14,%r1 # branch to interrupt-handler
553pgm_exit2: 536pgm_exit2:
554 TRACE_IRQS_ON
555 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
556 j sysc_return 537 j sysc_return
557 538
558# 539#
@@ -568,13 +549,11 @@ pgm_svcper:
568 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 549 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
569 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 550 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
570 LAST_BREAK 551 LAST_BREAK
571 TRACE_IRQS_OFF
572 lg %r8,__TI_task(%r12) 552 lg %r8,__TI_task(%r12)
573 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 553 mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
574 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS 554 mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
575 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 555 mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
576 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 556 oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
577 TRACE_IRQS_ON
578 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 557 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
579 lmg %r2,%r6,SP_R2(%r15) # load svc arguments 558 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
580 j sysc_do_svc 559 j sysc_do_svc
@@ -583,9 +562,10 @@ pgm_svcper:
583# per was called from kernel, must be kprobes 562# per was called from kernel, must be kprobes
584# 563#
585kernel_per: 564kernel_per:
565 REENABLE_IRQS
586 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 566 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
587 la %r2,SP_PTREGS(%r15) # address of register-save area 567 la %r2,SP_PTREGS(%r15) # address of register-save area
588 brasl %r14,do_single_step 568 brasl %r14,do_per_trap
589 j pgm_exit 569 j pgm_exit
590 570
591/* 571/*
@@ -743,8 +723,11 @@ ext_int_handler:
743ext_no_vtime: 723ext_no_vtime:
744 HANDLE_SIE_INTERCEPT 724 HANDLE_SIE_INTERCEPT
745 TRACE_IRQS_OFF 725 TRACE_IRQS_OFF
726 lghi %r1,4096
746 la %r2,SP_PTREGS(%r15) # address of register-save area 727 la %r2,SP_PTREGS(%r15) # address of register-save area
747 llgh %r3,__LC_EXT_INT_CODE # get interruption code 728 llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
729 llgf %r4,__LC_EXT_PARAMS # get external parameter
730 lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter
748 brasl %r14,do_extint 731 brasl %r14,do_extint
749 j io_return 732 j io_return
750 733
@@ -859,7 +842,7 @@ restart_base:
859 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) 842 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
860 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER 843 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
861 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 844 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
862 jg start_secondary 845 brasl %r14,start_secondary
863 .align 8 846 .align 8
864restart_vtime: 847restart_vtime:
865 .long 0x7fffffff,0xffffffff 848 .long 0x7fffffff,0xffffffff
@@ -879,6 +862,8 @@ restart_crash:
879restart_go: 862restart_go:
880#endif 863#endif
881 864
865 .section .kprobes.text, "ax"
866
882#ifdef CONFIG_CHECK_STACK 867#ifdef CONFIG_CHECK_STACK
883/* 868/*
884 * The synchronous or the asynchronous stack overflowed. We are dead. 869 * The synchronous or the asynchronous stack overflowed. We are dead.
@@ -966,7 +951,6 @@ cleanup_system_call:
966 CREATE_STACK_FRAME __LC_SAVE_AREA 951 CREATE_STACK_FRAME __LC_SAVE_AREA
967 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW 952 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
968 mvc SP_ILC(4,%r15),__LC_SVC_ILC 953 mvc SP_ILC(4,%r15),__LC_SVC_ILC
969 stg %r7,SP_ARGS(%r15)
970 mvc 8(8,%r12),__LC_THREAD_INFO 954 mvc 8(8,%r12),__LC_THREAD_INFO
971cleanup_vtime: 955cleanup_vtime:
972 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) 956 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
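
The REENABLE_IRQS macro introduced above copies byte 0 of the interrupted PSW into the stack scratch area, clears bit 1 (the PER mask, 0x40) with ni ...,0xbf, and loads the result with ssm. Interrupts therefore come back on only if the interrupted context had them enabled, which is why the old TRACE_IRQS_CHECK_ON/OFF pair and the explicit stosm at pgm_exit2 could be dropped. A rough C model of the mask step, with invented names:

	#define PSW_BYTE0_PER	0x40	/* bit 1 of PSW byte 0 */

	/* effect of "ni __SF_EMPTY(%r15),0xbf" on the copied PSW byte */
	static inline unsigned char reenable_irqs_mask(unsigned char saved_psw0)
	{
		return saved_psw0 & ~PSW_BYTE0_PER;	/* 0xbf == ~0x40 */
	}
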
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 6a83d0581317..78bdf0e5dff7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -4,7 +4,7 @@
4 * Copyright IBM Corp. 2009 4 * Copyright IBM Corp. 2009
5 * 5 *
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 * 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/hardirq.h> 10#include <linux/hardirq.h>
@@ -12,176 +12,144 @@
12#include <linux/ftrace.h> 12#include <linux/ftrace.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kprobes.h>
15#include <trace/syscall.h> 16#include <trace/syscall.h>
16#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
17 18
19#ifdef CONFIG_64BIT
20#define MCOUNT_OFFSET_RET 12
21#else
22#define MCOUNT_OFFSET_RET 22
23#endif
24
18#ifdef CONFIG_DYNAMIC_FTRACE 25#ifdef CONFIG_DYNAMIC_FTRACE
19 26
20void ftrace_disable_code(void); 27void ftrace_disable_code(void);
21void ftrace_disable_return(void); 28void ftrace_enable_insn(void);
22void ftrace_call_code(void);
23void ftrace_nop_code(void);
24
25#define FTRACE_INSN_SIZE 4
26 29
27#ifdef CONFIG_64BIT 30#ifdef CONFIG_64BIT
28 31/*
32 * The 64-bit mcount code looks like this:
33 * stg %r14,8(%r15) # offset 0
34 * > larl %r1,<&counter> # offset 6
35 * > brasl %r14,_mcount # offset 12
36 * lg %r14,8(%r15) # offset 18
37 * Total length is 24 bytes. The middle two instructions of the mcount
38 * block get overwritten by ftrace_make_nop / ftrace_make_call.
39 * The 64-bit enabled ftrace code block looks like this:
40 * stg %r14,8(%r15) # offset 0
41 * > lg %r1,__LC_FTRACE_FUNC # offset 6
42 * > lgr %r0,%r0 # offset 12
43 * > basr %r14,%r1 # offset 16
44 * lg %r14,8(%r15) # offset 18
45 * The return points of the mcount/ftrace function have the same offset 18.
46 * The 64-bit disable ftrace code block looks like this:
47 * stg %r14,8(%r15) # offset 0
48 * > jg .+18 # offset 6
49 * > lgr %r0,%r0 # offset 12
50 * > basr %r14,%r1 # offset 16
51 * lg %r14,8(%r15) # offset 18
52 * The jg instruction branches to offset 24 to skip as many instructions
53 * as possible.
54 */
29asm( 55asm(
30 " .align 4\n" 56 " .align 4\n"
31 "ftrace_disable_code:\n" 57 "ftrace_disable_code:\n"
32 " j 0f\n" 58 " jg 0f\n"
33 " .word 0x0024\n"
34 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
35 " basr %r14,%r1\n"
36 "ftrace_disable_return:\n"
37 " lg %r14,8(15)\n"
38 " lgr %r0,%r0\n" 59 " lgr %r0,%r0\n"
39 "0:\n"); 60 " basr %r14,%r1\n"
40 61 "0:\n"
41asm(
42 " .align 4\n" 62 " .align 4\n"
43 "ftrace_nop_code:\n" 63 "ftrace_enable_insn:\n"
44 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); 64 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
45 65
46asm( 66#define FTRACE_INSN_SIZE 6
47 " .align 4\n"
48 "ftrace_call_code:\n"
49 " stg %r14,8(%r15)\n");
50 67
51#else /* CONFIG_64BIT */ 68#else /* CONFIG_64BIT */
52 69/*
70 * The 31-bit mcount code looks like this:
71 * st %r14,4(%r15) # offset 0
72 * > bras %r1,0f # offset 4
73 * > .long _mcount # offset 8
74 * > .long <&counter> # offset 12
75 * > 0: l %r14,0(%r1) # offset 16
76 * > l %r1,4(%r1) # offset 20
77 * basr %r14,%r14 # offset 24
78 * l %r14,4(%r15) # offset 26
79 * Total length is 30 bytes. The twenty bytes starting from offset 4
80 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
81 * The 31-bit enabled ftrace code block looks like this:
82 * st %r14,4(%r15) # offset 0
83 * > l %r14,__LC_FTRACE_FUNC # offset 4
84 * > j 0f # offset 8
85 * > .fill 12,1,0x07 # offset 12
86 * 0: basr %r14,%r14 # offset 24
87 * l %r14,4(%r15) # offset 26
88 * The return points of the mcount/ftrace function have the same offset 26.
89 * The 31-bit disabled ftrace code block looks like this:
90 * st %r14,4(%r15) # offset 0
91 * > j .+26 # offset 4
92 * > j 0f # offset 8
93 * > .fill 12,1,0x07 # offset 12
94 * 0: basr %r14,%r14 # offset 24
95 * l %r14,4(%r15) # offset 26
96 * The j instruction branches to offset 30 to skip as many instructions
97 * as possible.
98 */
53asm( 99asm(
54 " .align 4\n" 100 " .align 4\n"
55 "ftrace_disable_code:\n" 101 "ftrace_disable_code:\n"
102 " j 1f\n"
56 " j 0f\n" 103 " j 0f\n"
57 " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" 104 " .fill 12,1,0x07\n"
58 " basr %r14,%r1\n" 105 "0: basr %r14,%r14\n"
59 "ftrace_disable_return:\n" 106 "1:\n"
60 " l %r14,4(%r15)\n"
61 " j 0f\n"
62 " bcr 0,%r7\n"
63 " bcr 0,%r7\n"
64 " bcr 0,%r7\n"
65 " bcr 0,%r7\n"
66 " bcr 0,%r7\n"
67 " bcr 0,%r7\n"
68 "0:\n");
69
70asm(
71 " .align 4\n" 107 " .align 4\n"
72 "ftrace_nop_code:\n" 108 "ftrace_enable_insn:\n"
73 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); 109 " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
74 110
75asm( 111#define FTRACE_INSN_SIZE 4
76 " .align 4\n"
77 "ftrace_call_code:\n"
78 " st %r14,4(%r15)\n");
79 112
80#endif /* CONFIG_64BIT */ 113#endif /* CONFIG_64BIT */
81 114
82static int ftrace_modify_code(unsigned long ip,
83 void *old_code, int old_size,
84 void *new_code, int new_size)
85{
86 unsigned char replaced[MCOUNT_INSN_SIZE];
87
88 /*
89 * Note: Due to modules code can disappear and change.
90 * We need to protect against faulting as well as code
91 * changing. We do this by using the probe_kernel_*
92 * functions.
93 * This however is just a simple sanity check.
94 */
95 if (probe_kernel_read(replaced, (void *)ip, old_size))
96 return -EFAULT;
97 if (memcmp(replaced, old_code, old_size) != 0)
98 return -EINVAL;
99 if (probe_kernel_write((void *)ip, new_code, new_size))
100 return -EPERM;
101 return 0;
102}
103
104static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
105 unsigned long addr)
106{
107 return ftrace_modify_code(rec->ip,
108 ftrace_call_code, FTRACE_INSN_SIZE,
109 ftrace_disable_code, MCOUNT_INSN_SIZE);
110}
111 115
112int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 116int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
113 unsigned long addr) 117 unsigned long addr)
114{ 118{
115 if (addr == MCOUNT_ADDR) 119 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
116 return ftrace_make_initial_nop(mod, rec, addr); 120 MCOUNT_INSN_SIZE))
117 return ftrace_modify_code(rec->ip, 121 return -EPERM;
118 ftrace_call_code, FTRACE_INSN_SIZE, 122 return 0;
119 ftrace_nop_code, FTRACE_INSN_SIZE);
120} 123}
121 124
122int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 125int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
123{ 126{
124 return ftrace_modify_code(rec->ip, 127 if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
125 ftrace_nop_code, FTRACE_INSN_SIZE, 128 FTRACE_INSN_SIZE))
126 ftrace_call_code, FTRACE_INSN_SIZE); 129 return -EPERM;
130 return 0;
127} 131}
128 132
129int ftrace_update_ftrace_func(ftrace_func_t func) 133int ftrace_update_ftrace_func(ftrace_func_t func)
130{ 134{
131 ftrace_dyn_func = (unsigned long)func;
132 return 0; 135 return 0;
133} 136}
134 137
135int __init ftrace_dyn_arch_init(void *data) 138int __init ftrace_dyn_arch_init(void *data)
136{ 139{
137 *(unsigned long *)data = 0; 140 *(unsigned long *) data = 0;
138 return 0; 141 return 0;
139} 142}
140 143
141#endif /* CONFIG_DYNAMIC_FTRACE */ 144#endif /* CONFIG_DYNAMIC_FTRACE */
142 145
143#ifdef CONFIG_FUNCTION_GRAPH_TRACER 146#ifdef CONFIG_FUNCTION_GRAPH_TRACER
144#ifdef CONFIG_DYNAMIC_FTRACE
145/*
146 * Patch the kernel code at ftrace_graph_caller location:
147 * The instruction there is branch relative on condition. The condition mask
148 * is either all ones (always branch aka disable ftrace_graph_caller) or all
149 * zeroes (nop aka enable ftrace_graph_caller).
150 * Instruction format for brc is a7m4xxxx where m is the condition mask.
151 */
152int ftrace_enable_ftrace_graph_caller(void)
153{
154 unsigned short opcode = 0xa704;
155
156 return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
157}
158
159int ftrace_disable_ftrace_graph_caller(void)
160{
161 unsigned short opcode = 0xa7f4;
162
163 return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
164}
165
166static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
167{
168 return addr - (ftrace_disable_return - ftrace_disable_code);
169}
170
171#else /* CONFIG_DYNAMIC_FTRACE */
172
173static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
174{
175 return addr - MCOUNT_OFFSET_RET;
176}
177
178#endif /* CONFIG_DYNAMIC_FTRACE */
179
180/* 147/*
181 * Hook the return address and push it in the stack of return addresses 148 * Hook the return address and push it in the stack of return addresses
182 * in current thread info. 149 * in current thread info.
183 */ 150 */
184unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) 151unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
152 unsigned long ip)
185{ 153{
186 struct ftrace_graph_ent trace; 154 struct ftrace_graph_ent trace;
187 155
@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
189 goto out; 157 goto out;
190 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) 158 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
191 goto out; 159 goto out;
192 trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; 160 trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
193 /* Only trace if the calling function expects to. */ 161 /* Only trace if the calling function expects to. */
194 if (!ftrace_graph_entry(&trace)) { 162 if (!ftrace_graph_entry(&trace)) {
195 current->curr_ret_stack--; 163 current->curr_ret_stack--;
196 goto out; 164 goto out;
197 } 165 }
198 parent = (unsigned long)return_to_handler; 166 parent = (unsigned long) return_to_handler;
199out: 167out:
200 return parent; 168 return parent;
201} 169}
170
171#ifdef CONFIG_DYNAMIC_FTRACE
172/*
173 * Patch the kernel code at the ftrace_graph_caller location. The instruction
174 * there is a branch relative and save (bras) to prepare_ftrace_return. To
175 * disable the call to prepare_ftrace_return we patch the bras offset to point
176 * directly after the instruction. To enable the call we calculate the
177 * original offset to prepare_ftrace_return and put it back.
178 */
179int ftrace_enable_ftrace_graph_caller(void)
180{
181 unsigned short offset;
182
183 offset = ((void *) prepare_ftrace_return -
184 (void *) ftrace_graph_caller) / 2;
185 return probe_kernel_write(ftrace_graph_caller + 2,
186 &offset, sizeof(offset));
187}
188
189int ftrace_disable_ftrace_graph_caller(void)
190{
191 static unsigned short offset = 0x0002;
192
193 return probe_kernel_write(ftrace_graph_caller + 2,
194 &offset, sizeof(offset));
195}
196
197#endif /* CONFIG_DYNAMIC_FTRACE */
202#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 198#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
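
s390 relative branch instructions encode their target as a signed halfword count from the start of the instruction, which is where the division by two in ftrace_enable_ftrace_graph_caller comes from; the disable path stores 0x0002, two halfwords, landing directly after the 4-byte bras. A self-contained sketch of the arithmetic, with invented addresses (only their difference matters):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical addresses, for illustration only */
		unsigned long ftrace_graph_caller = 0x100400;
		unsigned long prepare_ftrace_return = 0x100480;

		/* enable: original halfword offset to the helper,
		   written back into the immediate at bytes 2..3 */
		unsigned short enable = (prepare_ftrace_return -
					 ftrace_graph_caller) / 2;
		/* disable: branch to the instruction after the 4-byte bras */
		unsigned short disable = 0x0002;

		printf("enable %#06x, disable %#06x\n", enable, disable);
		return 0;
	}
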
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index db1696e210af..fb317bf2c378 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -460,7 +460,7 @@ startup:
460#ifndef CONFIG_MARCH_G5 460#ifndef CONFIG_MARCH_G5
461 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 461 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
462 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 462 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
463 stfl __LC_STFL_FAC_LIST # store facility list 463 .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
464 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 464 tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
465 jz 0f 465 jz 0f
466 la %r0,0 466 la %r0,0
@@ -488,7 +488,9 @@ startup:
488 .align 16 488 .align 16
4892: .long 0x000a0000,0x8badcccc 4892: .long 0x000a0000,0x8badcccc
490#if defined(CONFIG_64BIT) 490#if defined(CONFIG_64BIT)
491#if defined(CONFIG_MARCH_Z10) 491#if defined(CONFIG_MARCH_Z196)
492 .long 0xc100efe3, 0xf46c0000
493#elif defined(CONFIG_MARCH_Z10)
492 .long 0xc100efe3, 0xf0680000 494 .long 0xc100efe3, 0xf0680000
493#elif defined(CONFIG_MARCH_Z9_109) 495#elif defined(CONFIG_MARCH_Z9_109)
494 .long 0xc100efc3, 0x00000000 496 .long 0xc100efc3, 0x00000000
@@ -498,7 +500,9 @@ startup:
498 .long 0xc0000000, 0x00000000 500 .long 0xc0000000, 0x00000000
499#endif 501#endif
500#else 502#else
501#if defined(CONFIG_MARCH_Z10) 503#if defined(CONFIG_MARCH_Z196)
504 .long 0x8100c880, 0x00000000
505#elif defined(CONFIG_MARCH_Z10)
502 .long 0x8100c880, 0x00000000 506 .long 0x8100c880, 0x00000000
503#elif defined(CONFIG_MARCH_Z9_109) 507#elif defined(CONFIG_MARCH_Z9_109)
504 .long 0x8100c880, 0x00000000 508 .long 0x8100c880, 0x00000000
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 026a37a94fc9..e3264f6a9720 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,28 +1,66 @@
1/* 1/*
2 * arch/s390/kernel/irq.c 2 * Copyright IBM Corp. 2004,2011
3 * 3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
4 * Copyright IBM Corp. 2004,2007 4 * Holger Smolinski <Holger.Smolinski@de.ibm.com>,
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 5 * Thomas Spatzier <tspat@de.ibm.com>,
6 * Thomas Spatzier (tspat@de.ibm.com)
7 * 6 *
8 * This file contains interrupt related functions. 7 * This file contains interrupt related functions.
9 */ 8 */
10 9
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/kernel_stat.h> 10#include <linux/kernel_stat.h>
14#include <linux/interrupt.h> 11#include <linux/interrupt.h>
15#include <linux/seq_file.h> 12#include <linux/seq_file.h>
16#include <linux/cpu.h>
17#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
18#include <linux/profile.h> 14#include <linux/profile.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/ftrace.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/cpu.h>
21#include <asm/irq_regs.h>
22#include <asm/cputime.h>
23#include <asm/lowcore.h>
24#include <asm/irq.h>
25#include "entry.h"
26
27struct irq_class {
28 char *name;
29 char *desc;
30};
31
32static const struct irq_class intrclass_names[] = {
33 {.name = "EXT" },
34 {.name = "I/O" },
35 {.name = "CLK", .desc = "[EXT] Clock Comparator" },
36 {.name = "IPI", .desc = "[EXT] Signal Processor" },
37 {.name = "TMR", .desc = "[EXT] CPU Timer" },
38 {.name = "TAL", .desc = "[EXT] Timing Alert" },
39 {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
40 {.name = "DSD", .desc = "[EXT] DASD Diag" },
41 {.name = "VRT", .desc = "[EXT] Virtio" },
42 {.name = "SCP", .desc = "[EXT] Service Call" },
43 {.name = "IUC", .desc = "[EXT] IUCV" },
44 {.name = "CPM", .desc = "[EXT] CPU Measurement" },
45 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
46 {.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
47 {.name = "DAS", .desc = "[I/O] DASD" },
48 {.name = "C15", .desc = "[I/O] 3215" },
49 {.name = "C70", .desc = "[I/O] 3270" },
50 {.name = "TAP", .desc = "[I/O] Tape" },
51 {.name = "VMR", .desc = "[I/O] Unit Record Devices" },
52 {.name = "LCS", .desc = "[I/O] LCS" },
53 {.name = "CLW", .desc = "[I/O] CLAW" },
54 {.name = "CTC", .desc = "[I/O] CTC" },
55 {.name = "APB", .desc = "[I/O] AP Bus" },
56 {.name = "NMI", .desc = "[NMI] Machine Check" },
57};
19 58
20/* 59/*
21 * show_interrupts is needed by /proc/interrupts. 60 * show_interrupts is needed by /proc/interrupts.
22 */ 61 */
23int show_interrupts(struct seq_file *p, void *v) 62int show_interrupts(struct seq_file *p, void *v)
24{ 63{
25 static const char *intrclass_names[] = { "EXT", "I/O", };
26 int i = *(loff_t *) v, j; 64 int i = *(loff_t *) v, j;
27 65
28 get_online_cpus(); 66 get_online_cpus();
@@ -34,15 +72,16 @@ int show_interrupts(struct seq_file *p, void *v)
34 } 72 }
35 73
36 if (i < NR_IRQS) { 74 if (i < NR_IRQS) {
37 seq_printf(p, "%s: ", intrclass_names[i]); 75 seq_printf(p, "%s: ", intrclass_names[i].name);
38#ifndef CONFIG_SMP 76#ifndef CONFIG_SMP
39 seq_printf(p, "%10u ", kstat_irqs(i)); 77 seq_printf(p, "%10u ", kstat_irqs(i));
40#else 78#else
41 for_each_online_cpu(j) 79 for_each_online_cpu(j)
42 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 80 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
43#endif 81#endif
82 if (intrclass_names[i].desc)
83 seq_printf(p, " %s", intrclass_names[i].desc);
44 seq_putc(p, '\n'); 84 seq_putc(p, '\n');
45
46 } 85 }
47 put_online_cpus(); 86 put_online_cpus();
48 return 0; 87 return 0;
@@ -52,8 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
52 * For compatibility only. S/390 specific setup of interrupts et al. is done 91
53 * much later in init_channel_subsystem(). 92 * much later in init_channel_subsystem().
54 */ 93 */
55void __init 94void __init init_IRQ(void)
56init_IRQ(void)
57{ 95{
58 /* nothing... */ 96 /* nothing... */
59} 97}
@@ -104,3 +142,116 @@ void init_irq_proc(void)
104 create_prof_cpu_mask(root_irq_dir); 142 create_prof_cpu_mask(root_irq_dir);
105} 143}
106#endif 144#endif
145
146/*
147 * ext_int_hash[index] is the start of the list for all external interrupts
148 * that hash to this index. With the current set of external interrupts
149 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
150 * iucv and 0x2603 pfault) this is always the first element.
151 */
152
153struct ext_int_info {
154 struct ext_int_info *next;
155 ext_int_handler_t handler;
156 u16 code;
157};
158
159static struct ext_int_info *ext_int_hash[256];
160
161static inline int ext_hash(u16 code)
162{
163 return (code + (code >> 9)) & 0xff;
164}
165
166int register_external_interrupt(u16 code, ext_int_handler_t handler)
167{
168 struct ext_int_info *p;
169 int index;
170
171 p = kmalloc(sizeof(*p), GFP_ATOMIC);
172 if (!p)
173 return -ENOMEM;
174 p->code = code;
175 p->handler = handler;
176 index = ext_hash(code);
177 p->next = ext_int_hash[index];
178 ext_int_hash[index] = p;
179 return 0;
180}
181EXPORT_SYMBOL(register_external_interrupt);
182
183int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
184{
185 struct ext_int_info *p, *q;
186 int index;
187
188 index = ext_hash(code);
189 q = NULL;
190 p = ext_int_hash[index];
191 while (p) {
192 if (p->code == code && p->handler == handler)
193 break;
194 q = p;
195 p = p->next;
196 }
197 if (!p)
198 return -ENOENT;
199 if (q)
200 q->next = p->next;
201 else
202 ext_int_hash[index] = p->next;
203 kfree(p);
204 return 0;
205}
206EXPORT_SYMBOL(unregister_external_interrupt);
207
208void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
209 unsigned int param32, unsigned long param64)
210{
211 struct pt_regs *old_regs;
212 unsigned short code;
213 struct ext_int_info *p;
214 int index;
215
216 code = (unsigned short) ext_int_code;
217 old_regs = set_irq_regs(regs);
218 s390_idle_check(regs, S390_lowcore.int_clock,
219 S390_lowcore.async_enter_timer);
220 irq_enter();
221 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
222 /* Serve timer interrupts first. */
223 clock_comparator_work();
224 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
225 if (code != 0x1004)
226 __get_cpu_var(s390_idle).nohz_delay = 1;
227 index = ext_hash(code);
228 for (p = ext_int_hash[index]; p; p = p->next) {
229 if (likely(p->code == code))
230 p->handler(ext_int_code, param32, param64);
231 }
232 irq_exit();
233 set_irq_regs(old_regs);
234}
235
236static DEFINE_SPINLOCK(sc_irq_lock);
237static int sc_irq_refcount;
238
239void service_subclass_irq_register(void)
240{
241 spin_lock(&sc_irq_lock);
242 if (!sc_irq_refcount)
243 ctl_set_bit(0, 9);
244 sc_irq_refcount++;
245 spin_unlock(&sc_irq_lock);
246}
247EXPORT_SYMBOL(service_subclass_irq_register);
248
249void service_subclass_irq_unregister(void)
250{
251 spin_lock(&sc_irq_lock);
252 sc_irq_refcount--;
253 if (!sc_irq_refcount)
254 ctl_clear_bit(0, 9);
255 spin_unlock(&sc_irq_lock);
256}
257EXPORT_SYMBOL(service_subclass_irq_unregister);
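
The comment above ext_int_hash states that, for the current set of external interrupt codes, the wanted entry is always the first element of its bucket list. That holds because ext_hash() sends the five codes to five distinct buckets, which a throwaway user-space check confirms:

	#include <stdio.h>

	/* same hash as register_external_interrupt() uses above */
	static int ext_hash(unsigned short code)
	{
		return (code + (code >> 9)) & 0xff;
	}

	int main(void)
	{
		static const unsigned short codes[] = {
			0x1202, 0x1004, 0x2401, 0x4000, 0x2603
		};
		int i;

		for (i = 0; i < 5; i++)	/* prints buckets 11, 12, 19, 32, 22 */
			printf("0x%04x -> %d\n", codes[i], ext_hash(codes[i]));
		return 0;
	}
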
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
new file mode 100644
index 000000000000..44cc06bedf77
--- /dev/null
+++ b/arch/s390/kernel/jump_label.c
@@ -0,0 +1,59 @@
1/*
2 * Jump label s390 support
3 *
4 * Copyright IBM Corp. 2011
5 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7#include <linux/module.h>
8#include <linux/uaccess.h>
9#include <linux/stop_machine.h>
10#include <linux/jump_label.h>
11#include <asm/ipl.h>
12
13#ifdef HAVE_JUMP_LABEL
14
15struct insn {
16 u16 opcode;
17 s32 offset;
18} __packed;
19
20struct insn_args {
21 unsigned long *target;
22 struct insn *insn;
23 ssize_t size;
24};
25
26static int __arch_jump_label_transform(void *data)
27{
28 struct insn_args *args = data;
29 int rc;
30
31 rc = probe_kernel_write(args->target, args->insn, args->size);
32 WARN_ON_ONCE(rc < 0);
33 return 0;
34}
35
36void arch_jump_label_transform(struct jump_entry *entry,
37 enum jump_label_type type)
38{
39 struct insn_args args;
40 struct insn insn;
41
42 if (type == JUMP_LABEL_ENABLE) {
43 /* brcl 15,offset */
44 insn.opcode = 0xc0f4;
45 insn.offset = (entry->target - entry->code) >> 1;
46 } else {
47 /* brcl 0,0 */
48 insn.opcode = 0xc004;
49 insn.offset = 0;
50 }
51
52 args.target = (void *) entry->code;
53 args.insn = &insn;
54 args.size = JUMP_LABEL_NOP_SIZE;
55
56 stop_machine(__arch_jump_label_transform, &args, NULL);
57}
58
59#endif
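
arch_jump_label_transform above writes a six-byte brcl: a 16-bit opcode-plus-condition-mask followed by a signed 32-bit offset counted in halfwords, hence the >> 1 on the byte distance between jump site and target. A hedged user-space sketch of that encoding, addresses invented:

	#include <stdio.h>
	#include <stdint.h>

	struct insn {			/* mirrors the struct in jump_label.c */
		uint16_t opcode;
		int32_t offset;
	} __attribute__((packed));

	int main(void)
	{
		/* invented addresses of the jump site and its target */
		unsigned long code = 0x200000, target = 0x20040a;
		struct insn jmp = { 0xc0f4, (target - code) >> 1 };	/* brcl 15,off */
		struct insn nop = { 0xc004, 0 };			/* brcl 0,0 */

		/* six bytes each, matching JUMP_LABEL_NOP_SIZE */
		printf("jmp %04x %08x, nop %04x %08x, size %zu\n",
		       jmp.opcode, (unsigned) jmp.offset,
		       nop.opcode, (unsigned) nop.offset, sizeof(struct insn));
		return 0;
	}
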
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 2a3d2bf6f083..1d05d669107c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -30,35 +30,16 @@
30#include <asm/sections.h> 30#include <asm/sections.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/hardirq.h>
33 34
34DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 35DEFINE_PER_CPU(struct kprobe *, current_kprobe);
35DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 36DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
36 37
37struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; 38struct kretprobe_blackpoint kretprobe_blacklist[] = { };
38 39
39int __kprobes arch_prepare_kprobe(struct kprobe *p) 40static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
40{ 41{
41 /* Make sure the probe isn't going on a difficult instruction */ 42 switch (insn[0] >> 8) {
42 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
43 return -EINVAL;
44
45 if ((unsigned long)p->addr & 0x01)
46 return -EINVAL;
47
48 /* Use the get_insn_slot() facility for correctness */
49 if (!(p->ainsn.insn = get_insn_slot()))
50 return -ENOMEM;
51
52 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
53
54 get_instruction_type(&p->ainsn);
55 p->opcode = *p->addr;
56 return 0;
57}
58
59int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
60{
61 switch (*(__u8 *) instruction) {
62 case 0x0c: /* bassm */ 43 case 0x0c: /* bassm */
63 case 0x0b: /* bsm */ 44 case 0x0b: /* bsm */
64 case 0x83: /* diag */ 45 case 0x83: /* diag */
@@ -67,7 +48,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
67 case 0xad: /* stosm */ 48 case 0xad: /* stosm */
68 return -EINVAL; 49 return -EINVAL;
69 } 50 }
70 switch (*(__u16 *) instruction) { 51 switch (insn[0]) {
71 case 0x0101: /* pr */ 52 case 0x0101: /* pr */
72 case 0xb25a: /* bsa */ 53 case 0xb25a: /* bsa */
73 case 0xb240: /* bakr */ 54 case 0xb240: /* bakr */
@@ -80,93 +61,92 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
80 return 0; 61 return 0;
81} 62}
82 63
83void __kprobes get_instruction_type(struct arch_specific_insn *ainsn) 64static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
84{ 65{
85 /* default fixup method */ 66 /* default fixup method */
86 ainsn->fixup = FIXUP_PSW_NORMAL; 67 int fixup = FIXUP_PSW_NORMAL;
87
88 /* save r1 operand */
89 ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
90
91 /* save the instruction length (pop 5-5) in bytes */
92 switch (*(__u8 *) (ainsn->insn) >> 6) {
93 case 0:
94 ainsn->ilen = 2;
95 break;
96 case 1:
97 case 2:
98 ainsn->ilen = 4;
99 break;
100 case 3:
101 ainsn->ilen = 6;
102 break;
103 }
104 68
105 switch (*(__u8 *) ainsn->insn) { 69 switch (insn[0] >> 8) {
106 case 0x05: /* balr */ 70 case 0x05: /* balr */
107 case 0x0d: /* basr */ 71 case 0x0d: /* basr */
108 ainsn->fixup = FIXUP_RETURN_REGISTER; 72 fixup = FIXUP_RETURN_REGISTER;
109 /* if r2 = 0, no branch will be taken */ 73 /* if r2 = 0, no branch will be taken */
110 if ((*ainsn->insn & 0x0f) == 0) 74 if ((insn[0] & 0x0f) == 0)
111 ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN; 75 fixup |= FIXUP_BRANCH_NOT_TAKEN;
112 break; 76 break;
113 case 0x06: /* bctr */ 77 case 0x06: /* bctr */
114 case 0x07: /* bcr */ 78 case 0x07: /* bcr */
115 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 79 fixup = FIXUP_BRANCH_NOT_TAKEN;
116 break; 80 break;
117 case 0x45: /* bal */ 81 case 0x45: /* bal */
118 case 0x4d: /* bas */ 82 case 0x4d: /* bas */
119 ainsn->fixup = FIXUP_RETURN_REGISTER; 83 fixup = FIXUP_RETURN_REGISTER;
120 break; 84 break;
121 case 0x47: /* bc */ 85 case 0x47: /* bc */
122 case 0x46: /* bct */ 86 case 0x46: /* bct */
123 case 0x86: /* bxh */ 87 case 0x86: /* bxh */
124 case 0x87: /* bxle */ 88 case 0x87: /* bxle */
125 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 89 fixup = FIXUP_BRANCH_NOT_TAKEN;
126 break; 90 break;
127 case 0x82: /* lpsw */ 91 case 0x82: /* lpsw */
128 ainsn->fixup = FIXUP_NOT_REQUIRED; 92 fixup = FIXUP_NOT_REQUIRED;
129 break; 93 break;
130 case 0xb2: /* lpswe */ 94 case 0xb2: /* lpswe */
131 if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) { 95 if ((insn[0] & 0xff) == 0xb2)
132 ainsn->fixup = FIXUP_NOT_REQUIRED; 96 fixup = FIXUP_NOT_REQUIRED;
133 }
134 break; 97 break;
135 case 0xa7: /* bras */ 98 case 0xa7: /* bras */
136 if ((*ainsn->insn & 0x0f) == 0x05) { 99 if ((insn[0] & 0x0f) == 0x05)
137 ainsn->fixup |= FIXUP_RETURN_REGISTER; 100 fixup |= FIXUP_RETURN_REGISTER;
138 }
139 break; 101 break;
140 case 0xc0: 102 case 0xc0:
141 if ((*ainsn->insn & 0x0f) == 0x00 /* larl */ 103 if ((insn[0] & 0x0f) == 0x00 || /* larl */
142 || (*ainsn->insn & 0x0f) == 0x05) /* brasl */ 104 (insn[0] & 0x0f) == 0x05) /* brasl */
143 ainsn->fixup |= FIXUP_RETURN_REGISTER; 105 fixup |= FIXUP_RETURN_REGISTER;
144 break; 106 break;
145 case 0xeb: 107 case 0xeb:
146 if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */ 108 if ((insn[2] & 0xff) == 0x44 || /* bxhg */
147 *(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */ 109 (insn[2] & 0xff) == 0x45) /* bxleg */
148 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 110 fixup = FIXUP_BRANCH_NOT_TAKEN;
149 }
150 break; 111 break;
151 case 0xe3: /* bctg */ 112 case 0xe3: /* bctg */
152 if (*(((__u8 *) ainsn->insn) + 5) == 0x46) { 113 if ((insn[2] & 0xff) == 0x46)
153 ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; 114 fixup = FIXUP_BRANCH_NOT_TAKEN;
154 }
155 break; 115 break;
156 } 116 }
117 return fixup;
157} 118}
158 119
120int __kprobes arch_prepare_kprobe(struct kprobe *p)
121{
122 if ((unsigned long) p->addr & 0x01)
123 return -EINVAL;
124
125 /* Make sure the probe isn't going on a difficult instruction */
126 if (is_prohibited_opcode(p->addr))
127 return -EINVAL;
128
129 p->opcode = *p->addr;
130 memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
131
132 return 0;
133}
134
135struct ins_replace_args {
136 kprobe_opcode_t *ptr;
137 kprobe_opcode_t opcode;
138};
139
159static int __kprobes swap_instruction(void *aref) 140static int __kprobes swap_instruction(void *aref)
160{ 141{
161 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 142 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
162 unsigned long status = kcb->kprobe_status; 143 unsigned long status = kcb->kprobe_status;
163 struct ins_replace_args *args = aref; 144 struct ins_replace_args *args = aref;
164 int rc;
165 145
166 kcb->kprobe_status = KPROBE_SWAP_INST; 146 kcb->kprobe_status = KPROBE_SWAP_INST;
167 rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new)); 147 probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
168 kcb->kprobe_status = status; 148 kcb->kprobe_status = status;
169 return rc; 149 return 0;
170} 150}
171 151
172void __kprobes arch_arm_kprobe(struct kprobe *p) 152void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -174,8 +154,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
174 struct ins_replace_args args; 154 struct ins_replace_args args;
175 155
176 args.ptr = p->addr; 156 args.ptr = p->addr;
177 args.old = p->opcode; 157 args.opcode = BREAKPOINT_INSTRUCTION;
178 args.new = BREAKPOINT_INSTRUCTION;
179 stop_machine(swap_instruction, &args, NULL); 158 stop_machine(swap_instruction, &args, NULL);
180} 159}
181 160
@@ -184,64 +163,69 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
184 struct ins_replace_args args; 163 struct ins_replace_args args;
185 164
186 args.ptr = p->addr; 165 args.ptr = p->addr;
187 args.old = BREAKPOINT_INSTRUCTION; 166 args.opcode = p->opcode;
188 args.new = p->opcode;
189 stop_machine(swap_instruction, &args, NULL); 167 stop_machine(swap_instruction, &args, NULL);
190} 168}
191 169
192void __kprobes arch_remove_kprobe(struct kprobe *p) 170void __kprobes arch_remove_kprobe(struct kprobe *p)
193{ 171{
194 if (p->ainsn.insn) {
195 free_insn_slot(p->ainsn.insn, 0);
196 p->ainsn.insn = NULL;
197 }
198} 172}
199 173
200static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 174static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
175 struct pt_regs *regs,
176 unsigned long ip)
201{ 177{
202 per_cr_bits kprobe_per_regs[1]; 178 struct per_regs per_kprobe;
203 179
204 memset(kprobe_per_regs, 0, sizeof(per_cr_bits)); 180 /* Set up the PER control registers %cr9-%cr11 */
205 regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE; 181 per_kprobe.control = PER_EVENT_IFETCH;
182 per_kprobe.start = ip;
183 per_kprobe.end = ip;
206 184
207 /* Set up the per control reg info, will pass to lctl */ 185 /* Save control regs and psw mask */
208 kprobe_per_regs[0].em_instruction_fetch = 1; 186 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
209 kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn; 187 kcb->kprobe_saved_imask = regs->psw.mask &
210 kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1; 188 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
211 189
212 /* Set the PER control regs, turns on single step for this address */ 190 /* Set PER control regs, turns on single step for the given address */
213 __ctl_load(kprobe_per_regs, 9, 11); 191 __ctl_load(per_kprobe, 9, 11);
214 regs->psw.mask |= PSW_MASK_PER; 192 regs->psw.mask |= PSW_MASK_PER;
215 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK); 193 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
194 regs->psw.addr = ip | PSW_ADDR_AMODE;
216} 195}
217 196
218static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) 197static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
198 struct pt_regs *regs,
199 unsigned long ip)
219{ 200{
220 kcb->prev_kprobe.kp = kprobe_running(); 201 /* Restore control regs and psw mask, set new psw address */
221 kcb->prev_kprobe.status = kcb->kprobe_status; 202 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
222 kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask; 203 regs->psw.mask &= ~PSW_MASK_PER;
223 memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl, 204 regs->psw.mask |= kcb->kprobe_saved_imask;
224 sizeof(kcb->kprobe_saved_ctl)); 205 regs->psw.addr = ip | PSW_ADDR_AMODE;
225} 206}
226 207
227static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 208/*
209 * Activate a kprobe by storing its pointer to current_kprobe. The
210 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
211 * two kprobes can be active, see KPROBE_REENTER.
212 */
213static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
228{ 214{
229 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; 215 kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
230 kcb->kprobe_status = kcb->prev_kprobe.status; 216 kcb->prev_kprobe.status = kcb->kprobe_status;
231 kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask; 217 __get_cpu_var(current_kprobe) = p;
232 memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
233 sizeof(kcb->kprobe_saved_ctl));
234} 218}
235 219
236static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 220/*
237 struct kprobe_ctlblk *kcb) 221 * Deactivate a kprobe by backing up to the previous state. If the
222 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
223 * for any other state prev_kprobe.kp will be NULL.
224 */
225static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
238{ 226{
239 __get_cpu_var(current_kprobe) = p; 227 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
240 /* Save the interrupt and per flags */ 228 kcb->kprobe_status = kcb->prev_kprobe.status;
241 kcb->kprobe_saved_imask = regs->psw.mask &
242 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
243 /* Save the control regs that govern PER */
244 __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
245} 229}
246 230
247void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 231void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
@@ -250,79 +234,104 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
250 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; 234 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
251 235
252 /* Replace the return addr with trampoline addr */ 236 /* Replace the return addr with trampoline addr */
253 regs->gprs[14] = (unsigned long)&kretprobe_trampoline; 237 regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
238}
239
240static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
241 struct kprobe *p)
242{
243 switch (kcb->kprobe_status) {
244 case KPROBE_HIT_SSDONE:
245 case KPROBE_HIT_ACTIVE:
246 kprobes_inc_nmissed_count(p);
247 break;
248 case KPROBE_HIT_SS:
249 case KPROBE_REENTER:
250 default:
251 /*
252 * A kprobe on the code path to single step an instruction
253 * is a BUG. The code path resides in the .kprobes.text
254 * section and is executed with interrupts disabled.
255 */
256 printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
257 dump_kprobe(p);
258 BUG();
259 }
254} 260}
255 261
256static int __kprobes kprobe_handler(struct pt_regs *regs) 262static int __kprobes kprobe_handler(struct pt_regs *regs)
257{ 263{
258 struct kprobe *p;
259 int ret = 0;
260 unsigned long *addr = (unsigned long *)
261 ((regs->psw.addr & PSW_ADDR_INSN) - 2);
262 struct kprobe_ctlblk *kcb; 264 struct kprobe_ctlblk *kcb;
265 struct kprobe *p;
263 266
264 /* 267 /*
265 * We don't want to be preempted for the entire 268 * We want to disable preemption for the entire duration of kprobe
266 * duration of kprobe processing 269 * processing. That includes the calls to the pre/post handlers
270 * and single stepping the kprobe instruction.
267 */ 271 */
268 preempt_disable(); 272 preempt_disable();
269 kcb = get_kprobe_ctlblk(); 273 kcb = get_kprobe_ctlblk();
274 p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));
270 275
271 /* Check we're not actually recursing */ 276 if (p) {
272 if (kprobe_running()) { 277 if (kprobe_running()) {
273 p = get_kprobe(addr); 278 /*
274 if (p) { 279 * We have hit a kprobe while another is still
275 if (kcb->kprobe_status == KPROBE_HIT_SS && 280 * active. This can happen in the pre and post
276 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { 281 * handler. Single step the instruction of the
277 regs->psw.mask &= ~PSW_MASK_PER; 282 * new probe but do not call any handler function
278 regs->psw.mask |= kcb->kprobe_saved_imask; 283 * of this secondary kprobe.
279 goto no_kprobe; 284 * push_kprobe and pop_kprobe saves and restores
280 } 285 * the currently active kprobe.
281 /* We have reentered the kprobe_handler(), since
282 * another probe was hit while within the handler.
283 * We here save the original kprobes variables and
284 * just single step on the instruction of the new probe
285 * without calling any user handlers.
286 */ 286 */
287 save_previous_kprobe(kcb); 287 kprobe_reenter_check(kcb, p);
288 set_current_kprobe(p, regs, kcb); 288 push_kprobe(kcb, p);
289 kprobes_inc_nmissed_count(p);
290 prepare_singlestep(p, regs);
291 kcb->kprobe_status = KPROBE_REENTER; 289 kcb->kprobe_status = KPROBE_REENTER;
292 return 1;
293 } else { 290 } else {
294 p = __get_cpu_var(current_kprobe); 291 /*
295 if (p->break_handler && p->break_handler(p, regs)) { 292 * If we have no pre-handler or it returned 0, we
296 goto ss_probe; 293 * continue with single stepping. If we have a
297 } 294 * pre-handler and it returned non-zero, it prepped
295 * for calling the break_handler below on re-entry
296 * for jprobe processing, so get out doing nothing
297 * more here.
298 */
299 push_kprobe(kcb, p);
300 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
301 if (p->pre_handler && p->pre_handler(p, regs))
302 return 1;
303 kcb->kprobe_status = KPROBE_HIT_SS;
298 } 304 }
299 goto no_kprobe; 305 enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
300 }
301
302 p = get_kprobe(addr);
303 if (!p)
304 /*
305 * No kprobe at this address. The fault has not been
306 * caused by a kprobe breakpoint. The race of breakpoint
307 * vs. kprobe remove does not exist because on s390 we
308 * use stop_machine to arm/disarm the breakpoints.
309 */
310 goto no_kprobe;
311
312 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
313 set_current_kprobe(p, regs, kcb);
314 if (p->pre_handler && p->pre_handler(p, regs))
315 /* handler has already set things up, so skip ss setup */
316 return 1; 306 return 1;
317 307 } else if (kprobe_running()) {
318ss_probe: 308 p = __get_cpu_var(current_kprobe);
319 prepare_singlestep(p, regs); 309 if (p->break_handler && p->break_handler(p, regs)) {
320 kcb->kprobe_status = KPROBE_HIT_SS; 310 /*
321 return 1; 311 * Continuation after the jprobe completed and
322 312 * caused the jprobe_return trap. The jprobe
323no_kprobe: 313 * break_handler "returns" to the original
314 * function that still has the kprobe breakpoint
315 * installed. We continue with single stepping.
316 */
317 kcb->kprobe_status = KPROBE_HIT_SS;
318 enable_singlestep(kcb, regs,
319 (unsigned long) p->ainsn.insn);
320 return 1;
321 } /* else:
322 * No kprobe at this address and the current kprobe
323 * has no break handler (no jprobe!). The kernel just
324 * exploded, let the standard trap handler pick up the
325 * pieces.
326 */
327 } /* else:
328 * No kprobe at this address and no active kprobe. The trap has
329 * not been caused by a kprobe breakpoint. The race of breakpoint
330 * vs. kprobe remove does not exist because on s390 we use
331 * stop_machine to arm/disarm the breakpoints.
332 */
324 preempt_enable_no_resched(); 333 preempt_enable_no_resched();
325 return ret; 334 return 0;
326} 335}
327 336
328/* 337/*
@@ -343,11 +352,12 @@ static void __used kretprobe_trampoline_holder(void)
343static int __kprobes trampoline_probe_handler(struct kprobe *p, 352static int __kprobes trampoline_probe_handler(struct kprobe *p,
344 struct pt_regs *regs) 353 struct pt_regs *regs)
345{ 354{
346 struct kretprobe_instance *ri = NULL; 355 struct kretprobe_instance *ri;
347 struct hlist_head *head, empty_rp; 356 struct hlist_head *head, empty_rp;
348 struct hlist_node *node, *tmp; 357 struct hlist_node *node, *tmp;
349 unsigned long flags, orig_ret_address = 0; 358 unsigned long flags, orig_ret_address;
350 unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; 359 unsigned long trampoline_address;
360 kprobe_opcode_t *correct_ret_addr;
351 361
352 INIT_HLIST_HEAD(&empty_rp); 362 INIT_HLIST_HEAD(&empty_rp);
353 kretprobe_hash_lock(current, &head, &flags); 363 kretprobe_hash_lock(current, &head, &flags);
@@ -365,30 +375,55 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
365 * real return address, and all the rest will point to 375 * real return address, and all the rest will point to
366 * kretprobe_trampoline 376 * kretprobe_trampoline
367 */ 377 */
378 ri = NULL;
379 orig_ret_address = 0;
380 correct_ret_addr = NULL;
381 trampoline_address = (unsigned long) &kretprobe_trampoline;
368 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 382 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
369 if (ri->task != current) 383 if (ri->task != current)
370 /* another task is sharing our hash bucket */ 384 /* another task is sharing our hash bucket */
371 continue; 385 continue;
372 386
373 if (ri->rp && ri->rp->handler) 387 orig_ret_address = (unsigned long) ri->ret_addr;
388
389 if (orig_ret_address != trampoline_address)
390 /*
391 * This is the real return address. Any other
392 * instances associated with this task are for
393 * other calls deeper on the call stack
394 */
395 break;
396 }
397
398 kretprobe_assert(ri, orig_ret_address, trampoline_address);
399
400 correct_ret_addr = ri->ret_addr;
401 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
402 if (ri->task != current)
403 /* another task is sharing our hash bucket */
404 continue;
405
406 orig_ret_address = (unsigned long) ri->ret_addr;
407
408 if (ri->rp && ri->rp->handler) {
409 ri->ret_addr = correct_ret_addr;
374 ri->rp->handler(ri, regs); 410 ri->rp->handler(ri, regs);
411 }
375 412
376 orig_ret_address = (unsigned long)ri->ret_addr;
377 recycle_rp_inst(ri, &empty_rp); 413 recycle_rp_inst(ri, &empty_rp);
378 414
379 if (orig_ret_address != trampoline_address) { 415 if (orig_ret_address != trampoline_address)
380 /* 416 /*
381 * This is the real return address. Any other 417 * This is the real return address. Any other
382 * instances associated with this task are for 418 * instances associated with this task are for
383 * other calls deeper on the call stack 419 * other calls deeper on the call stack
384 */ 420 */
385 break; 421 break;
386 }
387 } 422 }
388 kretprobe_assert(ri, orig_ret_address, trampoline_address); 423
389 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; 424 regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
390 425
391 reset_current_kprobe(); 426 pop_kprobe(get_kprobe_ctlblk());
392 kretprobe_hash_unlock(current, &flags); 427 kretprobe_hash_unlock(current, &flags);
393 preempt_enable_no_resched(); 428 preempt_enable_no_resched();
394 429
@@ -415,55 +450,42 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
415static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) 450static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
416{ 451{
417 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 452 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
453 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
454 int fixup = get_fixup_type(p->ainsn.insn);
418 455
419 regs->psw.addr &= PSW_ADDR_INSN; 456 if (fixup & FIXUP_PSW_NORMAL)
420 457 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
421 if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
422 regs->psw.addr = (unsigned long)p->addr +
423 ((unsigned long)regs->psw.addr -
424 (unsigned long)p->ainsn.insn);
425 458
426 if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN) 459 if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
427 if ((unsigned long)regs->psw.addr - 460 int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
428 (unsigned long)p->ainsn.insn == p->ainsn.ilen) 461 if (ip - (unsigned long) p->ainsn.insn == ilen)
429 regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen; 462 ip = (unsigned long) p->addr + ilen;
463 }
430 464
431 if (p->ainsn.fixup & FIXUP_RETURN_REGISTER) 465 if (fixup & FIXUP_RETURN_REGISTER) {
432 regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr + 466 int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
433 (regs->gprs[p->ainsn.reg] - 467 regs->gprs[reg] += (unsigned long) p->addr -
434 (unsigned long)p->ainsn.insn)) 468 (unsigned long) p->ainsn.insn;
435 | PSW_ADDR_AMODE; 469 }
436 470
437 regs->psw.addr |= PSW_ADDR_AMODE; 471 disable_singlestep(kcb, regs, ip);
438 /* turn off PER mode */
439 regs->psw.mask &= ~PSW_MASK_PER;
440 /* Restore the original per control regs */
441 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
442 regs->psw.mask |= kcb->kprobe_saved_imask;
443} 472}
444 473
445static int __kprobes post_kprobe_handler(struct pt_regs *regs) 474static int __kprobes post_kprobe_handler(struct pt_regs *regs)
446{ 475{
447 struct kprobe *cur = kprobe_running();
448 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 476 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
477 struct kprobe *p = kprobe_running();
449 478
450 if (!cur) 479 if (!p)
451 return 0; 480 return 0;
452 481
453 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { 482 if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
454 kcb->kprobe_status = KPROBE_HIT_SSDONE; 483 kcb->kprobe_status = KPROBE_HIT_SSDONE;
455 cur->post_handler(cur, regs, 0); 484 p->post_handler(p, regs, 0);
456 } 485 }
457 486
458 resume_execution(cur, regs); 487 resume_execution(p, regs);
459 488 pop_kprobe(kcb);
460 /*Restore back the original saved kprobes variables and continue. */
461 if (kcb->kprobe_status == KPROBE_REENTER) {
462 restore_previous_kprobe(kcb);
463 goto out;
464 }
465 reset_current_kprobe();
466out:
467 preempt_enable_no_resched(); 489 preempt_enable_no_resched();
468 490
469 /* 491 /*
@@ -471,17 +493,16 @@ out:
471 * will have PER set, in which case, continue the remaining processing 493 * will have PER set, in which case, continue the remaining processing
472 * of do_single_step, as if this is not a probe hit. 494 * of do_single_step, as if this is not a probe hit.
473 */ 495 */
474 if (regs->psw.mask & PSW_MASK_PER) { 496 if (regs->psw.mask & PSW_MASK_PER)
475 return 0; 497 return 0;
476 }
477 498
478 return 1; 499 return 1;
479} 500}
480 501
481int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) 502static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
482{ 503{
483 struct kprobe *cur = kprobe_running();
484 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 504 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
505 struct kprobe *p = kprobe_running();
485 const struct exception_table_entry *entry; 506 const struct exception_table_entry *entry;
486 507
487 switch(kcb->kprobe_status) { 508 switch(kcb->kprobe_status) {
@@ -497,13 +518,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
497 * and allow the page fault handler to continue as a 518 * and allow the page fault handler to continue as a
498 * normal page fault. 519 * normal page fault.
499 */ 520 */
500 regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE; 521 disable_singlestep(kcb, regs, (unsigned long) p->addr);
501 regs->psw.mask &= ~PSW_MASK_PER; 522 pop_kprobe(kcb);
502 regs->psw.mask |= kcb->kprobe_saved_imask;
503 if (kcb->kprobe_status == KPROBE_REENTER)
504 restore_previous_kprobe(kcb);
505 else
506 reset_current_kprobe();
507 preempt_enable_no_resched(); 523 preempt_enable_no_resched();
508 break; 524 break;
509 case KPROBE_HIT_ACTIVE: 525 case KPROBE_HIT_ACTIVE:
@@ -513,7 +529,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
513 * we can also use npre/npostfault count for accounting 529
514 * these specific fault cases. 530 * these specific fault cases.
515 */ 531 */
516 kprobes_inc_nmissed_count(cur); 532 kprobes_inc_nmissed_count(p);
517 533
518 /* 534 /*
519 * We come here because instructions in the pre/post 535 * We come here because instructions in the pre/post
@@ -522,7 +538,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
522 * copy_from_user(), get_user() etc. Let the 538 * copy_from_user(), get_user() etc. Let the
523 * user-specified handler try to fix it first. 539 * user-specified handler try to fix it first.
524 */ 540 */
525 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 541 if (p->fault_handler && p->fault_handler(p, regs, trapnr))
526 return 1; 542 return 1;
527 543
528 /* 544 /*
@@ -546,57 +562,71 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
546 return 0; 562 return 0;
547} 563}
548 564
565int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
566{
567 int ret;
568
569 if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
570 local_irq_disable();
571 ret = kprobe_trap_handler(regs, trapnr);
572 if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
573 local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
574 return ret;
575}
576
549/* 577/*
550 * Wrapper routine for handling exceptions. 578
551 */ 579 */
552int __kprobes kprobe_exceptions_notify(struct notifier_block *self, 580int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
553 unsigned long val, void *data) 581 unsigned long val, void *data)
554{ 582{
555 struct die_args *args = (struct die_args *)data; 583 struct die_args *args = (struct die_args *) data;
584 struct pt_regs *regs = args->regs;
556 int ret = NOTIFY_DONE; 585 int ret = NOTIFY_DONE;
557 586
587 if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
588 local_irq_disable();
589
558 switch (val) { 590 switch (val) {
559 case DIE_BPT: 591 case DIE_BPT:
560 if (kprobe_handler(args->regs)) 592 if (kprobe_handler(regs))
561 ret = NOTIFY_STOP; 593 ret = NOTIFY_STOP;
562 break; 594 break;
563 case DIE_SSTEP: 595 case DIE_SSTEP:
564 if (post_kprobe_handler(args->regs)) 596 if (post_kprobe_handler(regs))
565 ret = NOTIFY_STOP; 597 ret = NOTIFY_STOP;
566 break; 598 break;
567 case DIE_TRAP: 599 case DIE_TRAP:
568 /* kprobe_running() needs smp_processor_id() */ 600 if (!preemptible() && kprobe_running() &&
569 preempt_disable(); 601 kprobe_trap_handler(regs, args->trapnr))
570 if (kprobe_running() &&
571 kprobe_fault_handler(args->regs, args->trapnr))
572 ret = NOTIFY_STOP; 602 ret = NOTIFY_STOP;
573 preempt_enable();
574 break; 603 break;
575 default: 604 default:
576 break; 605 break;
577 } 606 }
607
608 if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
609 local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
610
578 return ret; 611 return ret;
579} 612}
580 613
581int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 614int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
582{ 615{
583 struct jprobe *jp = container_of(p, struct jprobe, kp); 616 struct jprobe *jp = container_of(p, struct jprobe, kp);
584 unsigned long addr;
585 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 617 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
618 unsigned long stack;
586 619
587 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); 620 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
588 621
589 /* setup return addr to the jprobe handler routine */ 622 /* setup return addr to the jprobe handler routine */
590 regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE; 623 regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
624 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
591 625
592 /* r14 is the function return address */
593 kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
594 /* r15 is the stack pointer */ 626 /* r15 is the stack pointer */
595 kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15]; 627 stack = (unsigned long) regs->gprs[15];
596 addr = (unsigned long)kcb->jprobe_saved_r15;
597 628
598 memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr, 629 memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
599 MIN_STACK_SIZE(addr));
600 return 1; 630 return 1;
601} 631}
602 632
@@ -613,30 +643,29 @@ void __kprobes jprobe_return_end(void)
613int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 643int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
614{ 644{
615 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 645 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
616 unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15); 646 unsigned long stack;
647
648 stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
617 649
618 /* Put the regs back */ 650 /* Put the regs back */
619 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); 651 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
620 /* put the stack back */ 652 /* put the stack back */
621 memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, 653 memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
622 MIN_STACK_SIZE(stack_addr));
623 preempt_enable_no_resched(); 654 preempt_enable_no_resched();
624 return 1; 655 return 1;
625} 656}
626 657
627static struct kprobe trampoline_p = { 658static struct kprobe trampoline = {
628 .addr = (kprobe_opcode_t *) & kretprobe_trampoline, 659 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
629 .pre_handler = trampoline_probe_handler 660 .pre_handler = trampoline_probe_handler
630}; 661};
631 662
632int __init arch_init_kprobes(void) 663int __init arch_init_kprobes(void)
633{ 664{
634 return register_kprobe(&trampoline_p); 665 return register_kprobe(&trampoline);
635} 666}
636 667
637int __kprobes arch_trampoline_kprobe(struct kprobe *p) 668int __kprobes arch_trampoline_kprobe(struct kprobe *p)
638{ 669{
639 if (p->addr == (kprobe_opcode_t *) & kretprobe_trampoline) 670 return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
640 return 1;
641 return 0;
642} 671}
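
The rewritten handler paths above are exercised by any ordinary kprobes user. A minimal, hedged module sketch; the probed symbol "do_fork" is only an example:

/* Minimal kprobes user exercising kprobe_handler() and
 * post_kprobe_handler() above. */
#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* 0: continue with single-stepping */
}

static struct kprobe demo_kp = {
	.symbol_name = "do_fork",	/* example target */
	.pre_handler = demo_pre,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
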
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index a922d51df6bf..b09b9c62573e 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -12,6 +12,7 @@
12#include <linux/kexec.h> 12#include <linux/kexec.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/reboot.h> 14#include <linux/reboot.h>
15#include <linux/ftrace.h>
15#include <asm/cio.h> 16#include <asm/cio.h>
16#include <asm/setup.h> 17#include <asm/setup.h>
17#include <asm/pgtable.h> 18#include <asm/pgtable.h>
@@ -71,6 +72,7 @@ static void __machine_kexec(void *data)
71 72
72void machine_kexec(struct kimage *image) 73void machine_kexec(struct kimage *image)
73{ 74{
75 tracer_disable();
74 smp_send_stop(); 76 smp_send_stop();
75 smp_switch_to_ipl_cpu(__machine_kexec, image); 77 smp_switch_to_ipl_cpu(__machine_kexec, image);
76} 78}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index dfe015d7398c..1e6a55795628 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -7,6 +7,8 @@
7 7
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9 9
10 .section .kprobes.text, "ax"
11
10 .globl ftrace_stub 12 .globl ftrace_stub
11ftrace_stub: 13ftrace_stub:
12 br %r14 14 br %r14
@@ -16,22 +18,12 @@ _mcount:
16#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
17 br %r14 19 br %r14
18 20
19 .data
20 .globl ftrace_dyn_func
21ftrace_dyn_func:
22 .long ftrace_stub
23 .previous
24
25 .globl ftrace_caller 21 .globl ftrace_caller
26ftrace_caller: 22ftrace_caller:
27#endif 23#endif
28 stm %r2,%r5,16(%r15) 24 stm %r2,%r5,16(%r15)
29 bras %r1,2f 25 bras %r1,2f
30#ifdef CONFIG_DYNAMIC_FTRACE
310: .long ftrace_dyn_func
32#else
330: .long ftrace_trace_function 260: .long ftrace_trace_function
34#endif
351: .long function_trace_stop 271: .long function_trace_stop
362: l %r2,1b-0b(%r1) 282: l %r2,1b-0b(%r1)
37 icm %r2,0xf,0(%r2) 29 icm %r2,0xf,0(%r2)
@@ -47,21 +39,15 @@ ftrace_caller:
47 l %r14,0(%r14) 39 l %r14,0(%r14)
48 basr %r14,%r14 40 basr %r14,%r14
49#ifdef CONFIG_FUNCTION_GRAPH_TRACER 41#ifdef CONFIG_FUNCTION_GRAPH_TRACER
50#ifdef CONFIG_DYNAMIC_FTRACE 42 l %r2,100(%r15)
43 l %r3,152(%r15)
51 .globl ftrace_graph_caller 44 .globl ftrace_graph_caller
52ftrace_graph_caller: 45ftrace_graph_caller:
53 # This unconditional branch gets runtime patched. Change only if 46# The bras instruction gets runtime patched to call prepare_ftrace_return.
54 # you know what you are doing. See ftrace_enable_graph_caller(). 47# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
55 j 1f 48# bras %r14,prepare_ftrace_return
56#endif 49 bras %r14,0f
57 bras %r1,0f 500: st %r2,100(%r15)
58 .long prepare_ftrace_return
590: l %r2,152(%r15)
60 l %r4,0(%r1)
61 l %r3,100(%r15)
62 basr %r14,%r4
63 st %r2,100(%r15)
641:
65#endif 51#endif
66 ahi %r15,96 52 ahi %r15,96
67 l %r14,56(%r15) 53 l %r14,56(%r15)
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index c37211c6092b..e73667286ac0 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -7,6 +7,8 @@
7 7
8#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
9 9
10 .section .kprobes.text, "ax"
11
10 .globl ftrace_stub 12 .globl ftrace_stub
11ftrace_stub: 13ftrace_stub:
12 br %r14 14 br %r14
@@ -16,12 +18,6 @@ _mcount:
16#ifdef CONFIG_DYNAMIC_FTRACE 18#ifdef CONFIG_DYNAMIC_FTRACE
17 br %r14 19 br %r14
18 20
19 .data
20 .globl ftrace_dyn_func
21ftrace_dyn_func:
22 .quad ftrace_stub
23 .previous
24
25 .globl ftrace_caller 21 .globl ftrace_caller
26ftrace_caller: 22ftrace_caller:
27#endif 23#endif
@@ -35,26 +31,19 @@ ftrace_caller:
35 stg %r1,__SF_BACKCHAIN(%r15) 31 stg %r1,__SF_BACKCHAIN(%r15)
36 lgr %r2,%r14 32 lgr %r2,%r14
37 lg %r3,168(%r15) 33 lg %r3,168(%r15)
38#ifdef CONFIG_DYNAMIC_FTRACE
39 larl %r14,ftrace_dyn_func
40#else
41 larl %r14,ftrace_trace_function 34 larl %r14,ftrace_trace_function
42#endif
43 lg %r14,0(%r14) 35 lg %r14,0(%r14)
44 basr %r14,%r14 36 basr %r14,%r14
45#ifdef CONFIG_FUNCTION_GRAPH_TRACER 37#ifdef CONFIG_FUNCTION_GRAPH_TRACER
46#ifdef CONFIG_DYNAMIC_FTRACE 38 lg %r2,168(%r15)
39 lg %r3,272(%r15)
47 .globl ftrace_graph_caller 40 .globl ftrace_graph_caller
48ftrace_graph_caller: 41ftrace_graph_caller:
49 # This unconditional branch gets runtime patched. Change only if 42# The bras instruction gets runtime patched to call prepare_ftrace_return.
50 # you know what you are doing. See ftrace_enable_graph_caller(). 43# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
51 j 0f 44# bras %r14,prepare_ftrace_return
52#endif 45 bras %r14,0f
53 lg %r2,272(%r15) 460: stg %r2,168(%r15)
54 lg %r3,168(%r15)
55 brasl %r14,prepare_ftrace_return
56 stg %r2,168(%r15)
570:
58#endif 47#endif
59 aghi %r15,160 48 aghi %r15,160
60 lmg %r2,%r5,32(%r15) 49 lmg %r2,%r5,32(%r15)
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 559af0d07878..0fbe4e32f7ba 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -54,11 +54,11 @@ void detect_memory_layout(struct mem_chunk chunk[])
54 * right thing and we don't get scheduled away with low address 54 * right thing and we don't get scheduled away with low address
55 * protection disabled. 55 * protection disabled.
56 */ 56 */
57 flags = __raw_local_irq_stnsm(0xf8); 57 flags = __arch_local_irq_stnsm(0xf8);
58 __ctl_store(cr0, 0, 0); 58 __ctl_store(cr0, 0, 0);
59 __ctl_clear_bit(0, 28); 59 __ctl_clear_bit(0, 28);
60 find_memory_chunks(chunk); 60 find_memory_chunks(chunk);
61 __ctl_load(cr0, 0, 0); 61 __ctl_load(cr0, 0, 0);
62 __raw_local_irq_ssm(flags); 62 arch_local_irq_restore(flags);
63} 63}
64EXPORT_SYMBOL(detect_memory_layout); 64EXPORT_SYMBOL(detect_memory_layout);
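
This hunk only tracks the irqflags API rename (__raw_local_irq_stnsm becomes __arch_local_irq_stnsm, __raw_local_irq_ssm becomes arch_local_irq_restore); the save/modify/restore shape is unchanged. A hedged sketch of that shape with a hypothetical callback, assuming the s390-internal helpers visible in the hunk:

/* Sketch of the critical-section pattern in detect_memory_layout();
 * the callback is hypothetical. */
static void with_low_addr_protection_off(void (*fn)(void))
{
	unsigned long flags, cr0;

	flags = __arch_local_irq_stnsm(0xf8);	/* mask interrupts via stnsm */
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);			/* low-address protection off */
	fn();
	__ctl_load(cr0, 0, 0);			/* restore control register 0 */
	arch_local_irq_restore(flags);		/* restore interrupt mask */
}
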
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index ac151399ef34..fab88431a06f 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -8,6 +8,7 @@
8 * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */ 9 */
10 10
11#include <linux/kernel_stat.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/errno.h> 13#include <linux/errno.h>
13#include <linux/hardirq.h> 14#include <linux/hardirq.h>
@@ -95,7 +96,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
95static int notrace s390_revalidate_registers(struct mci *mci) 96static int notrace s390_revalidate_registers(struct mci *mci)
96{ 97{
97 int kill_task; 98 int kill_task;
98 u64 tmpclock;
99 u64 zero; 99 u64 zero;
100 void *fpt_save_area, *fpt_creg_save_area; 100 void *fpt_save_area, *fpt_creg_save_area;
101 101
@@ -214,11 +214,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
214 : "0", "cc"); 214 : "0", "cc");
215#endif 215#endif
216 /* Revalidate clock comparator register */ 216 /* Revalidate clock comparator register */
217 asm volatile( 217 if (S390_lowcore.clock_comparator == -1)
218 " stck 0(%1)\n" 218 set_clock_comparator(S390_lowcore.mcck_clock);
219 " sckc 0(%1)" 219 else
220 : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); 220 set_clock_comparator(S390_lowcore.clock_comparator);
221
222 /* Check if old PSW is valid */ 221 /* Check if old PSW is valid */
223 if (!mci->wp) 222 if (!mci->wp)
224 /* 223 /*
@@ -257,7 +256,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
257 nmi_enter(); 256 nmi_enter();
258 s390_idle_check(regs, S390_lowcore.mcck_clock, 257 s390_idle_check(regs, S390_lowcore.mcck_clock,
259 S390_lowcore.mcck_enter_timer); 258 S390_lowcore.mcck_enter_timer);
260 259 kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
261 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
262 mcck = &__get_cpu_var(cpu_mcck); 261 mcck = &__get_cpu_var(cpu_mcck);
263 umode = user_mode(regs); 262 umode = user_mode(regs);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index d3a2d1c6438e..541a7509faeb 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -9,38 +9,27 @@
9 9
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <linux/cpu.h> 11#include <linux/cpu.h>
12#include <linux/errno.h>
13#include <linux/sched.h> 12#include <linux/sched.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
15#include <linux/mm.h> 14#include <linux/mm.h>
16#include <linux/fs.h>
17#include <linux/smp.h> 15#include <linux/smp.h>
18#include <linux/stddef.h>
19#include <linux/slab.h> 16#include <linux/slab.h>
20#include <linux/unistd.h>
21#include <linux/ptrace.h>
22#include <linux/vmalloc.h>
23#include <linux/user.h>
24#include <linux/interrupt.h> 17#include <linux/interrupt.h>
25#include <linux/delay.h>
26#include <linux/reboot.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/notifier.h>
30#include <linux/tick.h> 18#include <linux/tick.h>
31#include <linux/elfcore.h> 19#include <linux/personality.h>
32#include <linux/kernel_stat.h>
33#include <linux/syscalls.h> 20#include <linux/syscalls.h>
34#include <linux/compat.h> 21#include <linux/compat.h>
35#include <asm/compat.h> 22#include <linux/kprobes.h>
36#include <asm/uaccess.h> 23#include <linux/random.h>
37#include <asm/pgtable.h> 24#include <linux/module.h>
38#include <asm/system.h> 25#include <asm/system.h>
39#include <asm/io.h> 26#include <asm/io.h>
40#include <asm/processor.h> 27#include <asm/processor.h>
41#include <asm/irq.h> 28#include <asm/irq.h>
42#include <asm/timer.h> 29#include <asm/timer.h>
43#include <asm/nmi.h> 30#include <asm/nmi.h>
31#include <asm/compat.h>
32#include <asm/smp.h>
44#include "entry.h" 33#include "entry.h"
45 34
46asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 35asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -75,18 +64,13 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
75 */ 64 */
76static void default_idle(void) 65static void default_idle(void)
77{ 66{
78 /* CPU is going idle. */ 67 if (cpu_is_offline(smp_processor_id()))
68 cpu_die();
79 local_irq_disable(); 69 local_irq_disable();
80 if (need_resched()) { 70 if (need_resched()) {
81 local_irq_enable(); 71 local_irq_enable();
82 return; 72 return;
83 } 73 }
84#ifdef CONFIG_HOTPLUG_CPU
85 if (cpu_is_offline(smp_processor_id())) {
86 preempt_enable_no_resched();
87 cpu_die();
88 }
89#endif
90 local_mcck_disable(); 74 local_mcck_disable();
91 if (test_thread_flag(TIF_MCCK_PENDING)) { 75 if (test_thread_flag(TIF_MCCK_PENDING)) {
92 local_mcck_enable(); 76 local_mcck_enable();
@@ -116,15 +100,17 @@ void cpu_idle(void)
116 } 100 }
117} 101}
118 102
119extern void kernel_thread_starter(void); 103extern void __kprobes kernel_thread_starter(void);
120 104
121asm( 105asm(
122 ".align 4\n" 106 ".section .kprobes.text, \"ax\"\n"
107 ".global kernel_thread_starter\n"
123 "kernel_thread_starter:\n" 108 "kernel_thread_starter:\n"
124 " la 2,0(10)\n" 109 " la 2,0(10)\n"
125 " basr 14,9\n" 110 " basr 14,9\n"
126 " la 2,0\n" 111 " la 2,0\n"
127 " br 11\n"); 112 " br 11\n"
113 ".previous\n");
128 114
129int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 115int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
130{ 116{
@@ -214,8 +200,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
214 /* start new process with ar4 pointing to the correct address space */ 200 /* start new process with ar4 pointing to the correct address space */
215 p->thread.mm_segment = get_fs(); 201 p->thread.mm_segment = get_fs();
216 /* Don't copy debug registers */ 202 /* Don't copy debug registers */
217 memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); 203 memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
204 memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
218 clear_tsk_thread_flag(p, TIF_SINGLE_STEP); 205 clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
206 clear_tsk_thread_flag(p, TIF_PER_TRAP);
219 /* Initialize per thread user and system timer values */ 207 /* Initialize per thread user and system timer values */
220 ti = task_thread_info(p); 208 ti = task_thread_info(p);
221 ti->user_timer = 0; 209 ti->user_timer = 0;
@@ -331,3 +319,39 @@ unsigned long get_wchan(struct task_struct *p)
331 } 319 }
332 return 0; 320 return 0;
333} 321}
322
323unsigned long arch_align_stack(unsigned long sp)
324{
325 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
326 sp -= get_random_int() & ~PAGE_MASK;
327 return sp & ~0xf;
328}
329
330static inline unsigned long brk_rnd(void)
331{
332 /* 8MB for 32bit, 1GB for 64bit */
333 if (is_32bit_task())
334 return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
335 else
336 return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
337}
338
339unsigned long arch_randomize_brk(struct mm_struct *mm)
340{
341 unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
342
343 if (ret < mm->brk)
344 return mm->brk;
345 return ret;
346}
347
348unsigned long randomize_et_dyn(unsigned long base)
349{
350 unsigned long ret = PAGE_ALIGN(base + brk_rnd());
351
352 if (!(current->flags & PF_RANDOMIZE))
353 return base;
354 if (ret < base)
355 return base;
356 return ret;
357}
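
The masks in brk_rnd() give the ranges claimed in its comment directly: 0x7ff pages of 4 KB is just under 8 MB, 0x3ffff pages just under 1 GB. A quick standalone check, assuming the usual PAGE_SHIFT of 12:

/* Check the randomization ranges claimed in brk_rnd(),
 * assuming PAGE_SHIFT == 12 (4 KB pages). */
#include <stdio.h>

int main(void)
{
	unsigned long max32 = 0x7ffUL << 12;	/* 0x007ff000 */
	unsigned long max64 = 0x3ffffUL << 12;	/* 0x3ffff000 */

	printf("32 bit: %lu KB (~8 MB)\n", max32 >> 10);	/* 8188 KB */
	printf("64 bit: %lu MB (~1 GB)\n", max64 >> 20);	/* 1023 MB */
	return 0;
}
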
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index ecb2d02b02e4..311e9d712888 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -13,7 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/seq_file.h> 14#include <linux/seq_file.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16 16#include <linux/cpu.h>
17#include <asm/elf.h> 17#include <asm/elf.h>
18#include <asm/lowcore.h> 18#include <asm/lowcore.h>
19#include <asm/param.h> 19#include <asm/param.h>
@@ -35,17 +35,6 @@ void __cpuinit cpu_init(void)
35} 35}
36 36
37/* 37/*
38 * print_cpu_info - print basic information about a cpu
39 */
40void __cpuinit print_cpu_info(void)
41{
42 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
43
44 pr_info("Processor %d started, address %d, identification %06X\n",
45 S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident);
46}
47
48/*
49 * show_cpuinfo - Get information on one CPU for use by procfs. 38 * show_cpuinfo - Get information on one CPU for use by procfs.
50 */ 39 */
51static int show_cpuinfo(struct seq_file *m, void *v) 40static int show_cpuinfo(struct seq_file *m, void *v)
@@ -57,9 +46,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
57 unsigned long n = (unsigned long) v - 1; 46 unsigned long n = (unsigned long) v - 1;
58 int i; 47 int i;
59 48
60 s390_adjust_jiffies();
61 preempt_disable();
62 if (!n) { 49 if (!n) {
50 s390_adjust_jiffies();
63 seq_printf(m, "vendor_id : IBM/S390\n" 51 seq_printf(m, "vendor_id : IBM/S390\n"
64 "# processors : %i\n" 52 "# processors : %i\n"
65 "bogomips per cpu: %lu.%02lu\n", 53 "bogomips per cpu: %lu.%02lu\n",
@@ -71,7 +59,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
71 seq_printf(m, "%s ", hwcap_str[i]); 59 seq_printf(m, "%s ", hwcap_str[i]);
72 seq_puts(m, "\n"); 60 seq_puts(m, "\n");
73 } 61 }
74 62 get_online_cpus();
75 if (cpu_online(n)) { 63 if (cpu_online(n)) {
76 struct cpuid *id = &per_cpu(cpu_id, n); 64 struct cpuid *id = &per_cpu(cpu_id, n);
77 seq_printf(m, "processor %li: " 65 seq_printf(m, "processor %li: "
@@ -80,7 +68,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
80 "machine = %04X\n", 68 "machine = %04X\n",
81 n, id->version, id->ident, id->machine); 69 n, id->version, id->ident, id->machine);
82 } 70 }
83 preempt_enable(); 71 put_online_cpus();
84 return 0; 72 return 0;
85} 73}
86 74
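
show_cpuinfo now takes the CPU-hotplug read lock instead of merely disabling preemption, so a CPU cannot come or go while its ID registers are read. The general pattern, as a short hedged sketch:

/* The standard CPU-hotplug read-lock pattern used by show_cpuinfo()
 * above; the walk itself is only illustrative. */
#include <linux/kernel.h>
#include <linux/cpu.h>

static void walk_online_cpus(void)
{
	int cpu;

	get_online_cpus();		/* block hotplug while we iterate */
	for_each_online_cpu(cpu)
		pr_info("cpu %d online\n", cpu);
	put_online_cpus();
}
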
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 83339d33c4b1..ef86ad243986 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1,25 +1,9 @@
1/* 1/*
2 * arch/s390/kernel/ptrace.c 2 * Ptrace user space interface.
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp. 1999,2010
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Author(s): Denis Joseph Barrow
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Based on PowerPC version
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 *
12 * Derived from "arch/m68k/kernel/ptrace.c"
13 * Copyright (C) 1994 by Hamish Macdonald
14 * Taken from linux/kernel/ptrace.c and modified for M680x0.
15 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
16 *
17 * Modified by Cort Dougan (cort@cs.nmt.edu)
18 *
19 *
20 * This file is subject to the terms and conditions of the GNU General
21 * Public License. See the file README.legal in the main directory of
22 * this archive for more details.
23 */ 7 */
24 8
25#include <linux/kernel.h> 9#include <linux/kernel.h>
@@ -61,76 +45,58 @@ enum s390_regset {
61 REGSET_GENERAL_EXTENDED, 45 REGSET_GENERAL_EXTENDED,
62}; 46};
63 47
64static void 48void update_per_regs(struct task_struct *task)
65FixPerRegisters(struct task_struct *task)
66{ 49{
67 struct pt_regs *regs; 50 static const struct per_regs per_single_step = {
68 per_struct *per_info; 51 .control = PER_EVENT_IFETCH,
69 per_cr_words cr_words; 52 .start = 0,
70 53 .end = PSW_ADDR_INSN,
71 regs = task_pt_regs(task); 54 };
72 per_info = (per_struct *) &task->thread.per_info; 55 struct pt_regs *regs = task_pt_regs(task);
73 per_info->control_regs.bits.em_instruction_fetch = 56 struct thread_struct *thread = &task->thread;
74 per_info->single_step | per_info->instruction_fetch; 57 const struct per_regs *new;
75 58 struct per_regs old;
76 if (per_info->single_step) { 59
77 per_info->control_regs.bits.starting_addr = 0; 60 /* TIF_SINGLE_STEP overrides the user specified PER registers. */
78#ifdef CONFIG_COMPAT 61 new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
79 if (is_compat_task()) 62 &per_single_step : &thread->per_user;
80 per_info->control_regs.bits.ending_addr = 0x7fffffffUL; 63
81 else 64 /* Take care of the PER enablement bit in the PSW. */
82#endif 65 if (!(new->control & PER_EVENT_MASK)) {
83 per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
84 } else {
85 per_info->control_regs.bits.starting_addr =
86 per_info->starting_addr;
87 per_info->control_regs.bits.ending_addr =
88 per_info->ending_addr;
89 }
90 /*
91 * if any of the control reg tracing bits are on
92 * we switch on per in the psw
93 */
94 if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
95 regs->psw.mask |= PSW_MASK_PER;
96 else
97 regs->psw.mask &= ~PSW_MASK_PER; 66 regs->psw.mask &= ~PSW_MASK_PER;
98 67 return;
99 if (per_info->control_regs.bits.em_storage_alteration)
100 per_info->control_regs.bits.storage_alt_space_ctl = 1;
101 else
102 per_info->control_regs.bits.storage_alt_space_ctl = 0;
103
104 if (task == current) {
105 __ctl_store(cr_words, 9, 11);
106 if (memcmp(&cr_words, &per_info->control_regs.words,
107 sizeof(cr_words)) != 0)
108 __ctl_load(per_info->control_regs.words, 9, 11);
109 } 68 }
69 regs->psw.mask |= PSW_MASK_PER;
70 __ctl_store(old, 9, 11);
71 if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
72 __ctl_load(*new, 9, 11);
110} 73}
111 74
112void user_enable_single_step(struct task_struct *task) 75void user_enable_single_step(struct task_struct *task)
113{ 76{
114 task->thread.per_info.single_step = 1; 77 set_tsk_thread_flag(task, TIF_SINGLE_STEP);
115 FixPerRegisters(task); 78 if (task == current)
79 update_per_regs(task);
116} 80}
117 81
118void user_disable_single_step(struct task_struct *task) 82void user_disable_single_step(struct task_struct *task)
119{ 83{
120 task->thread.per_info.single_step = 0; 84 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
121 FixPerRegisters(task); 85 if (task == current)
86 update_per_regs(task);
122} 87}
123 88
124/* 89/*
125 * Called by kernel/ptrace.c when detaching.. 90 * Called by kernel/ptrace.c when detaching..
126 * 91 *
127 * Make sure single step bits etc are not set. 92 * Clear all debugging related fields.
128 */ 93 */
129void 94void ptrace_disable(struct task_struct *task)
130ptrace_disable(struct task_struct *child)
131{ 95{
132 /* make sure the single step bit is not set. */ 96 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
133 user_disable_single_step(child); 97 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
98 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
99 clear_tsk_thread_flag(task, TIF_PER_TRAP);
134} 100}
135 101
136#ifndef CONFIG_64BIT 102#ifndef CONFIG_64BIT
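
update_per_regs() is what makes PTRACE_SINGLESTEP work here: TIF_SINGLE_STEP overrides any user-specified PER set with an instruction-fetch range covering the whole address space. From user space the machinery is driven through plain ptrace(2); a minimal hedged tracer sketch, error handling omitted and /bin/true just an example tracee:

/* User-space view of the single-step path above. */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		execl("/bin/true", "true", (char *) 0);
		_exit(1);
	}
	waitpid(pid, 0, 0);
	/* Kernel sets TIF_SINGLE_STEP and loads the PER ifetch range
	 * 0..PSW_ADDR_INSN into %cr9-%cr11 before resuming the tracee. */
	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
	waitpid(pid, 0, 0);
	ptrace(PTRACE_CONT, pid, 0, 0);
	waitpid(pid, 0, 0);
	return 0;
}
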
@@ -139,6 +105,47 @@ ptrace_disable(struct task_struct *child)
139# define __ADDR_MASK 7 105# define __ADDR_MASK 7
140#endif 106#endif
141 107
108static inline unsigned long __peek_user_per(struct task_struct *child,
109 addr_t addr)
110{
111 struct per_struct_kernel *dummy = NULL;
112
113 if (addr == (addr_t) &dummy->cr9)
114 /* Control bits of the active per set. */
115 return test_thread_flag(TIF_SINGLE_STEP) ?
116 PER_EVENT_IFETCH : child->thread.per_user.control;
117 else if (addr == (addr_t) &dummy->cr10)
118 /* Start address of the active per set. */
119 return test_thread_flag(TIF_SINGLE_STEP) ?
120 0 : child->thread.per_user.start;
121 else if (addr == (addr_t) &dummy->cr11)
122 /* End address of the active per set. */
123 return test_thread_flag(TIF_SINGLE_STEP) ?
124 PSW_ADDR_INSN : child->thread.per_user.end;
125 else if (addr == (addr_t) &dummy->bits)
126 /* Single-step bit. */
127 return test_thread_flag(TIF_SINGLE_STEP) ?
128 (1UL << (BITS_PER_LONG - 1)) : 0;
129 else if (addr == (addr_t) &dummy->starting_addr)
130 /* Start address of the user specified per set. */
131 return child->thread.per_user.start;
132 else if (addr == (addr_t) &dummy->ending_addr)
133 /* End address of the user specified per set. */
134 return child->thread.per_user.end;
135 else if (addr == (addr_t) &dummy->perc_atmid)
136 /* PER code, ATMID and AI of the last PER trap */
137 return (unsigned long)
138 child->thread.per_event.cause << (BITS_PER_LONG - 16);
139 else if (addr == (addr_t) &dummy->address)
140 /* Address of the last PER trap */
141 return child->thread.per_event.address;
142 else if (addr == (addr_t) &dummy->access_id)
143 /* Access id of the last PER trap */
144 return (unsigned long)
145 child->thread.per_event.paid << (BITS_PER_LONG - 8);
146 return 0;
147}
148
142/* 149/*
143 * Read the word at offset addr from the user area of a process. The 150 * Read the word at offset addr from the user area of a process. The
144 * trouble here is that the information is littered over different 151 * trouble here is that the information is littered over different
@@ -204,10 +211,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
204 211
205 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 212 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
206 /* 213 /*
207 * per_info is found in the thread structure 214 * Handle access to the per_info structure.
208 */ 215 */
209 offset = addr - (addr_t) &dummy->regs.per_info; 216 addr -= (addr_t) &dummy->regs.per_info;
210 tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); 217 tmp = __peek_user_per(child, addr);
211 218
212 } else 219 } else
213 tmp = 0; 220 tmp = 0;
@@ -237,6 +244,35 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
237 return put_user(tmp, (addr_t __user *) data); 244 return put_user(tmp, (addr_t __user *) data);
238} 245}
239 246
247static inline void __poke_user_per(struct task_struct *child,
248 addr_t addr, addr_t data)
249{
250 struct per_struct_kernel *dummy = NULL;
251
252 /*
253 * There are only three fields in the per_info struct that the
254 * debugger user can write to.
255 * 1) cr9: the debugger wants to set a new PER event mask
256 * 2) starting_addr: the debugger wants to set a new starting
257 * address to use with the PER event mask.
258 * 3) ending_addr: the debugger wants to set a new ending
259 * address to use with the PER event mask.
260 * The user specified PER event mask and the start and end
261 * addresses are used only if single stepping is not in effect.
262 * Writes to any other field in per_info are ignored.
263 */
264 if (addr == (addr_t) &dummy->cr9)
265 /* PER event mask of the user specified per set. */
266 child->thread.per_user.control =
267 data & (PER_EVENT_MASK | PER_CONTROL_MASK);
268 else if (addr == (addr_t) &dummy->starting_addr)
269 /* Starting address of the user specified per set. */
270 child->thread.per_user.start = data;
271 else if (addr == (addr_t) &dummy->ending_addr)
272 /* Ending address of the user specified per set. */
273 child->thread.per_user.end = data;
274}
275
240/* 276/*
241 * Write a word to the user area of a process at location addr. This 277 * Write a word to the user area of a process at location addr. This
242 * operation does have an additional problem compared to peek_user. 278 * operation does have an additional problem compared to peek_user.
@@ -311,19 +347,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
311 347
312 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 348 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
313 /* 349 /*
314 * per_info is found in the thread structure 350 * Handle access to the per_info structure.
315 */ 351 */
316 offset = addr - (addr_t) &dummy->regs.per_info; 352 addr -= (addr_t) &dummy->regs.per_info;
317 *(addr_t *)((addr_t) &child->thread.per_info + offset) = data; 353 __poke_user_per(child, addr, data);
318 354
319 } 355 }
320 356
321 FixPerRegisters(child);
322 return 0; 357 return 0;
323} 358}
324 359
325static int 360static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
326poke_user(struct task_struct *child, addr_t addr, addr_t data)
327{ 361{
328 addr_t mask; 362 addr_t mask;
329 363
@@ -343,7 +377,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
343 return __poke_user(child, addr, data); 377 return __poke_user(child, addr, data);
344} 378}
345 379
346long arch_ptrace(struct task_struct *child, long request, long addr, long data) 380long arch_ptrace(struct task_struct *child, long request,
381 unsigned long addr, unsigned long data)
347{ 382{
348 ptrace_area parea; 383 ptrace_area parea;
349 int copied, ret; 384 int copied, ret;
@@ -409,12 +444,53 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
409 */ 444 */
410 445
411/* 446/*
447 * Same as __peek_user_per but for a 31 bit program.
448 */
449static inline __u32 __peek_user_per_compat(struct task_struct *child,
450 addr_t addr)
451{
452 struct compat_per_struct_kernel *dummy32 = NULL;
453
454 if (addr == (addr_t) &dummy32->cr9)
455 /* Control bits of the active per set. */
456 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
457 PER_EVENT_IFETCH : child->thread.per_user.control;
458 else if (addr == (addr_t) &dummy32->cr10)
459 /* Start address of the active per set. */
460 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
461 0 : child->thread.per_user.start;
462 else if (addr == (addr_t) &dummy32->cr11)
463 /* End address of the active per set. */
464 return test_thread_flag(TIF_SINGLE_STEP) ?
465 PSW32_ADDR_INSN : child->thread.per_user.end;
466 else if (addr == (addr_t) &dummy32->bits)
467 /* Single-step bit. */
468 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
469 0x80000000 : 0;
470 else if (addr == (addr_t) &dummy32->starting_addr)
471 /* Start address of the user specified per set. */
472 return (__u32) child->thread.per_user.start;
473 else if (addr == (addr_t) &dummy32->ending_addr)
474 /* End address of the user specified per set. */
475 return (__u32) child->thread.per_user.end;
476 else if (addr == (addr_t) &dummy32->perc_atmid)
477 /* PER code, ATMID and AI of the last PER trap */
478 return (__u32) child->thread.per_event.cause << 16;
479 else if (addr == (addr_t) &dummy32->address)
480 /* Address of the last PER trap */
481 return (__u32) child->thread.per_event.address;
482 else if (addr == (addr_t) &dummy32->access_id)
483 /* Access id of the last PER trap */
484 return (__u32) child->thread.per_event.paid << 24;
485 return 0;
486}
487
488/*
412 * Same as peek_user but for a 31 bit program. 489 * Same as peek_user but for a 31 bit program.
413 */ 490 */
414static u32 __peek_user_compat(struct task_struct *child, addr_t addr) 491static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
415{ 492{
416 struct user32 *dummy32 = NULL; 493 struct compat_user *dummy32 = NULL;
417 per_struct32 *dummy_per32 = NULL;
418 addr_t offset; 494 addr_t offset;
419 __u32 tmp; 495 __u32 tmp;
420 496
@@ -464,19 +540,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
464 540
465 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 541 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
466 /* 542 /*
467 * per_info is found in the thread structure 543 * Handle access to the per_info structure.
468 */ 544 */
469 offset = addr - (addr_t) &dummy32->regs.per_info; 545 addr -= (addr_t) &dummy32->regs.per_info;
470 /* This is magic. See per_struct and per_struct32. */ 546 tmp = __peek_user_per_compat(child, addr);
471 if ((offset >= (addr_t) &dummy_per32->control_regs &&
472 offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
473 (offset >= (addr_t) &dummy_per32->starting_addr &&
474 offset <= (addr_t) &dummy_per32->ending_addr) ||
475 offset == (addr_t) &dummy_per32->lowcore.words.address)
476 offset = offset*2 + 4;
477 else
478 offset = offset*2;
479 tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
480 547
481 } else 548 } else
482 tmp = 0; 549 tmp = 0;
@@ -497,13 +564,32 @@ static int peek_user_compat(struct task_struct *child,
497} 564}
498 565
499/* 566/*
567 * Same as __poke_user_per but for a 31 bit program.
568 */
569static inline void __poke_user_per_compat(struct task_struct *child,
570 addr_t addr, __u32 data)
571{
572 struct compat_per_struct_kernel *dummy32 = NULL;
573
574 if (addr == (addr_t) &dummy32->cr9)
575 /* PER event mask of the user specified per set. */
576 child->thread.per_user.control =
577 data & (PER_EVENT_MASK | PER_CONTROL_MASK);
578 else if (addr == (addr_t) &dummy32->starting_addr)
579 /* Starting address of the user specified per set. */
580 child->thread.per_user.start = data;
581 else if (addr == (addr_t) &dummy32->ending_addr)
582 /* Ending address of the user specified per set. */
583 child->thread.per_user.end = data;
584}
585
586/*
500 * Same as poke_user but for a 31 bit program. 587 * Same as poke_user but for a 31 bit program.
501 */ 588 */
502static int __poke_user_compat(struct task_struct *child, 589static int __poke_user_compat(struct task_struct *child,
503 addr_t addr, addr_t data) 590 addr_t addr, addr_t data)
504{ 591{
505 struct user32 *dummy32 = NULL; 592 struct compat_user *dummy32 = NULL;
506 per_struct32 *dummy_per32 = NULL;
507 __u32 tmp = (__u32) data; 593 __u32 tmp = (__u32) data;
508 addr_t offset; 594 addr_t offset;
509 595
@@ -560,37 +646,20 @@ static int __poke_user_compat(struct task_struct *child,
560 646
561 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 647 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
562 /* 648 /*
563 * per_info is found in the thread structure. 649 * Handle access to the per_info structure.
564 */
565 offset = addr - (addr_t) &dummy32->regs.per_info;
566 /*
567 * This is magic. See per_struct and per_struct32.
568 * By incident the offsets in per_struct are exactly
569 * twice the offsets in per_struct32 for all fields.
570 * The 8 byte fields need special handling though,
571 * because the second half (bytes 4-7) is needed and
572 * not the first half.
573 */ 650 */
574 if ((offset >= (addr_t) &dummy_per32->control_regs && 651 addr -= (addr_t) &dummy32->regs.per_info;
575 offset < (addr_t) (&dummy_per32->control_regs + 1)) || 652 __poke_user_per_compat(child, addr, data);
576 (offset >= (addr_t) &dummy_per32->starting_addr &&
577 offset <= (addr_t) &dummy_per32->ending_addr) ||
578 offset == (addr_t) &dummy_per32->lowcore.words.address)
579 offset = offset*2 + 4;
580 else
581 offset = offset*2;
582 *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
583
584 } 653 }
585 654
586 FixPerRegisters(child);
587 return 0; 655 return 0;
588} 656}
589 657
590static int poke_user_compat(struct task_struct *child, 658static int poke_user_compat(struct task_struct *child,
591 addr_t addr, addr_t data) 659 addr_t addr, addr_t data)
592{ 660{
593 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3) 661 if (!is_compat_task() || (addr & 3) ||
662 addr > sizeof(struct compat_user) - 3)
594 return -EIO; 663 return -EIO;
595 664
596 return __poke_user_compat(child, addr, data); 665 return __poke_user_compat(child, addr, data);
@@ -601,7 +670,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
601{ 670{
602 unsigned long addr = caddr; 671 unsigned long addr = caddr;
603 unsigned long data = cdata; 672 unsigned long data = cdata;
604 ptrace_area_emu31 parea; 673 compat_ptrace_area parea;
605 int copied, ret; 674 int copied, ret;
606 675
607 switch (request) { 676 switch (request) {
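Throughout these ptrace helpers, dummy (and dummy32) is declared as a NULL pointer and never dereferenced: &dummy->field is evaluated only to obtain the field's byte offset, so the chain of address comparisons is really an offset dispatch over the user area. A minimal userspace sketch of the idiom follows; struct per_area and its fields are illustrative stand-ins, not the kernel's layout, and strictly speaking ISO C leaves this construct undefined even though it is a long-standing kernel pattern equivalent to offsetof().

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-in for per_struct_kernel; not the kernel's layout. */
struct per_area {
	unsigned long cr9;
	unsigned long starting_addr;
	unsigned long ending_addr;
};

int main(void)
{
	struct per_area *dummy = NULL;

	/* &dummy->field is never dereferenced; it only yields the offset. */
	printf("cr9           at offset %zu (offsetof: %zu)\n",
	       (size_t) (uintptr_t) &dummy->cr9,
	       offsetof(struct per_area, cr9));
	printf("starting_addr at offset %zu (offsetof: %zu)\n",
	       (size_t) (uintptr_t) &dummy->starting_addr,
	       offsetof(struct per_area, starting_addr));
	return 0;
}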
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 5e73dee63baa..9eabbc90795d 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -78,7 +78,7 @@ do_reipl_asm: basr %r13,0
78 * in the ESA psw. 78 * in the ESA psw.
79 * Bit 31 of the addresses has to be 0 for the 79 * Bit 31 of the addresses has to be 0 for the
80 * 31bit lpswe instruction a fact they appear to have 80 * 31bit lpswe instruction a fact they appear to have
81 * ommited from the pop. 81 * omitted from the pop.
82 */ 82 */
83.Lnewpsw: .quad 0x0000000080000000 83.Lnewpsw: .quad 0x0000000080000000
84 .quad .Lpg1 84 .quad .Lpg1
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
deleted file mode 100644
index 9ce641b5291f..000000000000
--- a/arch/s390/kernel/s390_ext.c
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * arch/s390/kernel/s390_ext.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/ftrace.h>
14#include <linux/errno.h>
15#include <linux/kernel_stat.h>
16#include <linux/interrupt.h>
17#include <asm/cputime.h>
18#include <asm/lowcore.h>
19#include <asm/s390_ext.h>
20#include <asm/irq_regs.h>
21#include <asm/irq.h>
22#include "entry.h"
23
24/*
25 * ext_int_hash[index] is the start of the list for all external interrupts
26 * that hash to this index. With the current set of external interrupts
27 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
28 * iucv and 0x2603 pfault) this is always the first element.
29 */
30ext_int_info_t *ext_int_hash[256] = { NULL, };
31
32static inline int ext_hash(__u16 code)
33{
34 return (code + (code >> 9)) & 0xff;
35}
36
37int register_external_interrupt(__u16 code, ext_int_handler_t handler)
38{
39 ext_int_info_t *p;
40 int index;
41
42 p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
43 if (p == NULL)
44 return -ENOMEM;
45 p->code = code;
46 p->handler = handler;
47 index = ext_hash(code);
48 p->next = ext_int_hash[index];
49 ext_int_hash[index] = p;
50 return 0;
51}
52
53int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
54 ext_int_info_t *p)
55{
56 int index;
57
58 if (p == NULL)
59 return -EINVAL;
60 p->code = code;
61 p->handler = handler;
62 index = ext_hash(code);
63 p->next = ext_int_hash[index];
64 ext_int_hash[index] = p;
65 return 0;
66}
67
68int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
69{
70 ext_int_info_t *p, *q;
71 int index;
72
73 index = ext_hash(code);
74 q = NULL;
75 p = ext_int_hash[index];
76 while (p != NULL) {
77 if (p->code == code && p->handler == handler)
78 break;
79 q = p;
80 p = p->next;
81 }
82 if (p == NULL)
83 return -ENOENT;
84 if (q != NULL)
85 q->next = p->next;
86 else
87 ext_int_hash[index] = p->next;
88 kfree(p);
89 return 0;
90}
91
92int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
93 ext_int_info_t *p)
94{
95 ext_int_info_t *q;
96 int index;
97
98 if (p == NULL || p->code != code || p->handler != handler)
99 return -EINVAL;
100 index = ext_hash(code);
101 q = ext_int_hash[index];
102 if (p != q) {
103 while (q != NULL) {
104 if (q->next == p)
105 break;
106 q = q->next;
107 }
108 if (q == NULL)
109 return -ENOENT;
110 q->next = p->next;
111 } else
112 ext_int_hash[index] = p->next;
113 return 0;
114}
115
116void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
117{
118 ext_int_info_t *p;
119 int index;
120 struct pt_regs *old_regs;
121
122 old_regs = set_irq_regs(regs);
123 s390_idle_check(regs, S390_lowcore.int_clock,
124 S390_lowcore.async_enter_timer);
125 irq_enter();
126 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
127 /* Serve timer interrupts first. */
128 clock_comparator_work();
129 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
130 if (code != 0x1004)
131 __get_cpu_var(s390_idle).nohz_delay = 1;
132 index = ext_hash(code);
133 for (p = ext_int_hash[index]; p; p = p->next) {
134 if (likely(p->code == code))
135 p->handler(code);
136 }
137 irq_exit();
138 set_irq_regs(old_regs);
139}
140
141EXPORT_SYMBOL(register_external_interrupt);
142EXPORT_SYMBOL(unregister_external_interrupt);
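The deleted s390_ext.c depended on ext_hash() spreading the small set of architected external-interrupt codes across its 256-entry table without collisions, which is what lets the header comment claim the handler is always the first list element. A quick userspace check of that claim for the five codes the comment names:

#include <stdio.h>

static int ext_hash(unsigned short code)
{
	return (code + (code >> 9)) & 0xff;
}

int main(void)
{
	/* external call, cpu timer, hwc console, iucv, pfault */
	unsigned short codes[] = { 0x1202, 0x1004, 0x2401, 0x4000, 0x2603 };
	int i;

	for (i = 0; i < 5; i++)
		printf("0x%04x -> index 0x%02x\n", codes[i], ext_hash(codes[i]));
	/* prints five distinct indexes: 0x0b, 0x0c, 0x13, 0x20, 0x16 */
	return 0;
}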
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c8e8e1354e1d..0c35dee10b00 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -102,16 +102,6 @@ EXPORT_SYMBOL(lowcore_ptr);
102 102
103#include <asm/setup.h> 103#include <asm/setup.h>
104 104
105static struct resource code_resource = {
106 .name = "Kernel code",
107 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
108};
109
110static struct resource data_resource = {
111 .name = "Kernel data",
112 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
113};
114
115/* 105/*
116 * condev= and conmode= setup parameter. 106 * condev= and conmode= setup parameter.
117 */ 107 */
@@ -315,8 +305,7 @@ static int set_amode_and_uaccess(unsigned long user_amode,
315 */ 305 */
316static int __init early_parse_switch_amode(char *p) 306static int __init early_parse_switch_amode(char *p)
317{ 307{
318 if (user_mode != SECONDARY_SPACE_MODE) 308 user_mode = PRIMARY_SPACE_MODE;
319 user_mode = PRIMARY_SPACE_MODE;
320 return 0; 309 return 0;
321} 310}
322early_param("switch_amode", early_parse_switch_amode); 311early_param("switch_amode", early_parse_switch_amode);
@@ -325,10 +314,6 @@ static int __init early_parse_user_mode(char *p)
325{ 314{
326 if (p && strcmp(p, "primary") == 0) 315 if (p && strcmp(p, "primary") == 0)
327 user_mode = PRIMARY_SPACE_MODE; 316 user_mode = PRIMARY_SPACE_MODE;
328#ifdef CONFIG_S390_EXEC_PROTECT
329 else if (p && strcmp(p, "secondary") == 0)
330 user_mode = SECONDARY_SPACE_MODE;
331#endif
332 else if (!p || strcmp(p, "home") == 0) 317 else if (!p || strcmp(p, "home") == 0)
333 user_mode = HOME_SPACE_MODE; 318 user_mode = HOME_SPACE_MODE;
334 else 319 else
@@ -337,31 +322,9 @@ static int __init early_parse_user_mode(char *p)
337} 322}
338early_param("user_mode", early_parse_user_mode); 323early_param("user_mode", early_parse_user_mode);
339 324
340#ifdef CONFIG_S390_EXEC_PROTECT
341/*
342 * Enable execute protection?
343 */
344static int __init early_parse_noexec(char *p)
345{
346 if (!strncmp(p, "off", 3))
347 return 0;
348 user_mode = SECONDARY_SPACE_MODE;
349 return 0;
350}
351early_param("noexec", early_parse_noexec);
352#endif /* CONFIG_S390_EXEC_PROTECT */
353
354static void setup_addressing_mode(void) 325static void setup_addressing_mode(void)
355{ 326{
356 if (user_mode == SECONDARY_SPACE_MODE) { 327 if (user_mode == PRIMARY_SPACE_MODE) {
357 if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
358 PSW32_ASC_SECONDARY))
359 pr_info("Execute protection active, "
360 "mvcos available\n");
361 else
362 pr_info("Execute protection active, "
363 "mvcos not available\n");
364 } else if (user_mode == PRIMARY_SPACE_MODE) {
365 if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) 328 if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
366 pr_info("Address spaces switched, " 329 pr_info("Address spaces switched, "
367 "mvcos available\n"); 330 "mvcos available\n");
@@ -409,6 +372,9 @@ setup_lowcore(void)
409 lc->current_task = (unsigned long) init_thread_union.thread_info.task; 372 lc->current_task = (unsigned long) init_thread_union.thread_info.task;
410 lc->thread_info = (unsigned long) &init_thread_union; 373 lc->thread_info = (unsigned long) &init_thread_union;
411 lc->machine_flags = S390_lowcore.machine_flags; 374 lc->machine_flags = S390_lowcore.machine_flags;
375 lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
376 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
377 MAX_FACILITY_BIT/8);
412#ifndef CONFIG_64BIT 378#ifndef CONFIG_64BIT
413 if (MACHINE_HAS_IEEE) { 379 if (MACHINE_HAS_IEEE) {
414 lc->extended_save_area_addr = (__u32) 380 lc->extended_save_area_addr = (__u32)
@@ -433,21 +399,43 @@ setup_lowcore(void)
433 lowcore_ptr[0] = lc; 399 lowcore_ptr[0] = lc;
434} 400}
435 401
436static void __init 402static struct resource code_resource = {
437setup_resources(void) 403 .name = "Kernel code",
404 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
405};
406
407static struct resource data_resource = {
408 .name = "Kernel data",
409 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
410};
411
412static struct resource bss_resource = {
413 .name = "Kernel bss",
414 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
415};
416
417static struct resource __initdata *standard_resources[] = {
418 &code_resource,
419 &data_resource,
420 &bss_resource,
421};
422
423static void __init setup_resources(void)
438{ 424{
439 struct resource *res, *sub_res; 425 struct resource *res, *std_res, *sub_res;
440 int i; 426 int i, j;
441 427
442 code_resource.start = (unsigned long) &_text; 428 code_resource.start = (unsigned long) &_text;
443 code_resource.end = (unsigned long) &_etext - 1; 429 code_resource.end = (unsigned long) &_etext - 1;
444 data_resource.start = (unsigned long) &_etext; 430 data_resource.start = (unsigned long) &_etext;
445 data_resource.end = (unsigned long) &_edata - 1; 431 data_resource.end = (unsigned long) &_edata - 1;
432 bss_resource.start = (unsigned long) &__bss_start;
433 bss_resource.end = (unsigned long) &__bss_stop - 1;
446 434
447 for (i = 0; i < MEMORY_CHUNKS; i++) { 435 for (i = 0; i < MEMORY_CHUNKS; i++) {
448 if (!memory_chunk[i].size) 436 if (!memory_chunk[i].size)
449 continue; 437 continue;
450 res = alloc_bootmem_low(sizeof(struct resource)); 438 res = alloc_bootmem_low(sizeof(*res));
451 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 439 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
452 switch (memory_chunk[i].type) { 440 switch (memory_chunk[i].type) {
453 case CHUNK_READ_WRITE: 441 case CHUNK_READ_WRITE:
@@ -461,40 +449,24 @@ setup_resources(void)
461 res->name = "reserved"; 449 res->name = "reserved";
462 } 450 }
463 res->start = memory_chunk[i].addr; 451 res->start = memory_chunk[i].addr;
464 res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; 452 res->end = res->start + memory_chunk[i].size - 1;
465 request_resource(&iomem_resource, res); 453 request_resource(&iomem_resource, res);
466 454
467 if (code_resource.start >= res->start && 455 for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
468 code_resource.start <= res->end && 456 std_res = standard_resources[j];
469 code_resource.end > res->end) { 457 if (std_res->start < res->start ||
470 sub_res = alloc_bootmem_low(sizeof(struct resource)); 458 std_res->start > res->end)
471 memcpy(sub_res, &code_resource, 459 continue;
472 sizeof(struct resource)); 460 if (std_res->end > res->end) {
473 sub_res->end = res->end; 461 sub_res = alloc_bootmem_low(sizeof(*sub_res));
474 code_resource.start = res->end + 1; 462 *sub_res = *std_res;
475 request_resource(res, sub_res); 463 sub_res->end = res->end;
476 } 464 std_res->start = res->end + 1;
477 465 request_resource(res, sub_res);
478 if (code_resource.start >= res->start && 466 } else {
479 code_resource.start <= res->end && 467 request_resource(res, std_res);
480 code_resource.end <= res->end) 468 }
481 request_resource(res, &code_resource);
482
483 if (data_resource.start >= res->start &&
484 data_resource.start <= res->end &&
485 data_resource.end > res->end) {
486 sub_res = alloc_bootmem_low(sizeof(struct resource));
487 memcpy(sub_res, &data_resource,
488 sizeof(struct resource));
489 sub_res->end = res->end;
490 data_resource.start = res->end + 1;
491 request_resource(res, sub_res);
492 } 469 }
493
494 if (data_resource.start >= res->start &&
495 data_resource.start <= res->end &&
496 data_resource.end <= res->end)
497 request_resource(res, &data_resource);
498 } 470 }
499} 471}
500 472
@@ -627,7 +599,8 @@ setup_memory(void)
627 add_active_range(0, start_chunk, end_chunk); 599 add_active_range(0, start_chunk, end_chunk);
628 pfn = max(start_chunk, start_pfn); 600 pfn = max(start_chunk, start_pfn);
629 for (; pfn < end_chunk; pfn++) 601 for (; pfn < end_chunk; pfn++)
630 page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); 602 page_set_storage_key(PFN_PHYS(pfn),
603 PAGE_DEFAULT_KEY, 0);
631 } 604 }
632 605
633 psw_set_key(PAGE_DEFAULT_KEY); 606 psw_set_key(PAGE_DEFAULT_KEY);
@@ -674,12 +647,9 @@ setup_memory(void)
674static void __init setup_hwcaps(void) 647static void __init setup_hwcaps(void)
675{ 648{
676 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; 649 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
677 unsigned long long facility_list_extended;
678 unsigned int facility_list;
679 struct cpuid cpu_id; 650 struct cpuid cpu_id;
680 int i; 651 int i;
681 652
682 facility_list = stfl();
683 /* 653 /*
684 * The store facility list bits numbers as found in the principles 654 * The store facility list bits numbers as found in the principles
685 * of operation are numbered with bit 1UL<<31 as number 0 to 655 * of operation are numbered with bit 1UL<<31 as number 0 to
@@ -699,11 +669,10 @@ static void __init setup_hwcaps(void)
699 * HWCAP_S390_ETF3EH bit 8 (22 && 30). 669 * HWCAP_S390_ETF3EH bit 8 (22 && 30).
700 */ 670 */
701 for (i = 0; i < 6; i++) 671 for (i = 0; i < 6; i++)
702 if (facility_list & (1UL << (31 - stfl_bits[i]))) 672 if (test_facility(stfl_bits[i]))
703 elf_hwcap |= 1UL << i; 673 elf_hwcap |= 1UL << i;
704 674
705 if ((facility_list & (1UL << (31 - 22))) 675 if (test_facility(22) && test_facility(30))
706 && (facility_list & (1UL << (31 - 30))))
707 elf_hwcap |= HWCAP_S390_ETF3EH; 676 elf_hwcap |= HWCAP_S390_ETF3EH;
708 677
709 /* 678 /*
@@ -712,19 +681,15 @@ static void __init setup_hwcaps(void)
712 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information 681 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
713 * as stored by stfl, bits 32-xxx contain additional facilities. 682 * as stored by stfl, bits 32-xxx contain additional facilities.
714 * How many facility words are stored depends on the number of 683 * How many facility words are stored depends on the number of
715 * doublewords passed to the instruction. The additional facilites 684 * doublewords passed to the instruction. The additional facilities
716 * are: 685 * are:
717 * Bit 42: decimal floating point facility is installed 686 * Bit 42: decimal floating point facility is installed
718 * Bit 44: perform floating point operation facility is installed 687 * Bit 44: perform floating point operation facility is installed
719 * translated to: 688 * translated to:
720 * HWCAP_S390_DFP bit 6 (42 && 44). 689 * HWCAP_S390_DFP bit 6 (42 && 44).
721 */ 690 */
722 if ((elf_hwcap & (1UL << 2)) && 691 if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
723 __stfle(&facility_list_extended, 1) > 0) { 692 elf_hwcap |= HWCAP_S390_DFP;
724 if ((facility_list_extended & (1ULL << (63 - 42)))
725 && (facility_list_extended & (1ULL << (63 - 44))))
726 elf_hwcap |= HWCAP_S390_DFP;
727 }
728 693
729 /* 694 /*
730 * Huge page support HWCAP_S390_HPAGE is bit 7. 695 * Huge page support HWCAP_S390_HPAGE is bit 7.
@@ -765,6 +730,9 @@ static void __init setup_hwcaps(void)
765 case 0x2098: 730 case 0x2098:
766 strcpy(elf_platform, "z10"); 731 strcpy(elf_platform, "z10");
767 break; 732 break;
733 case 0x2817:
734 strcpy(elf_platform, "z196");
735 break;
768 } 736 }
769} 737}
770 738
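The setup_hwcaps() rework replaces open-coded stfl bit tests with test_facility(), but the bit arithmetic it drops is worth keeping in mind: the Principles of Operation numbers facility bits from the most significant bit down, so facility n of the 32-bit stfl word corresponds to mask 1UL << (31 - n). Below is a hedged stand-in covering only the first facility word; test_facility_word() and the facility word value are invented for illustration (the real test_facility() also consults the stfle list):

#include <stdio.h>

/* Simplified stand-in for test_facility(), bits 0-31 only. */
static int test_facility_word(unsigned int fac_word, int nr)
{
	return (fac_word & (1U << (31 - nr))) != 0;
}

int main(void)
{
	/* hypothetical stfl result with facilities 0, 2 and 22 installed */
	unsigned int fac_word = (1U << 31) | (1U << 29) | (1U << (31 - 22));
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	unsigned long elf_hwcap = 0;
	int i;

	for (i = 0; i < 6; i++)
		if (test_facility_word(fac_word, stfl_bits[i]))
			elf_hwcap |= 1UL << i;
	printf("elf_hwcap = %#lx\n", elf_hwcap);	/* 0x3: bits 0 and 1 */
	return 0;
}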
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index ee7ac8b11782..abbb3c3c7aab 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs)
505 * Let tracing know that we've done the handler setup. 505 * Let tracing know that we've done the handler setup.
506 */ 506 */
507 tracehook_signal_handler(signr, &info, &ka, regs, 507 tracehook_signal_handler(signr, &info, &ka, regs,
508 current->thread.per_info.single_step); 508 test_thread_flag(TIF_SINGLE_STEP));
509 } 509 }
510 return; 510 return;
511 } 511 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8127ebd59c4d..1d55c95f617c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -23,6 +23,7 @@
23#define KMSG_COMPONENT "cpu" 23#define KMSG_COMPONENT "cpu"
24#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 24#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
25 25
26#include <linux/workqueue.h>
26#include <linux/module.h> 27#include <linux/module.h>
27#include <linux/init.h> 28#include <linux/init.h>
28#include <linux/mm.h> 29#include <linux/mm.h>
@@ -43,7 +44,6 @@
43#include <asm/sigp.h> 44#include <asm/sigp.h>
44#include <asm/pgalloc.h> 45#include <asm/pgalloc.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
46#include <asm/s390_ext.h>
47#include <asm/cpcmd.h> 47#include <asm/cpcmd.h>
48#include <asm/tlbflush.h> 48#include <asm/tlbflush.h>
49#include <asm/timer.h> 49#include <asm/timer.h>
@@ -156,18 +156,20 @@ void smp_send_stop(void)
156 * cpus are handled. 156 * cpus are handled.
157 */ 157 */
158 158
159static void do_ext_call_interrupt(__u16 code) 159static void do_ext_call_interrupt(unsigned int ext_int_code,
160 unsigned int param32, unsigned long param64)
160{ 161{
161 unsigned long bits; 162 unsigned long bits;
162 163
164 kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
163 /* 165 /*
164 * handle bit signal external calls 166 * handle bit signal external calls
165 *
166 * For the ec_schedule signal we have to do nothing. All the work
167 * is done automatically when we return from the interrupt.
168 */ 167 */
169 bits = xchg(&S390_lowcore.ext_call_fast, 0); 168 bits = xchg(&S390_lowcore.ext_call_fast, 0);
170 169
170 if (test_bit(ec_schedule, &bits))
171 scheduler_ipi();
172
171 if (test_bit(ec_call_function, &bits)) 173 if (test_bit(ec_call_function, &bits))
172 generic_smp_call_function_interrupt(); 174 generic_smp_call_function_interrupt();
173 175
@@ -260,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit)
260 262
261 memset(&parms.orvals, 0, sizeof(parms.orvals)); 263 memset(&parms.orvals, 0, sizeof(parms.orvals));
262 memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 264 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
263 parms.orvals[cr] = 1 << bit; 265 parms.orvals[cr] = 1UL << bit;
264 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 266 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
265} 267}
266EXPORT_SYMBOL(smp_ctl_set_bit); 268EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -274,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit)
274 276
275 memset(&parms.orvals, 0, sizeof(parms.orvals)); 277 memset(&parms.orvals, 0, sizeof(parms.orvals));
276 memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 278 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
277 parms.andvals[cr] = ~(1L << bit); 279 parms.andvals[cr] = ~(1UL << bit);
278 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 280 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
279} 281}
280EXPORT_SYMBOL(smp_ctl_clear_bit); 282EXPORT_SYMBOL(smp_ctl_clear_bit);
@@ -332,7 +334,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
332 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; 334 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
333 if (!cpu_stopped(logical_cpu)) 335 if (!cpu_stopped(logical_cpu))
334 continue; 336 continue;
335 cpu_set(logical_cpu, cpu_present_map); 337 set_cpu_present(logical_cpu, true);
336 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; 338 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
337 logical_cpu = cpumask_next(logical_cpu, &avail); 339 logical_cpu = cpumask_next(logical_cpu, &avail);
338 if (logical_cpu >= nr_cpu_ids) 340 if (logical_cpu >= nr_cpu_ids)
@@ -364,7 +366,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
364 continue; 366 continue;
365 __cpu_logical_map[logical_cpu] = cpu_id; 367 __cpu_logical_map[logical_cpu] = cpu_id;
366 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; 368 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
367 cpu_set(logical_cpu, cpu_present_map); 369 set_cpu_present(logical_cpu, true);
368 if (cpu >= info->configured) 370 if (cpu >= info->configured)
369 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; 371 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
370 else 372 else
@@ -382,7 +384,7 @@ static int __smp_rescan_cpus(void)
382{ 384{
383 cpumask_t avail; 385 cpumask_t avail;
384 386
385 cpus_xor(avail, cpu_possible_map, cpu_present_map); 387 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
386 if (smp_use_sigp_detection) 388 if (smp_use_sigp_detection)
387 return smp_rescan_cpus_sigp(avail); 389 return smp_rescan_cpus_sigp(avail);
388 else 390 else
@@ -464,29 +466,29 @@ int __cpuinit start_secondary(void *cpuvoid)
464 notify_cpu_starting(smp_processor_id()); 466 notify_cpu_starting(smp_processor_id());
465 /* Mark this cpu as online */ 467 /* Mark this cpu as online */
466 ipi_call_lock(); 468 ipi_call_lock();
467 cpu_set(smp_processor_id(), cpu_online_map); 469 set_cpu_online(smp_processor_id(), true);
468 ipi_call_unlock(); 470 ipi_call_unlock();
469 /* Switch on interrupts */ 471 /* Switch on interrupts */
470 local_irq_enable(); 472 local_irq_enable();
471 /* Print info about this processor */
472 print_cpu_info();
473 /* cpu_idle will call schedule for us */ 473 /* cpu_idle will call schedule for us */
474 cpu_idle(); 474 cpu_idle();
475 return 0; 475 return 0;
476} 476}
477 477
478static void __init smp_create_idle(unsigned int cpu) 478struct create_idle {
479 struct work_struct work;
480 struct task_struct *idle;
481 struct completion done;
482 int cpu;
483};
484
485static void __cpuinit smp_fork_idle(struct work_struct *work)
479{ 486{
480 struct task_struct *p; 487 struct create_idle *c_idle;
481 488
482 /* 489 c_idle = container_of(work, struct create_idle, work);
483 * don't care about the psw and regs settings since we'll never 490 c_idle->idle = fork_idle(c_idle->cpu);
484 * reschedule the forked task. 491 complete(&c_idle->done);
485 */
486 p = fork_idle(cpu);
487 if (IS_ERR(p))
488 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
489 current_set[cpu] = p;
490} 492}
491 493
492static int __cpuinit smp_alloc_lowcore(int cpu) 494static int __cpuinit smp_alloc_lowcore(int cpu)
@@ -550,6 +552,7 @@ static void smp_free_lowcore(int cpu)
550int __cpuinit __cpu_up(unsigned int cpu) 552int __cpuinit __cpu_up(unsigned int cpu)
551{ 553{
552 struct _lowcore *cpu_lowcore; 554 struct _lowcore *cpu_lowcore;
555 struct create_idle c_idle;
553 struct task_struct *idle; 556 struct task_struct *idle;
554 struct stack_frame *sf; 557 struct stack_frame *sf;
555 u32 lowcore; 558 u32 lowcore;
@@ -557,6 +560,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
557 560
558 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 561 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
559 return -EIO; 562 return -EIO;
563 idle = current_set[cpu];
564 if (!idle) {
565 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
566 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
567 c_idle.cpu = cpu;
568 schedule_work(&c_idle.work);
569 wait_for_completion(&c_idle.done);
570 if (IS_ERR(c_idle.idle))
571 return PTR_ERR(c_idle.idle);
572 idle = c_idle.idle;
573 current_set[cpu] = c_idle.idle;
574 }
575 init_idle(idle, cpu);
560 if (smp_alloc_lowcore(cpu)) 576 if (smp_alloc_lowcore(cpu))
561 return -ENOMEM; 577 return -ENOMEM;
562 do { 578 do {
@@ -571,7 +587,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
571 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) 587 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
572 udelay(10); 588 udelay(10);
573 589
574 idle = current_set[cpu];
575 cpu_lowcore = lowcore_ptr[cpu]; 590 cpu_lowcore = lowcore_ptr[cpu];
576 cpu_lowcore->kernel_stack = (unsigned long) 591 cpu_lowcore->kernel_stack = (unsigned long)
577 task_stack_page(idle) + THREAD_SIZE; 592 task_stack_page(idle) + THREAD_SIZE;
@@ -593,6 +608,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
593 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; 608 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
594 cpu_lowcore->machine_flags = S390_lowcore.machine_flags; 609 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
595 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; 610 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
611 memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
612 MAX_FACILITY_BIT/8);
596 eieio(); 613 eieio();
597 614
598 while (sigp(cpu, sigp_restart) == sigp_busy) 615 while (sigp(cpu, sigp_restart) == sigp_busy)
@@ -626,7 +643,7 @@ int __cpu_disable(void)
626 struct ec_creg_mask_parms cr_parms; 643 struct ec_creg_mask_parms cr_parms;
627 int cpu = smp_processor_id(); 644 int cpu = smp_processor_id();
628 645
629 cpu_clear(cpu, cpu_online_map); 646 set_cpu_online(cpu, false);
630 647
631 /* Disable pfault pseudo page faults on this cpu. */ 648 /* Disable pfault pseudo page faults on this cpu. */
632 pfault_fini(); 649 pfault_fini();
@@ -636,8 +653,8 @@ int __cpu_disable(void)
636 653
637 /* disable all external interrupts */ 654 /* disable all external interrupts */
638 cr_parms.orvals[0] = 0; 655 cr_parms.orvals[0] = 0;
639 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | 656 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
640 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); 657 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4);
641 /* disable all I/O interrupts */ 658 /* disable all I/O interrupts */
642 cr_parms.orvals[6] = 0; 659 cr_parms.orvals[6] = 0;
643 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | 660 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
@@ -661,10 +678,9 @@ void __cpu_die(unsigned int cpu)
661 udelay(10); 678 udelay(10);
662 smp_free_lowcore(cpu); 679 smp_free_lowcore(cpu);
663 atomic_dec(&init_mm.context.attach_count); 680 atomic_dec(&init_mm.context.attach_count);
664 pr_info("Processor %d stopped\n", cpu);
665} 681}
666 682
667void cpu_die(void) 683void __noreturn cpu_die(void)
668{ 684{
669 idle_task_exit(); 685 idle_task_exit();
670 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 686 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
@@ -681,14 +697,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
681#endif 697#endif
682 unsigned long async_stack, panic_stack; 698 unsigned long async_stack, panic_stack;
683 struct _lowcore *lowcore; 699 struct _lowcore *lowcore;
684 unsigned int cpu;
685 700
686 smp_detect_cpus(); 701 smp_detect_cpus();
687 702
688 /* request the 0x1201 emergency signal external interrupt */ 703 /* request the 0x1201 emergency signal external interrupt */
689 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 704 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
690 panic("Couldn't request external interrupt 0x1201"); 705 panic("Couldn't request external interrupt 0x1201");
691 print_cpu_info();
692 706
693 /* Reallocate current lowcore, but keep its contents. */ 707 /* Reallocate current lowcore, but keep its contents. */
694 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); 708 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -716,9 +730,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
716 if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) 730 if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
717 BUG(); 731 BUG();
718#endif 732#endif
719 for_each_possible_cpu(cpu)
720 if (cpu != smp_processor_id())
721 smp_create_idle(cpu);
722} 733}
723 734
724void __init smp_prepare_boot_cpu(void) 735void __init smp_prepare_boot_cpu(void)
@@ -726,8 +737,8 @@ void __init smp_prepare_boot_cpu(void)
726 BUG_ON(smp_processor_id() != 0); 737 BUG_ON(smp_processor_id() != 0);
727 738
728 current_thread_info()->cpu = 0; 739 current_thread_info()->cpu = 0;
729 cpu_set(0, cpu_present_map); 740 set_cpu_present(0, true);
730 cpu_set(0, cpu_online_map); 741 set_cpu_online(0, true);
731 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 742 S390_lowcore.percpu_offset = __per_cpu_offset[0];
732 current_set[0] = current; 743 current_set[0] = current;
733 smp_cpu_state[0] = CPU_STATE_CONFIGURED; 744 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
@@ -1004,21 +1015,21 @@ int __ref smp_rescan_cpus(void)
1004 1015
1005 get_online_cpus(); 1016 get_online_cpus();
1006 mutex_lock(&smp_cpu_state_mutex); 1017 mutex_lock(&smp_cpu_state_mutex);
1007 newcpus = cpu_present_map; 1018 cpumask_copy(&newcpus, cpu_present_mask);
1008 rc = __smp_rescan_cpus(); 1019 rc = __smp_rescan_cpus();
1009 if (rc) 1020 if (rc)
1010 goto out; 1021 goto out;
1011 cpus_andnot(newcpus, cpu_present_map, newcpus); 1022 cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
1012 for_each_cpu_mask(cpu, newcpus) { 1023 for_each_cpu(cpu, &newcpus) {
1013 rc = smp_add_present_cpu(cpu); 1024 rc = smp_add_present_cpu(cpu);
1014 if (rc) 1025 if (rc)
1015 cpu_clear(cpu, cpu_present_map); 1026 set_cpu_present(cpu, false);
1016 } 1027 }
1017 rc = 0; 1028 rc = 0;
1018out: 1029out:
1019 mutex_unlock(&smp_cpu_state_mutex); 1030 mutex_unlock(&smp_cpu_state_mutex);
1020 put_online_cpus(); 1031 put_online_cpus();
1021 if (!cpus_empty(newcpus)) 1032 if (!cpumask_empty(&newcpus))
1022 topology_schedule_update(); 1033 topology_schedule_update();
1023 return rc; 1034 return rc;
1024} 1035}
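Two smaller changes in smp.c deserve a note. First, idle tasks are now forked lazily from __cpu_up() through an on-stack work item plus completion, so fork_idle() runs once per brought-up cpu on a workqueue thread instead of at boot for every possible cpu. Second, smp_ctl_set_bit()/smp_ctl_clear_bit() switch from 1 << bit to 1UL << bit: the int-typed shift overflows for bit 31 (and is undefined outright for larger shifts) and then sign-extends when widened into the 64-bit orvals/andvals entries. A small demonstration of that widening, assuming the usual LP64 behavior of common compilers:

#include <stdio.h>

int main(void)
{
	int bit = 31;

	/* int shift, then widened to unsigned long: sign-extends on LP64
	 * (strictly, 1 << 31 is already undefined in ISO C) */
	unsigned long wrong = 1 << bit;
	/* unsigned long shift from the start: just bit 31 */
	unsigned long right = 1UL << bit;

	printf("1   << 31 widened: %#018lx\n", wrong); /* 0xffffffff80000000 */
	printf("1UL << 31        : %#018lx\n", right); /* 0x0000000080000000 */
	return 0;
}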
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
index 469f11b574fa..20530dd2eab1 100644
--- a/arch/s390/kernel/switch_cpu.S
+++ b/arch/s390/kernel/switch_cpu.S
@@ -46,7 +46,9 @@ smp_restart_cpu:
46 ltr %r4,%r4 /* New stack ? */ 46 ltr %r4,%r4 /* New stack ? */
47 jz 1f 47 jz 1f
48 lr %r15,%r4 48 lr %r15,%r4
491: basr %r14,%r2 491: lr %r14,%r2 /* r14: Function to call */
50 lr %r2,%r3 /* r2 : Parameter for function*/
51 basr %r14,%r14 /* Call function */
50 52
51.gprregs_addr: 53.gprregs_addr:
52 .long .gprregs 54 .long .gprregs
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
index d94aacc898cb..5be3f43898f9 100644
--- a/arch/s390/kernel/switch_cpu64.S
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -42,7 +42,9 @@ smp_restart_cpu:
42 ltgr %r4,%r4 /* New stack ? */ 42 ltgr %r4,%r4 /* New stack ? */
43 jz 1f 43 jz 1f
44 lgr %r15,%r4 44 lgr %r15,%r4
451: basr %r14,%r2 451: lgr %r14,%r2 /* r14: Function to call */
46 lgr %r2,%r3 /* r2 : Parameter for function*/
47 basr %r14,%r14 /* Call function */
46 48
47 .section .data,"aw",@progbits 49 .section .data,"aw",@progbits
48.gprregs: 50.gprregs:
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a8fee1b14395..6ee39ef8fe4a 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -343,3 +343,8 @@ SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
343SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper) 343SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
344SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper) 344SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper)
345SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper) 345SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
346SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
347SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper)
348SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
349SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index a0ffc7717ed6..5c9e439bf3f6 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -15,6 +15,7 @@
15#include <asm/ebcdic.h> 15#include <asm/ebcdic.h>
16#include <asm/sysinfo.h> 16#include <asm/sysinfo.h>
17#include <asm/cpcmd.h> 17#include <asm/cpcmd.h>
18#include <asm/topology.h>
18 19
19/* Sigh, math-emu. Don't ask. */ 20/* Sigh, math-emu. Don't ask. */
20#include <asm/sfp-util.h> 21#include <asm/sfp-util.h>
@@ -74,6 +75,44 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
74 "Model Temp. Capacity: %-16.16s %08u\n", 75 "Model Temp. Capacity: %-16.16s %08u\n",
75 info->model_temp_cap, 76 info->model_temp_cap,
76 *(u32 *) info->model_temp_cap_rating); 77 *(u32 *) info->model_temp_cap_rating);
78 if (info->cai) {
79 len += sprintf(page + len,
80 "Capacity Adj. Ind.: %d\n",
81 info->cai);
82 len += sprintf(page + len, "Capacity Ch. Reason: %d\n",
83 info->ccr);
84 }
85 return len;
86}
87
88static int stsi_15_1_x(struct sysinfo_15_1_x *info, char *page, int len)
89{
90 static int max_mnest;
91 int i, rc;
92
93 len += sprintf(page + len, "\n");
94 if (!MACHINE_HAS_TOPOLOGY)
95 return len;
96 if (max_mnest) {
97 stsi(info, 15, 1, max_mnest);
98 } else {
99 for (max_mnest = 6; max_mnest > 1; max_mnest--) {
100 rc = stsi(info, 15, 1, max_mnest);
101 if (rc != -ENOSYS)
102 break;
103 }
104 }
105 len += sprintf(page + len, "CPU Topology HW: ");
106 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
107 len += sprintf(page + len, " %d", info->mag[i]);
108 len += sprintf(page + len, "\n");
109#ifdef CONFIG_SCHED_MC
110 store_topology(info);
111 len += sprintf(page + len, "CPU Topology SW: ");
112 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
113 len += sprintf(page + len, " %d", info->mag[i]);
114 len += sprintf(page + len, "\n");
115#endif
77 return len; 116 return len;
78} 117}
79 118
@@ -87,7 +126,6 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
87 ext = (struct sysinfo_1_2_2_extension *) 126 ext = (struct sysinfo_1_2_2_extension *)
88 ((unsigned long) info + info->acc_offset); 127 ((unsigned long) info + info->acc_offset);
89 128
90 len += sprintf(page + len, "\n");
91 len += sprintf(page + len, "CPUs Total: %d\n", 129 len += sprintf(page + len, "CPUs Total: %d\n",
92 info->cpus_total); 130 info->cpus_total);
93 len += sprintf(page + len, "CPUs Configured: %d\n", 131 len += sprintf(page + len, "CPUs Configured: %d\n",
@@ -217,6 +255,9 @@ static int proc_read_sysinfo(char *page, char **start,
217 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len); 255 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
218 256
219 if (level >= 1) 257 if (level >= 1)
258 len = stsi_15_1_x((struct sysinfo_15_1_x *) info, page, len);
259
260 if (level >= 1)
220 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len); 261 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
221 262
222 if (level >= 2) 263 if (level >= 2)
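The new stsi_15_1_x() probes the deepest supported nesting level exactly once: it walks max_mnest down from 6 until stsi() stops returning -ENOSYS, then caches the answer in a function-local static for every later /proc/sysinfo read. A sketch of that probe-and-cache pattern; mock_stsi() and MOCK_SUPPORTED_LEVEL are invented here so the example runs in userspace:

#include <stdio.h>
#include <errno.h>

#define MOCK_SUPPORTED_LEVEL 3	/* pretend selectors up to 3 work */

static int mock_stsi(int fc, int sel1, int sel2)
{
	return sel2 > MOCK_SUPPORTED_LEVEL ? -ENOSYS : 0;
}

static int topology_level(void)
{
	static int max_mnest;	/* cached across calls, as in stsi_15_1_x() */
	int rc;

	if (!max_mnest) {
		for (max_mnest = 6; max_mnest > 1; max_mnest--) {
			rc = mock_stsi(15, 1, max_mnest);
			if (rc != -ENOSYS)
				break;
		}
	}
	return max_mnest;
}

int main(void)
{
	printf("probed level: %d\n", topology_level());	/* 3 */
	printf("cached level: %d\n", topology_level());	/* 3, no re-probe */
	return 0;
}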
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 2896cac9c14a..dff933065ab6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -15,6 +15,7 @@
15#define KMSG_COMPONENT "time" 15#define KMSG_COMPONENT "time"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17 17
18#include <linux/kernel_stat.h>
18#include <linux/errno.h> 19#include <linux/errno.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/sched.h> 21#include <linux/sched.h>
@@ -37,9 +38,9 @@
37#include <linux/clocksource.h> 38#include <linux/clocksource.h>
38#include <linux/clockchips.h> 39#include <linux/clockchips.h>
39#include <linux/gfp.h> 40#include <linux/gfp.h>
41#include <linux/kprobes.h>
40#include <asm/uaccess.h> 42#include <asm/uaccess.h>
41#include <asm/delay.h> 43#include <asm/delay.h>
42#include <asm/s390_ext.h>
43#include <asm/div64.h> 44#include <asm/div64.h>
44#include <asm/vdso.h> 45#include <asm/vdso.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
@@ -60,7 +61,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
60/* 61/*
61 * Scheduler clock - returns current time in nanosec units. 62 * Scheduler clock - returns current time in nanosec units.
62 */ 63 */
63unsigned long long notrace sched_clock(void) 64unsigned long long notrace __kprobes sched_clock(void)
64{ 65{
65 return (get_clock_monotonic() * 125) >> 9; 66 return (get_clock_monotonic() * 125) >> 9;
66} 67}
@@ -155,8 +156,11 @@ void init_cpu_timer(void)
155 __ctl_set_bit(0, 4); 156 __ctl_set_bit(0, 4);
156} 157}
157 158
158static void clock_comparator_interrupt(__u16 code) 159static void clock_comparator_interrupt(unsigned int ext_int_code,
160 unsigned int param32,
161 unsigned long param64)
159{ 162{
163 kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
160 if (S390_lowcore.clock_comparator == -1ULL) 164 if (S390_lowcore.clock_comparator == -1ULL)
161 set_clock_comparator(S390_lowcore.clock_comparator); 165 set_clock_comparator(S390_lowcore.clock_comparator);
162} 166}
@@ -164,14 +168,14 @@ static void clock_comparator_interrupt(__u16 code)
164static void etr_timing_alert(struct etr_irq_parm *); 168static void etr_timing_alert(struct etr_irq_parm *);
165static void stp_timing_alert(struct stp_irq_parm *); 169static void stp_timing_alert(struct stp_irq_parm *);
166 170
167static void timing_alert_interrupt(__u16 code) 171static void timing_alert_interrupt(unsigned int ext_int_code,
172 unsigned int param32, unsigned long param64)
168{ 173{
169 if (S390_lowcore.ext_params & 0x00c40000) 174 kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
170 etr_timing_alert((struct etr_irq_parm *) 175 if (param32 & 0x00c40000)
171 &S390_lowcore.ext_params); 176 etr_timing_alert((struct etr_irq_parm *) &param32);
172 if (S390_lowcore.ext_params & 0x00038000) 177 if (param32 & 0x00038000)
173 stp_timing_alert((struct stp_irq_parm *) 178 stp_timing_alert((struct stp_irq_parm *) &param32);
174 &S390_lowcore.ext_params);
175} 179}
176 180
177static void etr_reset(void); 181static void etr_reset(void);
@@ -719,7 +723,7 @@ static void clock_sync_cpu(struct clock_sync_data *sync)
719} 723}
720 724
721/* 725/*
722 * Sync the TOD clock using the port refered to by aibp. This port 726 * Sync the TOD clock using the port referred to by aibp. This port
723 * has to be enabled and the other port has to be disabled. The 727 * has to be enabled and the other port has to be disabled. The
724 * last eacr update has to be more than 1.6 seconds in the past. 728 * last eacr update has to be more than 1.6 seconds in the past.
725 */ 729 */
@@ -805,7 +809,7 @@ static int etr_sync_clock_stop(struct etr_aib *aib, int port)
805 etr_sync.etr_port = port; 809 etr_sync.etr_port = port;
806 get_online_cpus(); 810 get_online_cpus();
807 atomic_set(&etr_sync.cpus, num_online_cpus() - 1); 811 atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
808 rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map); 812 rc = stop_machine(etr_sync_clock, &etr_sync, cpu_online_mask);
809 put_online_cpus(); 813 put_online_cpus();
810 return rc; 814 return rc;
811} 815}
@@ -1007,7 +1011,7 @@ static void etr_work_fn(struct work_struct *work)
1007 eacr = etr_handle_update(&aib, eacr); 1011 eacr = etr_handle_update(&aib, eacr);
1008 1012
1009 /* 1013 /*
1010 * Select ports to enable. The prefered synchronization mode is PPS. 1014 * Select ports to enable. The preferred synchronization mode is PPS.
1011 * Whether a port can be enabled depends on a number of things: 1015 * Whether a port can be enabled depends on a number of things:
1012 * 1) The port needs to be online and uptodate. A port is not 1016 * 1) The port needs to be online and uptodate. A port is not
1013 * disabled just because it is not uptodate, but it is only 1017 * disabled just because it is not uptodate, but it is only
@@ -1086,7 +1090,7 @@ static void etr_work_fn(struct work_struct *work)
1086 /* 1090 /*
1087 * Update eacr and try to synchronize the clock. If the update 1091 * Update eacr and try to synchronize the clock. If the update
1088 * of eacr caused a stepping port switch (or if we have to 1092 * of eacr caused a stepping port switch (or if we have to
1089 * assume that a stepping port switch has occured) or the 1093 * assume that a stepping port switch has occurred) or the
1090 * clock syncing failed, reset the sync check control bit 1094 * clock syncing failed, reset the sync check control bit
1091 * and set up a timer to try again after 0.5 seconds 1095 * and set up a timer to try again after 0.5 seconds
1092 */ 1096 */
@@ -1574,7 +1578,7 @@ static void stp_work_fn(struct work_struct *work)
1574 memset(&stp_sync, 0, sizeof(stp_sync)); 1578 memset(&stp_sync, 0, sizeof(stp_sync));
1575 get_online_cpus(); 1579 get_online_cpus();
1576 atomic_set(&stp_sync.cpus, num_online_cpus() - 1); 1580 atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
1577 stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map); 1581 stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
1578 put_online_cpus(); 1582 put_online_cpus();
1579 1583
1580 if (!check_sync_clock()) 1584 if (!check_sync_clock())
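The sched_clock() body left untouched above, (get_clock_monotonic() * 125) >> 9, is an exact unit conversion rather than an approximation: the TOD clock counts in units of 1/4096 microsecond (bit 51 is one microsecond), and 1000/4096 nanoseconds per unit reduces to 125/512, i.e. multiply by 125 and shift right by 9. A quick check:

#include <stdio.h>

int main(void)
{
	/* one microsecond = 4096 TOD clock units (bit 51 of the TOD word) */
	unsigned long long tod_1us = 4096;
	unsigned long long ns = (tod_1us * 125) >> 9;

	printf("%llu TOD units -> %llu ns\n", tod_1us, ns);	/* 1000 ns */
	printf("125/512 == 1000/4096: %d\n",
	       125 * 4096 == 1000 * 512);			/* 1 */
	return 0;
}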
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bcef00766a64..0cd340b72632 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -17,159 +17,140 @@
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/cpuset.h> 18#include <linux/cpuset.h>
19#include <asm/delay.h> 19#include <asm/delay.h>
20#include <asm/s390_ext.h>
21#include <asm/sysinfo.h>
22
23#define CPU_BITS 64
24#define NR_MAG 6
25 20
26#define PTF_HORIZONTAL (0UL) 21#define PTF_HORIZONTAL (0UL)
27#define PTF_VERTICAL (1UL) 22#define PTF_VERTICAL (1UL)
28#define PTF_CHECK (2UL) 23#define PTF_CHECK (2UL)
29 24
30struct tl_cpu { 25struct mask_info {
31 unsigned char reserved0[4]; 26 struct mask_info *next;
32 unsigned char :6;
33 unsigned char pp:2;
34 unsigned char reserved1;
35 unsigned short origin;
36 unsigned long mask[CPU_BITS / BITS_PER_LONG];
37};
38
39struct tl_container {
40 unsigned char reserved[7];
41 unsigned char id;
42};
43
44union tl_entry {
45 unsigned char nl;
46 struct tl_cpu cpu;
47 struct tl_container container;
48};
49
50struct tl_info {
51 unsigned char reserved0[2];
52 unsigned short length;
53 unsigned char mag[NR_MAG];
54 unsigned char reserved1;
55 unsigned char mnest;
56 unsigned char reserved2[4];
57 union tl_entry tle[0];
58};
59
60struct core_info {
61 struct core_info *next;
62 unsigned char id; 27 unsigned char id;
63 cpumask_t mask; 28 cpumask_t mask;
64}; 29};
65 30
66static int topology_enabled; 31static int topology_enabled = 1;
67static void topology_work_fn(struct work_struct *work); 32static void topology_work_fn(struct work_struct *work);
68static struct tl_info *tl_info; 33static struct sysinfo_15_1_x *tl_info;
69static struct core_info core_info;
70static int machine_has_topology;
71static struct timer_list topology_timer; 34static struct timer_list topology_timer;
72static void set_topology_timer(void); 35static void set_topology_timer(void);
73static DECLARE_WORK(topology_work, topology_work_fn); 36static DECLARE_WORK(topology_work, topology_work_fn);
74/* topology_lock protects the core linked list */ 37/* topology_lock protects the core linked list */
75static DEFINE_SPINLOCK(topology_lock); 38static DEFINE_SPINLOCK(topology_lock);
76 39
40static struct mask_info core_info;
77cpumask_t cpu_core_map[NR_CPUS]; 41cpumask_t cpu_core_map[NR_CPUS];
78unsigned char cpu_core_id[NR_CPUS]; 42unsigned char cpu_core_id[NR_CPUS];
79 43
80static cpumask_t cpu_coregroup_map(unsigned int cpu) 44#ifdef CONFIG_SCHED_BOOK
45static struct mask_info book_info;
46cpumask_t cpu_book_map[NR_CPUS];
47unsigned char cpu_book_id[NR_CPUS];
48#endif
49
50static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
81{ 51{
82 struct core_info *core = &core_info;
83 unsigned long flags;
84 cpumask_t mask; 52 cpumask_t mask;
85 53
86 cpus_clear(mask); 54 cpumask_clear(&mask);
87 if (!topology_enabled || !machine_has_topology) 55 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
88 return cpu_possible_map; 56 cpumask_copy(&mask, cpumask_of(cpu));
89 spin_lock_irqsave(&topology_lock, flags); 57 return mask;
90 while (core) { 58 }
91 if (cpu_isset(cpu, core->mask)) { 59 while (info) {
92 mask = core->mask; 60 if (cpumask_test_cpu(cpu, &info->mask)) {
61 mask = info->mask;
93 break; 62 break;
94 } 63 }
95 core = core->next; 64 info = info->next;
96 } 65 }
97 spin_unlock_irqrestore(&topology_lock, flags); 66 if (cpumask_empty(&mask))
98 if (cpus_empty(mask)) 67 cpumask_copy(&mask, cpumask_of(cpu));
99 mask = cpumask_of_cpu(cpu);
100 return mask; 68 return mask;
101} 69}
102 70
103const struct cpumask *cpu_coregroup_mask(unsigned int cpu) 71static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
104{ 72 struct mask_info *book, struct mask_info *core)
105 return &cpu_core_map[cpu];
106}
107
108static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
109{ 73{
110 unsigned int cpu; 74 unsigned int cpu;
111 75
112 for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS); 76 for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
113 cpu < CPU_BITS; 77 cpu < TOPOLOGY_CPU_BITS;
114 cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) 78 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
115 { 79 {
116 unsigned int rcpu, lcpu; 80 unsigned int rcpu, lcpu;
117 81
118 rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; 82 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
119 for_each_present_cpu(lcpu) { 83 for_each_present_cpu(lcpu) {
120 if (cpu_logical_map(lcpu) == rcpu) { 84 if (cpu_logical_map(lcpu) != rcpu)
121 cpu_set(lcpu, core->mask); 85 continue;
122 cpu_core_id[lcpu] = core->id; 86#ifdef CONFIG_SCHED_BOOK
123 smp_cpu_polarization[lcpu] = tl_cpu->pp; 87 cpumask_set_cpu(lcpu, &book->mask);
124 } 88 cpu_book_id[lcpu] = book->id;
89#endif
90 cpumask_set_cpu(lcpu, &core->mask);
91 cpu_core_id[lcpu] = core->id;
92 smp_cpu_polarization[lcpu] = tl_cpu->pp;
125 } 93 }
126 } 94 }
127} 95}
128 96
129static void clear_cores(void) 97static void clear_masks(void)
130{ 98{
131 struct core_info *core = &core_info; 99 struct mask_info *info;
132 100
133 while (core) { 101 info = &core_info;
134 cpus_clear(core->mask); 102 while (info) {
135 core = core->next; 103 cpumask_clear(&info->mask);
104 info = info->next;
136 } 105 }
106#ifdef CONFIG_SCHED_BOOK
107 info = &book_info;
108 while (info) {
109 cpumask_clear(&info->mask);
110 info = info->next;
111 }
112#endif
137} 113}
138 114
139static union tl_entry *next_tle(union tl_entry *tle) 115static union topology_entry *next_tle(union topology_entry *tle)
140{ 116{
141 if (tle->nl) 117 if (!tle->nl)
142 return (union tl_entry *)((struct tl_container *)tle + 1); 118 return (union topology_entry *)((struct topology_cpu *)tle + 1);
143 else 119 return (union topology_entry *)((struct topology_container *)tle + 1);
144 return (union tl_entry *)((struct tl_cpu *)tle + 1);
145} 120}
146 121
147static void tl_to_cores(struct tl_info *info) 122static void tl_to_cores(struct sysinfo_15_1_x *info)
148{ 123{
149 union tl_entry *tle, *end; 124#ifdef CONFIG_SCHED_BOOK
150 struct core_info *core = &core_info; 125 struct mask_info *book = &book_info;
126#else
127 struct mask_info *book = NULL;
128#endif
129 struct mask_info *core = &core_info;
130 union topology_entry *tle, *end;
131
151 132
152 spin_lock_irq(&topology_lock); 133 spin_lock_irq(&topology_lock);
153 clear_cores(); 134 clear_masks();
154 tle = info->tle; 135 tle = info->tle;
155 end = (union tl_entry *)((unsigned long)info + info->length); 136 end = (union topology_entry *)((unsigned long)info + info->length);
156 while (tle < end) { 137 while (tle < end) {
157 switch (tle->nl) { 138 switch (tle->nl) {
158 case 5: 139#ifdef CONFIG_SCHED_BOOK
159 case 4:
160 case 3:
161 case 2: 140 case 2:
141 book = book->next;
142 book->id = tle->container.id;
162 break; 143 break;
144#endif
163 case 1: 145 case 1:
164 core = core->next; 146 core = core->next;
165 core->id = tle->container.id; 147 core->id = tle->container.id;
166 break; 148 break;
167 case 0: 149 case 0:
168 add_cpus_to_core(&tle->cpu, core); 150 add_cpus_to_mask(&tle->cpu, book, core);
169 break; 151 break;
170 default: 152 default:
171 clear_cores(); 153 clear_masks();
172 machine_has_topology = 0;
173 goto out; 154 goto out;
174 } 155 }
175 tle = next_tle(tle); 156 tle = next_tle(tle);
@@ -206,7 +187,7 @@ int topology_set_cpu_management(int fc)
206 int cpu; 187 int cpu;
207 int rc; 188 int rc;
208 189
209 if (!machine_has_topology) 190 if (!MACHINE_HAS_TOPOLOGY)
210 return -EOPNOTSUPP; 191 return -EOPNOTSUPP;
211 if (fc) 192 if (fc)
212 rc = ptf(PTF_VERTICAL); 193 rc = ptf(PTF_VERTICAL);
@@ -221,24 +202,43 @@ int topology_set_cpu_management(int fc)
221 202
222static void update_cpu_core_map(void) 203static void update_cpu_core_map(void)
223{ 204{
205 unsigned long flags;
224 int cpu; 206 int cpu;
225 207
226 for_each_possible_cpu(cpu) 208 spin_lock_irqsave(&topology_lock, flags);
227 cpu_core_map[cpu] = cpu_coregroup_map(cpu); 209 for_each_possible_cpu(cpu) {
210 cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
211#ifdef CONFIG_SCHED_BOOK
212 cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
213#endif
214 }
215 spin_unlock_irqrestore(&topology_lock, flags);
216}
217
218void store_topology(struct sysinfo_15_1_x *info)
219{
220#ifdef CONFIG_SCHED_BOOK
221 int rc;
222
223 rc = stsi(info, 15, 1, 3);
224 if (rc != -ENOSYS)
225 return;
226#endif
227 stsi(info, 15, 1, 2);
228} 228}
229 229
230int arch_update_cpu_topology(void) 230int arch_update_cpu_topology(void)
231{ 231{
232 struct tl_info *info = tl_info; 232 struct sysinfo_15_1_x *info = tl_info;
233 struct sys_device *sysdev; 233 struct sys_device *sysdev;
234 int cpu; 234 int cpu;
235 235
236 if (!machine_has_topology) { 236 if (!MACHINE_HAS_TOPOLOGY) {
237 update_cpu_core_map(); 237 update_cpu_core_map();
238 topology_update_polarization_simple(); 238 topology_update_polarization_simple();
239 return 0; 239 return 0;
240 } 240 }
241 stsi(info, 15, 1, 2); 241 store_topology(info);
242 tl_to_cores(info); 242 tl_to_cores(info);
243 update_cpu_core_map(); 243 update_cpu_core_map();
244 for_each_online_cpu(cpu) { 244 for_each_online_cpu(cpu) {
@@ -275,9 +275,9 @@ static void set_topology_timer(void)
275 275
276static int __init early_parse_topology(char *p) 276static int __init early_parse_topology(char *p)
277{ 277{
278 if (strncmp(p, "on", 2)) 278 if (strncmp(p, "off", 3))
279 return 0; 279 return 0;
280 topology_enabled = 1; 280 topology_enabled = 0;
281 return 0; 281 return 0;
282} 282}
283early_param("topology", early_parse_topology); 283early_param("topology", early_parse_topology);
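[Editor's note] Note the flipped polarity in early_parse_topology(): the handler now only reacts to "topology=off", which implies topology is enabled by default. Because strncmp() returns 0 on a match, the early return takes the non-matching case. A runnable model (the default value of topology_enabled is inferred, not shown in this hunk):

#include <stdio.h>
#include <string.h>

static int topology_enabled = 1;	/* inferred new default: on */

static int early_parse_topology_model(const char *p)
{
	if (strncmp(p, "off", 3))	/* non-zero => not "off" => keep on */
		return 0;
	topology_enabled = 0;
	return 0;
}

int main(void)
{
	early_parse_topology_model("on");
	printf("topology=on  -> enabled=%d\n", topology_enabled);
	early_parse_topology_model("off");
	printf("topology=off -> enabled=%d\n", topology_enabled);
	return 0;
}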
@@ -287,7 +287,7 @@ static int __init init_topology_update(void)
287 int rc; 287 int rc;
288 288
289 rc = 0; 289 rc = 0;
290 if (!machine_has_topology) { 290 if (!MACHINE_HAS_TOPOLOGY) {
291 topology_update_polarization_simple(); 291 topology_update_polarization_simple();
292 goto out; 292 goto out;
293 } 293 }
@@ -299,41 +299,37 @@ out:
299} 299}
300__initcall(init_topology_update); 300__initcall(init_topology_update);
301 301
302static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask,
303 int offset)
304{
305 int i, nr_masks;
306
307 nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
308 for (i = 0; i < info->mnest - offset; i++)
309 nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
310 nr_masks = max(nr_masks, 1);
311 for (i = 0; i < nr_masks; i++) {
312 mask->next = alloc_bootmem(sizeof(struct mask_info));
313 mask = mask->next;
314 }
315}
316
302void __init s390_init_cpu_topology(void) 317void __init s390_init_cpu_topology(void)
303{ 318{
304 unsigned long long facility_bits; 319 struct sysinfo_15_1_x *info;
305 struct tl_info *info;
306 struct core_info *core;
307 int nr_cores;
308 int i; 320 int i;
309 321
310 if (stfle(&facility_bits, 1) <= 0) 322 if (!MACHINE_HAS_TOPOLOGY)
311 return;
312 if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
313 return; 323 return;
314 machine_has_topology = 1;
315
316 tl_info = alloc_bootmem_pages(PAGE_SIZE); 324 tl_info = alloc_bootmem_pages(PAGE_SIZE);
317 info = tl_info; 325 info = tl_info;
318 stsi(info, 15, 1, 2); 326 store_topology(info);
319
320 nr_cores = info->mag[NR_MAG - 2];
321 for (i = 0; i < info->mnest - 2; i++)
322 nr_cores *= info->mag[NR_MAG - 3 - i];
323
324 pr_info("The CPU configuration topology of the machine is:"); 327 pr_info("The CPU configuration topology of the machine is:");
325 for (i = 0; i < NR_MAG; i++) 328 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
326 printk(" %d", info->mag[i]); 329 printk(" %d", info->mag[i]);
327 printk(" / %d\n", info->mnest); 330 printk(" / %d\n", info->mnest);
328 331 alloc_masks(info, &core_info, 2);
329 core = &core_info; 332#ifdef CONFIG_SCHED_BOOK
330 for (i = 0; i < nr_cores; i++) { 333 alloc_masks(info, &book_info, 3);
331 core->next = alloc_bootmem(sizeof(struct core_info)); 334#endif
332 core = core->next;
333 if (!core)
334 goto error;
335 }
336 return;
337error:
338 machine_has_topology = 0;
339} 335}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 5d8f0f3d0250..a65d2e82f61d 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -39,7 +39,6 @@
39#include <asm/atomic.h> 39#include <asm/atomic.h>
40#include <asm/mathemu.h> 40#include <asm/mathemu.h>
41#include <asm/cpcmd.h> 41#include <asm/cpcmd.h>
42#include <asm/s390_ext.h>
43#include <asm/lowcore.h> 42#include <asm/lowcore.h>
44#include <asm/debug.h> 43#include <asm/debug.h>
45#include "entry.h" 44#include "entry.h"
@@ -237,43 +236,6 @@ void show_regs(struct pt_regs *regs)
237 show_last_breaking_event(regs); 236 show_last_breaking_event(regs);
238} 237}
239 238
240/* This is called from fs/proc/array.c */
241void task_show_regs(struct seq_file *m, struct task_struct *task)
242{
243 struct pt_regs *regs;
244
245 regs = task_pt_regs(task);
246 seq_printf(m, "task: %p, ksp: %p\n",
247 task, (void *)task->thread.ksp);
248 seq_printf(m, "User PSW : %p %p\n",
249 (void *) regs->psw.mask, (void *)regs->psw.addr);
250
251 seq_printf(m, "User GPRS: " FOURLONG,
252 regs->gprs[0], regs->gprs[1],
253 regs->gprs[2], regs->gprs[3]);
254 seq_printf(m, " " FOURLONG,
255 regs->gprs[4], regs->gprs[5],
256 regs->gprs[6], regs->gprs[7]);
257 seq_printf(m, " " FOURLONG,
258 regs->gprs[8], regs->gprs[9],
259 regs->gprs[10], regs->gprs[11]);
260 seq_printf(m, " " FOURLONG,
261 regs->gprs[12], regs->gprs[13],
262 regs->gprs[14], regs->gprs[15]);
263 seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
264 task->thread.acrs[0], task->thread.acrs[1],
265 task->thread.acrs[2], task->thread.acrs[3]);
266 seq_printf(m, " %08x %08x %08x %08x\n",
267 task->thread.acrs[4], task->thread.acrs[5],
268 task->thread.acrs[6], task->thread.acrs[7]);
269 seq_printf(m, " %08x %08x %08x %08x\n",
270 task->thread.acrs[8], task->thread.acrs[9],
271 task->thread.acrs[10], task->thread.acrs[11]);
272 seq_printf(m, " %08x %08x %08x %08x\n",
273 task->thread.acrs[12], task->thread.acrs[13],
274 task->thread.acrs[14], task->thread.acrs[15]);
275}
276
277static DEFINE_SPINLOCK(die_lock); 239static DEFINE_SPINLOCK(die_lock);
278 240
279void die(const char * str, struct pt_regs * regs, long err) 241void die(const char * str, struct pt_regs * regs, long err)
@@ -329,27 +291,19 @@ int is_valid_bugaddr(unsigned long addr)
329 return 1; 291 return 1;
330} 292}
331 293
332static void __kprobes inline do_trap(long interruption_code, int signr, 294static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
333 char *str, struct pt_regs *regs, 295 struct pt_regs *regs, siginfo_t *info)
334 siginfo_t *info)
335{ 296{
336 /* 297 if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
337 * We got all needed information from the lowcore and can 298 pgm_int_code, signr) == NOTIFY_STOP)
338 * now safely switch on interrupts.
339 */
340 if (regs->psw.mask & PSW_MASK_PSTATE)
341 local_irq_enable();
342
343 if (notify_die(DIE_TRAP, str, regs, interruption_code,
344 interruption_code, signr) == NOTIFY_STOP)
345 return; 299 return;
346 300
347 if (regs->psw.mask & PSW_MASK_PSTATE) { 301 if (regs->psw.mask & PSW_MASK_PSTATE) {
348 struct task_struct *tsk = current; 302 struct task_struct *tsk = current;
349 303
350 tsk->thread.trap_no = interruption_code & 0xffff; 304 tsk->thread.trap_no = pgm_int_code & 0xffff;
351 force_sig_info(signr, info, tsk); 305 force_sig_info(signr, info, tsk);
352 report_user_fault(regs, interruption_code, signr); 306 report_user_fault(regs, pgm_int_code, signr);
353 } else { 307 } else {
354 const struct exception_table_entry *fixup; 308 const struct exception_table_entry *fixup;
355 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 309 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -361,77 +315,77 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
361 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); 315 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
362 if (btt == BUG_TRAP_TYPE_WARN) 316 if (btt == BUG_TRAP_TYPE_WARN)
363 return; 317 return;
364 die(str, regs, interruption_code); 318 die(str, regs, pgm_int_code);
365 } 319 }
366 } 320 }
367} 321}
368 322
369static inline void __user *get_check_address(struct pt_regs *regs) 323static inline void __user *get_psw_address(struct pt_regs *regs,
324 long pgm_int_code)
370{ 325{
371 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); 326 return (void __user *)
327 ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
372} 328}
373 329
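[Editor's note] get_psw_address() above recovers the address of the faulting instruction: the upper 16 bits of the program-interruption code carry the instruction length, which the hardware has already added to the PSW address by the time the handler runs. A small model with made-up numbers; the PSW_ADDR_INSN value here is an illustrative 31-bit mask:

#include <stdio.h>

#define PSW_ADDR_INSN 0x7fffffffUL	/* 31-bit example mask */

int main(void)
{
	unsigned long psw_addr = 0x10008;	/* points past the insn */
	long pgm_int_code = (4L << 16) | 0x01;	/* ilc 4, int code 0x01 */
	unsigned long fault =
		(psw_addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN;

	printf("faulting instruction at 0x%lx\n", fault);	/* 0x10004 */
	return 0;
}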
374void __kprobes do_single_step(struct pt_regs *regs) 330void __kprobes do_per_trap(struct pt_regs *regs)
375{ 331{
376 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, 332 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
377 SIGTRAP) == NOTIFY_STOP){
378 return; 333 return;
379 }
380 if (tracehook_consider_fatal_signal(current, SIGTRAP)) 334 if (tracehook_consider_fatal_signal(current, SIGTRAP))
381 force_sig(SIGTRAP, current); 335 force_sig(SIGTRAP, current);
382} 336}
383 337
384static void default_trap_handler(struct pt_regs * regs, long interruption_code) 338static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
339 unsigned long trans_exc_code)
385{ 340{
386 if (regs->psw.mask & PSW_MASK_PSTATE) { 341 if (regs->psw.mask & PSW_MASK_PSTATE) {
387 local_irq_enable(); 342 report_user_fault(regs, pgm_int_code, SIGSEGV);
388 report_user_fault(regs, interruption_code, SIGSEGV);
389 do_exit(SIGSEGV); 343 do_exit(SIGSEGV);
390 } else 344 } else
391 die("Unknown program exception", regs, interruption_code); 345 die("Unknown program exception", regs, pgm_int_code);
392} 346}
393 347
394#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 348#define DO_ERROR_INFO(name, signr, sicode, str) \
395static void name(struct pt_regs * regs, long interruption_code) \ 349static void name(struct pt_regs *regs, long pgm_int_code, \
350 unsigned long trans_exc_code) \
396{ \ 351{ \
397 siginfo_t info; \ 352 siginfo_t info; \
398 info.si_signo = signr; \ 353 info.si_signo = signr; \
399 info.si_errno = 0; \ 354 info.si_errno = 0; \
400 info.si_code = sicode; \ 355 info.si_code = sicode; \
401 info.si_addr = siaddr; \ 356 info.si_addr = get_psw_address(regs, pgm_int_code); \
402 do_trap(interruption_code, signr, str, regs, &info); \ 357 do_trap(pgm_int_code, signr, str, regs, &info); \
403} 358}
404 359
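[Editor's note] For reference, a hand expansion of one instance of the reworked macro, DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, "privileged operation"), following the definition above (kernel context, not standalone code):

static void privileged_op(struct pt_regs *regs, long pgm_int_code,
			  unsigned long trans_exc_code)
{
	siginfo_t info;

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_PRVOPC;
	info.si_addr  = get_psw_address(regs, pgm_int_code);
	do_trap(pgm_int_code, SIGILL, "privileged operation", regs, &info);
}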
405DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception, 360DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
406 ILL_ILLADR, get_check_address(regs)) 361 "addressing exception")
407DO_ERROR_INFO(SIGILL, "execute exception", execute_exception, 362DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
408 ILL_ILLOPN, get_check_address(regs)) 363 "execute exception")
409DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception, 364DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
410 FPE_INTDIV, get_check_address(regs)) 365 "fixpoint divide exception")
411DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception, 366DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
412 FPE_INTOVF, get_check_address(regs)) 367 "fixpoint overflow exception")
413DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception, 368DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
414 FPE_FLTOVF, get_check_address(regs)) 369 "HFP overflow exception")
415DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception, 370DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
416 FPE_FLTUND, get_check_address(regs)) 371 "HFP underflow exception")
417DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception, 372DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
418 FPE_FLTRES, get_check_address(regs)) 373 "HFP significance exception")
419DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception, 374DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
420 FPE_FLTDIV, get_check_address(regs)) 375 "HFP divide exception")
421DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception, 376DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
422 FPE_FLTINV, get_check_address(regs)) 377 "HFP square root exception")
423DO_ERROR_INFO(SIGILL, "operand exception", operand_exception, 378DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
424 ILL_ILLOPN, get_check_address(regs)) 379 "operand exception")
425DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op, 380DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
426 ILL_PRVOPC, get_check_address(regs)) 381 "privileged operation")
427DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception, 382DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
428 ILL_ILLOPN, get_check_address(regs)) 383 "special operation exception")
429DO_ERROR_INFO(SIGILL, "translation exception", translation_exception, 384DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
430 ILL_ILLOPN, get_check_address(regs)) 385 "translation exception")
431 386
432static inline void 387static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
433do_fp_trap(struct pt_regs *regs, void __user *location, 388 int fpc, long pgm_int_code)
434 int fpc, long interruption_code)
435{ 389{
436 siginfo_t si; 390 siginfo_t si;
437 391
@@ -453,26 +407,19 @@ do_fp_trap(struct pt_regs *regs, void __user *location,
453 else if (fpc & 0x0800) /* inexact */ 407 else if (fpc & 0x0800) /* inexact */
454 si.si_code = FPE_FLTRES; 408 si.si_code = FPE_FLTRES;
455 } 409 }
456 current->thread.ieee_instruction_pointer = (addr_t) location; 410 do_trap(pgm_int_code, SIGFPE,
457 do_trap(interruption_code, SIGFPE,
458 "floating point exception", regs, &si); 411 "floating point exception", regs, &si);
459} 412}
460 413
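[Editor's note] do_fp_trap() above translates the FPC's data-exception flags into a siginfo code. Only the inexact case (0x0800 -> FPE_FLTRES) is visible in this hunk, so the other bit assignments in this runnable model are assumptions based on the s390 IEEE flag layout:

#include <signal.h>
#include <stdio.h>

static int fpc_to_si_code(int fpc)
{
	if (fpc & 0x8000) return FPE_FLTINV;	/* invalid op (assumed) */
	if (fpc & 0x4000) return FPE_FLTDIV;	/* divide by zero (assumed) */
	if (fpc & 0x2000) return FPE_FLTOVF;	/* overflow (assumed) */
	if (fpc & 0x1000) return FPE_FLTUND;	/* underflow (assumed) */
	if (fpc & 0x0800) return FPE_FLTRES;	/* inexact, as in the hunk */
	return 0;
}

int main(void)
{
	printf("fpc 0x0800 -> FPE_FLTRES? %d\n",
	       fpc_to_si_code(0x0800) == FPE_FLTRES);
	return 0;
}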
461static void illegal_op(struct pt_regs * regs, long interruption_code) 414static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
415 unsigned long trans_exc_code)
462{ 416{
463 siginfo_t info; 417 siginfo_t info;
464 __u8 opcode[6]; 418 __u8 opcode[6];
465 __u16 __user *location; 419 __u16 __user *location;
466 int signal = 0; 420 int signal = 0;
467 421
468 location = get_check_address(regs); 422 location = get_psw_address(regs, pgm_int_code);
469
470 /*
471 * We got all needed information from the lowcore and can
472 * now safely switch on interrupts.
473 */
474 if (regs->psw.mask & PSW_MASK_PSTATE)
475 local_irq_enable();
476 423
477 if (regs->psw.mask & PSW_MASK_PSTATE) { 424 if (regs->psw.mask & PSW_MASK_PSTATE) {
478 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 425 if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
@@ -512,7 +459,7 @@ static void illegal_op(struct pt_regs * regs, long interruption_code)
512 * If we get an illegal op in kernel mode, send it through the 459 * If we get an illegal op in kernel mode, send it through the
513 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 460 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
514 */ 461 */
515 if (notify_die(DIE_BPT, "bpt", regs, interruption_code, 462 if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
516 3, SIGTRAP) != NOTIFY_STOP) 463 3, SIGTRAP) != NOTIFY_STOP)
517 signal = SIGILL; 464 signal = SIGILL;
518 } 465 }
@@ -520,13 +467,13 @@ static void illegal_op(struct pt_regs * regs, long interruption_code)
520#ifdef CONFIG_MATHEMU 467#ifdef CONFIG_MATHEMU
521 if (signal == SIGFPE) 468 if (signal == SIGFPE)
522 do_fp_trap(regs, location, 469 do_fp_trap(regs, location,
523 current->thread.fp_regs.fpc, interruption_code); 470 current->thread.fp_regs.fpc, pgm_int_code);
524 else if (signal == SIGSEGV) { 471 else if (signal == SIGSEGV) {
525 info.si_signo = signal; 472 info.si_signo = signal;
526 info.si_errno = 0; 473 info.si_errno = 0;
527 info.si_code = SEGV_MAPERR; 474 info.si_code = SEGV_MAPERR;
528 info.si_addr = (void __user *) location; 475 info.si_addr = (void __user *) location;
529 do_trap(interruption_code, signal, 476 do_trap(pgm_int_code, signal,
530 "user address fault", regs, &info); 477 "user address fault", regs, &info);
531 } else 478 } else
532#endif 479#endif
@@ -535,28 +482,22 @@ static void illegal_op(struct pt_regs * regs, long interruption_code)
535 info.si_errno = 0; 482 info.si_errno = 0;
536 info.si_code = ILL_ILLOPC; 483 info.si_code = ILL_ILLOPC;
537 info.si_addr = (void __user *) location; 484 info.si_addr = (void __user *) location;
538 do_trap(interruption_code, signal, 485 do_trap(pgm_int_code, signal,
539 "illegal operation", regs, &info); 486 "illegal operation", regs, &info);
540 } 487 }
541} 488}
542 489
543 490
544#ifdef CONFIG_MATHEMU 491#ifdef CONFIG_MATHEMU
545asmlinkage void 492asmlinkage void specification_exception(struct pt_regs *regs,
546specification_exception(struct pt_regs * regs, long interruption_code) 493 long pgm_int_code,
494 unsigned long trans_exc_code)
547{ 495{
548 __u8 opcode[6]; 496 __u8 opcode[6];
549 __u16 __user *location = NULL; 497 __u16 __user *location = NULL;
550 int signal = 0; 498 int signal = 0;
551 499
552 location = (__u16 __user *) get_check_address(regs); 500 location = (__u16 __user *) get_psw_address(regs, pgm_int_code);
553
554 /*
555 * We got all needed information from the lowcore and can
556 * now safely switch on interrupts.
557 */
558 if (regs->psw.mask & PSW_MASK_PSTATE)
559 local_irq_enable();
560 501
561 if (regs->psw.mask & PSW_MASK_PSTATE) { 502 if (regs->psw.mask & PSW_MASK_PSTATE) {
562 get_user(*((__u16 *) opcode), location); 503 get_user(*((__u16 *) opcode), location);
@@ -592,35 +533,29 @@ specification_exception(struct pt_regs * regs, long interruption_code)
592 533
593 if (signal == SIGFPE) 534 if (signal == SIGFPE)
594 do_fp_trap(regs, location, 535 do_fp_trap(regs, location,
595 current->thread.fp_regs.fpc, interruption_code); 536 current->thread.fp_regs.fpc, pgm_int_code);
596 else if (signal) { 537 else if (signal) {
597 siginfo_t info; 538 siginfo_t info;
598 info.si_signo = signal; 539 info.si_signo = signal;
599 info.si_errno = 0; 540 info.si_errno = 0;
600 info.si_code = ILL_ILLOPN; 541 info.si_code = ILL_ILLOPN;
601 info.si_addr = location; 542 info.si_addr = location;
602 do_trap(interruption_code, signal, 543 do_trap(pgm_int_code, signal,
603 "specification exception", regs, &info); 544 "specification exception", regs, &info);
604 } 545 }
605} 546}
606#else 547#else
607DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, 548DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
608 ILL_ILLOPN, get_check_address(regs)); 549 "specification exception");
609#endif 550#endif
610 551
611static void data_exception(struct pt_regs * regs, long interruption_code) 552static void data_exception(struct pt_regs *regs, long pgm_int_code,
553 unsigned long trans_exc_code)
612{ 554{
613 __u16 __user *location; 555 __u16 __user *location;
614 int signal = 0; 556 int signal = 0;
615 557
616 location = get_check_address(regs); 558 location = get_psw_address(regs, pgm_int_code);
617
618 /*
619 * We got all needed information from the lowcore and can
620 * now safely switch on interrupts.
621 */
622 if (regs->psw.mask & PSW_MASK_PSTATE)
623 local_irq_enable();
624 559
625 if (MACHINE_HAS_IEEE) 560 if (MACHINE_HAS_IEEE)
626 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 561 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
@@ -686,19 +621,19 @@ static void data_exception(struct pt_regs * regs, long interruption_code)
686 signal = SIGILL; 621 signal = SIGILL;
687 if (signal == SIGFPE) 622 if (signal == SIGFPE)
688 do_fp_trap(regs, location, 623 do_fp_trap(regs, location,
689 current->thread.fp_regs.fpc, interruption_code); 624 current->thread.fp_regs.fpc, pgm_int_code);
690 else if (signal) { 625 else if (signal) {
691 siginfo_t info; 626 siginfo_t info;
692 info.si_signo = signal; 627 info.si_signo = signal;
693 info.si_errno = 0; 628 info.si_errno = 0;
694 info.si_code = ILL_ILLOPN; 629 info.si_code = ILL_ILLOPN;
695 info.si_addr = location; 630 info.si_addr = location;
696 do_trap(interruption_code, signal, 631 do_trap(pgm_int_code, signal, "data exception", regs, &info);
697 "data exception", regs, &info);
698 } 632 }
699} 633}
700 634
701static void space_switch_exception(struct pt_regs * regs, long int_code) 635static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
636 unsigned long trans_exc_code)
702{ 637{
703 siginfo_t info; 638 siginfo_t info;
704 639
@@ -709,11 +644,11 @@ static void space_switch_exception(struct pt_regs * regs, long int_code)
709 info.si_signo = SIGILL; 644 info.si_signo = SIGILL;
710 info.si_errno = 0; 645 info.si_errno = 0;
711 info.si_code = ILL_PRVOPC; 646 info.si_code = ILL_PRVOPC;
712 info.si_addr = get_check_address(regs); 647 info.si_addr = get_psw_address(regs, pgm_int_code);
713 do_trap(int_code, SIGILL, "space switch event", regs, &info); 648 do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
714} 649}
715 650
716asmlinkage void kernel_stack_overflow(struct pt_regs * regs) 651asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
717{ 652{
718 bust_spinlocks(1); 653 bust_spinlocks(1);
719 printk("Kernel stack overflow.\n"); 654 printk("Kernel stack overflow.\n");
@@ -758,5 +693,6 @@ void __init trap_init(void)
758 pgm_check_table[0x15] = &operand_exception; 693 pgm_check_table[0x15] = &operand_exception;
759 pgm_check_table[0x1C] = &space_switch_exception; 694 pgm_check_table[0x1C] = &space_switch_exception;
760 pgm_check_table[0x1D] = &hfp_sqrt_exception; 695 pgm_check_table[0x1D] = &hfp_sqrt_exception;
761 pfault_irq_init(); 696 /* Enable machine checks early. */
697 local_mcck_enable();
762} 698}
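[Editor's note] The handlers registered at the end of trap_init() all share the new three-argument signature, which lets the low-level entry code dispatch through pgm_check_table by program-interruption code. A minimal userspace model of that dispatch; the table size and default handler are illustrative:

#include <stdio.h>

struct pt_regs;		/* opaque in this model */

typedef void (*pgm_check_handler_t)(struct pt_regs *, long, unsigned long);

static void default_handler(struct pt_regs *regs, long pgm_int_code,
			    unsigned long trans_exc_code)
{
	(void)regs; (void)trans_exc_code;
	printf("program check 0x%02lx\n",
	       (unsigned long)(pgm_int_code & 0xff));
}

static pgm_check_handler_t pgm_check_table[128];

int main(void)
{
	long pgm_int_code = 0x15;	/* operand exception */
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = default_handler;
	pgm_check_table[pgm_int_code & 0x7f](NULL, pgm_int_code, 0);
	return 0;
}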
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 6b83870507d5..d73630b4fe1d 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,11 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
84 */ 84 */
85static void vdso_init_data(struct vdso_data *vd) 85static void vdso_init_data(struct vdso_data *vd)
86{ 86{
87 unsigned int facility_list; 87 vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
88
89 facility_list = stfl();
90 vd->ectg_available =
91 user_mode != HOME_SPACE_MODE && (facility_list & 1);
92} 88}
93 89
94#ifdef CONFIG_64BIT 90#ifdef CONFIG_64BIT
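[Editor's note] Replacing the open-coded stfl() test with test_facility(31) is equivalent because facility bits are numbered from the most significant bit, IBM style: facility 31 is exactly the least significant bit of the first 32-bit word, i.e. the old "facility_list & 1". A runnable model of that numbering:

#include <stdio.h>

/* nr counts from the most significant bit, IBM style */
static int test_facility_model(unsigned int stfl_word, int nr)
{
	return (stfl_word >> (31 - nr)) & 1;
}

int main(void)
{
	unsigned int stfl_word = 0x00000001;	/* only facility 31 set */

	printf("old test (word & 1):      %u\n", stfl_word & 1);
	printf("test_facility_model(31):  %d\n",
	       test_facility_model(stfl_word, 31));
	return 0;
}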
@@ -207,7 +203,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
207 if (!uses_interp) 203 if (!uses_interp)
208 return 0; 204 return 0;
209 205
210 vdso_base = mm->mmap_base;
211#ifdef CONFIG_64BIT 206#ifdef CONFIG_64BIT
212 vdso_pagelist = vdso64_pagelist; 207 vdso_pagelist = vdso64_pagelist;
213 vdso_pages = vdso64_pages; 208 vdso_pages = vdso64_pages;
@@ -237,8 +232,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
237 * fail and end up putting it elsewhere. 232 * fail and end up putting it elsewhere.
238 */ 233 */
239 down_write(&mm->mmap_sem); 234 down_write(&mm->mmap_sem);
240 vdso_base = get_unmapped_area(NULL, vdso_base, 235 vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
241 vdso_pages << PAGE_SHIFT, 0, 0);
242 if (IS_ERR_VALUE(vdso_base)) { 236 if (IS_ERR_VALUE(vdso_base)) {
243 rc = vdso_base; 237 rc = vdso_base;
244 goto out_up; 238 goto out_up;
@@ -343,17 +337,17 @@ static int __init vdso_init(void)
343} 337}
344arch_initcall(vdso_init); 338arch_initcall(vdso_init);
345 339
346int in_gate_area_no_task(unsigned long addr) 340int in_gate_area_no_mm(unsigned long addr)
347{ 341{
348 return 0; 342 return 0;
349} 343}
350 344
351int in_gate_area(struct task_struct *task, unsigned long addr) 345int in_gate_area(struct mm_struct *mm, unsigned long addr)
352{ 346{
353 return 0; 347 return 0;
354} 348}
355 349
356struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 350struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
357{ 351{
358 return NULL; 352 return NULL;
359} 353}
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index d13e8755a8cc..8ad2b34ad151 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -22,6 +22,9 @@ obj-y += vdso32_wrapper.o
22extra-y += vdso32.lds 22extra-y += vdso32.lds
23CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) 23CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
24 24
25# Disable gcov profiling for VDSO code
26GCOV_PROFILE := n
27
25# Force dependency (incbin is bad) 28# Force dependency (incbin is bad)
26$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 29$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
27 30
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
index 9532c4e6a9d2..36aaa25d05da 100644
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -19,9 +19,9 @@
19 .type __kernel_clock_getres,@function 19 .type __kernel_clock_getres,@function
20__kernel_clock_getres: 20__kernel_clock_getres:
21 .cfi_startproc 21 .cfi_startproc
22 chi %r2,CLOCK_REALTIME 22 chi %r2,__CLOCK_REALTIME
23 je 0f 23 je 0f
24 chi %r2,CLOCK_MONOTONIC 24 chi %r2,__CLOCK_MONOTONIC
25 jne 3f 25 jne 3f
260: ltr %r3,%r3 260: ltr %r3,%r3
27 jz 2f /* res == NULL */ 27 jz 2f /* res == NULL */
@@ -34,6 +34,6 @@ __kernel_clock_getres:
343: lhi %r1,__NR_clock_getres /* fallback to svc */ 343: lhi %r1,__NR_clock_getres /* fallback to svc */
35 svc 0 35 svc 0
36 br %r14 36 br %r14
374: .long CLOCK_REALTIME_RES 374: .long __CLOCK_REALTIME_RES
38 .cfi_endproc 38 .cfi_endproc
39 .size __kernel_clock_getres,.-__kernel_clock_getres 39 .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 969643954273..b2224e0b974c 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -21,9 +21,9 @@ __kernel_clock_gettime:
21 .cfi_startproc 21 .cfi_startproc
22 basr %r5,0 22 basr %r5,0
230: al %r5,21f-0b(%r5) /* get &_vdso_data */ 230: al %r5,21f-0b(%r5) /* get &_vdso_data */
24 chi %r2,CLOCK_REALTIME 24 chi %r2,__CLOCK_REALTIME
25 je 10f 25 je 10f
26 chi %r2,CLOCK_MONOTONIC 26 chi %r2,__CLOCK_MONOTONIC
27 jne 19f 27 jne 19f
28 28
29 /* CLOCK_MONOTONIC */ 29 /* CLOCK_MONOTONIC */
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 449352dda9cd..2a8ddfd12a5b 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -22,6 +22,9 @@ obj-y += vdso64_wrapper.o
22extra-y += vdso64.lds 22extra-y += vdso64.lds
23CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) 23CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
24 24
25# Disable gcov profiling for VDSO code
26GCOV_PROFILE := n
27
25# Force dependency (incbin is bad) 28# Force dependency (incbin is bad)
26$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so 29$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
27 30
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 9ce8caafdb4e..176e1f75f9aa 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -19,9 +19,9 @@
19 .type __kernel_clock_getres,@function 19 .type __kernel_clock_getres,@function
20__kernel_clock_getres: 20__kernel_clock_getres:
21 .cfi_startproc 21 .cfi_startproc
22 cghi %r2,CLOCK_REALTIME 22 cghi %r2,__CLOCK_REALTIME
23 je 0f 23 je 0f
24 cghi %r2,CLOCK_MONOTONIC 24 cghi %r2,__CLOCK_MONOTONIC
25 je 0f 25 je 0f
26 cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ 26 cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
27 jne 2f 27 jne 2f
@@ -39,6 +39,6 @@ __kernel_clock_getres:
392: lghi %r1,__NR_clock_getres /* fallback to svc */ 392: lghi %r1,__NR_clock_getres /* fallback to svc */
40 svc 0 40 svc 0
41 br %r14 41 br %r14
423: .quad CLOCK_REALTIME_RES 423: .quad __CLOCK_REALTIME_RES
43 .cfi_endproc 43 .cfi_endproc
44 .size __kernel_clock_getres,.-__kernel_clock_getres 44 .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index f40467884a03..d46c95ed5f19 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -20,11 +20,11 @@
20__kernel_clock_gettime: 20__kernel_clock_gettime:
21 .cfi_startproc 21 .cfi_startproc
22 larl %r5,_vdso_data 22 larl %r5,_vdso_data
23 cghi %r2,CLOCK_REALTIME 23 cghi %r2,__CLOCK_REALTIME
24 je 4f 24 je 4f
25 cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */ 25 cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
26 je 9f 26 je 9f
27 cghi %r2,CLOCK_MONOTONIC 27 cghi %r2,__CLOCK_MONOTONIC
28 jne 12f 28 jne 12f
29 29
30 /* CLOCK_MONOTONIC */ 30 /* CLOCK_MONOTONIC */
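[Editor's note] The __CLOCK_* renames in these vDSO sources keep the assembler constants distinct from the userspace CLOCK_* names. Callers never invoke __kernel_clock_gettime directly; the fast path is reached through the ordinary libc wrappers (link with -lrt on older glibc):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts, res;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* vDSO fast path */
	clock_getres(CLOCK_MONOTONIC, &res);
	printf("now %ld.%09ld s, resolution %ld ns\n",
	       (long)ts.tv_sec, ts.tv_nsec, res.tv_nsec);
	return 0;
}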
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a68ac10213b2..56fe6bc81fee 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -77,7 +77,7 @@ SECTIONS
77 . = ALIGN(PAGE_SIZE); 77 . = ALIGN(PAGE_SIZE);
78 INIT_DATA_SECTION(0x100) 78 INIT_DATA_SECTION(0x100)
79 79
80 PERCPU(PAGE_SIZE) 80 PERCPU_SECTION(0x100)
81 . = ALIGN(PAGE_SIZE); 81 . = ALIGN(PAGE_SIZE);
82 __init_end = .; /* freed after init ends here */ 82 __init_end = .; /* freed after init ends here */
83 83
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3479f1b0d4e0..2d6228f60cd6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -19,11 +19,13 @@
19#include <linux/kernel_stat.h> 19#include <linux/kernel_stat.h>
20#include <linux/rcupdate.h> 20#include <linux/rcupdate.h>
21#include <linux/posix-timers.h> 21#include <linux/posix-timers.h>
22#include <linux/cpu.h>
23#include <linux/kprobes.h>
22 24
23#include <asm/s390_ext.h>
24#include <asm/timer.h> 25#include <asm/timer.h>
25#include <asm/irq_regs.h> 26#include <asm/irq_regs.h>
26#include <asm/cputime.h> 27#include <asm/cputime.h>
28#include <asm/irq.h>
27 29
28static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 30static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
29 31
@@ -42,7 +44,7 @@ static inline void set_vtimer(__u64 expires)
42 __u64 timer; 44 __u64 timer;
43 45
44 asm volatile (" STPT %0\n" /* Store current cpu timer value */ 46 asm volatile (" STPT %0\n" /* Store current cpu timer value */
45 " SPT %1" /* Set new value immediatly afterwards */ 47 " SPT %1" /* Set new value immediately afterwards */
46 : "=m" (timer) : "m" (expires) ); 48 : "=m" (timer) : "m" (expires) );
47 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; 49 S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
48 S390_lowcore.last_update_timer = expires; 50 S390_lowcore.last_update_timer = expires;
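[Editor's note] Around the STPT/SPT pair in set_vtimer(), the time already consumed on the old CPU timer is charged to system time before the new expiry is armed. A worked model of that bookkeeping with invented values:

#include <stdio.h>

int main(void)
{
	unsigned long long last_update_timer = 1000;	/* value when armed */
	unsigned long long timer = 400;			/* STPT: value now */
	unsigned long long system_timer = 0;
	unsigned long long expires = 2000;		/* SPT: new expiry */

	system_timer += last_update_timer - timer;	/* 600 units used */
	last_update_timer = expires;

	printf("system time charged: %llu\n", system_timer);
	return 0;
}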
@@ -121,7 +123,7 @@ void account_system_vtime(struct task_struct *tsk)
121} 123}
122EXPORT_SYMBOL_GPL(account_system_vtime); 124EXPORT_SYMBOL_GPL(account_system_vtime);
123 125
124void vtime_start_cpu(__u64 int_clock, __u64 enter_timer) 126void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
125{ 127{
126 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 128 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
127 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 129 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
@@ -161,7 +163,7 @@ void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
161 idle->sequence++; 163 idle->sequence++;
162} 164}
163 165
164void vtime_stop_cpu(void) 166void __kprobes vtime_stop_cpu(void)
165{ 167{
166 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 168 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
167 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 169 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
@@ -314,13 +316,15 @@ static void do_callbacks(struct list_head *cb_list)
314/* 316/*
315 * Handler for the virtual CPU timer. 317 * Handler for the virtual CPU timer.
316 */ 318 */
317static void do_cpu_timer_interrupt(__u16 error_code) 319static void do_cpu_timer_interrupt(unsigned int ext_int_code,
320 unsigned int param32, unsigned long param64)
318{ 321{
319 struct vtimer_queue *vq; 322 struct vtimer_queue *vq;
320 struct vtimer_list *event, *tmp; 323 struct vtimer_list *event, *tmp;
321 struct list_head cb_list; /* the callback queue */ 324 struct list_head cb_list; /* the callback queue */
322 __u64 elapsed, next; 325 __u64 elapsed, next;
323 326
327 kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
324 INIT_LIST_HEAD(&cb_list); 328 INIT_LIST_HEAD(&cb_list);
325 vq = &__get_cpu_var(virt_cpu_timer); 329 vq = &__get_cpu_var(virt_cpu_timer);
326 330
@@ -565,6 +569,23 @@ void init_cpu_vtimer(void)
565 __ctl_set_bit(0,10); 569 __ctl_set_bit(0,10);
566} 570}
567 571
572static int __cpuinit s390_nohz_notify(struct notifier_block *self,
573 unsigned long action, void *hcpu)
574{
575 struct s390_idle_data *idle;
576 long cpu = (long) hcpu;
577
578 idle = &per_cpu(s390_idle, cpu);
579 switch (action) {
580 case CPU_DYING:
581 case CPU_DYING_FROZEN:
582 idle->nohz_delay = 0;
583 default:
584 break;
585 }
586 return NOTIFY_OK;
587}
588
568void __init vtime_init(void) 589void __init vtime_init(void)
569{ 590{
570 /* request the cpu timer external interrupt */ 591 /* request the cpu timer external interrupt */
@@ -573,5 +594,6 @@ void __init vtime_init(void)
573 594
574 /* Enable cpu timer interrupts on the boot cpu. */ 595 /* Enable cpu timer interrupts on the boot cpu. */
575 init_cpu_vtimer(); 596 init_cpu_vtimer();
597 cpu_notifier(s390_nohz_notify, 0);
576} 598}
577 599
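[Editor's note] The new s390_nohz_notify() hooks CPU hotplug so a dying CPU cannot be left with nohz_delay set; note the deliberate fall-through from the CPU_DYING cases into the shared break. A userspace model of the pattern, with invented action values (the real ones come from linux/cpu.h):

#include <stdio.h>

#define CPU_DYING		0x08	/* invented stand-ins */
#define CPU_DYING_FROZEN	0x18
#define NOTIFY_OK		1

static int nohz_delay[4] = { 1, 1, 1, 1 };	/* per-cpu flag, modeled */

static int s390_nohz_notify_model(unsigned long action, long cpu)
{
	switch (action) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		nohz_delay[cpu] = 0;	/* falls through, like the original */
	default:
		break;
	}
	return NOTIFY_OK;
}

int main(void)
{
	s390_nohz_notify_model(CPU_DYING, 2);
	printf("cpu2 nohz_delay after CPU_DYING: %d\n", nohz_delay[2]);
	return 0;
}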