Diffstat (limited to 'arch/s390/kernel')
 arch/s390/kernel/asm-offsets.c    |   5
 arch/s390/kernel/compat_signal.c  |   2
 arch/s390/kernel/compat_wrapper.c |   2
 arch/s390/kernel/debug.c          |  12
 arch/s390/kernel/dumpstack.c      |   3
 arch/s390/kernel/early.c          |   4
 arch/s390/kernel/entry.S          | 424
 arch/s390/kernel/entry.h          |   2
 arch/s390/kernel/entry64.S        | 372
 arch/s390/kernel/ftrace.c         | 136
 arch/s390/kernel/idle.c           |  29
 arch/s390/kernel/irq.c            |   5
 arch/s390/kernel/kprobes.c        | 178
 arch/s390/kernel/mcount.S         |   1
 arch/s390/kernel/perf_cpum_sf.c   |   1
 arch/s390/kernel/process.c        |   3
 arch/s390/kernel/ptrace.c         | 115
 arch/s390/kernel/setup.c          |   2
 arch/s390/kernel/signal.c         |   2
 arch/s390/kernel/smp.c            |   1
 arch/s390/kernel/syscalls.S       |   2
 arch/s390/kernel/time.c           |   3
 arch/s390/kernel/traps.c          |  25
23 files changed, 739 insertions(+), 590 deletions(-)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ef279a136801..e07e91605353 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -17,8 +17,8 @@
  * Make sure that the compiler is new enough. We want a compiler that
  * is known to work with the "Q" assembler constraint.
  */
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too old; please use version 3.3.3 or newer
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
+#error Your compiler is too old; please use version 4.3 or newer
 #endif
 
 int main(void)
@@ -156,7 +156,6 @@ int main(void)
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
-	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
 	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
 	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 009f5eb11125..34d5fa7b01b5 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -434,7 +434,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
 		/* Signal frames without vectors registers are short ! */
-		__u16 __user *svc = (void *) frame + frame_size - 2;
+		__u16 __user *svc = (void __user *) frame + frame_size - 2;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
 			return -EFAULT;
 		restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index c4f7a3d655b8..d7fa2f0f1425 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -218,3 +218,5 @@ COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char
 COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
 COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
 COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ee8390da6ea7..c1f21aca76e7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1019,7 +1019,7 @@ debug_count_numargs(char *string)
  */
 
 debug_entry_t*
-debug_sprintf_event(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
 {
 	va_list ap;
 	int numargs,idx;
@@ -1027,8 +1027,6 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 	debug_sprintf_entry_t *curr_event;
 	debug_entry_t *active;
 
-	if((!id) || (level > id->level))
-		return NULL;
 	if (!debug_active || !id->areas)
 		return NULL;
 	numargs=debug_count_numargs(string);
@@ -1050,14 +1048,14 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
-EXPORT_SYMBOL(debug_sprintf_event);
+EXPORT_SYMBOL(__debug_sprintf_event);
 
 /*
  * debug_sprintf_exception:
  */
 
 debug_entry_t*
-debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
 {
 	va_list ap;
 	int numargs,idx;
@@ -1065,8 +1063,6 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 	debug_sprintf_entry_t *curr_event;
 	debug_entry_t *active;
 
-	if((!id) || (level > id->level))
-		return NULL;
 	if (!debug_active || !id->areas)
 		return NULL;
 
@@ -1089,7 +1085,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
-EXPORT_SYMBOL(debug_sprintf_exception);
+EXPORT_SYMBOL(__debug_sprintf_exception);
 
 /*
  * debug_register_view:
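
Note: with debug_sprintf_event() and debug_sprintf_exception() renamed to their
double-underscore variants, the dropped id/level check presumably moves into
header-side wrappers (arch/s390/include/asm/debug.h, which lies outside this
diffstat's 'arch/s390/kernel' limit), so that filtered debug levels no longer
pay for a function call and vararg setup. A minimal sketch of the assumed
wrapper, not taken from this diff:

	#define debug_sprintf_event(_id, _level, _fmt, ...)		\
	({								\
		debug_entry_t *__ret;					\
		debug_info_t *__id = _id;				\
		int __level = _level;					\
									\
		/* inlined early-out: no call for filtered levels */	\
		if ((!__id) || (__level > __id->level))			\
			__ret = NULL;					\
		else							\
			__ret = __debug_sprintf_event(__id, __level,	\
						      _fmt, ## __VA_ARGS__); \
		__ret;							\
	})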
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index acb412442e5e..a99852e96a77 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -191,7 +191,8 @@ void die(struct pt_regs *regs, const char *str)
 	console_verbose();
 	spin_lock_irq(&die_lock);
 	bust_spinlocks(1);
-	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
+	       regs->int_code >> 17, ++die_counter);
 #ifdef CONFIG_PREEMPT
 	printk("PREEMPT ");
 #endif
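
Note: the new "ilc:%d" in the die() banner is derived from the upper halfword
of pt_regs::int_code. Assuming the usual s390 layout, where that halfword
carries the length of the interrupted instruction in bytes (2, 4 or 6), the
shift by 17 rather than 16 converts it to the architectural ILC in halfwords
(1, 2 or 3). A sketch of the assumed extraction (hypothetical helper, not part
of this patch):

	static inline int die_ilc(struct pt_regs *regs)
	{
		/* high halfword = insn length in bytes; /2 = halfwords */
		return regs->int_code >> 17;
	}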
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cef2879edff3..302ac1f7f8e7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -12,7 +12,6 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
-#include <linux/ftrace.h>
 #include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/pfn.h>
@@ -490,8 +489,5 @@ void __init startup_init(void)
 	detect_machine_facilities();
 	setup_topology();
 	sclp_early_detect();
-#ifdef CONFIG_DYNAMIC_FTRACE
-	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
-#endif
 	lockdep_on();
 }
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 70203265196f..398329b2b518 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -53,7 +53,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	.macro	TRACE_IRQS_ON
 #ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Lhardirqs_on)
+	l	%r1,BASED(.Lc_hardirqs_on)
 	basr	%r14,%r1		# call trace_hardirqs_on_caller
 #endif
 	.endm
@@ -61,7 +61,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	.macro	TRACE_IRQS_OFF
 #ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Lhardirqs_off)
+	l	%r1,BASED(.Lc_hardirqs_off)
 	basr	%r14,%r1		# call trace_hardirqs_off_caller
 #endif
 	.endm
@@ -70,7 +70,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 #ifdef CONFIG_LOCKDEP
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 	jz	.+10
-	l	%r1,BASED(.Llockdep_sys_exit)
+	l	%r1,BASED(.Lc_lockdep_sys_exit)
 	basr	%r14,%r1		# call lockdep_sys_exit
 #endif
 	.endm
@@ -87,8 +87,8 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	tmh	%r8,0x0001		# interrupting from user ?
 	jnz	1f
 	lr	%r14,%r9
-	sl	%r14,BASED(.Lcritical_start)
-	cl	%r14,BASED(.Lcritical_length)
+	sl	%r14,BASED(.Lc_critical_start)
+	cl	%r14,BASED(.Lc_critical_length)
 	jhe	0f
 	la	%r11,\savearea		# inside critical section, do cleanup
 	bras	%r14,cleanup_critical
@@ -162,7 +162,7 @@ ENTRY(__switch_to)
 	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 
-__critical_start:
+.L__critical_start:
 /*
  * SVC interrupt handler routine. System calls are synchronous events and
  * are executed with interrupts enabled.
@@ -170,145 +170,145 @@ __critical_start:
 
 ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
-sysc_stm:
+.Lsysc_stm:
 	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
 	l	%r12,__LC_THREAD_INFO
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lhi	%r14,_PIF_SYSCALL
-sysc_per:
+.Lsysc_per:
 	l	%r15,__LC_KERNEL_STACK
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
-sysc_vtime:
+.Lsysc_vtime:
 	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	st	%r14,__PT_FLAGS(%r11)
-sysc_do_svc:
+.Lsysc_do_svc:
 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	lh	%r8,__PT_INT_CODE+2(%r11)
 	sla	%r8,2			# shift and test for svc0
-	jnz	sysc_nr_ok
+	jnz	.Lsysc_nr_ok
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
-	jnl	sysc_nr_ok
+	jnl	.Lsysc_nr_ok
 	sth	%r1,__PT_INT_CODE+2(%r11)
 	lr	%r8,%r1
 	sla	%r8,2
-sysc_nr_ok:
+.Lsysc_nr_ok:
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	st	%r2,__PT_ORIG_GPR2(%r11)
 	st	%r7,STACK_FRAME_OVERHEAD(%r15)
 	l	%r9,0(%r8,%r10)		# get system call addr.
 	tm	__TI_flags+3(%r12),_TIF_TRACE
-	jnz	sysc_tracesys
+	jnz	.Lsysc_tracesys
 	basr	%r14,%r9		# call sys_xxxx
 	st	%r2,__PT_R2(%r11)	# store return value
 
-sysc_return:
+.Lsysc_return:
 	LOCKDEP_SYS_EXIT
-sysc_tif:
+.Lsysc_tif:
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
-	jno	sysc_restore
+	jno	.Lsysc_restore
 	tm	__PT_FLAGS+3(%r11),_PIF_WORK
-	jnz	sysc_work
+	jnz	.Lsysc_work
 	tm	__TI_flags+3(%r12),_TIF_WORK
-	jnz	sysc_work		# check for thread work
+	jnz	.Lsysc_work		# check for thread work
 	tm	__LC_CPU_FLAGS+3,_CIF_WORK
-	jnz	sysc_work
-sysc_restore:
+	jnz	.Lsysc_work
+.Lsysc_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
-sysc_done:
+.Lsysc_done:
 
 #
 # One of the work bits is on. Find out which one.
 #
-sysc_work:
+.Lsysc_work:
 	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
-	jo	sysc_mcck_pending
+	jo	.Lsysc_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	jo	sysc_reschedule
+	jo	.Lsysc_reschedule
 	tm	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
-	jo	sysc_singlestep
+	jo	.Lsysc_singlestep
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	jo	sysc_sigpending
+	jo	.Lsysc_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	jo	sysc_notify_resume
+	jo	.Lsysc_notify_resume
 	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
-	jo	sysc_uaccess
-	j	sysc_return		# beware of critical section cleanup
+	jo	.Lsysc_uaccess
+	j	.Lsysc_return		# beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
-sysc_reschedule:
-	l	%r1,BASED(.Lschedule)
-	la	%r14,BASED(sysc_return)
+.Lsysc_reschedule:
+	l	%r1,BASED(.Lc_schedule)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call schedule
 
 #
 # _CIF_MCCK_PENDING is set, call handler
 #
-sysc_mcck_pending:
-	l	%r1,BASED(.Lhandle_mcck)
-	la	%r14,BASED(sysc_return)
+.Lsysc_mcck_pending:
+	l	%r1,BASED(.Lc_handle_mcck)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# TIF bit will be cleared by handler
 
 #
 # _CIF_ASCE is set, load user space asce
 #
-sysc_uaccess:
+.Lsysc_uaccess:
 	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
 	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
-	j	sysc_return
+	j	.Lsysc_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-sysc_sigpending:
+.Lsysc_sigpending:
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_signal)
+	l	%r1,BASED(.Lc_do_signal)
 	basr	%r14,%r1		# call do_signal
 	tm	__PT_FLAGS+3(%r11),_PIF_SYSCALL
-	jno	sysc_return
+	jno	.Lsysc_return
 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	xr	%r8,%r8			# svc 0 returns -ENOSYS
 	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
-	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
+	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
 	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
 	sla	%r8,2
-	j	sysc_nr_ok		# restart svc
+	j	.Lsysc_nr_ok		# restart svc
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
-sysc_notify_resume:
+.Lsysc_notify_resume:
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_notify_resume)
-	la	%r14,BASED(sysc_return)
+	l	%r1,BASED(.Lc_do_notify_resume)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call do_notify_resume
 
 #
 # _PIF_PER_TRAP is set, call do_per_trap
 #
-sysc_singlestep:
+.Lsysc_singlestep:
 	ni	__PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_per_trap)
-	la	%r14,BASED(sysc_return)
+	l	%r1,BASED(.Lc_do_per_trap)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 # and after the system call
 #
-sysc_tracesys:
-	l	%r1,BASED(.Ltrace_enter)
+.Lsysc_tracesys:
+	l	%r1,BASED(.Lc_trace_enter)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	la	%r3,0
 	xr	%r0,%r0
@@ -316,22 +316,22 @@ sysc_tracesys:
 	st	%r0,__PT_R2(%r11)
 	basr	%r14,%r1		# call do_syscall_trace_enter
 	cl	%r2,BASED(.Lnr_syscalls)
-	jnl	sysc_tracenogo
+	jnl	.Lsysc_tracenogo
 	lr	%r8,%r2
 	sll	%r8,2
 	l	%r9,0(%r8,%r10)
-sysc_tracego:
+.Lsysc_tracego:
 	lm	%r3,%r7,__PT_R3(%r11)
 	st	%r7,STACK_FRAME_OVERHEAD(%r15)
 	l	%r2,__PT_ORIG_GPR2(%r11)
 	basr	%r14,%r9		# call sys_xxx
 	st	%r2,__PT_R2(%r11)	# store return value
-sysc_tracenogo:
+.Lsysc_tracenogo:
 	tm	__TI_flags+3(%r12),_TIF_TRACE
-	jz	sysc_return
-	l	%r1,BASED(.Ltrace_exit)
+	jz	.Lsysc_return
+	l	%r1,BASED(.Lc_trace_exit)
 	lr	%r2,%r11		# pass pointer to pt_regs
-	la	%r14,BASED(sysc_return)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call do_syscall_trace_exit
 
 #
@@ -341,18 +341,18 @@ ENTRY(ret_from_fork)
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	l	%r12,__LC_THREAD_INFO
 	l	%r13,__LC_SVC_NEW_PSW+4
-	l	%r1,BASED(.Lschedule_tail)
+	l	%r1,BASED(.Lc_schedule_tail)
 	basr	%r14,%r1		# call schedule_tail
 	TRACE_IRQS_ON
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
-	jne	sysc_tracenogo
+	jne	.Lsysc_tracenogo
 	# it's a kernel thread
 	lm	%r9,%r10,__PT_R9(%r11)	# load gprs
 ENTRY(kernel_thread_starter)
 	la	%r2,0(%r10)
 	basr	%r14,%r9
-	j	sysc_tracenogo
+	j	.Lsysc_tracenogo
 
 /*
  * Program check handler routine
@@ -369,7 +369,7 @@ ENTRY(pgm_check_handler)
 	tmh	%r8,0x4000		# PER bit set in old PSW ?
 	jnz	0f			# -> enabled, can't be a double fault
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
-	jnz	pgm_svcper		# -> single stepped svc
+	jnz	.Lpgm_svcper		# -> single stepped svc
 0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	2f
@@ -386,42 +386,42 @@ ENTRY(pgm_check_handler)
 	jz	0f
 	l	%r1,__TI_task(%r12)
 	tmh	%r8,0x0001		# kernel per event ?
-	jz	pgm_kprobe
+	jz	.Lpgm_kprobe
 	oi	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CODE
 	mvc	__THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
0:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	l	%r1,BASED(.Ljump_table)
+	l	%r1,BASED(.Lc_jump_table)
 	la	%r10,0x7f
 	n	%r10,__PT_INT_CODE(%r11)
-	je	sysc_return
+	je	.Lsysc_return
 	sll	%r10,2
 	l	%r1,0(%r10,%r1)		# load address of handler routine
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# branch to interrupt-handler
-	j	sysc_return
+	j	.Lsysc_return
 
 #
 # PER event in supervisor state, must be kprobes
 #
-pgm_kprobe:
+.Lpgm_kprobe:
 	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	l	%r1,BASED(.Ldo_per_trap)
+	l	%r1,BASED(.Lc_do_per_trap)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_per_trap
-	j	sysc_return
+	j	.Lsysc_return
 
 #
 # single stepped system call
 #
-pgm_svcper:
+.Lpgm_svcper:
 	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
-	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+	mvc	__LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
 	lhi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
-	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
+	lpsw	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
 
 /*
  * IO interrupt handler routine
@@ -435,9 +435,9 @@ ENTRY(io_int_handler)
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lm	%r8,%r9,__LC_IO_OLD_PSW
 	tmh	%r8,0x0001		# interrupting from user ?
-	jz	io_skip
+	jz	.Lio_skip
 	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
-io_skip:
+.Lio_skip:
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -446,35 +446,35 @@ io_skip:
 	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-io_loop:
-	l	%r1,BASED(.Ldo_IRQ)
+.Lio_loop:
+	l	%r1,BASED(.Lc_do_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	lhi	%r3,IO_INTERRUPT
 	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
-	jz	io_call
+	jz	.Lio_call
 	lhi	%r3,THIN_INTERRUPT
-io_call:
+.Lio_call:
 	basr	%r14,%r1		# call do_IRQ
 	tm	__LC_MACHINE_FLAGS+2,0x10	# MACHINE_FLAG_LPAR
-	jz	io_return
+	jz	.Lio_return
 	tpi	0
-	jz	io_return
+	jz	.Lio_return
 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
-	j	io_loop
-io_return:
+	j	.Lio_loop
+.Lio_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
-io_tif:
+.Lio_tif:
 	tm	__TI_flags+3(%r12),_TIF_WORK
-	jnz	io_work			# there is work to do (signals etc.)
+	jnz	.Lio_work		# there is work to do (signals etc.)
 	tm	__LC_CPU_FLAGS+3,_CIF_WORK
-	jnz	io_work
-io_restore:
+	jnz	.Lio_work
+.Lio_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
-io_done:
+.Lio_done:
 
 #
 # There is work todo, find out in which context we have been interrupted:
@@ -483,15 +483,15 @@ io_done:
 # the preemption counter and if it is zero call preempt_schedule_irq
 # Before any work can be done, a switch to the kernel stack is required.
 #
-io_work:
+.Lio_work:
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
-	jo	io_work_user		# yes -> do resched & signal
+	jo	.Lio_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r12)
-	jnz	io_restore		# preemption disabled
+	jnz	.Lio_restore		# preemption disabled
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	jno	io_restore
+	jno	.Lio_restore
 	# switch to kernel stack
 	l	%r1,__PT_R15(%r11)
 	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -499,20 +499,20 @@ io_work:
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
 	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lr	%r15,%r1
-	# TRACE_IRQS_ON already done at io_return, call
+	# TRACE_IRQS_ON already done at .Lio_return, call
 	# TRACE_IRQS_OFF to keep things symmetrical
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Lpreempt_irq)
+	l	%r1,BASED(.Lc_preempt_irq)
 	basr	%r14,%r1		# call preempt_schedule_irq
-	j	io_return
+	j	.Lio_return
 #else
-	j	io_restore
+	j	.Lio_restore
 #endif
 
 #
 # Need to do work before returning to userspace, switch to kernel stack
 #
-io_work_user:
+.Lio_work_user:
 	l	%r1,__LC_KERNEL_STACK
 	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -522,74 +522,74 @@ io_work_user:
 #
 # One of the work bits is on. Find out which one.
 #
-io_work_tif:
+.Lio_work_tif:
 	tm	__LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
-	jo	io_mcck_pending
+	jo	.Lio_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	jo	io_reschedule
+	jo	.Lio_reschedule
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	jo	io_sigpending
+	jo	.Lio_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	jo	io_notify_resume
+	jo	.Lio_notify_resume
 	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
-	jo	io_uaccess
-	j	io_return		# beware of critical section cleanup
+	jo	.Lio_uaccess
+	j	.Lio_return		# beware of critical section cleanup
 
 #
 # _CIF_MCCK_PENDING is set, call handler
 #
-io_mcck_pending:
-	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Lhandle_mcck)
+.Lio_mcck_pending:
+	# TRACE_IRQS_ON already done at .Lio_return
+	l	%r1,BASED(.Lc_handle_mcck)
 	basr	%r14,%r1		# TIF bit will be cleared by handler
 	TRACE_IRQS_OFF
-	j	io_return
+	j	.Lio_return
 
 #
 # _CIF_ASCE is set, load user space asce
 #
-io_uaccess:
+.Lio_uaccess:
 	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
 	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
-	j	io_return
+	j	.Lio_return
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
-io_reschedule:
-	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Lschedule)
+.Lio_reschedule:
+	# TRACE_IRQS_ON already done at .Lio_return
+	l	%r1,BASED(.Lc_schedule)
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	basr	%r14,%r1		# call scheduler
 	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	j	io_return
+	j	.Lio_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-io_sigpending:
-	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Ldo_signal)
+.Lio_sigpending:
+	# TRACE_IRQS_ON already done at .Lio_return
+	l	%r1,BASED(.Lc_do_signal)
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_signal
 	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	j	io_return
+	j	.Lio_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-io_notify_resume:
-	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Ldo_notify_resume)
+.Lio_notify_resume:
+	# TRACE_IRQS_ON already done at .Lio_return
+	l	%r1,BASED(.Lc_do_notify_resume)
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_notify_resume
 	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	j	io_return
+	j	.Lio_return
 
 /*
  * External interrupt handler routine
@@ -603,9 +603,9 @@ ENTRY(ext_int_handler)
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lm	%r8,%r9,__LC_EXT_OLD_PSW
 	tmh	%r8,0x0001		# interrupting from user ?
-	jz	ext_skip
+	jz	.Lext_skip
 	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
-ext_skip:
+.Lext_skip:
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -614,29 +614,29 @@ ext_skip:
 	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Ldo_IRQ)
+	l	%r1,BASED(.Lc_do_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	lhi	%r3,EXT_INTERRUPT
 	basr	%r14,%r1		# call do_IRQ
-	j	io_return
+	j	.Lio_return
 
 /*
- * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
  */
 ENTRY(psw_idle)
 	st	%r3,__SF_EMPTY(%r15)
 	basr	%r1,0
-	la	%r1,psw_idle_lpsw+4-.(%r1)
+	la	%r1,.Lpsw_idle_lpsw+4-.(%r1)
 	st	%r1,__SF_EMPTY+4(%r15)
 	oi	__SF_EMPTY+4(%r15),0x80
 	stck	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
-psw_idle_lpsw:
+.Lpsw_idle_lpsw:
 	lpsw	__SF_EMPTY(%r15)
 	br	%r14
-psw_idle_end:
+.Lpsw_idle_end:
 
-__critical_end:
+.L__critical_end:
 
 /*
  * Machine check handler routines
@@ -650,7 +650,7 @@ ENTRY(mcck_int_handler)
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lm	%r8,%r9,__LC_MCK_OLD_PSW
 	tm	__LC_MCCK_CODE,0x80	# system damage?
-	jo	mcck_panic		# yes -> rest of mcck code invalid
+	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
 	la	%r14,__LC_CPU_TIMER_SAVE_AREA
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
@@ -668,22 +668,22 @@ ENTRY(mcck_int_handler)
2:	spt	0(%r14)
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
-	jno	mcck_panic		# no -> skip cleanup critical
 	tm	%r8,0x0001		# interrupting from user ?
-	jz	mcck_skip
+	jno	.Lmcck_panic		# no -> skip cleanup critical
+	jz	.Lmcck_skip
 	UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
-mcck_skip:
+.Lmcck_skip:
 	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
 	stm	%r8,%r9,__PT_PSW(%r11)
 	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	l	%r1,BASED(.Ldo_machine_check)
+	l	%r1,BASED(.Lc_do_machine_check)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call s390_do_machine_check
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
-	jno	mcck_return
+	jno	.Lmcck_return
 	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
 	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -691,12 +691,12 @@ mcck_skip:
 	lr	%r15,%r1
 	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
-	jno	mcck_return
+	jno	.Lmcck_return
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Lhandle_mcck)
+	l	%r1,BASED(.Lc_handle_mcck)
 	basr	%r14,%r1		# call s390_handle_mcck
 	TRACE_IRQS_ON
-mcck_return:
+.Lmcck_return:
 	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11)	# move return PSW
 	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
 	jno	0f
@@ -706,15 +706,15 @@ mcck_return:
0:	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_MCCK_PSW
 
-mcck_panic:
+.Lmcck_panic:
 	l	%r14,__LC_PANIC_STACK
 	slr	%r14,%r15
 	sra	%r14,PAGE_SHIFT
 	jz	0f
 	l	%r15,__LC_PANIC_STACK
-	j	mcck_skip
+	j	.Lmcck_skip
0:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	j	mcck_skip
+	j	.Lmcck_skip
 
 #
 # PSW restart interrupt handler
@@ -764,58 +764,58 @@ stack_overflow:
1:	.long	kernel_stack_overflow
 #endif
 
-cleanup_table:
+.Lcleanup_table:
 	.long	system_call + 0x80000000
-	.long	sysc_do_svc + 0x80000000
-	.long	sysc_tif + 0x80000000
-	.long	sysc_restore + 0x80000000
-	.long	sysc_done + 0x80000000
-	.long	io_tif + 0x80000000
-	.long	io_restore + 0x80000000
-	.long	io_done + 0x80000000
+	.long	.Lsysc_do_svc + 0x80000000
+	.long	.Lsysc_tif + 0x80000000
+	.long	.Lsysc_restore + 0x80000000
+	.long	.Lsysc_done + 0x80000000
+	.long	.Lio_tif + 0x80000000
+	.long	.Lio_restore + 0x80000000
+	.long	.Lio_done + 0x80000000
 	.long	psw_idle + 0x80000000
-	.long	psw_idle_end + 0x80000000
+	.long	.Lpsw_idle_end + 0x80000000
 
 cleanup_critical:
-	cl	%r9,BASED(cleanup_table)	# system_call
+	cl	%r9,BASED(.Lcleanup_table)	# system_call
 	jl	0f
-	cl	%r9,BASED(cleanup_table+4)	# sysc_do_svc
-	jl	cleanup_system_call
-	cl	%r9,BASED(cleanup_table+8)	# sysc_tif
+	cl	%r9,BASED(.Lcleanup_table+4)	# .Lsysc_do_svc
+	jl	.Lcleanup_system_call
+	cl	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_tif
 	jl	0f
-	cl	%r9,BASED(cleanup_table+12)	# sysc_restore
-	jl	cleanup_sysc_tif
-	cl	%r9,BASED(cleanup_table+16)	# sysc_done
-	jl	cleanup_sysc_restore
-	cl	%r9,BASED(cleanup_table+20)	# io_tif
+	cl	%r9,BASED(.Lcleanup_table+12)	# .Lsysc_restore
+	jl	.Lcleanup_sysc_tif
+	cl	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_done
+	jl	.Lcleanup_sysc_restore
+	cl	%r9,BASED(.Lcleanup_table+20)	# .Lio_tif
 	jl	0f
-	cl	%r9,BASED(cleanup_table+24)	# io_restore
-	jl	cleanup_io_tif
-	cl	%r9,BASED(cleanup_table+28)	# io_done
-	jl	cleanup_io_restore
-	cl	%r9,BASED(cleanup_table+32)	# psw_idle
+	cl	%r9,BASED(.Lcleanup_table+24)	# .Lio_restore
+	jl	.Lcleanup_io_tif
+	cl	%r9,BASED(.Lcleanup_table+28)	# .Lio_done
+	jl	.Lcleanup_io_restore
+	cl	%r9,BASED(.Lcleanup_table+32)	# psw_idle
 	jl	0f
-	cl	%r9,BASED(cleanup_table+36)	# psw_idle_end
-	jl	cleanup_idle
+	cl	%r9,BASED(.Lcleanup_table+36)	# .Lpsw_idle_end
+	jl	.Lcleanup_idle
0:	br	%r14
 
-cleanup_system_call:
+.Lcleanup_system_call:
 	# check if stpt has been executed
-	cl	%r9,BASED(cleanup_system_call_insn)
+	cl	%r9,BASED(.Lcleanup_system_call_insn)
 	jh	0f
 	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
 	chi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
 	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stm has been executed
-	cl	%r9,BASED(cleanup_system_call_insn+4)
+	cl	%r9,BASED(.Lcleanup_system_call_insn+4)
 	jh	0f
 	mvc	__LC_SAVE_AREA_SYNC(32),0(%r11)
0:	# set up saved registers r12, and r13
 	st	%r12,16(%r11)		# r12 thread-info pointer
 	st	%r13,20(%r11)		# r13 literal-pool pointer
 	# check if the user time calculation has been done
-	cl	%r9,BASED(cleanup_system_call_insn+8)
+	cl	%r9,BASED(.Lcleanup_system_call_insn+8)
 	jh	0f
 	l	%r10,__LC_EXIT_TIMER
 	l	%r15,__LC_EXIT_TIMER+4
@@ -824,7 +824,7 @@ cleanup_system_call:
 	st	%r10,__LC_USER_TIMER
 	st	%r15,__LC_USER_TIMER+4
0:	# check if the system time calculation has been done
-	cl	%r9,BASED(cleanup_system_call_insn+12)
+	cl	%r9,BASED(.Lcleanup_system_call_insn+12)
 	jh	0f
 	l	%r10,__LC_LAST_UPDATE_TIMER
 	l	%r15,__LC_LAST_UPDATE_TIMER+4
@@ -848,20 +848,20 @@ cleanup_system_call:
 	# setup saved register 15
 	st	%r15,28(%r11)		# r15 stack pointer
 	# set new psw address and exit
-	l	%r9,BASED(cleanup_table+4)	# sysc_do_svc + 0x80000000
+	l	%r9,BASED(.Lcleanup_table+4)	# .Lsysc_do_svc + 0x80000000
 	br	%r14
-cleanup_system_call_insn:
+.Lcleanup_system_call_insn:
 	.long	system_call + 0x80000000
-	.long	sysc_stm + 0x80000000
-	.long	sysc_vtime + 0x80000000 + 36
-	.long	sysc_vtime + 0x80000000 + 76
+	.long	.Lsysc_stm + 0x80000000
+	.long	.Lsysc_vtime + 0x80000000 + 36
+	.long	.Lsysc_vtime + 0x80000000 + 76
 
-cleanup_sysc_tif:
-	l	%r9,BASED(cleanup_table+8)	# sysc_tif + 0x80000000
+.Lcleanup_sysc_tif:
+	l	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_tif + 0x80000000
 	br	%r14
 
-cleanup_sysc_restore:
-	cl	%r9,BASED(cleanup_sysc_restore_insn)
+.Lcleanup_sysc_restore:
+	cl	%r9,BASED(.Lcleanup_sysc_restore_insn)
 	jhe	0f
 	l	%r9,12(%r11)		# get saved pointer to pt_regs
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -869,15 +869,15 @@ cleanup_sysc_restore:
 	lm	%r0,%r7,__PT_R0(%r9)
0:	lm	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
-cleanup_sysc_restore_insn:
-	.long	sysc_done - 4 + 0x80000000
+.Lcleanup_sysc_restore_insn:
+	.long	.Lsysc_done - 4 + 0x80000000
 
-cleanup_io_tif:
-	l	%r9,BASED(cleanup_table+20)	# io_tif + 0x80000000
+.Lcleanup_io_tif:
+	l	%r9,BASED(.Lcleanup_table+20)	# .Lio_tif + 0x80000000
 	br	%r14
 
-cleanup_io_restore:
-	cl	%r9,BASED(cleanup_io_restore_insn)
+.Lcleanup_io_restore:
+	cl	%r9,BASED(.Lcleanup_io_restore_insn)
 	jhe	0f
 	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -885,10 +885,10 @@ cleanup_io_restore:
 	lm	%r0,%r7,__PT_R0(%r9)
0:	lm	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
-cleanup_io_restore_insn:
-	.long	io_done - 4 + 0x80000000
+.Lcleanup_io_restore_insn:
+	.long	.Lio_done - 4 + 0x80000000
 
-cleanup_idle:
+.Lcleanup_idle:
 	# copy interrupt clock & cpu timer
 	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
 	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -897,7 +897,7 @@ cleanup_idle:
 	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
 	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck has been executed
-	cl	%r9,BASED(cleanup_idle_insn)
+	cl	%r9,BASED(.Lcleanup_idle_insn)
 	jhe	1f
 	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
 	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
@@ -913,12 +913,12 @@ cleanup_idle:
 	stm	%r9,%r10,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 	# prepare return psw
-	n	%r8,BASED(cleanup_idle_wait)	# clear irq & wait state bits
+	n	%r8,BASED(.Lcleanup_idle_wait)	# clear irq & wait state bits
 	l	%r9,24(%r11)		# return from psw_idle
 	br	%r14
-cleanup_idle_insn:
-	.long	psw_idle_lpsw + 0x80000000
-cleanup_idle_wait:
+.Lcleanup_idle_insn:
+	.long	.Lpsw_idle_lpsw + 0x80000000
+.Lcleanup_idle_wait:
 	.long	0xfcfdffff
 
 /*
@@ -933,30 +933,30 @@ cleanup_idle_wait:
 /*
  * Symbol constants
  */
-.Ldo_machine_check:	.long	s390_do_machine_check
-.Lhandle_mcck:		.long	s390_handle_mcck
-.Ldo_IRQ:		.long	do_IRQ
-.Ldo_signal:		.long	do_signal
-.Ldo_notify_resume:	.long	do_notify_resume
-.Ldo_per_trap:		.long	do_per_trap
-.Ljump_table:		.long	pgm_check_table
-.Lschedule:		.long	schedule
+.Lc_do_machine_check:	.long	s390_do_machine_check
+.Lc_handle_mcck:	.long	s390_handle_mcck
+.Lc_do_IRQ:		.long	do_IRQ
+.Lc_do_signal:		.long	do_signal
+.Lc_do_notify_resume:	.long	do_notify_resume
+.Lc_do_per_trap:	.long	do_per_trap
+.Lc_jump_table:		.long	pgm_check_table
+.Lc_schedule:		.long	schedule
 #ifdef CONFIG_PREEMPT
-.Lpreempt_irq:		.long	preempt_schedule_irq
+.Lc_preempt_irq:	.long	preempt_schedule_irq
 #endif
-.Ltrace_enter:		.long	do_syscall_trace_enter
-.Ltrace_exit:		.long	do_syscall_trace_exit
-.Lschedule_tail:	.long	schedule_tail
-.Lsysc_per:		.long	sysc_per + 0x80000000
+.Lc_trace_enter:	.long	do_syscall_trace_enter
+.Lc_trace_exit:		.long	do_syscall_trace_exit
+.Lc_schedule_tail:	.long	schedule_tail
+.Lc_sysc_per:		.long	.Lsysc_per + 0x80000000
 #ifdef CONFIG_TRACE_IRQFLAGS
-.Lhardirqs_on:		.long	trace_hardirqs_on_caller
-.Lhardirqs_off:		.long	trace_hardirqs_off_caller
+.Lc_hardirqs_on:	.long	trace_hardirqs_on_caller
+.Lc_hardirqs_off:	.long	trace_hardirqs_off_caller
 #endif
 #ifdef CONFIG_LOCKDEP
-.Llockdep_sys_exit:	.long	lockdep_sys_exit
+.Lc_lockdep_sys_exit:	.long	lockdep_sys_exit
 #endif
-.Lcritical_start:	.long	__critical_start + 0x80000000
-.Lcritical_length:	.long	__critical_end - __critical_start
+.Lc_critical_start:	.long	.L__critical_start + 0x80000000
+.Lc_critical_length:	.long	.L__critical_end - .L__critical_start
 
	.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 0554b9771c9f..8e61393c8275 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h | |||
@@ -74,4 +74,6 @@ struct old_sigaction; | |||
74 | long sys_s390_personality(unsigned int personality); | 74 | long sys_s390_personality(unsigned int personality); |
75 | long sys_s390_runtime_instr(int command, int signum); | 75 | long sys_s390_runtime_instr(int command, int signum); |
76 | 76 | ||
77 | long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t); | ||
78 | long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t); | ||
77 | #endif /* _ENTRY_H */ | 79 | #endif /* _ENTRY_H */ |
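For context, a rough sketch of how userspace could reach the two new PCI MMIO system calls declared above. There are no libc wrappers; the wrapper names below are invented, and the __NR_s390_pci_mmio_* numbers are assumed to be exported by <asm/unistd.h> on kernels that carry this change.

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical thin wrappers around the raw syscalls added above. */
static long pci_mmio_write(unsigned long mmio_addr, const void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_write, mmio_addr, buf, len);
}

static long pci_mmio_read(unsigned long mmio_addr, void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
}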
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 7b2e03afd017..c329446a951d 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -91,7 +91,7 @@ _PIF_WORK = (_PIF_PER_TRAP) | |||
91 | .if \reason==1 | 91 | .if \reason==1 |
92 | # Some program interrupts are suppressing (e.g. protection). | 92 | # Some program interrupts are suppressing (e.g. protection). |
93 | # We must also check the instruction after SIE in that case. | 93 | # We must also check the instruction after SIE in that case. |
94 | # do_protection_exception will rewind to rewind_pad | 94 | # do_protection_exception will rewind to .Lrewind_pad |
95 | jh .+42 | 95 | jh .+42 |
96 | .else | 96 | .else |
97 | jhe .+42 | 97 | jhe .+42 |
@@ -192,7 +192,7 @@ ENTRY(__switch_to) | |||
192 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task | 192 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task |
193 | br %r14 | 193 | br %r14 |
194 | 194 | ||
195 | __critical_start: | 195 | .L__critical_start: |
196 | /* | 196 | /* |
197 | * SVC interrupt handler routine. System calls are synchronous events and | 197 | * SVC interrupt handler routine. System calls are synchronous events and |
198 | * are executed with interrupts enabled. | 198 | * are executed with interrupts enabled. |
@@ -200,15 +200,15 @@ __critical_start: | |||
200 | 200 | ||
201 | ENTRY(system_call) | 201 | ENTRY(system_call) |
202 | stpt __LC_SYNC_ENTER_TIMER | 202 | stpt __LC_SYNC_ENTER_TIMER |
203 | sysc_stmg: | 203 | .Lsysc_stmg: |
204 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC | 204 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
205 | lg %r10,__LC_LAST_BREAK | 205 | lg %r10,__LC_LAST_BREAK |
206 | lg %r12,__LC_THREAD_INFO | 206 | lg %r12,__LC_THREAD_INFO |
207 | lghi %r14,_PIF_SYSCALL | 207 | lghi %r14,_PIF_SYSCALL |
208 | sysc_per: | 208 | .Lsysc_per: |
209 | lg %r15,__LC_KERNEL_STACK | 209 | lg %r15,__LC_KERNEL_STACK |
210 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | 210 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs |
211 | sysc_vtime: | 211 | .Lsysc_vtime: |
212 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER | 212 | UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER |
213 | LAST_BREAK %r13 | 213 | LAST_BREAK %r13 |
214 | stmg %r0,%r7,__PT_R0(%r11) | 214 | stmg %r0,%r7,__PT_R0(%r11) |
@@ -216,39 +216,39 @@ sysc_vtime: | |||
216 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW | 216 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW |
217 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | 217 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
218 | stg %r14,__PT_FLAGS(%r11) | 218 | stg %r14,__PT_FLAGS(%r11) |
219 | sysc_do_svc: | 219 | .Lsysc_do_svc: |
220 | lg %r10,__TI_sysc_table(%r12) # address of system call table | 220 | lg %r10,__TI_sysc_table(%r12) # address of system call table |
221 | llgh %r8,__PT_INT_CODE+2(%r11) | 221 | llgh %r8,__PT_INT_CODE+2(%r11) |
222 | slag %r8,%r8,2 # shift and test for svc 0 | 222 | slag %r8,%r8,2 # shift and test for svc 0 |
223 | jnz sysc_nr_ok | 223 | jnz .Lsysc_nr_ok |
224 | # svc 0: system call number in %r1 | 224 | # svc 0: system call number in %r1 |
225 | llgfr %r1,%r1 # clear high word in r1 | 225 | llgfr %r1,%r1 # clear high word in r1 |
226 | cghi %r1,NR_syscalls | 226 | cghi %r1,NR_syscalls |
227 | jnl sysc_nr_ok | 227 | jnl .Lsysc_nr_ok |
228 | sth %r1,__PT_INT_CODE+2(%r11) | 228 | sth %r1,__PT_INT_CODE+2(%r11) |
229 | slag %r8,%r1,2 | 229 | slag %r8,%r1,2 |
230 | sysc_nr_ok: | 230 | .Lsysc_nr_ok: |
231 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 231 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
232 | stg %r2,__PT_ORIG_GPR2(%r11) | 232 | stg %r2,__PT_ORIG_GPR2(%r11) |
233 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | 233 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
234 | lgf %r9,0(%r8,%r10) # get system call add. | 234 | lgf %r9,0(%r8,%r10) # get system call add. |
235 | tm __TI_flags+7(%r12),_TIF_TRACE | 235 | tm __TI_flags+7(%r12),_TIF_TRACE |
236 | jnz sysc_tracesys | 236 | jnz .Lsysc_tracesys |
237 | basr %r14,%r9 # call sys_xxxx | 237 | basr %r14,%r9 # call sys_xxxx |
238 | stg %r2,__PT_R2(%r11) # store return value | 238 | stg %r2,__PT_R2(%r11) # store return value |
239 | 239 | ||
240 | sysc_return: | 240 | .Lsysc_return: |
241 | LOCKDEP_SYS_EXIT | 241 | LOCKDEP_SYS_EXIT |
242 | sysc_tif: | 242 | .Lsysc_tif: |
243 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 243 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
244 | jno sysc_restore | 244 | jno .Lsysc_restore |
245 | tm __PT_FLAGS+7(%r11),_PIF_WORK | 245 | tm __PT_FLAGS+7(%r11),_PIF_WORK |
246 | jnz sysc_work | 246 | jnz .Lsysc_work |
247 | tm __TI_flags+7(%r12),_TIF_WORK | 247 | tm __TI_flags+7(%r12),_TIF_WORK |
248 | jnz sysc_work # check for work | 248 | jnz .Lsysc_work # check for work |
249 | tm __LC_CPU_FLAGS+7,_CIF_WORK | 249 | tm __LC_CPU_FLAGS+7,_CIF_WORK |
250 | jnz sysc_work | 250 | jnz .Lsysc_work |
251 | sysc_restore: | 251 | .Lsysc_restore: |
252 | lg %r14,__LC_VDSO_PER_CPU | 252 | lg %r14,__LC_VDSO_PER_CPU |
253 | lmg %r0,%r10,__PT_R0(%r11) | 253 | lmg %r0,%r10,__PT_R0(%r11) |
254 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | 254 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) |
@@ -256,101 +256,101 @@ sysc_restore: | |||
256 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | 256 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
257 | lmg %r11,%r15,__PT_R11(%r11) | 257 | lmg %r11,%r15,__PT_R11(%r11) |
258 | lpswe __LC_RETURN_PSW | 258 | lpswe __LC_RETURN_PSW |
259 | sysc_done: | 259 | .Lsysc_done: |
260 | 260 | ||
261 | # | 261 | # |
262 | # One of the work bits is on. Find out which one. | 262 | # One of the work bits is on. Find out which one. |
263 | # | 263 | # |
264 | sysc_work: | 264 | .Lsysc_work: |
265 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | 265 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
266 | jo sysc_mcck_pending | 266 | jo .Lsysc_mcck_pending |
267 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 267 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
268 | jo sysc_reschedule | 268 | jo .Lsysc_reschedule |
269 | #ifdef CONFIG_UPROBES | 269 | #ifdef CONFIG_UPROBES |
270 | tm __TI_flags+7(%r12),_TIF_UPROBE | 270 | tm __TI_flags+7(%r12),_TIF_UPROBE |
271 | jo sysc_uprobe_notify | 271 | jo .Lsysc_uprobe_notify |
272 | #endif | 272 | #endif |
273 | tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP | 273 | tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP |
274 | jo sysc_singlestep | 274 | jo .Lsysc_singlestep |
275 | tm __TI_flags+7(%r12),_TIF_SIGPENDING | 275 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
276 | jo sysc_sigpending | 276 | jo .Lsysc_sigpending |
277 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 277 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
278 | jo sysc_notify_resume | 278 | jo .Lsysc_notify_resume |
279 | tm __LC_CPU_FLAGS+7,_CIF_ASCE | 279 | tm __LC_CPU_FLAGS+7,_CIF_ASCE |
280 | jo sysc_uaccess | 280 | jo .Lsysc_uaccess |
281 | j sysc_return # beware of critical section cleanup | 281 | j .Lsysc_return # beware of critical section cleanup |
282 | 282 | ||
283 | # | 283 | # |
284 | # _TIF_NEED_RESCHED is set, call schedule | 284 | # _TIF_NEED_RESCHED is set, call schedule |
285 | # | 285 | # |
286 | sysc_reschedule: | 286 | .Lsysc_reschedule: |
287 | larl %r14,sysc_return | 287 | larl %r14,.Lsysc_return |
288 | jg schedule | 288 | jg schedule |
289 | 289 | ||
290 | # | 290 | # |
291 | # _CIF_MCCK_PENDING is set, call handler | 291 | # _CIF_MCCK_PENDING is set, call handler |
292 | # | 292 | # |
293 | sysc_mcck_pending: | 293 | .Lsysc_mcck_pending: |
294 | larl %r14,sysc_return | 294 | larl %r14,.Lsysc_return |
295 | jg s390_handle_mcck # TIF bit will be cleared by handler | 295 | jg s390_handle_mcck # TIF bit will be cleared by handler |
296 | 296 | ||
297 | # | 297 | # |
298 | # _CIF_ASCE is set, load user space asce | 298 | # _CIF_ASCE is set, load user space asce |
299 | # | 299 | # |
300 | sysc_uaccess: | 300 | .Lsysc_uaccess: |
301 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE | 301 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE |
302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
303 | j sysc_return | 303 | j .Lsysc_return |
304 | 304 | ||
305 | # | 305 | # |
306 | # _TIF_SIGPENDING is set, call do_signal | 306 | # _TIF_SIGPENDING is set, call do_signal |
307 | # | 307 | # |
308 | sysc_sigpending: | 308 | .Lsysc_sigpending: |
309 | lgr %r2,%r11 # pass pointer to pt_regs | 309 | lgr %r2,%r11 # pass pointer to pt_regs |
310 | brasl %r14,do_signal | 310 | brasl %r14,do_signal |
311 | tm __PT_FLAGS+7(%r11),_PIF_SYSCALL | 311 | tm __PT_FLAGS+7(%r11),_PIF_SYSCALL |
312 | jno sysc_return | 312 | jno .Lsysc_return |
313 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments | 313 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments |
314 | lg %r10,__TI_sysc_table(%r12) # address of system call table | 314 | lg %r10,__TI_sysc_table(%r12) # address of system call table |
315 | lghi %r8,0 # svc 0 returns -ENOSYS | 315 | lghi %r8,0 # svc 0 returns -ENOSYS |
316 | llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number | 316 | llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number |
317 | cghi %r1,NR_syscalls | 317 | cghi %r1,NR_syscalls |
318 | jnl sysc_nr_ok # invalid svc number -> do svc 0 | 318 | jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 |
319 | slag %r8,%r1,2 | 319 | slag %r8,%r1,2 |
320 | j sysc_nr_ok # restart svc | 320 | j .Lsysc_nr_ok # restart svc |
321 | 321 | ||
322 | # | 322 | # |
323 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 323 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
324 | # | 324 | # |
325 | sysc_notify_resume: | 325 | .Lsysc_notify_resume: |
326 | lgr %r2,%r11 # pass pointer to pt_regs | 326 | lgr %r2,%r11 # pass pointer to pt_regs |
327 | larl %r14,sysc_return | 327 | larl %r14,.Lsysc_return |
328 | jg do_notify_resume | 328 | jg do_notify_resume |
329 | 329 | ||
330 | # | 330 | # |
331 | # _TIF_UPROBE is set, call uprobe_notify_resume | 331 | # _TIF_UPROBE is set, call uprobe_notify_resume |
332 | # | 332 | # |
333 | #ifdef CONFIG_UPROBES | 333 | #ifdef CONFIG_UPROBES |
334 | sysc_uprobe_notify: | 334 | .Lsysc_uprobe_notify: |
335 | lgr %r2,%r11 # pass pointer to pt_regs | 335 | lgr %r2,%r11 # pass pointer to pt_regs |
336 | larl %r14,sysc_return | 336 | larl %r14,.Lsysc_return |
337 | jg uprobe_notify_resume | 337 | jg uprobe_notify_resume |
338 | #endif | 338 | #endif |
339 | 339 | ||
340 | # | 340 | # |
341 | # _PIF_PER_TRAP is set, call do_per_trap | 341 | # _PIF_PER_TRAP is set, call do_per_trap |
342 | # | 342 | # |
343 | sysc_singlestep: | 343 | .Lsysc_singlestep: |
344 | ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP | 344 | ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP |
345 | lgr %r2,%r11 # pass pointer to pt_regs | 345 | lgr %r2,%r11 # pass pointer to pt_regs |
346 | larl %r14,sysc_return | 346 | larl %r14,.Lsysc_return |
347 | jg do_per_trap | 347 | jg do_per_trap |
348 | 348 | ||
349 | # | 349 | # |
350 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before | 350 | # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before |
351 | # and after the system call | 351 | # and after the system call |
352 | # | 352 | # |
353 | sysc_tracesys: | 353 | .Lsysc_tracesys: |
354 | lgr %r2,%r11 # pass pointer to pt_regs | 354 | lgr %r2,%r11 # pass pointer to pt_regs |
355 | la %r3,0 | 355 | la %r3,0 |
356 | llgh %r0,__PT_INT_CODE+2(%r11) | 356 | llgh %r0,__PT_INT_CODE+2(%r11) |
@@ -358,20 +358,20 @@ sysc_tracesys: | |||
358 | brasl %r14,do_syscall_trace_enter | 358 | brasl %r14,do_syscall_trace_enter |
359 | lghi %r0,NR_syscalls | 359 | lghi %r0,NR_syscalls |
360 | clgr %r0,%r2 | 360 | clgr %r0,%r2 |
361 | jnh sysc_tracenogo | 361 | jnh .Lsysc_tracenogo |
362 | sllg %r8,%r2,2 | 362 | sllg %r8,%r2,2 |
363 | lgf %r9,0(%r8,%r10) | 363 | lgf %r9,0(%r8,%r10) |
364 | sysc_tracego: | 364 | .Lsysc_tracego: |
365 | lmg %r3,%r7,__PT_R3(%r11) | 365 | lmg %r3,%r7,__PT_R3(%r11) |
366 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | 366 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
367 | lg %r2,__PT_ORIG_GPR2(%r11) | 367 | lg %r2,__PT_ORIG_GPR2(%r11) |
368 | basr %r14,%r9 # call sys_xxx | 368 | basr %r14,%r9 # call sys_xxx |
369 | stg %r2,__PT_R2(%r11) # store return value | 369 | stg %r2,__PT_R2(%r11) # store return value |
370 | sysc_tracenogo: | 370 | .Lsysc_tracenogo: |
371 | tm __TI_flags+7(%r12),_TIF_TRACE | 371 | tm __TI_flags+7(%r12),_TIF_TRACE |
372 | jz sysc_return | 372 | jz .Lsysc_return |
373 | lgr %r2,%r11 # pass pointer to pt_regs | 373 | lgr %r2,%r11 # pass pointer to pt_regs |
374 | larl %r14,sysc_return | 374 | larl %r14,.Lsysc_return |
375 | jg do_syscall_trace_exit | 375 | jg do_syscall_trace_exit |
376 | 376 | ||
377 | # | 377 | # |
@@ -384,13 +384,13 @@ ENTRY(ret_from_fork) | |||
384 | TRACE_IRQS_ON | 384 | TRACE_IRQS_ON |
385 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 385 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
386 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? | 386 | tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? |
387 | jne sysc_tracenogo | 387 | jne .Lsysc_tracenogo |
388 | # it's a kernel thread | 388 | # it's a kernel thread |
389 | lmg %r9,%r10,__PT_R9(%r11) # load gprs | 389 | lmg %r9,%r10,__PT_R9(%r11) # load gprs |
390 | ENTRY(kernel_thread_starter) | 390 | ENTRY(kernel_thread_starter) |
391 | la %r2,0(%r10) | 391 | la %r2,0(%r10) |
392 | basr %r14,%r9 | 392 | basr %r14,%r9 |
393 | j sysc_tracenogo | 393 | j .Lsysc_tracenogo |
394 | 394 | ||
395 | /* | 395 | /* |
396 | * Program check handler routine | 396 | * Program check handler routine |
@@ -409,7 +409,7 @@ ENTRY(pgm_check_handler) | |||
409 | tmhh %r8,0x4000 # PER bit set in old PSW ? | 409 | tmhh %r8,0x4000 # PER bit set in old PSW ? |
410 | jnz 0f # -> enabled, can't be a double fault | 410 | jnz 0f # -> enabled, can't be a double fault |
411 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 411 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
412 | jnz pgm_svcper # -> single stepped svc | 412 | jnz .Lpgm_svcper # -> single stepped svc |
413 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC | 413 | 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC |
414 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 414 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
415 | j 2f | 415 | j 2f |
@@ -432,7 +432,7 @@ ENTRY(pgm_check_handler) | |||
432 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 432 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
433 | jz 0f | 433 | jz 0f |
434 | tmhh %r8,0x0001 # kernel per event ? | 434 | tmhh %r8,0x0001 # kernel per event ? |
435 | jz pgm_kprobe | 435 | jz .Lpgm_kprobe |
436 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP | 436 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP |
437 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS | 437 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS |
438 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE | 438 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE |
@@ -443,31 +443,31 @@ ENTRY(pgm_check_handler) | |||
443 | llgh %r10,__PT_INT_CODE+2(%r11) | 443 | llgh %r10,__PT_INT_CODE+2(%r11) |
444 | nill %r10,0x007f | 444 | nill %r10,0x007f |
445 | sll %r10,2 | 445 | sll %r10,2 |
446 | je sysc_return | 446 | je .Lsysc_return |
447 | lgf %r1,0(%r10,%r1) # load address of handler routine | 447 | lgf %r1,0(%r10,%r1) # load address of handler routine |
448 | lgr %r2,%r11 # pass pointer to pt_regs | 448 | lgr %r2,%r11 # pass pointer to pt_regs |
449 | basr %r14,%r1 # branch to interrupt-handler | 449 | basr %r14,%r1 # branch to interrupt-handler |
450 | j sysc_return | 450 | j .Lsysc_return |
451 | 451 | ||
452 | # | 452 | # |
453 | # PER event in supervisor state, must be kprobes | 453 | # PER event in supervisor state, must be kprobes |
454 | # | 454 | # |
455 | pgm_kprobe: | 455 | .Lpgm_kprobe: |
456 | REENABLE_IRQS | 456 | REENABLE_IRQS |
457 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 457 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
458 | lgr %r2,%r11 # pass pointer to pt_regs | 458 | lgr %r2,%r11 # pass pointer to pt_regs |
459 | brasl %r14,do_per_trap | 459 | brasl %r14,do_per_trap |
460 | j sysc_return | 460 | j .Lsysc_return |
461 | 461 | ||
462 | # | 462 | # |
463 | # single stepped system call | 463 | # single stepped system call |
464 | # | 464 | # |
465 | pgm_svcper: | 465 | .Lpgm_svcper: |
466 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW | 466 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW |
467 | larl %r14,sysc_per | 467 | larl %r14,.Lsysc_per |
468 | stg %r14,__LC_RETURN_PSW+8 | 468 | stg %r14,__LC_RETURN_PSW+8 |
469 | lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP | 469 | lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP |
470 | lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs | 470 | lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs |
471 | 471 | ||
472 | /* | 472 | /* |
473 | * IO interrupt handler routine | 473 | * IO interrupt handler routine |
@@ -483,10 +483,10 @@ ENTRY(io_int_handler) | |||
483 | HANDLE_SIE_INTERCEPT %r14,2 | 483 | HANDLE_SIE_INTERCEPT %r14,2 |
484 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | 484 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT |
485 | tmhh %r8,0x0001 # interrupting from user? | 485 | tmhh %r8,0x0001 # interrupting from user? |
486 | jz io_skip | 486 | jz .Lio_skip |
487 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER | 487 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER |
488 | LAST_BREAK %r14 | 488 | LAST_BREAK %r14 |
489 | io_skip: | 489 | .Lio_skip: |
490 | stmg %r0,%r7,__PT_R0(%r11) | 490 | stmg %r0,%r7,__PT_R0(%r11) |
491 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | 491 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC |
492 | stmg %r8,%r9,__PT_PSW(%r11) | 492 | stmg %r8,%r9,__PT_PSW(%r11) |
@@ -494,29 +494,29 @@ io_skip: | |||
494 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | 494 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
495 | TRACE_IRQS_OFF | 495 | TRACE_IRQS_OFF |
496 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 496 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
497 | io_loop: | 497 | .Lio_loop: |
498 | lgr %r2,%r11 # pass pointer to pt_regs | 498 | lgr %r2,%r11 # pass pointer to pt_regs |
499 | lghi %r3,IO_INTERRUPT | 499 | lghi %r3,IO_INTERRUPT |
500 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? | 500 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? |
501 | jz io_call | 501 | jz .Lio_call |
502 | lghi %r3,THIN_INTERRUPT | 502 | lghi %r3,THIN_INTERRUPT |
503 | io_call: | 503 | .Lio_call: |
504 | brasl %r14,do_IRQ | 504 | brasl %r14,do_IRQ |
505 | tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR | 505 | tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR |
506 | jz io_return | 506 | jz .Lio_return |
507 | tpi 0 | 507 | tpi 0 |
508 | jz io_return | 508 | jz .Lio_return |
509 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | 509 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID |
510 | j io_loop | 510 | j .Lio_loop |
511 | io_return: | 511 | .Lio_return: |
512 | LOCKDEP_SYS_EXIT | 512 | LOCKDEP_SYS_EXIT |
513 | TRACE_IRQS_ON | 513 | TRACE_IRQS_ON |
514 | io_tif: | 514 | .Lio_tif: |
515 | tm __TI_flags+7(%r12),_TIF_WORK | 515 | tm __TI_flags+7(%r12),_TIF_WORK |
516 | jnz io_work # there is work to do (signals etc.) | 516 | jnz .Lio_work # there is work to do (signals etc.) |
517 | tm __LC_CPU_FLAGS+7,_CIF_WORK | 517 | tm __LC_CPU_FLAGS+7,_CIF_WORK |
518 | jnz io_work | 518 | jnz .Lio_work |
519 | io_restore: | 519 | .Lio_restore: |
520 | lg %r14,__LC_VDSO_PER_CPU | 520 | lg %r14,__LC_VDSO_PER_CPU |
521 | lmg %r0,%r10,__PT_R0(%r11) | 521 | lmg %r0,%r10,__PT_R0(%r11) |
522 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) | 522 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) |
@@ -524,7 +524,7 @@ io_restore: | |||
524 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER | 524 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
525 | lmg %r11,%r15,__PT_R11(%r11) | 525 | lmg %r11,%r15,__PT_R11(%r11) |
526 | lpswe __LC_RETURN_PSW | 526 | lpswe __LC_RETURN_PSW |
527 | io_done: | 527 | .Lio_done: |
528 | 528 | ||
529 | # | 529 | # |
530 | # There is work to do, find out in which context we have been interrupted: | 530 | # There is work to do, find out in which context we have been interrupted: |
@@ -535,15 +535,15 @@ io_done: | |||
535 | # the preemption counter and if it is zero call preempt_schedule_irq | 535 | # the preemption counter and if it is zero call preempt_schedule_irq |
536 | # Before any work can be done, a switch to the kernel stack is required. | 536 | # Before any work can be done, a switch to the kernel stack is required. |
537 | # | 537 | # |
538 | io_work: | 538 | .Lio_work: |
539 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 539 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
540 | jo io_work_user # yes -> do resched & signal | 540 | jo .Lio_work_user # yes -> do resched & signal |
541 | #ifdef CONFIG_PREEMPT | 541 | #ifdef CONFIG_PREEMPT |
542 | # check for preemptive scheduling | 542 | # check for preemptive scheduling |
543 | icm %r0,15,__TI_precount(%r12) | 543 | icm %r0,15,__TI_precount(%r12) |
544 | jnz io_restore # preemption is disabled | 544 | jnz .Lio_restore # preemption is disabled |
545 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 545 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
546 | jno io_restore | 546 | jno .Lio_restore |
547 | # switch to kernel stack | 547 | # switch to kernel stack |
548 | lg %r1,__PT_R15(%r11) | 548 | lg %r1,__PT_R15(%r11) |
549 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 549 | aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
@@ -551,19 +551,19 @@ io_work: | |||
551 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | 551 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
552 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 552 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
553 | lgr %r15,%r1 | 553 | lgr %r15,%r1 |
554 | # TRACE_IRQS_ON already done at io_return, call | 554 | # TRACE_IRQS_ON already done at .Lio_return, call |
555 | # TRACE_IRQS_OFF to keep things symmetrical | 555 | # TRACE_IRQS_OFF to keep things symmetrical |
556 | TRACE_IRQS_OFF | 556 | TRACE_IRQS_OFF |
557 | brasl %r14,preempt_schedule_irq | 557 | brasl %r14,preempt_schedule_irq |
558 | j io_return | 558 | j .Lio_return |
559 | #else | 559 | #else |
560 | j io_restore | 560 | j .Lio_restore |
561 | #endif | 561 | #endif |
562 | 562 | ||
563 | # | 563 | # |
564 | # Need to do work before returning to userspace, switch to kernel stack | 564 | # Need to do work before returning to userspace, switch to kernel stack |
565 | # | 565 | # |
566 | io_work_user: | 566 | .Lio_work_user: |
567 | lg %r1,__LC_KERNEL_STACK | 567 | lg %r1,__LC_KERNEL_STACK |
568 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 568 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
569 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | 569 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
@@ -573,70 +573,70 @@ io_work_user: | |||
573 | # | 573 | # |
574 | # One of the work bits is on. Find out which one. | 574 | # One of the work bits is on. Find out which one. |
575 | # | 575 | # |
576 | io_work_tif: | 576 | .Lio_work_tif: |
577 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | 577 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
578 | jo io_mcck_pending | 578 | jo .Lio_mcck_pending |
579 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 579 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
580 | jo io_reschedule | 580 | jo .Lio_reschedule |
581 | tm __TI_flags+7(%r12),_TIF_SIGPENDING | 581 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
582 | jo io_sigpending | 582 | jo .Lio_sigpending |
583 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 583 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
584 | jo io_notify_resume | 584 | jo .Lio_notify_resume |
585 | tm __LC_CPU_FLAGS+7,_CIF_ASCE | 585 | tm __LC_CPU_FLAGS+7,_CIF_ASCE |
586 | jo io_uaccess | 586 | jo .Lio_uaccess |
587 | j io_return # beware of critical section cleanup | 587 | j .Lio_return # beware of critical section cleanup |
588 | 588 | ||
589 | # | 589 | # |
590 | # _CIF_MCCK_PENDING is set, call handler | 590 | # _CIF_MCCK_PENDING is set, call handler |
591 | # | 591 | # |
592 | io_mcck_pending: | 592 | .Lio_mcck_pending: |
593 | # TRACE_IRQS_ON already done at io_return | 593 | # TRACE_IRQS_ON already done at .Lio_return |
594 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler | 594 | brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler |
595 | TRACE_IRQS_OFF | 595 | TRACE_IRQS_OFF |
596 | j io_return | 596 | j .Lio_return |
597 | 597 | ||
598 | # | 598 | # |
599 | # _CIF_ASCE is set, load user space asce | 599 | # _CIF_ASCE is set, load user space asce |
600 | # | 600 | # |
601 | io_uaccess: | 601 | .Lio_uaccess: |
602 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE | 602 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE |
603 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 603 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
604 | j io_return | 604 | j .Lio_return |
605 | 605 | ||
606 | # | 606 | # |
607 | # _TIF_NEED_RESCHED is set, call schedule | 607 | # _TIF_NEED_RESCHED is set, call schedule |
608 | # | 608 | # |
609 | io_reschedule: | 609 | .Lio_reschedule: |
610 | # TRACE_IRQS_ON already done at io_return | 610 | # TRACE_IRQS_ON already done at .Lio_return |
611 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 611 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
612 | brasl %r14,schedule # call scheduler | 612 | brasl %r14,schedule # call scheduler |
613 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 613 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
614 | TRACE_IRQS_OFF | 614 | TRACE_IRQS_OFF |
615 | j io_return | 615 | j .Lio_return |
616 | 616 | ||
617 | # | 617 | # |
618 | # _TIF_SIGPENDING is set, call do_signal | 618 | # _TIF_SIGPENDING is set, call do_signal |
619 | # | 619 | # |
620 | io_sigpending: | 620 | .Lio_sigpending: |
621 | # TRACE_IRQS_ON already done at io_return | 621 | # TRACE_IRQS_ON already done at .Lio_return |
622 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 622 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
623 | lgr %r2,%r11 # pass pointer to pt_regs | 623 | lgr %r2,%r11 # pass pointer to pt_regs |
624 | brasl %r14,do_signal | 624 | brasl %r14,do_signal |
625 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 625 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
626 | TRACE_IRQS_OFF | 626 | TRACE_IRQS_OFF |
627 | j io_return | 627 | j .Lio_return |
628 | 628 | ||
629 | # | 629 | # |
630 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume | 630 | # _TIF_NOTIFY_RESUME is set, call do_notify_resume |
631 | # | 631 | # |
632 | io_notify_resume: | 632 | .Lio_notify_resume: |
633 | # TRACE_IRQS_ON already done at io_return | 633 | # TRACE_IRQS_ON already done at .Lio_return |
634 | ssm __LC_SVC_NEW_PSW # reenable interrupts | 634 | ssm __LC_SVC_NEW_PSW # reenable interrupts |
635 | lgr %r2,%r11 # pass pointer to pt_regs | 635 | lgr %r2,%r11 # pass pointer to pt_regs |
636 | brasl %r14,do_notify_resume | 636 | brasl %r14,do_notify_resume |
637 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts | 637 | ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts |
638 | TRACE_IRQS_OFF | 638 | TRACE_IRQS_OFF |
639 | j io_return | 639 | j .Lio_return |
640 | 640 | ||
641 | /* | 641 | /* |
642 | * External interrupt handler routine | 642 | * External interrupt handler routine |
@@ -652,10 +652,10 @@ ENTRY(ext_int_handler) | |||
652 | HANDLE_SIE_INTERCEPT %r14,3 | 652 | HANDLE_SIE_INTERCEPT %r14,3 |
653 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT | 653 | SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT |
654 | tmhh %r8,0x0001 # interrupting from user ? | 654 | tmhh %r8,0x0001 # interrupting from user ? |
655 | jz ext_skip | 655 | jz .Lext_skip |
656 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER | 656 | UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER |
657 | LAST_BREAK %r14 | 657 | LAST_BREAK %r14 |
658 | ext_skip: | 658 | .Lext_skip: |
659 | stmg %r0,%r7,__PT_R0(%r11) | 659 | stmg %r0,%r7,__PT_R0(%r11) |
660 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | 660 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC |
661 | stmg %r8,%r9,__PT_PSW(%r11) | 661 | stmg %r8,%r9,__PT_PSW(%r11) |
@@ -669,23 +669,23 @@ ext_skip: | |||
669 | lgr %r2,%r11 # pass pointer to pt_regs | 669 | lgr %r2,%r11 # pass pointer to pt_regs |
670 | lghi %r3,EXT_INTERRUPT | 670 | lghi %r3,EXT_INTERRUPT |
671 | brasl %r14,do_IRQ | 671 | brasl %r14,do_IRQ |
672 | j io_return | 672 | j .Lio_return |
673 | 673 | ||
674 | /* | 674 | /* |
675 | * Load idle PSW. The second "half" of this function is in cleanup_idle. | 675 | * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. |
676 | */ | 676 | */ |
677 | ENTRY(psw_idle) | 677 | ENTRY(psw_idle) |
678 | stg %r3,__SF_EMPTY(%r15) | 678 | stg %r3,__SF_EMPTY(%r15) |
679 | larl %r1,psw_idle_lpsw+4 | 679 | larl %r1,.Lpsw_idle_lpsw+4 |
680 | stg %r1,__SF_EMPTY+8(%r15) | 680 | stg %r1,__SF_EMPTY+8(%r15) |
681 | STCK __CLOCK_IDLE_ENTER(%r2) | 681 | STCK __CLOCK_IDLE_ENTER(%r2) |
682 | stpt __TIMER_IDLE_ENTER(%r2) | 682 | stpt __TIMER_IDLE_ENTER(%r2) |
683 | psw_idle_lpsw: | 683 | .Lpsw_idle_lpsw: |
684 | lpswe __SF_EMPTY(%r15) | 684 | lpswe __SF_EMPTY(%r15) |
685 | br %r14 | 685 | br %r14 |
686 | psw_idle_end: | 686 | .Lpsw_idle_end: |
687 | 687 | ||
688 | __critical_end: | 688 | .L__critical_end: |
689 | 689 | ||
690 | /* | 690 | /* |
691 | * Machine check handler routines | 691 | * Machine check handler routines |
@@ -701,7 +701,7 @@ ENTRY(mcck_int_handler) | |||
701 | lmg %r8,%r9,__LC_MCK_OLD_PSW | 701 | lmg %r8,%r9,__LC_MCK_OLD_PSW |
702 | HANDLE_SIE_INTERCEPT %r14,4 | 702 | HANDLE_SIE_INTERCEPT %r14,4 |
703 | tm __LC_MCCK_CODE,0x80 # system damage? | 703 | tm __LC_MCCK_CODE,0x80 # system damage? |
704 | jo mcck_panic # yes -> rest of mcck code invalid | 704 | jo .Lmcck_panic # yes -> rest of mcck code invalid |
705 | lghi %r14,__LC_CPU_TIMER_SAVE_AREA | 705 | lghi %r14,__LC_CPU_TIMER_SAVE_AREA |
706 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 706 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
707 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 707 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
@@ -719,13 +719,13 @@ ENTRY(mcck_int_handler) | |||
719 | 2: spt 0(%r14) | 719 | 2: spt 0(%r14) |
720 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) | 720 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
721 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | 721 | 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
722 | jno mcck_panic # no -> skip cleanup critical | 722 | jno .Lmcck_panic # no -> skip cleanup critical |
723 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT | 723 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT |
724 | tm %r8,0x0001 # interrupting from user ? | 724 | tm %r8,0x0001 # interrupting from user ? |
725 | jz mcck_skip | 725 | jz .Lmcck_skip |
726 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER | 726 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER |
727 | LAST_BREAK %r14 | 727 | LAST_BREAK %r14 |
728 | mcck_skip: | 728 | .Lmcck_skip: |
729 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 | 729 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 |
730 | stmg %r0,%r7,__PT_R0(%r11) | 730 | stmg %r0,%r7,__PT_R0(%r11) |
731 | mvc __PT_R8(64,%r11),0(%r14) | 731 | mvc __PT_R8(64,%r11),0(%r14) |
@@ -735,7 +735,7 @@ mcck_skip: | |||
735 | lgr %r2,%r11 # pass pointer to pt_regs | 735 | lgr %r2,%r11 # pass pointer to pt_regs |
736 | brasl %r14,s390_do_machine_check | 736 | brasl %r14,s390_do_machine_check |
737 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 737 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
738 | jno mcck_return | 738 | jno .Lmcck_return |
739 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack | 739 | lg %r1,__LC_KERNEL_STACK # switch to kernel stack |
740 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) | 740 | mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) |
741 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) | 741 | xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) |
@@ -743,11 +743,11 @@ mcck_skip: | |||
743 | lgr %r15,%r1 | 743 | lgr %r15,%r1 |
744 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off | 744 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
745 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING | 745 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
746 | jno mcck_return | 746 | jno .Lmcck_return |
747 | TRACE_IRQS_OFF | 747 | TRACE_IRQS_OFF |
748 | brasl %r14,s390_handle_mcck | 748 | brasl %r14,s390_handle_mcck |
749 | TRACE_IRQS_ON | 749 | TRACE_IRQS_ON |
750 | mcck_return: | 750 | .Lmcck_return: |
751 | lg %r14,__LC_VDSO_PER_CPU | 751 | lg %r14,__LC_VDSO_PER_CPU |
752 | lmg %r0,%r10,__PT_R0(%r11) | 752 | lmg %r0,%r10,__PT_R0(%r11) |
753 | mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW | 753 | mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW |
@@ -758,14 +758,14 @@ mcck_return: | |||
758 | 0: lmg %r11,%r15,__PT_R11(%r11) | 758 | 0: lmg %r11,%r15,__PT_R11(%r11) |
759 | lpswe __LC_RETURN_MCCK_PSW | 759 | lpswe __LC_RETURN_MCCK_PSW |
760 | 760 | ||
761 | mcck_panic: | 761 | .Lmcck_panic: |
762 | lg %r14,__LC_PANIC_STACK | 762 | lg %r14,__LC_PANIC_STACK |
763 | slgr %r14,%r15 | 763 | slgr %r14,%r15 |
764 | srag %r14,%r14,PAGE_SHIFT | 764 | srag %r14,%r14,PAGE_SHIFT |
765 | jz 0f | 765 | jz 0f |
766 | lg %r15,__LC_PANIC_STACK | 766 | lg %r15,__LC_PANIC_STACK |
767 | 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) | 767 | 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
768 | j mcck_skip | 768 | j .Lmcck_skip |
769 | 769 | ||
770 | # | 770 | # |
771 | # PSW restart interrupt handler | 771 | # PSW restart interrupt handler |
@@ -815,69 +815,69 @@ stack_overflow: | |||
815 | #endif | 815 | #endif |
816 | 816 | ||
817 | .align 8 | 817 | .align 8 |
818 | cleanup_table: | 818 | .Lcleanup_table: |
819 | .quad system_call | 819 | .quad system_call |
820 | .quad sysc_do_svc | 820 | .quad .Lsysc_do_svc |
821 | .quad sysc_tif | 821 | .quad .Lsysc_tif |
822 | .quad sysc_restore | 822 | .quad .Lsysc_restore |
823 | .quad sysc_done | 823 | .quad .Lsysc_done |
824 | .quad io_tif | 824 | .quad .Lio_tif |
825 | .quad io_restore | 825 | .quad .Lio_restore |
826 | .quad io_done | 826 | .quad .Lio_done |
827 | .quad psw_idle | 827 | .quad psw_idle |
828 | .quad psw_idle_end | 828 | .quad .Lpsw_idle_end |
829 | 829 | ||
830 | cleanup_critical: | 830 | cleanup_critical: |
831 | clg %r9,BASED(cleanup_table) # system_call | 831 | clg %r9,BASED(.Lcleanup_table) # system_call |
832 | jl 0f | 832 | jl 0f |
833 | clg %r9,BASED(cleanup_table+8) # sysc_do_svc | 833 | clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc |
834 | jl cleanup_system_call | 834 | jl .Lcleanup_system_call |
835 | clg %r9,BASED(cleanup_table+16) # sysc_tif | 835 | clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif |
836 | jl 0f | 836 | jl 0f |
837 | clg %r9,BASED(cleanup_table+24) # sysc_restore | 837 | clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore |
838 | jl cleanup_sysc_tif | 838 | jl .Lcleanup_sysc_tif |
839 | clg %r9,BASED(cleanup_table+32) # sysc_done | 839 | clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done |
840 | jl cleanup_sysc_restore | 840 | jl .Lcleanup_sysc_restore |
841 | clg %r9,BASED(cleanup_table+40) # io_tif | 841 | clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif |
842 | jl 0f | 842 | jl 0f |
843 | clg %r9,BASED(cleanup_table+48) # io_restore | 843 | clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore |
844 | jl cleanup_io_tif | 844 | jl .Lcleanup_io_tif |
845 | clg %r9,BASED(cleanup_table+56) # io_done | 845 | clg %r9,BASED(.Lcleanup_table+56) # .Lio_done |
846 | jl cleanup_io_restore | 846 | jl .Lcleanup_io_restore |
847 | clg %r9,BASED(cleanup_table+64) # psw_idle | 847 | clg %r9,BASED(.Lcleanup_table+64) # psw_idle |
848 | jl 0f | 848 | jl 0f |
849 | clg %r9,BASED(cleanup_table+72) # psw_idle_end | 849 | clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end |
850 | jl cleanup_idle | 850 | jl .Lcleanup_idle |
851 | 0: br %r14 | 851 | 0: br %r14 |
852 | 852 | ||
853 | 853 | ||
854 | cleanup_system_call: | 854 | .Lcleanup_system_call: |
855 | # check if stpt has been executed | 855 | # check if stpt has been executed |
856 | clg %r9,BASED(cleanup_system_call_insn) | 856 | clg %r9,BASED(.Lcleanup_system_call_insn) |
857 | jh 0f | 857 | jh 0f |
858 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 858 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
859 | cghi %r11,__LC_SAVE_AREA_ASYNC | 859 | cghi %r11,__LC_SAVE_AREA_ASYNC |
860 | je 0f | 860 | je 0f |
861 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER | 861 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER |
862 | 0: # check if stmg has been executed | 862 | 0: # check if stmg has been executed |
863 | clg %r9,BASED(cleanup_system_call_insn+8) | 863 | clg %r9,BASED(.Lcleanup_system_call_insn+8) |
864 | jh 0f | 864 | jh 0f |
865 | mvc __LC_SAVE_AREA_SYNC(64),0(%r11) | 865 | mvc __LC_SAVE_AREA_SYNC(64),0(%r11) |
866 | 0: # check if base register setup + TIF bit load has been done | 866 | 0: # check if base register setup + TIF bit load has been done |
867 | clg %r9,BASED(cleanup_system_call_insn+16) | 867 | clg %r9,BASED(.Lcleanup_system_call_insn+16) |
868 | jhe 0f | 868 | jhe 0f |
869 | # set up saved registers r10 and r12 | 869 | # set up saved registers r10 and r12 |
870 | stg %r10,16(%r11) # r10 last break | 870 | stg %r10,16(%r11) # r10 last break |
871 | stg %r12,32(%r11) # r12 thread-info pointer | 871 | stg %r12,32(%r11) # r12 thread-info pointer |
872 | 0: # check if the user time update has been done | 872 | 0: # check if the user time update has been done |
873 | clg %r9,BASED(cleanup_system_call_insn+24) | 873 | clg %r9,BASED(.Lcleanup_system_call_insn+24) |
874 | jh 0f | 874 | jh 0f |
875 | lg %r15,__LC_EXIT_TIMER | 875 | lg %r15,__LC_EXIT_TIMER |
876 | slg %r15,__LC_SYNC_ENTER_TIMER | 876 | slg %r15,__LC_SYNC_ENTER_TIMER |
877 | alg %r15,__LC_USER_TIMER | 877 | alg %r15,__LC_USER_TIMER |
878 | stg %r15,__LC_USER_TIMER | 878 | stg %r15,__LC_USER_TIMER |
879 | 0: # check if the system time update has been done | 879 | 0: # check if the system time update has been done |
880 | clg %r9,BASED(cleanup_system_call_insn+32) | 880 | clg %r9,BASED(.Lcleanup_system_call_insn+32) |
881 | jh 0f | 881 | jh 0f |
882 | lg %r15,__LC_LAST_UPDATE_TIMER | 882 | lg %r15,__LC_LAST_UPDATE_TIMER |
883 | slg %r15,__LC_EXIT_TIMER | 883 | slg %r15,__LC_EXIT_TIMER |
@@ -904,21 +904,21 @@ cleanup_system_call: | |||
904 | # setup saved register r15 | 904 | # setup saved register r15 |
905 | stg %r15,56(%r11) # r15 stack pointer | 905 | stg %r15,56(%r11) # r15 stack pointer |
906 | # set new psw address and exit | 906 | # set new psw address and exit |
907 | larl %r9,sysc_do_svc | 907 | larl %r9,.Lsysc_do_svc |
908 | br %r14 | 908 | br %r14 |
909 | cleanup_system_call_insn: | 909 | .Lcleanup_system_call_insn: |
910 | .quad system_call | 910 | .quad system_call |
911 | .quad sysc_stmg | 911 | .quad .Lsysc_stmg |
912 | .quad sysc_per | 912 | .quad .Lsysc_per |
913 | .quad sysc_vtime+18 | 913 | .quad .Lsysc_vtime+18 |
914 | .quad sysc_vtime+42 | 914 | .quad .Lsysc_vtime+42 |
915 | 915 | ||
916 | cleanup_sysc_tif: | 916 | .Lcleanup_sysc_tif: |
917 | larl %r9,sysc_tif | 917 | larl %r9,.Lsysc_tif |
918 | br %r14 | 918 | br %r14 |
919 | 919 | ||
920 | cleanup_sysc_restore: | 920 | .Lcleanup_sysc_restore: |
921 | clg %r9,BASED(cleanup_sysc_restore_insn) | 921 | clg %r9,BASED(.Lcleanup_sysc_restore_insn) |
922 | je 0f | 922 | je 0f |
923 | lg %r9,24(%r11) # get saved pointer to pt_regs | 923 | lg %r9,24(%r11) # get saved pointer to pt_regs |
924 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) | 924 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
@@ -926,15 +926,15 @@ cleanup_sysc_restore: | |||
926 | lmg %r0,%r7,__PT_R0(%r9) | 926 | lmg %r0,%r7,__PT_R0(%r9) |
927 | 0: lmg %r8,%r9,__LC_RETURN_PSW | 927 | 0: lmg %r8,%r9,__LC_RETURN_PSW |
928 | br %r14 | 928 | br %r14 |
929 | cleanup_sysc_restore_insn: | 929 | .Lcleanup_sysc_restore_insn: |
930 | .quad sysc_done - 4 | 930 | .quad .Lsysc_done - 4 |
931 | 931 | ||
932 | cleanup_io_tif: | 932 | .Lcleanup_io_tif: |
933 | larl %r9,io_tif | 933 | larl %r9,.Lio_tif |
934 | br %r14 | 934 | br %r14 |
935 | 935 | ||
936 | cleanup_io_restore: | 936 | .Lcleanup_io_restore: |
937 | clg %r9,BASED(cleanup_io_restore_insn) | 937 | clg %r9,BASED(.Lcleanup_io_restore_insn) |
938 | je 0f | 938 | je 0f |
939 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs | 939 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs |
940 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) | 940 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
@@ -942,10 +942,10 @@ cleanup_io_restore: | |||
942 | lmg %r0,%r7,__PT_R0(%r9) | 942 | lmg %r0,%r7,__PT_R0(%r9) |
943 | 0: lmg %r8,%r9,__LC_RETURN_PSW | 943 | 0: lmg %r8,%r9,__LC_RETURN_PSW |
944 | br %r14 | 944 | br %r14 |
945 | cleanup_io_restore_insn: | 945 | .Lcleanup_io_restore_insn: |
946 | .quad io_done - 4 | 946 | .quad .Lio_done - 4 |
947 | 947 | ||
948 | cleanup_idle: | 948 | .Lcleanup_idle: |
949 | # copy interrupt clock & cpu timer | 949 | # copy interrupt clock & cpu timer |
950 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK | 950 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK |
951 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER | 951 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER |
@@ -954,7 +954,7 @@ cleanup_idle: | |||
954 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK | 954 | mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK |
955 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER | 955 | mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER |
956 | 0: # check if stck & stpt have been executed | 956 | 0: # check if stck & stpt have been executed |
957 | clg %r9,BASED(cleanup_idle_insn) | 957 | clg %r9,BASED(.Lcleanup_idle_insn) |
958 | jhe 1f | 958 | jhe 1f |
959 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) | 959 | mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) |
960 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) | 960 | mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) |
@@ -973,17 +973,17 @@ cleanup_idle: | |||
973 | nihh %r8,0xfcfd # clear irq & wait state bits | 973 | nihh %r8,0xfcfd # clear irq & wait state bits |
974 | lg %r9,48(%r11) # return from psw_idle | 974 | lg %r9,48(%r11) # return from psw_idle |
975 | br %r14 | 975 | br %r14 |
976 | cleanup_idle_insn: | 976 | .Lcleanup_idle_insn: |
977 | .quad psw_idle_lpsw | 977 | .quad .Lpsw_idle_lpsw |
978 | 978 | ||
979 | /* | 979 | /* |
980 | * Integer constants | 980 | * Integer constants |
981 | */ | 981 | */ |
982 | .align 8 | 982 | .align 8 |
983 | .Lcritical_start: | 983 | .Lcritical_start: |
984 | .quad __critical_start | 984 | .quad .L__critical_start |
985 | .Lcritical_length: | 985 | .Lcritical_length: |
986 | .quad __critical_end - __critical_start | 986 | .quad .L__critical_end - .L__critical_start |
987 | 987 | ||
988 | 988 | ||
989 | #if IS_ENABLED(CONFIG_KVM) | 989 | #if IS_ENABLED(CONFIG_KVM) |
@@ -1000,25 +1000,25 @@ ENTRY(sie64a) | |||
1000 | lmg %r0,%r13,0(%r3) # load guest gprs 0-13 | 1000 | lmg %r0,%r13,0(%r3) # load guest gprs 0-13 |
1001 | lg %r14,__LC_GMAP # get gmap pointer | 1001 | lg %r14,__LC_GMAP # get gmap pointer |
1002 | ltgr %r14,%r14 | 1002 | ltgr %r14,%r14 |
1003 | jz sie_gmap | 1003 | jz .Lsie_gmap |
1004 | lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce | 1004 | lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce |
1005 | sie_gmap: | 1005 | .Lsie_gmap: |
1006 | lg %r14,__SF_EMPTY(%r15) # get control block pointer | 1006 | lg %r14,__SF_EMPTY(%r15) # get control block pointer |
1007 | oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now | 1007 | oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now |
1008 | tm __SIE_PROG20+3(%r14),1 # last exit... | 1008 | tm __SIE_PROG20+3(%r14),1 # last exit... |
1009 | jnz sie_done | 1009 | jnz .Lsie_done |
1010 | LPP __SF_EMPTY(%r15) # set guest id | 1010 | LPP __SF_EMPTY(%r15) # set guest id |
1011 | sie 0(%r14) | 1011 | sie 0(%r14) |
1012 | sie_done: | 1012 | .Lsie_done: |
1013 | LPP __SF_EMPTY+16(%r15) # set host id | 1013 | LPP __SF_EMPTY+16(%r15) # set host id |
1014 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE | 1014 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE |
1015 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 1015 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
1016 | # some program checks are suppressing. C code (e.g. do_protection_exception) | 1016 | # some program checks are suppressing. C code (e.g. do_protection_exception) |
1017 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other | 1017 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other |
1018 | # instructions between sie64a and sie_done should not cause program | 1018 | # instructions between sie64a and .Lsie_done should not cause program |
1019 | # interrupts. So let's use a nop (47 00 00 00) as a landing pad. | 1019 | # interrupts. So let's use a nop (47 00 00 00) as a landing pad. |
1020 | # See also HANDLE_SIE_INTERCEPT | 1020 | # See also HANDLE_SIE_INTERCEPT |
1021 | rewind_pad: | 1021 | .Lrewind_pad: |
1022 | nop 0 | 1022 | nop 0 |
1023 | .globl sie_exit | 1023 | .globl sie_exit |
1024 | sie_exit: | 1024 | sie_exit: |
@@ -1027,19 +1027,19 @@ sie_exit: | |||
1027 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers | 1027 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers |
1028 | lg %r2,__SF_EMPTY+24(%r15) # return exit reason code | 1028 | lg %r2,__SF_EMPTY+24(%r15) # return exit reason code |
1029 | br %r14 | 1029 | br %r14 |
1030 | sie_fault: | 1030 | .Lsie_fault: |
1031 | lghi %r14,-EFAULT | 1031 | lghi %r14,-EFAULT |
1032 | stg %r14,__SF_EMPTY+24(%r15) # set exit reason code | 1032 | stg %r14,__SF_EMPTY+24(%r15) # set exit reason code |
1033 | j sie_exit | 1033 | j sie_exit |
1034 | 1034 | ||
1035 | .align 8 | 1035 | .align 8 |
1036 | .Lsie_critical: | 1036 | .Lsie_critical: |
1037 | .quad sie_gmap | 1037 | .quad .Lsie_gmap |
1038 | .Lsie_critical_length: | 1038 | .Lsie_critical_length: |
1039 | .quad sie_done - sie_gmap | 1039 | .quad .Lsie_done - .Lsie_gmap |
1040 | 1040 | ||
1041 | EX_TABLE(rewind_pad,sie_fault) | 1041 | EX_TABLE(.Lrewind_pad,.Lsie_fault) |
1042 | EX_TABLE(sie_exit,sie_fault) | 1042 | EX_TABLE(sie_exit,.Lsie_fault) |
1043 | #endif | 1043 | #endif |
1044 | 1044 | ||
1045 | .section .rodata, "a" | 1045 | .section .rodata, "a" |
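As a reading aid for the cleanup_critical dispatch above: it classifies the interrupted instruction address in %r9 against the ordered label addresses in .Lcleanup_table and picks the matching fixup routine. A rough C rendering, with identifiers standing in for the entry64.S labels (purely illustrative):

/* Each extern stands for a label address from .Lcleanup_table. */
extern const unsigned long system_call_addr, sysc_do_svc_addr, sysc_tif_addr,
	sysc_restore_addr, sysc_done_addr, io_tif_addr, io_restore_addr,
	io_done_addr, psw_idle_addr, psw_idle_end_addr;
extern void cleanup_system_call(void), cleanup_sysc_tif(void),
	cleanup_sysc_restore(void), cleanup_io_tif(void),
	cleanup_io_restore(void), cleanup_idle(void);

static void cleanup_critical_sketch(unsigned long r9)
{
	if (r9 < system_call_addr)	/* before the critical section */
		return;
	if (r9 < sysc_do_svc_addr)	{ cleanup_system_call(); return; }
	if (r9 < sysc_tif_addr)		return;	/* nothing to fix up here */
	if (r9 < sysc_restore_addr)	{ cleanup_sysc_tif(); return; }
	if (r9 < sysc_done_addr)	{ cleanup_sysc_restore(); return; }
	if (r9 < io_tif_addr)		return;
	if (r9 < io_restore_addr)	{ cleanup_io_tif(); return; }
	if (r9 < io_done_addr)		{ cleanup_io_restore(); return; }
	if (r9 < psw_idle_addr)		return;
	if (r9 < psw_idle_end_addr)	cleanup_idle();
}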
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index ca1cabb3a96c..b86bb8823f15 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/moduleloader.h> | ||
10 | #include <linux/hardirq.h> | 11 | #include <linux/hardirq.h> |
11 | #include <linux/uaccess.h> | 12 | #include <linux/uaccess.h> |
12 | #include <linux/ftrace.h> | 13 | #include <linux/ftrace.h> |
@@ -15,60 +16,39 @@ | |||
15 | #include <linux/kprobes.h> | 16 | #include <linux/kprobes.h> |
16 | #include <trace/syscall.h> | 17 | #include <trace/syscall.h> |
17 | #include <asm/asm-offsets.h> | 18 | #include <asm/asm-offsets.h> |
19 | #include <asm/cacheflush.h> | ||
18 | #include "entry.h" | 20 | #include "entry.h" |
19 | 21 | ||
20 | void mcount_replace_code(void); | ||
21 | void ftrace_disable_code(void); | ||
22 | void ftrace_enable_insn(void); | ||
23 | |||
24 | /* | 22 | /* |
25 | * The mcount code looks like this: | 23 | * The mcount code looks like this: |
26 | * stg %r14,8(%r15) # offset 0 | 24 | * stg %r14,8(%r15) # offset 0 |
27 | * larl %r1,<&counter> # offset 6 | 25 | * larl %r1,<&counter> # offset 6 |
28 | * brasl %r14,_mcount # offset 12 | 26 | * brasl %r14,_mcount # offset 12 |
29 | * lg %r14,8(%r15) # offset 18 | 27 | * lg %r14,8(%r15) # offset 18 |
30 | * Total length is 24 bytes. The complete mcount block initially gets replaced | 28 | * Total length is 24 bytes. Only the first instruction will be patched |
31 | * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop | 29 | * by ftrace_make_call / ftrace_make_nop. |
32 | * only patch the jg/lg instruction within the block. | ||
33 | * Note: we do not patch the first instruction to an unconditional branch, | ||
34 | * since that would break kprobes/jprobes. It is easier to leave the larl | ||
35 | * instruction in and only modify the second instruction. | ||
36 | * The enabled ftrace code block looks like this: | 30 | * The enabled ftrace code block looks like this: |
37 | * larl %r0,.+24 # offset 0 | 31 | * > brasl %r0,ftrace_caller # offset 0 |
38 | * > lg %r1,__LC_FTRACE_FUNC # offset 6 | 32 | * larl %r1,<&counter> # offset 6 |
39 | * br %r1 # offset 12 | 33 | * brasl %r14,_mcount # offset 12 |
40 | * brcl 0,0 # offset 14 | 34 | * lg %r14,8(%r15) # offset 18 |
41 | * brc 0,0 # offset 20 | ||
42 | * The ftrace function gets called with a non-standard C function call ABI | 35 | * The ftrace function gets called with a non-standard C function call ABI |
43 | * where r0 contains the return address. It is also expected that the called | 36 | * where r0 contains the return address. It is also expected that the called |
44 | * function only clobbers r0 and r1, but restores r2-r15. | 37 | * function only clobbers r0 and r1, but restores r2-r15. |
38 | * For module code we can't directly jump to ftrace caller, but need a | ||
39 | * trampoline (ftrace_plt), which also clobbers r1. | ||
45 | * The return point of the ftrace function has offset 24, so execution | 40 | * The return point of the ftrace function has offset 24, so execution |
46 | * continues behind the mcount block. | 41 | * continues behind the mcount block. |
47 | * larl %r0,.+24 # offset 0 | 42 | * The disabled ftrace code block looks like this: |
48 | * > jg .+18 # offset 6 | 43 | * > jg .+24 # offset 0 |
49 | * br %r1 # offset 12 | 44 | * larl %r1,<&counter> # offset 6 |
50 | * brcl 0,0 # offset 14 | 45 | * brasl %r14,_mcount # offset 12 |
51 | * brc 0,0 # offset 20 | 46 | * lg %r14,8(%r15) # offset 18 |
52 | * The jg instruction branches to offset 24 to skip as many instructions | 47 | * The jg instruction branches to offset 24 to skip as many instructions |
53 | * as possible. | 48 | * as possible. |
54 | */ | 49 | */ |
55 | asm( | 50 | |
56 | " .align 4\n" | 51 | unsigned long ftrace_plt; |
57 | "mcount_replace_code:\n" | ||
58 | " larl %r0,0f\n" | ||
59 | "ftrace_disable_code:\n" | ||
60 | " jg 0f\n" | ||
61 | " br %r1\n" | ||
62 | " brcl 0,0\n" | ||
63 | " brc 0,0\n" | ||
64 | "0:\n" | ||
65 | " .align 4\n" | ||
66 | "ftrace_enable_insn:\n" | ||
67 | " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"); | ||
68 | |||
69 | #define MCOUNT_BLOCK_SIZE 24 | ||
70 | #define MCOUNT_INSN_OFFSET 6 | ||
71 | #define FTRACE_INSN_SIZE 6 | ||
72 | 52 | ||
73 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | 53 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, |
74 | unsigned long addr) | 54 | unsigned long addr) |
@@ -79,24 +59,62 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | |||
79 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | 59 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, |
80 | unsigned long addr) | 60 | unsigned long addr) |
81 | { | 61 | { |
82 | /* Initial replacement of the whole mcount block */ | 62 | struct ftrace_insn insn; |
83 | if (addr == MCOUNT_ADDR) { | 63 | unsigned short op; |
84 | if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET, | 64 | void *from, *to; |
85 | mcount_replace_code, | 65 | size_t size; |
86 | MCOUNT_BLOCK_SIZE)) | 66 | |
87 | return -EPERM; | 67 | ftrace_generate_nop_insn(&insn); |
88 | return 0; | 68 | size = sizeof(insn); |
69 | from = &insn; | ||
70 | to = (void *) rec->ip; | ||
71 | if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op))) | ||
72 | return -EFAULT; | ||
73 | /* | ||
74 | * If we find a breakpoint instruction, a kprobe has been placed | ||
75 | * at the beginning of the function. We write the constant | ||
76 | * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original | ||
77 | * instruction so that the kprobes handler can execute a nop, if it | ||
78 | * reaches this breakpoint. | ||
79 | */ | ||
80 | if (op == BREAKPOINT_INSTRUCTION) { | ||
81 | size -= 2; | ||
82 | from += 2; | ||
83 | to += 2; | ||
84 | insn.disp = KPROBE_ON_FTRACE_NOP; | ||
89 | } | 85 | } |
90 | if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, | 86 | if (probe_kernel_write(to, from, size)) |
91 | MCOUNT_INSN_SIZE)) | ||
92 | return -EPERM; | 87 | return -EPERM; |
93 | return 0; | 88 | return 0; |
94 | } | 89 | } |
95 | 90 | ||
96 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | 91 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
97 | { | 92 | { |
98 | if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn, | 93 | struct ftrace_insn insn; |
99 | FTRACE_INSN_SIZE)) | 94 | unsigned short op; |
95 | void *from, *to; | ||
96 | size_t size; | ||
97 | |||
98 | ftrace_generate_call_insn(&insn, rec->ip); | ||
99 | size = sizeof(insn); | ||
100 | from = &insn; | ||
101 | to = (void *) rec->ip; | ||
102 | if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op))) | ||
103 | return -EFAULT; | ||
104 | /* | ||
105 | * If we find a breakpoint instruction, a kprobe has been placed | ||
106 | * at the beginning of the function. We write the constant | ||
107 | * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original | ||
108 | * instruction so that the kprobes handler can execute a brasl if it | ||
109 | * reaches this breakpoint. | ||
110 | */ | ||
111 | if (op == BREAKPOINT_INSTRUCTION) { | ||
112 | size -= 2; | ||
113 | from += 2; | ||
114 | to += 2; | ||
115 | insn.disp = KPROBE_ON_FTRACE_CALL; | ||
116 | } | ||
117 | if (probe_kernel_write(to, from, size)) | ||
100 | return -EPERM; | 118 | return -EPERM; |
101 | return 0; | 119 | return 0; |
102 | } | 120 | } |
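struct ftrace_insn and the ftrace_generate_nop_insn()/ftrace_generate_call_insn() helpers used above are defined outside this diff (in asm/ftrace.h). A sketch of what they look like, assuming the 6-byte jg/brasl format of a 2-byte opcode plus a signed 32-bit halfword displacement — which is also why the kprobes special case above can overwrite just the 4-byte disp field:

/* Sketch; the actual definitions live in arch/s390/include/asm/ftrace.h. */
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
	insn->opc = 0xc0f4;			/* jg .+24, skip the mcount block */
	insn->disp = MCOUNT_INSN_SIZE / 2;	/* displacement is in halfwords */
}

static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
					     unsigned long ip)
{
	unsigned long target;

	/* brasl reaches only +-4GB, so module code is routed via ftrace_plt */
	target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
	insn->opc = 0xc005;			/* brasl %r0,<target> */
	insn->disp = (target - ip) / 2;
}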
@@ -111,13 +129,30 @@ int __init ftrace_dyn_arch_init(void) | |||
111 | return 0; | 129 | return 0; |
112 | } | 130 | } |
113 | 131 | ||
132 | static int __init ftrace_plt_init(void) | ||
133 | { | ||
134 | unsigned int *ip; | ||
135 | |||
136 | ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE); | ||
137 | if (!ftrace_plt) | ||
138 | panic("cannot allocate ftrace plt\n"); | ||
139 | ip = (unsigned int *) ftrace_plt; | ||
140 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ | ||
141 | ip[1] = 0x100a0004; | ||
142 | ip[2] = 0x07f10000; | ||
143 | ip[3] = FTRACE_ADDR >> 32; | ||
144 | ip[4] = FTRACE_ADDR & 0xffffffff; | ||
145 | set_memory_ro(ftrace_plt, 1); | ||
146 | return 0; | ||
147 | } | ||
148 | device_initcall(ftrace_plt_init); | ||
149 | |||
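The five words written into the page decode as follows (annotated as a reading aid):

/*
 * ftrace_plt trampoline layout, byte offsets from the start of the page:
 *   0: 0d10            basr %r1,0       r1 = address of the next insn (+2)
 *   2: e310 100a 0004  lg   %r1,10(%r1) load the 8-byte literal at offset 12
 *   8: 07f1            br   %r1         branch to ftrace_caller
 *  10: 0000            padding
 *  12: .quad FTRACE_ADDR                literal read by the lg above
 */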
114 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 150 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
115 | /* | 151 | /* |
116 | * Hook the return address and push it in the stack of return addresses | 152 | * Hook the return address and push it in the stack of return addresses |
117 | * in current thread info. | 153 | * in current thread info. |
118 | */ | 154 | */ |
119 | unsigned long __kprobes prepare_ftrace_return(unsigned long parent, | 155 | unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) |
120 | unsigned long ip) | ||
121 | { | 156 | { |
122 | struct ftrace_graph_ent trace; | 157 | struct ftrace_graph_ent trace; |
123 | 158 | ||
@@ -137,6 +172,7 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent, | |||
137 | out: | 172 | out: |
138 | return parent; | 173 | return parent; |
139 | } | 174 | } |
175 | NOKPROBE_SYMBOL(prepare_ftrace_return); | ||
140 | 176 | ||
141 | /* | 177 | /* |
142 | * Patch the kernel code at ftrace_graph_caller location. The instruction | 178 | * Patch the kernel code at ftrace_graph_caller location. The instruction |
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index 7559f1beab29..7a55c29b0b33 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | 20 | static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); |
21 | 21 | ||
22 | void __kprobes enabled_wait(void) | 22 | void enabled_wait(void) |
23 | { | 23 | { |
24 | struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); | 24 | struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); |
25 | unsigned long long idle_time; | 25 | unsigned long long idle_time; |
@@ -35,31 +35,32 @@ void __kprobes enabled_wait(void) | |||
35 | /* Call the assembler magic in entry.S */ | 35 | /* Call the assembler magic in entry.S */ |
36 | psw_idle(idle, psw_mask); | 36 | psw_idle(idle, psw_mask); |
37 | 37 | ||
38 | trace_hardirqs_off(); | ||
39 | |||
38 | /* Account time spent with enabled wait psw loaded as idle time. */ | 40 | /* Account time spent with enabled wait psw loaded as idle time. */ |
39 | idle->sequence++; | 41 | write_seqcount_begin(&idle->seqcount); |
40 | smp_wmb(); | ||
41 | idle_time = idle->clock_idle_exit - idle->clock_idle_enter; | 42 | idle_time = idle->clock_idle_exit - idle->clock_idle_enter; |
42 | idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; | 43 | idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; |
43 | idle->idle_time += idle_time; | 44 | idle->idle_time += idle_time; |
44 | idle->idle_count++; | 45 | idle->idle_count++; |
45 | account_idle_time(idle_time); | 46 | account_idle_time(idle_time); |
46 | smp_wmb(); | 47 | write_seqcount_end(&idle->seqcount); |
47 | idle->sequence++; | ||
48 | } | 48 | } |
49 | NOKPROBE_SYMBOL(enabled_wait); | ||
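
The conversion replaces the open-coded even/odd sequence counter with the generic seqcount API: the writer brackets its updates with write_seqcount_begin()/write_seqcount_end(), and the lockless readers further down retry whenever they raced with an update. A minimal, self-contained sketch of the pattern (generic <linux/seqlock.h> usage, nothing s390-specific):

#include <linux/seqlock.h>

static seqcount_t demo_seqcount = SEQCNT_ZERO(demo_seqcount);
static u64 demo_value;

static void demo_write(u64 v)		/* single writer */
{
	write_seqcount_begin(&demo_seqcount);
	demo_value = v;
	write_seqcount_end(&demo_seqcount);
}

static u64 demo_read(void)		/* lockless reader, retries on race */
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqcount_begin(&demo_seqcount);
		v = demo_value;
	} while (read_seqcount_retry(&demo_seqcount, seq));
	return v;
}
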
49 | 50 | ||
50 | static ssize_t show_idle_count(struct device *dev, | 51 | static ssize_t show_idle_count(struct device *dev, |
51 | struct device_attribute *attr, char *buf) | 52 | struct device_attribute *attr, char *buf) |
52 | { | 53 | { |
53 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); | 54 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); |
54 | unsigned long long idle_count; | 55 | unsigned long long idle_count; |
55 | unsigned int sequence; | 56 | unsigned int seq; |
56 | 57 | ||
57 | do { | 58 | do { |
58 | sequence = ACCESS_ONCE(idle->sequence); | 59 | seq = read_seqcount_begin(&idle->seqcount); |
59 | idle_count = ACCESS_ONCE(idle->idle_count); | 60 | idle_count = ACCESS_ONCE(idle->idle_count); |
60 | if (ACCESS_ONCE(idle->clock_idle_enter)) | 61 | if (ACCESS_ONCE(idle->clock_idle_enter)) |
61 | idle_count++; | 62 | idle_count++; |
62 | } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); | 63 | } while (read_seqcount_retry(&idle->seqcount, seq)); |
63 | return sprintf(buf, "%llu\n", idle_count); | 64 | return sprintf(buf, "%llu\n", idle_count); |
64 | } | 65 | } |
65 | DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); | 66 | DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); |
@@ -69,15 +70,15 @@ static ssize_t show_idle_time(struct device *dev, | |||
69 | { | 70 | { |
70 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); | 71 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); |
71 | unsigned long long now, idle_time, idle_enter, idle_exit; | 72 | unsigned long long now, idle_time, idle_enter, idle_exit; |
72 | unsigned int sequence; | 73 | unsigned int seq; |
73 | 74 | ||
74 | do { | 75 | do { |
75 | now = get_tod_clock(); | 76 | now = get_tod_clock(); |
76 | sequence = ACCESS_ONCE(idle->sequence); | 77 | seq = read_seqcount_begin(&idle->seqcount); |
77 | idle_time = ACCESS_ONCE(idle->idle_time); | 78 | idle_time = ACCESS_ONCE(idle->idle_time); |
78 | idle_enter = ACCESS_ONCE(idle->clock_idle_enter); | 79 | idle_enter = ACCESS_ONCE(idle->clock_idle_enter); |
79 | idle_exit = ACCESS_ONCE(idle->clock_idle_exit); | 80 | idle_exit = ACCESS_ONCE(idle->clock_idle_exit); |
80 | } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); | 81 | } while (read_seqcount_retry(&idle->seqcount, seq)); |
81 | idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; | 82 | idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; |
82 | return sprintf(buf, "%llu\n", idle_time >> 12); | 83 | return sprintf(buf, "%llu\n", idle_time >> 12); |
83 | } | 84 | } |
@@ -87,14 +88,14 @@ cputime64_t arch_cpu_idle_time(int cpu) | |||
87 | { | 88 | { |
88 | struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); | 89 | struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); |
89 | unsigned long long now, idle_enter, idle_exit; | 90 | unsigned long long now, idle_enter, idle_exit; |
90 | unsigned int sequence; | 91 | unsigned int seq; |
91 | 92 | ||
92 | do { | 93 | do { |
93 | now = get_tod_clock(); | 94 | now = get_tod_clock(); |
94 | sequence = ACCESS_ONCE(idle->sequence); | 95 | seq = read_seqcount_begin(&idle->seqcount); |
95 | idle_enter = ACCESS_ONCE(idle->clock_idle_enter); | 96 | idle_enter = ACCESS_ONCE(idle->clock_idle_enter); |
96 | idle_exit = ACCESS_ONCE(idle->clock_idle_exit); | 97 | idle_exit = ACCESS_ONCE(idle->clock_idle_exit); |
97 | } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); | 98 | } while (read_seqcount_retry(&idle->seqcount, seq)); |
98 | return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0; | 99 | return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0; |
99 | } | 100 | } |
100 | 101 | ||
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1b8a38ab7861..f238720690f3 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -127,13 +127,10 @@ int show_interrupts(struct seq_file *p, void *v) | |||
127 | for_each_online_cpu(cpu) | 127 | for_each_online_cpu(cpu) |
128 | seq_printf(p, "CPU%d ", cpu); | 128 | seq_printf(p, "CPU%d ", cpu); |
129 | seq_putc(p, '\n'); | 129 | seq_putc(p, '\n'); |
130 | goto out; | ||
131 | } | 130 | } |
132 | if (index < NR_IRQS) { | 131 | if (index < NR_IRQS) { |
133 | if (index >= NR_IRQS_BASE) | 132 | if (index >= NR_IRQS_BASE) |
134 | goto out; | 133 | goto out; |
135 | /* Adjust index to process irqclass_main_desc array entries */ | ||
136 | index--; | ||
137 | seq_printf(p, "%s: ", irqclass_main_desc[index].name); | 134 | seq_printf(p, "%s: ", irqclass_main_desc[index].name); |
138 | irq = irqclass_main_desc[index].irq; | 135 | irq = irqclass_main_desc[index].irq; |
139 | for_each_online_cpu(cpu) | 136 | for_each_online_cpu(cpu) |
@@ -158,7 +155,7 @@ out: | |||
158 | 155 | ||
159 | unsigned int arch_dynirq_lower_bound(unsigned int from) | 156 | unsigned int arch_dynirq_lower_bound(unsigned int from) |
160 | { | 157 | { |
161 | return from < THIN_INTERRUPT ? THIN_INTERRUPT : from; | 158 | return from < NR_IRQS_BASE ? NR_IRQS_BASE : from; |
162 | } | 159 | } |
163 | 160 | ||
164 | /* | 161 | /* |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 014d4729b134..1e4c710dfb92 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/hardirq.h> | 31 | #include <linux/hardirq.h> |
32 | #include <linux/ftrace.h> | ||
32 | #include <asm/cacheflush.h> | 33 | #include <asm/cacheflush.h> |
33 | #include <asm/sections.h> | 34 | #include <asm/sections.h> |
34 | #include <asm/dis.h> | 35 | #include <asm/dis.h> |
@@ -58,12 +59,23 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = { | |||
58 | .insn_size = MAX_INSN_SIZE, | 59 | .insn_size = MAX_INSN_SIZE, |
59 | }; | 60 | }; |
60 | 61 | ||
61 | static void __kprobes copy_instruction(struct kprobe *p) | 62 | static void copy_instruction(struct kprobe *p) |
62 | { | 63 | { |
64 | unsigned long ip = (unsigned long) p->addr; | ||
63 | s64 disp, new_disp; | 65 | s64 disp, new_disp; |
64 | u64 addr, new_addr; | 66 | u64 addr, new_addr; |
65 | 67 | ||
66 | memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8)); | 68 | if (ftrace_location(ip) == ip) { |
69 | /* | ||
70 | * If kprobes patches the instruction that is morphed by | ||
71 | * ftrace make sure that kprobes always sees the branch | ||
72 | * "jg .+24" that skips the mcount block | ||
73 | */ | ||
74 | ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn); | ||
75 | p->ainsn.is_ftrace_insn = 1; | ||
76 | } else | ||
77 | memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8)); | ||
78 | p->opcode = p->ainsn.insn[0]; | ||
67 | if (!probe_is_insn_relative_long(p->ainsn.insn)) | 79 | if (!probe_is_insn_relative_long(p->ainsn.insn)) |
68 | return; | 80 | return; |
69 | /* | 81 | /* |
@@ -79,25 +91,14 @@ static void __kprobes copy_instruction(struct kprobe *p) | |||
79 | new_disp = ((addr + (disp * 2)) - new_addr) / 2; | 91 | new_disp = ((addr + (disp * 2)) - new_addr) / 2; |
80 | *(s32 *)&p->ainsn.insn[1] = new_disp; | 92 | *(s32 *)&p->ainsn.insn[1] = new_disp; |
81 | } | 93 | } |
94 | NOKPROBE_SYMBOL(copy_instruction); | ||
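
For a probe on an ftrace-managed location, the single-step buffer therefore never holds the live breakpoint or brasl; it always holds the nop branch. A hedged sketch of what ftrace_generate_nop_insn() presumably emits, with the opcode and displacement inferred from the "jg .+24" comment above rather than copied from the header:

/* Assumed implementation, derived from the "jg .+24" comment. */
static void demo_generate_nop_insn(struct ftrace_insn *insn)
{
	insn->opc  = 0xc0f4;	/* brcl 15,... i.e. jg (branch always) */
	insn->disp = 24 / 2;	/* relative target in halfwords: .+24 */
}
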
82 | 95 | ||
83 | static inline int is_kernel_addr(void *addr) | 96 | static inline int is_kernel_addr(void *addr) |
84 | { | 97 | { |
85 | return addr < (void *)_end; | 98 | return addr < (void *)_end; |
86 | } | 99 | } |
87 | 100 | ||
88 | static inline int is_module_addr(void *addr) | 101 | static int s390_get_insn_slot(struct kprobe *p) |
89 | { | ||
90 | #ifdef CONFIG_64BIT | ||
91 | BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); | ||
92 | if (addr < (void *)MODULES_VADDR) | ||
93 | return 0; | ||
94 | if (addr > (void *)MODULES_END) | ||
95 | return 0; | ||
96 | #endif | ||
97 | return 1; | ||
98 | } | ||
99 | |||
100 | static int __kprobes s390_get_insn_slot(struct kprobe *p) | ||
101 | { | 102 | { |
102 | /* | 103 | /* |
103 | * Get an insn slot that is within the same 2GB area as the original | 104 | * Get an insn slot that is within the same 2GB area as the original |
@@ -111,8 +112,9 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p) | |||
111 | p->ainsn.insn = get_insn_slot(); | 112 | p->ainsn.insn = get_insn_slot(); |
112 | return p->ainsn.insn ? 0 : -ENOMEM; | 113 | return p->ainsn.insn ? 0 : -ENOMEM; |
113 | } | 114 | } |
115 | NOKPROBE_SYMBOL(s390_get_insn_slot); | ||
114 | 116 | ||
115 | static void __kprobes s390_free_insn_slot(struct kprobe *p) | 117 | static void s390_free_insn_slot(struct kprobe *p) |
116 | { | 118 | { |
117 | if (!p->ainsn.insn) | 119 | if (!p->ainsn.insn) |
118 | return; | 120 | return; |
@@ -122,8 +124,9 @@ static void __kprobes s390_free_insn_slot(struct kprobe *p) | |||
122 | free_insn_slot(p->ainsn.insn, 0); | 124 | free_insn_slot(p->ainsn.insn, 0); |
123 | p->ainsn.insn = NULL; | 125 | p->ainsn.insn = NULL; |
124 | } | 126 | } |
127 | NOKPROBE_SYMBOL(s390_free_insn_slot); | ||
125 | 128 | ||
126 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 129 | int arch_prepare_kprobe(struct kprobe *p) |
127 | { | 130 | { |
128 | if ((unsigned long) p->addr & 0x01) | 131 | if ((unsigned long) p->addr & 0x01) |
129 | return -EINVAL; | 132 | return -EINVAL; |
@@ -132,54 +135,79 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
132 | return -EINVAL; | 135 | return -EINVAL; |
133 | if (s390_get_insn_slot(p)) | 136 | if (s390_get_insn_slot(p)) |
134 | return -ENOMEM; | 137 | return -ENOMEM; |
135 | p->opcode = *p->addr; | ||
136 | copy_instruction(p); | 138 | copy_instruction(p); |
137 | return 0; | 139 | return 0; |
138 | } | 140 | } |
141 | NOKPROBE_SYMBOL(arch_prepare_kprobe); | ||
139 | 142 | ||
140 | struct ins_replace_args { | 143 | int arch_check_ftrace_location(struct kprobe *p) |
141 | kprobe_opcode_t *ptr; | 144 | { |
142 | kprobe_opcode_t opcode; | 145 | return 0; |
146 | } | ||
147 | |||
148 | struct swap_insn_args { | ||
149 | struct kprobe *p; | ||
150 | unsigned int arm_kprobe : 1; | ||
143 | }; | 151 | }; |
144 | 152 | ||
145 | static int __kprobes swap_instruction(void *aref) | 153 | static int swap_instruction(void *data) |
146 | { | 154 | { |
147 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 155 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
148 | unsigned long status = kcb->kprobe_status; | 156 | unsigned long status = kcb->kprobe_status; |
149 | struct ins_replace_args *args = aref; | 157 | struct swap_insn_args *args = data; |
150 | 158 | struct ftrace_insn new_insn, *insn; | |
159 | struct kprobe *p = args->p; | ||
160 | size_t len; | ||
161 | |||
162 | new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode; | ||
163 | len = sizeof(new_insn.opc); | ||
164 | if (!p->ainsn.is_ftrace_insn) | ||
165 | goto skip_ftrace; | ||
166 | len = sizeof(new_insn); | ||
167 | insn = (struct ftrace_insn *) p->addr; | ||
168 | if (args->arm_kprobe) { | ||
169 | if (is_ftrace_nop(insn)) | ||
170 | new_insn.disp = KPROBE_ON_FTRACE_NOP; | ||
171 | else | ||
172 | new_insn.disp = KPROBE_ON_FTRACE_CALL; | ||
173 | } else { | ||
174 | ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr); | ||
175 | if (insn->disp == KPROBE_ON_FTRACE_NOP) | ||
176 | ftrace_generate_nop_insn(&new_insn); | ||
177 | } | ||
178 | skip_ftrace: | ||
151 | kcb->kprobe_status = KPROBE_SWAP_INST; | 179 | kcb->kprobe_status = KPROBE_SWAP_INST; |
152 | probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode)); | 180 | probe_kernel_write(p->addr, &new_insn, len); |
153 | kcb->kprobe_status = status; | 181 | kcb->kprobe_status = status; |
154 | return 0; | 182 | return 0; |
155 | } | 183 | } |
184 | NOKPROBE_SYMBOL(swap_instruction); | ||
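
Arming thus always installs the breakpoint opcode while parking the tracing state in the displacement, and disarming regenerates whichever full instruction the parked marker names. A condensed, illustrative restatement of the ftrace branch (not part of the patch; the non-ftrace case simply swaps the two-byte opcode):

/* Illustrative only: the six-byte transitions at an ftrace call site. */
static void demo_swap_ftrace_site(struct kprobe *p, bool arm_kprobe)
{
	struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
	struct ftrace_insn new_insn;

	if (arm_kprobe) {	/* breakpoint in, park the tracing state */
		new_insn.opc  = BREAKPOINT_INSTRUCTION;
		new_insn.disp = is_ftrace_nop(insn) ?
			KPROBE_ON_FTRACE_NOP : KPROBE_ON_FTRACE_CALL;
	} else {		/* restore the instruction the marker names */
		ftrace_generate_call_insn(&new_insn, (unsigned long) p->addr);
		if (insn->disp == KPROBE_ON_FTRACE_NOP)
			ftrace_generate_nop_insn(&new_insn);
	}
	probe_kernel_write(p->addr, &new_insn, sizeof(new_insn));
}
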
156 | 185 | ||
157 | void __kprobes arch_arm_kprobe(struct kprobe *p) | 186 | void arch_arm_kprobe(struct kprobe *p) |
158 | { | 187 | { |
159 | struct ins_replace_args args; | 188 | struct swap_insn_args args = {.p = p, .arm_kprobe = 1}; |
160 | 189 | ||
161 | args.ptr = p->addr; | ||
162 | args.opcode = BREAKPOINT_INSTRUCTION; | ||
163 | stop_machine(swap_instruction, &args, NULL); | 190 | stop_machine(swap_instruction, &args, NULL); |
164 | } | 191 | } |
192 | NOKPROBE_SYMBOL(arch_arm_kprobe); | ||
165 | 193 | ||
166 | void __kprobes arch_disarm_kprobe(struct kprobe *p) | 194 | void arch_disarm_kprobe(struct kprobe *p) |
167 | { | 195 | { |
168 | struct ins_replace_args args; | 196 | struct swap_insn_args args = {.p = p, .arm_kprobe = 0}; |
169 | 197 | ||
170 | args.ptr = p->addr; | ||
171 | args.opcode = p->opcode; | ||
172 | stop_machine(swap_instruction, &args, NULL); | 198 | stop_machine(swap_instruction, &args, NULL); |
173 | } | 199 | } |
200 | NOKPROBE_SYMBOL(arch_disarm_kprobe); | ||
174 | 201 | ||
175 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 202 | void arch_remove_kprobe(struct kprobe *p) |
176 | { | 203 | { |
177 | s390_free_insn_slot(p); | 204 | s390_free_insn_slot(p); |
178 | } | 205 | } |
206 | NOKPROBE_SYMBOL(arch_remove_kprobe); | ||
179 | 207 | ||
180 | static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb, | 208 | static void enable_singlestep(struct kprobe_ctlblk *kcb, |
181 | struct pt_regs *regs, | 209 | struct pt_regs *regs, |
182 | unsigned long ip) | 210 | unsigned long ip) |
183 | { | 211 | { |
184 | struct per_regs per_kprobe; | 212 | struct per_regs per_kprobe; |
185 | 213 | ||
@@ -199,10 +227,11 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb, | |||
199 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); | 227 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); |
200 | regs->psw.addr = ip | PSW_ADDR_AMODE; | 228 | regs->psw.addr = ip | PSW_ADDR_AMODE; |
201 | } | 229 | } |
230 | NOKPROBE_SYMBOL(enable_singlestep); | ||
202 | 231 | ||
203 | static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb, | 232 | static void disable_singlestep(struct kprobe_ctlblk *kcb, |
204 | struct pt_regs *regs, | 233 | struct pt_regs *regs, |
205 | unsigned long ip) | 234 | unsigned long ip) |
206 | { | 235 | { |
207 | /* Restore control regs and psw mask, set new psw address */ | 236 | /* Restore control regs and psw mask, set new psw address */ |
208 | __ctl_load(kcb->kprobe_saved_ctl, 9, 11); | 237 | __ctl_load(kcb->kprobe_saved_ctl, 9, 11); |
@@ -210,41 +239,43 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb, | |||
210 | regs->psw.mask |= kcb->kprobe_saved_imask; | 239 | regs->psw.mask |= kcb->kprobe_saved_imask; |
211 | regs->psw.addr = ip | PSW_ADDR_AMODE; | 240 | regs->psw.addr = ip | PSW_ADDR_AMODE; |
212 | } | 241 | } |
242 | NOKPROBE_SYMBOL(disable_singlestep); | ||
213 | 243 | ||
214 | /* | 244 | /* |
215 | * Activate a kprobe by storing its pointer to current_kprobe. The | 245 | * Activate a kprobe by storing its pointer to current_kprobe. The |
216 | * previous kprobe is stored in kcb->prev_kprobe. A stack of up to | 246 | * previous kprobe is stored in kcb->prev_kprobe. A stack of up to |
217 | * two kprobes can be active, see KPROBE_REENTER. | 247 | * two kprobes can be active, see KPROBE_REENTER. |
218 | */ | 248 | */ |
219 | static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) | 249 | static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) |
220 | { | 250 | { |
221 | kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe); | 251 | kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe); |
222 | kcb->prev_kprobe.status = kcb->kprobe_status; | 252 | kcb->prev_kprobe.status = kcb->kprobe_status; |
223 | __this_cpu_write(current_kprobe, p); | 253 | __this_cpu_write(current_kprobe, p); |
224 | } | 254 | } |
255 | NOKPROBE_SYMBOL(push_kprobe); | ||
225 | 256 | ||
226 | /* | 257 | /* |
227 | * Deactivate a kprobe by backing up to the previous state. If the | 258 | * Deactivate a kprobe by backing up to the previous state. If the |
228 | * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL, | 259 | * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL, |
229 | * for any other state prev_kprobe.kp will be NULL. | 260 | * for any other state prev_kprobe.kp will be NULL. |
230 | */ | 261 | */ |
231 | static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb) | 262 | static void pop_kprobe(struct kprobe_ctlblk *kcb) |
232 | { | 263 | { |
233 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); | 264 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
234 | kcb->kprobe_status = kcb->prev_kprobe.status; | 265 | kcb->kprobe_status = kcb->prev_kprobe.status; |
235 | } | 266 | } |
267 | NOKPROBE_SYMBOL(pop_kprobe); | ||
236 | 268 | ||
237 | void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | 269 | void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) |
238 | struct pt_regs *regs) | ||
239 | { | 270 | { |
240 | ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; | 271 | ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; |
241 | 272 | ||
242 | /* Replace the return addr with trampoline addr */ | 273 | /* Replace the return addr with trampoline addr */ |
243 | regs->gprs[14] = (unsigned long) &kretprobe_trampoline; | 274 | regs->gprs[14] = (unsigned long) &kretprobe_trampoline; |
244 | } | 275 | } |
276 | NOKPROBE_SYMBOL(arch_prepare_kretprobe); | ||
245 | 277 | ||
246 | static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb, | 278 | static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p) |
247 | struct kprobe *p) | ||
248 | { | 279 | { |
249 | switch (kcb->kprobe_status) { | 280 | switch (kcb->kprobe_status) { |
250 | case KPROBE_HIT_SSDONE: | 281 | case KPROBE_HIT_SSDONE: |
@@ -264,8 +295,9 @@ static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb, | |||
264 | BUG(); | 295 | BUG(); |
265 | } | 296 | } |
266 | } | 297 | } |
298 | NOKPROBE_SYMBOL(kprobe_reenter_check); | ||
267 | 299 | ||
268 | static int __kprobes kprobe_handler(struct pt_regs *regs) | 300 | static int kprobe_handler(struct pt_regs *regs) |
269 | { | 301 | { |
270 | struct kprobe_ctlblk *kcb; | 302 | struct kprobe_ctlblk *kcb; |
271 | struct kprobe *p; | 303 | struct kprobe *p; |
@@ -339,6 +371,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
339 | preempt_enable_no_resched(); | 371 | preempt_enable_no_resched(); |
340 | return 0; | 372 | return 0; |
341 | } | 373 | } |
374 | NOKPROBE_SYMBOL(kprobe_handler); | ||
342 | 375 | ||
343 | /* | 376 | /* |
344 | * Function return probe trampoline: | 377 | * Function return probe trampoline: |
@@ -355,8 +388,7 @@ static void __used kretprobe_trampoline_holder(void) | |||
355 | /* | 388 | /* |
356 | * Called when the probe at kretprobe trampoline is hit | 389 | * Called when the probe at kretprobe trampoline is hit |
357 | */ | 390 | */ |
358 | static int __kprobes trampoline_probe_handler(struct kprobe *p, | 391 | static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) |
359 | struct pt_regs *regs) | ||
360 | { | 392 | { |
361 | struct kretprobe_instance *ri; | 393 | struct kretprobe_instance *ri; |
362 | struct hlist_head *head, empty_rp; | 394 | struct hlist_head *head, empty_rp; |
@@ -444,6 +476,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, | |||
444 | */ | 476 | */ |
445 | return 1; | 477 | return 1; |
446 | } | 478 | } |
479 | NOKPROBE_SYMBOL(trampoline_probe_handler); | ||
447 | 480 | ||
448 | /* | 481 | /* |
449 | * Called after single-stepping. p->addr is the address of the | 482 | * Called after single-stepping. p->addr is the address of the |
@@ -453,12 +486,30 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, | |||
453 | * single-stepped a copy of the instruction. The address of this | 486 | * single-stepped a copy of the instruction. The address of this |
454 | * copy is p->ainsn.insn. | 487 | * copy is p->ainsn.insn. |
455 | */ | 488 | */ |
456 | static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | 489 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) |
457 | { | 490 | { |
458 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 491 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
459 | unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; | 492 | unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; |
460 | int fixup = probe_get_fixup_type(p->ainsn.insn); | 493 | int fixup = probe_get_fixup_type(p->ainsn.insn); |
461 | 494 | ||
495 | /* Check if the kprobes location is an enabled ftrace caller */ | ||
496 | if (p->ainsn.is_ftrace_insn) { | ||
497 | struct ftrace_insn *insn = (struct ftrace_insn *) p->addr; | ||
498 | struct ftrace_insn call_insn; | ||
499 | |||
500 | ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr); | ||
501 | /* | ||
502 | * A kprobe on an enabled ftrace call site actually single- | ||
503 | * stepped an unconditional branch (the ftrace nop equivalent). | ||
504 | * Now we need to fix things up and pretend that a brasl r0,... | ||
505 | * was executed instead. | ||
506 | */ | ||
507 | if (insn->disp == KPROBE_ON_FTRACE_CALL) { | ||
508 | ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE; | ||
509 | regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn); | ||
510 | } | ||
511 | } | ||
512 | |||
462 | if (fixup & FIXUP_PSW_NORMAL) | 513 | if (fixup & FIXUP_PSW_NORMAL) |
463 | ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; | 514 | ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; |
464 | 515 | ||
@@ -476,8 +527,9 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | |||
476 | 527 | ||
477 | disable_singlestep(kcb, regs, ip); | 528 | disable_singlestep(kcb, regs, ip); |
478 | } | 529 | } |
530 | NOKPROBE_SYMBOL(resume_execution); | ||
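
To see why the fixup lands in the right place, it helps to work the arithmetic once. This assumes ftrace_generate_call_insn() computes the displacement relative to the probed address (as its use in ftrace_make_call() suggests) and that copy_instruction()'s displacement fixup re-targeted the relocated branch back into original coordinates.

/*
 * After single-stepping the relocated nop (a taken branch), ip already
 * points just past the mcount block: p->addr + MCOUNT_INSN_SIZE.  With
 * call_insn.disp == (FTRACE_ADDR - (unsigned long) p->addr) / 2:
 *
 *   ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE
 *      == (p->addr + MCOUNT_INSN_SIZE)
 *         + (FTRACE_ADDR - p->addr) - MCOUNT_INSN_SIZE
 *      == FTRACE_ADDR
 *
 * and %r0 == p->addr + sizeof(*insn) is exactly the return address a
 * real "brasl %r0,ftrace_caller" at p->addr would have produced.
 */
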
479 | 531 | ||
480 | static int __kprobes post_kprobe_handler(struct pt_regs *regs) | 532 | static int post_kprobe_handler(struct pt_regs *regs) |
481 | { | 533 | { |
482 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 534 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
483 | struct kprobe *p = kprobe_running(); | 535 | struct kprobe *p = kprobe_running(); |
@@ -504,8 +556,9 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs) | |||
504 | 556 | ||
505 | return 1; | 557 | return 1; |
506 | } | 558 | } |
559 | NOKPROBE_SYMBOL(post_kprobe_handler); | ||
507 | 560 | ||
508 | static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) | 561 | static int kprobe_trap_handler(struct pt_regs *regs, int trapnr) |
509 | { | 562 | { |
510 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 563 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
511 | struct kprobe *p = kprobe_running(); | 564 | struct kprobe *p = kprobe_running(); |
@@ -567,8 +620,9 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) | |||
567 | } | 620 | } |
568 | return 0; | 621 | return 0; |
569 | } | 622 | } |
623 | NOKPROBE_SYMBOL(kprobe_trap_handler); | ||
570 | 624 | ||
571 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | 625 | int kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
572 | { | 626 | { |
573 | int ret; | 627 | int ret; |
574 | 628 | ||
@@ -579,12 +633,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
579 | local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); | 633 | local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); |
580 | return ret; | 634 | return ret; |
581 | } | 635 | } |
636 | NOKPROBE_SYMBOL(kprobe_fault_handler); | ||
582 | 637 | ||
583 | /* | 638 | /* |
584 | * Wrapper routine for handling exceptions. | 639 | * Wrapper routine for handling exceptions. |
585 | */ | 640 | */ |
586 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | 641 | int kprobe_exceptions_notify(struct notifier_block *self, |
587 | unsigned long val, void *data) | 642 | unsigned long val, void *data) |
588 | { | 643 | { |
589 | struct die_args *args = (struct die_args *) data; | 644 | struct die_args *args = (struct die_args *) data; |
590 | struct pt_regs *regs = args->regs; | 645 | struct pt_regs *regs = args->regs; |
@@ -616,8 +671,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
616 | 671 | ||
617 | return ret; | 672 | return ret; |
618 | } | 673 | } |
674 | NOKPROBE_SYMBOL(kprobe_exceptions_notify); | ||
619 | 675 | ||
620 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | 676 | int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
621 | { | 677 | { |
622 | struct jprobe *jp = container_of(p, struct jprobe, kp); | 678 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
623 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 679 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
@@ -635,13 +691,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
635 | memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack)); | 691 | memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack)); |
636 | return 1; | 692 | return 1; |
637 | } | 693 | } |
694 | NOKPROBE_SYMBOL(setjmp_pre_handler); | ||
638 | 695 | ||
639 | void __kprobes jprobe_return(void) | 696 | void jprobe_return(void) |
640 | { | 697 | { |
641 | asm volatile(".word 0x0002"); | 698 | asm volatile(".word 0x0002"); |
642 | } | 699 | } |
700 | NOKPROBE_SYMBOL(jprobe_return); | ||
643 | 701 | ||
644 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | 702 | int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
645 | { | 703 | { |
646 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 704 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
647 | unsigned long stack; | 705 | unsigned long stack; |
@@ -655,6 +713,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
655 | preempt_enable_no_resched(); | 713 | preempt_enable_no_resched(); |
656 | return 1; | 714 | return 1; |
657 | } | 715 | } |
716 | NOKPROBE_SYMBOL(longjmp_break_handler); | ||
658 | 717 | ||
659 | static struct kprobe trampoline = { | 718 | static struct kprobe trampoline = { |
660 | .addr = (kprobe_opcode_t *) &kretprobe_trampoline, | 719 | .addr = (kprobe_opcode_t *) &kretprobe_trampoline, |
@@ -666,7 +725,8 @@ int __init arch_init_kprobes(void) | |||
666 | return register_kprobe(&trampoline); | 725 | return register_kprobe(&trampoline); |
667 | } | 726 | } |
668 | 727 | ||
669 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | 728 | int arch_trampoline_kprobe(struct kprobe *p) |
670 | { | 729 | { |
671 | return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline; | 730 | return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline; |
672 | } | 731 | } |
732 | NOKPROBE_SYMBOL(arch_trampoline_kprobe); | ||
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 4300ea374826..b6dfc5bfcb89 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
@@ -27,6 +27,7 @@ ENTRY(ftrace_caller) | |||
27 | .globl ftrace_regs_caller | 27 | .globl ftrace_regs_caller |
28 | .set ftrace_regs_caller,ftrace_caller | 28 | .set ftrace_regs_caller,ftrace_caller |
29 | lgr %r1,%r15 | 29 | lgr %r1,%r15 |
30 | aghi %r0,MCOUNT_RETURN_FIXUP | ||
30 | aghi %r15,-STACK_FRAME_SIZE | 31 | aghi %r15,-STACK_FRAME_SIZE |
31 | stg %r1,__SF_BACKCHAIN(%r15) | 32 | stg %r1,__SF_BACKCHAIN(%r15) |
32 | stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) | 33 | stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index b878f12a9597..c3f8d157cb0d 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -1383,7 +1383,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags) | |||
1383 | cpuhw->lsctl.ed = 1; | 1383 | cpuhw->lsctl.ed = 1; |
1384 | 1384 | ||
1385 | /* Set in_use flag and store event */ | 1385 | /* Set in_use flag and store event */ |
1386 | event->hw.idx = 0; /* only one sampling event per CPU supported */ | ||
1387 | cpuhw->event = event; | 1386 | cpuhw->event = event; |
1388 | cpuhw->flags |= PMU_F_IN_USE; | 1387 | cpuhw->flags |= PMU_F_IN_USE; |
1389 | 1388 | ||
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index ed84cc224899..aa7a83948c7b 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -61,7 +61,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
61 | return sf->gprs[8]; | 61 | return sf->gprs[8]; |
62 | } | 62 | } |
63 | 63 | ||
64 | extern void __kprobes kernel_thread_starter(void); | 64 | extern void kernel_thread_starter(void); |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Free current thread data structures etc.. | 67 | * Free current thread data structures etc.. |
@@ -153,6 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | |||
153 | save_fp_ctl(&p->thread.fp_regs.fpc); | 153 | save_fp_ctl(&p->thread.fp_regs.fpc); |
154 | save_fp_regs(p->thread.fp_regs.fprs); | 154 | save_fp_regs(p->thread.fp_regs.fprs); |
155 | p->thread.fp_regs.pad = 0; | 155 | p->thread.fp_regs.pad = 0; |
156 | p->thread.vxrs = NULL; | ||
156 | /* Set a new TLS ? */ | 157 | /* Set a new TLS ? */ |
157 | if (clone_flags & CLONE_SETTLS) { | 158 | if (clone_flags & CLONE_SETTLS) { |
158 | unsigned long tls = frame->childregs.gprs[6]; | 159 | unsigned long tls = frame->childregs.gprs[6]; |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 99a567b70d16..eabfb4594517 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -248,14 +248,27 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
248 | */ | 248 | */ |
249 | tmp = 0; | 249 | tmp = 0; |
250 | 250 | ||
251 | } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { | ||
252 | /* | ||
253 | * floating point control reg. is in the thread structure | ||
254 | */ | ||
255 | tmp = child->thread.fp_regs.fpc; | ||
256 | tmp <<= BITS_PER_LONG - 32; | ||
257 | |||
251 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { | 258 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { |
252 | /* | 259 | /* |
253 | * floating point regs. are stored in the thread structure | 260 | * floating point regs. are either in child->thread.fp_regs |
261 | * or the child->thread.vxrs array | ||
254 | */ | 262 | */ |
255 | offset = addr - (addr_t) &dummy->regs.fp_regs; | 263 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; |
256 | tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); | 264 | #ifdef CONFIG_64BIT |
257 | if (addr == (addr_t) &dummy->regs.fp_regs.fpc) | 265 | if (child->thread.vxrs) |
258 | tmp <<= BITS_PER_LONG - 32; | 266 | tmp = *(addr_t *) |
267 | ((addr_t) child->thread.vxrs + 2*offset); | ||
268 | else | ||
269 | #endif | ||
270 | tmp = *(addr_t *) | ||
271 | ((addr_t) &child->thread.fp_regs.fprs + offset); | ||
259 | 272 | ||
260 | } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { | 273 | } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { |
261 | /* | 274 | /* |
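
The doubled offset in the vxrs indexing follows from the register overlay: floating point register n occupies the leftmost eight bytes of the sixteen-byte vector register n, so a byte offset into the packed eight-byte fprs array doubles when applied to the __vector128 array. A hypothetical helper making the mapping explicit (illustrative only, not part of the patch):

/* Hypothetical helper: address of fpr n, honouring the fpr/vxr overlay. */
static u64 *demo_fpr_slot(struct task_struct *tsk, int n)
{
	if (tsk->thread.vxrs)		/* fpr n == high half of vxr n */
		return (u64 *) &tsk->thread.vxrs[n];
	return (u64 *) &tsk->thread.fp_regs.fprs[n];
}
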
@@ -383,16 +396,29 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
383 | */ | 396 | */ |
384 | return 0; | 397 | return 0; |
385 | 398 | ||
399 | } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { | ||
400 | /* | ||
401 | * floating point control reg. is in the thread structure | ||
402 | */ | ||
403 | if ((unsigned int) data != 0 || | ||
404 | test_fp_ctl(data >> (BITS_PER_LONG - 32))) | ||
405 | return -EINVAL; | ||
406 | child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32); | ||
407 | |||
386 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { | 408 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { |
387 | /* | 409 | /* |
388 | * floating point regs. are stored in the thread structure | 410 | * floating point regs. are either in child->thread.fp_regs |
411 | * or the child->thread.vxrs array | ||
389 | */ | 412 | */ |
390 | if (addr == (addr_t) &dummy->regs.fp_regs.fpc) | 413 | offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; |
391 | if ((unsigned int) data != 0 || | 414 | #ifdef CONFIG_64BIT |
392 | test_fp_ctl(data >> (BITS_PER_LONG - 32))) | 415 | if (child->thread.vxrs) |
393 | return -EINVAL; | 416 | *(addr_t *)((addr_t) |
394 | offset = addr - (addr_t) &dummy->regs.fp_regs; | 417 | child->thread.vxrs + 2*offset) = data; |
395 | *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; | 418 | else |
419 | #endif | ||
420 | *(addr_t *)((addr_t) | ||
421 | &child->thread.fp_regs.fprs + offset) = data; | ||
396 | 422 | ||
397 | } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { | 423 | } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { |
398 | /* | 424 | /* |
@@ -611,12 +637,26 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) | |||
611 | */ | 637 | */ |
612 | tmp = 0; | 638 | tmp = 0; |
613 | 639 | ||
640 | } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { | ||
641 | /* | ||
642 | * floating point control reg. is in the thread structure | ||
643 | */ | ||
644 | tmp = child->thread.fp_regs.fpc; | ||
645 | |||
614 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { | 646 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { |
615 | /* | 647 | /* |
616 | * floating point regs. are stored in the thread structure | 648 | * floating point regs. are either in child->thread.fp_regs |
649 | * or the child->thread.vxrs array | ||
617 | */ | 650 | */ |
618 | offset = addr - (addr_t) &dummy32->regs.fp_regs; | 651 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; |
619 | tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset); | 652 | #ifdef CONFIG_64BIT |
653 | if (child->thread.vxrs) | ||
654 | tmp = *(__u32 *) | ||
655 | ((addr_t) child->thread.vxrs + 2*offset); | ||
656 | else | ||
657 | #endif | ||
658 | tmp = *(__u32 *) | ||
659 | ((addr_t) &child->thread.fp_regs.fprs + offset); | ||
620 | 660 | ||
621 | } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { | 661 | } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { |
622 | /* | 662 | /* |
@@ -722,15 +762,28 @@ static int __poke_user_compat(struct task_struct *child, | |||
722 | */ | 762 | */ |
723 | return 0; | 763 | return 0; |
724 | 764 | ||
725 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { | 765 | } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { |
726 | /* | 766 | /* |
727 | * floating point regs. are stored in the thread structure | 767 | * floating point control reg. is in the thread structure |
728 | */ | 768 | */ |
729 | if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && | 769 | if (test_fp_ctl(tmp)) |
730 | test_fp_ctl(tmp)) | ||
731 | return -EINVAL; | 770 | return -EINVAL; |
732 | offset = addr - (addr_t) &dummy32->regs.fp_regs; | 771 | child->thread.fp_regs.fpc = data; |
733 | *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; | 772 | |
773 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { | ||
774 | /* | ||
775 | * floating point regs. are either in child->thread.fp_regs | ||
776 | * or the child->thread.vxrs array | ||
777 | */ | ||
778 | offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; | ||
779 | #ifdef CONFIG_64BIT | ||
780 | if (child->thread.vxrs) | ||
781 | *(__u32 *)((addr_t) | ||
782 | child->thread.vxrs + 2*offset) = tmp; | ||
783 | else | ||
784 | #endif | ||
785 | *(__u32 *)((addr_t) | ||
786 | &child->thread.fp_regs.fprs + offset) = tmp; | ||
734 | 787 | ||
735 | } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { | 788 | } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { |
736 | /* | 789 | /* |
@@ -1038,12 +1091,6 @@ static int s390_tdb_set(struct task_struct *target, | |||
1038 | return 0; | 1091 | return 0; |
1039 | } | 1092 | } |
1040 | 1093 | ||
1041 | static int s390_vxrs_active(struct task_struct *target, | ||
1042 | const struct user_regset *regset) | ||
1043 | { | ||
1044 | return !!target->thread.vxrs; | ||
1045 | } | ||
1046 | |||
1047 | static int s390_vxrs_low_get(struct task_struct *target, | 1094 | static int s390_vxrs_low_get(struct task_struct *target, |
1048 | const struct user_regset *regset, | 1095 | const struct user_regset *regset, |
1049 | unsigned int pos, unsigned int count, | 1096 | unsigned int pos, unsigned int count, |
@@ -1052,6 +1099,8 @@ static int s390_vxrs_low_get(struct task_struct *target, | |||
1052 | __u64 vxrs[__NUM_VXRS_LOW]; | 1099 | __u64 vxrs[__NUM_VXRS_LOW]; |
1053 | int i; | 1100 | int i; |
1054 | 1101 | ||
1102 | if (!MACHINE_HAS_VX) | ||
1103 | return -ENODEV; | ||
1055 | if (target->thread.vxrs) { | 1104 | if (target->thread.vxrs) { |
1056 | if (target == current) | 1105 | if (target == current) |
1057 | save_vx_regs(target->thread.vxrs); | 1106 | save_vx_regs(target->thread.vxrs); |
@@ -1070,6 +1119,8 @@ static int s390_vxrs_low_set(struct task_struct *target, | |||
1070 | __u64 vxrs[__NUM_VXRS_LOW]; | 1119 | __u64 vxrs[__NUM_VXRS_LOW]; |
1071 | int i, rc; | 1120 | int i, rc; |
1072 | 1121 | ||
1122 | if (!MACHINE_HAS_VX) | ||
1123 | return -ENODEV; | ||
1073 | if (!target->thread.vxrs) { | 1124 | if (!target->thread.vxrs) { |
1074 | rc = alloc_vector_registers(target); | 1125 | rc = alloc_vector_registers(target); |
1075 | if (rc) | 1126 | if (rc) |
@@ -1095,6 +1146,8 @@ static int s390_vxrs_high_get(struct task_struct *target, | |||
1095 | { | 1146 | { |
1096 | __vector128 vxrs[__NUM_VXRS_HIGH]; | 1147 | __vector128 vxrs[__NUM_VXRS_HIGH]; |
1097 | 1148 | ||
1149 | if (!MACHINE_HAS_VX) | ||
1150 | return -ENODEV; | ||
1098 | if (target->thread.vxrs) { | 1151 | if (target->thread.vxrs) { |
1099 | if (target == current) | 1152 | if (target == current) |
1100 | save_vx_regs(target->thread.vxrs); | 1153 | save_vx_regs(target->thread.vxrs); |
@@ -1112,6 +1165,8 @@ static int s390_vxrs_high_set(struct task_struct *target, | |||
1112 | { | 1165 | { |
1113 | int rc; | 1166 | int rc; |
1114 | 1167 | ||
1168 | if (!MACHINE_HAS_VX) | ||
1169 | return -ENODEV; | ||
1115 | if (!target->thread.vxrs) { | 1170 | if (!target->thread.vxrs) { |
1116 | rc = alloc_vector_registers(target); | 1171 | rc = alloc_vector_registers(target); |
1117 | if (rc) | 1172 | if (rc) |
@@ -1196,7 +1251,6 @@ static const struct user_regset s390_regsets[] = { | |||
1196 | .n = __NUM_VXRS_LOW, | 1251 | .n = __NUM_VXRS_LOW, |
1197 | .size = sizeof(__u64), | 1252 | .size = sizeof(__u64), |
1198 | .align = sizeof(__u64), | 1253 | .align = sizeof(__u64), |
1199 | .active = s390_vxrs_active, | ||
1200 | .get = s390_vxrs_low_get, | 1254 | .get = s390_vxrs_low_get, |
1201 | .set = s390_vxrs_low_set, | 1255 | .set = s390_vxrs_low_set, |
1202 | }, | 1256 | }, |
@@ -1205,7 +1259,6 @@ static const struct user_regset s390_regsets[] = { | |||
1205 | .n = __NUM_VXRS_HIGH, | 1259 | .n = __NUM_VXRS_HIGH, |
1206 | .size = sizeof(__vector128), | 1260 | .size = sizeof(__vector128), |
1207 | .align = sizeof(__vector128), | 1261 | .align = sizeof(__vector128), |
1208 | .active = s390_vxrs_active, | ||
1209 | .get = s390_vxrs_high_get, | 1262 | .get = s390_vxrs_high_get, |
1210 | .set = s390_vxrs_high_set, | 1263 | .set = s390_vxrs_high_set, |
1211 | }, | 1264 | }, |
@@ -1419,7 +1472,6 @@ static const struct user_regset s390_compat_regsets[] = { | |||
1419 | .n = __NUM_VXRS_LOW, | 1472 | .n = __NUM_VXRS_LOW, |
1420 | .size = sizeof(__u64), | 1473 | .size = sizeof(__u64), |
1421 | .align = sizeof(__u64), | 1474 | .align = sizeof(__u64), |
1422 | .active = s390_vxrs_active, | ||
1423 | .get = s390_vxrs_low_get, | 1475 | .get = s390_vxrs_low_get, |
1424 | .set = s390_vxrs_low_set, | 1476 | .set = s390_vxrs_low_set, |
1425 | }, | 1477 | }, |
@@ -1428,7 +1480,6 @@ static const struct user_regset s390_compat_regsets[] = { | |||
1428 | .n = __NUM_VXRS_HIGH, | 1480 | .n = __NUM_VXRS_HIGH, |
1429 | .size = sizeof(__vector128), | 1481 | .size = sizeof(__vector128), |
1430 | .align = sizeof(__vector128), | 1482 | .align = sizeof(__vector128), |
1431 | .active = s390_vxrs_active, | ||
1432 | .get = s390_vxrs_high_get, | 1483 | .get = s390_vxrs_high_get, |
1433 | .set = s390_vxrs_high_set, | 1484 | .set = s390_vxrs_high_set, |
1434 | }, | 1485 | }, |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index e80d9ff9a56d..4e532c67832f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <linux/ctype.h> | 41 | #include <linux/ctype.h> |
42 | #include <linux/reboot.h> | 42 | #include <linux/reboot.h> |
43 | #include <linux/topology.h> | 43 | #include <linux/topology.h> |
44 | #include <linux/ftrace.h> | ||
45 | #include <linux/kexec.h> | 44 | #include <linux/kexec.h> |
46 | #include <linux/crash_dump.h> | 45 | #include <linux/crash_dump.h> |
47 | #include <linux/memory.h> | 46 | #include <linux/memory.h> |
@@ -356,7 +355,6 @@ static void __init setup_lowcore(void) | |||
356 | lc->steal_timer = S390_lowcore.steal_timer; | 355 | lc->steal_timer = S390_lowcore.steal_timer; |
357 | lc->last_update_timer = S390_lowcore.last_update_timer; | 356 | lc->last_update_timer = S390_lowcore.last_update_timer; |
358 | lc->last_update_clock = S390_lowcore.last_update_clock; | 357 | lc->last_update_clock = S390_lowcore.last_update_clock; |
359 | lc->ftrace_func = S390_lowcore.ftrace_func; | ||
360 | 358 | ||
361 | restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); | 359 | restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); |
362 | restart_stack += ASYNC_SIZE; | 360 | restart_stack += ASYNC_SIZE; |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 0c1a0ff0a558..6a2ac257d98f 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -371,7 +371,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
371 | restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE; | 371 | restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE; |
372 | } else { | 372 | } else { |
373 | /* Signal frames without vector registers are short! */ | 373 | /* Signal frames without vector registers are short! */ |
374 | __u16 __user *svc = (void *) frame + frame_size - 2; | 374 | __u16 __user *svc = (void __user *) frame + frame_size - 2; |
375 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) | 375 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) |
376 | return -EFAULT; | 376 | return -EFAULT; |
377 | restorer = (unsigned long) svc | PSW_ADDR_AMODE; | 377 | restorer = (unsigned long) svc | PSW_ADDR_AMODE; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6fd9e60101f1..0b499f5cbe19 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -236,7 +236,6 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) | |||
236 | lc->percpu_offset = __per_cpu_offset[cpu]; | 236 | lc->percpu_offset = __per_cpu_offset[cpu]; |
237 | lc->kernel_asce = S390_lowcore.kernel_asce; | 237 | lc->kernel_asce = S390_lowcore.kernel_asce; |
238 | lc->machine_flags = S390_lowcore.machine_flags; | 238 | lc->machine_flags = S390_lowcore.machine_flags; |
239 | lc->ftrace_func = S390_lowcore.ftrace_func; | ||
240 | lc->user_timer = lc->system_timer = lc->steal_timer = 0; | 239 | lc->user_timer = lc->system_timer = lc->steal_timer = 0; |
241 | __ctl_store(lc->cregs_save_area, 0, 15); | 240 | __ctl_store(lc->cregs_save_area, 0, 15); |
242 | save_access_regs((unsigned int *) lc->access_regs_save_area); | 241 | save_access_regs((unsigned int *) lc->access_regs_save_area); |
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 9f7087fd58de..a2987243bc76 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
@@ -360,3 +360,5 @@ SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) | |||
360 | SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) | 360 | SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) |
361 | SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ | 361 | SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ |
362 | SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) | 362 | SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) |
363 | SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) | ||
364 | SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) | ||
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 005d665fe4a5..20660dddb2d6 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -61,10 +61,11 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); | |||
61 | /* | 61 | /* |
62 | * Scheduler clock - returns current time in nanosec units. | 62 | * Scheduler clock - returns current time in nanosec units. |
63 | */ | 63 | */ |
64 | unsigned long long notrace __kprobes sched_clock(void) | 64 | unsigned long long notrace sched_clock(void) |
65 | { | 65 | { |
66 | return tod_to_ns(get_tod_clock_monotonic()); | 66 | return tod_to_ns(get_tod_clock_monotonic()); |
67 | } | 67 | } |
68 | NOKPROBE_SYMBOL(sched_clock); | ||
68 | 69 | ||
69 | /* | 70 | /* |
70 | * Monotonic_clock - returns # of nanoseconds passed since time_init() | 71 | * Monotonic_clock - returns # of nanoseconds passed since time_init() |
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 9ff5ecba26ab..f081cf1157c3 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -49,7 +49,8 @@ static inline void report_user_fault(struct pt_regs *regs, int signr) | |||
49 | return; | 49 | return; |
50 | if (!printk_ratelimit()) | 50 | if (!printk_ratelimit()) |
51 | return; | 51 | return; |
52 | printk("User process fault: interruption code 0x%X ", regs->int_code); | 52 | printk("User process fault: interruption code %04x ilc:%d ", |
53 | regs->int_code & 0xffff, regs->int_code >> 17); | ||
53 | print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); | 54 | print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); |
54 | printk("\n"); | 55 | printk("\n"); |
55 | show_regs(regs); | 56 | show_regs(regs); |
@@ -87,16 +88,16 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) | |||
87 | } | 88 | } |
88 | } | 89 | } |
89 | 90 | ||
90 | static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code, | 91 | static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) |
91 | char *str) | ||
92 | { | 92 | { |
93 | if (notify_die(DIE_TRAP, str, regs, 0, | 93 | if (notify_die(DIE_TRAP, str, regs, 0, |
94 | regs->int_code, si_signo) == NOTIFY_STOP) | 94 | regs->int_code, si_signo) == NOTIFY_STOP) |
95 | return; | 95 | return; |
96 | do_report_trap(regs, si_signo, si_code, str); | 96 | do_report_trap(regs, si_signo, si_code, str); |
97 | } | 97 | } |
98 | NOKPROBE_SYMBOL(do_trap); | ||
98 | 99 | ||
99 | void __kprobes do_per_trap(struct pt_regs *regs) | 100 | void do_per_trap(struct pt_regs *regs) |
100 | { | 101 | { |
101 | siginfo_t info; | 102 | siginfo_t info; |
102 | 103 | ||
@@ -111,6 +112,7 @@ void __kprobes do_per_trap(struct pt_regs *regs) | |||
111 | (void __force __user *) current->thread.per_event.address; | 112 | (void __force __user *) current->thread.per_event.address; |
112 | force_sig_info(SIGTRAP, &info, current); | 113 | force_sig_info(SIGTRAP, &info, current); |
113 | } | 114 | } |
115 | NOKPROBE_SYMBOL(do_per_trap); | ||
114 | 116 | ||
115 | void default_trap_handler(struct pt_regs *regs) | 117 | void default_trap_handler(struct pt_regs *regs) |
116 | { | 118 | { |
@@ -151,8 +153,6 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, | |||
151 | "privileged operation") | 153 | "privileged operation") |
152 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, | 154 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, |
153 | "special operation exception") | 155 | "special operation exception") |
154 | DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, | ||
155 | "translation exception") | ||
156 | 156 | ||
157 | #ifdef CONFIG_64BIT | 157 | #ifdef CONFIG_64BIT |
158 | DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, | 158 | DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, |
@@ -179,7 +179,13 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc) | |||
179 | do_trap(regs, SIGFPE, si_code, "floating point exception"); | 179 | do_trap(regs, SIGFPE, si_code, "floating point exception"); |
180 | } | 180 | } |
181 | 181 | ||
182 | void __kprobes illegal_op(struct pt_regs *regs) | 182 | void translation_exception(struct pt_regs *regs) |
183 | { | ||
184 | /* May never happen. */ | ||
185 | die(regs, "Translation exception"); | ||
186 | } | ||
187 | |||
188 | void illegal_op(struct pt_regs *regs) | ||
183 | { | 189 | { |
184 | siginfo_t info; | 190 | siginfo_t info; |
185 | __u8 opcode[6]; | 191 | __u8 opcode[6]; |
@@ -252,7 +258,7 @@ void __kprobes illegal_op(struct pt_regs *regs) | |||
252 | if (signal) | 258 | if (signal) |
253 | do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); | 259 | do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); |
254 | } | 260 | } |
255 | 261 | NOKPROBE_SYMBOL(illegal_op); | |
256 | 262 | ||
257 | #ifdef CONFIG_MATHEMU | 263 | #ifdef CONFIG_MATHEMU |
258 | void specification_exception(struct pt_regs *regs) | 264 | void specification_exception(struct pt_regs *regs) |
@@ -469,7 +475,7 @@ void space_switch_exception(struct pt_regs *regs) | |||
469 | do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); | 475 | do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); |
470 | } | 476 | } |
471 | 477 | ||
472 | void __kprobes kernel_stack_overflow(struct pt_regs * regs) | 478 | void kernel_stack_overflow(struct pt_regs *regs) |
473 | { | 479 | { |
474 | bust_spinlocks(1); | 480 | bust_spinlocks(1); |
475 | printk("Kernel stack overflow.\n"); | 481 | printk("Kernel stack overflow.\n"); |
@@ -477,6 +483,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs) | |||
477 | bust_spinlocks(0); | 483 | bust_spinlocks(0); |
478 | panic("Corrupt kernel stack, can't continue."); | 484 | panic("Corrupt kernel stack, can't continue."); |
479 | } | 485 | } |
486 | NOKPROBE_SYMBOL(kernel_stack_overflow); | ||
480 | 487 | ||
481 | void __init trap_init(void) | 488 | void __init trap_init(void) |
482 | { | 489 | { |