author     Martin Schwidefsky <schwidefsky@de.ibm.com>        2011-01-05 06:48:10 -0500
committer  Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2011-01-05 06:47:31 -0500
commit     5e9a26928f550157563cfc06ce12c4ae121a02ec (patch)
tree       fc58668f8c6151a5f58c0430f92a0691d727af42
parent     da7f51c11d5fedca9ba779ee220063ccb4f0a27e (diff)

[S390] ptrace cleanup

Overhaul program event recording and the code dealing with the ptrace
user space interface.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
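As a reading aid for the diff below: the patch replaces the old per_info bookkeeping with two small kernel-side structures (struct per_regs for the %cr9-%cr11 contents and struct per_event for the cause of the last PER trap) plus one helper, update_per_regs(), which picks the active PER set. TIF_SINGLE_STEP forces an instruction-fetch range over the whole address space, otherwise the debugger-supplied thread->per_user set is used, and PSW_MASK_PER is only kept set while some event bit is enabled. The following standalone sketch (plain user-space C with simplified types; the constants are copied from the new asm/ptrace.h, everything else is illustrative and not the kernel code itself) models just that selection logic:

    #include <stdio.h>

    #define PER_EVENT_MASK   0xE9000000UL   /* from the new asm/ptrace.h */
    #define PER_EVENT_IFETCH 0x40000000UL
    #define PSW_ADDR_INSN    0x7fffffffUL   /* illustrative 31-bit address mask */

    struct per_regs {               /* mirrors the struct added in asm/ptrace.h */
            unsigned long control;  /* PER control bits (%cr9) */
            unsigned long start;    /* PER starting address (%cr10) */
            unsigned long end;      /* PER ending address (%cr11) */
    };

    /* Pick the PER set that would be loaded into %cr9-%cr11:
     * single stepping overrides whatever the debugger configured. */
    static struct per_regs choose_per_set(int single_step,
                                          const struct per_regs *user)
    {
            static const struct per_regs per_single_step = {
                    .control = PER_EVENT_IFETCH,
                    .start   = 0,
                    .end     = PSW_ADDR_INSN,
            };
            return single_step ? per_single_step : *user;
    }

    int main(void)
    {
            struct per_regs user = { .control = 0, .start = 0, .end = 0 };
            struct per_regs a = choose_per_set(0, &user);  /* no debugging      */
            struct per_regs b = choose_per_set(1, &user);  /* PTRACE_SINGLESTEP */

            /* PSW_MASK_PER would only stay set while an event bit is active. */
            printf("user set  : PER %s\n", (a.control & PER_EVENT_MASK) ? "on" : "off");
            printf("singlestep: PER %s\n", (b.control & PER_EVENT_MASK) ? "on" : "off");
            return 0;
    }

Built with a plain cc, the first line prints "off" and the second "on", which mirrors how update_per_regs() decides whether to set PSW_MASK_PER in the real patch.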
-rw-r--r--  arch/s390/include/asm/processor.h    |    3
-rw-r--r--  arch/s390/include/asm/ptrace.h       |   52
-rw-r--r--  arch/s390/include/asm/system.h       |    2
-rw-r--r--  arch/s390/include/asm/thread_info.h  |    8
-rw-r--r--  arch/s390/kernel/asm-offsets.c       |   14
-rw-r--r--  arch/s390/kernel/compat_ptrace.h     |   53
-rw-r--r--  arch/s390/kernel/entry.S             |   70
-rw-r--r--  arch/s390/kernel/entry.h             |    2
-rw-r--r--  arch/s390/kernel/entry64.S           |   69
-rw-r--r--  arch/s390/kernel/kprobes.c           |   13
-rw-r--r--  arch/s390/kernel/process.c           |    4
-rw-r--r--  arch/s390/kernel/ptrace.c            |  306
-rw-r--r--  arch/s390/kernel/signal.c            |    2
-rw-r--r--  arch/s390/kernel/traps.c             |    6
-rw-r--r--  arch/s390/mm/fault.c                 |    6
15 files changed, 347 insertions, 263 deletions
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 8d6f8716957..1988807d415 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -81,7 +81,8 @@ struct thread_struct {
         mm_segment_t mm_segment;
         unsigned long prot_addr;        /* address of protection-excep. */
         unsigned int trap_no;
-        per_struct per_info;
+        struct per_regs per_user;       /* User specified PER registers */
+        struct per_event per_event;     /* Cause of the last PER trap */
         /* pfault_wait is used to block the process on a pfault event */
         unsigned long pfault_wait;
 };
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index d9d42b1e46f..9ad628a8574 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -331,10 +331,60 @@ struct pt_regs
         unsigned short ilc;
         unsigned short svcnr;
 };
+
+/*
+ * Program event recording (PER) register set.
+ */
+struct per_regs {
+        unsigned long control;          /* PER control bits */
+        unsigned long start;            /* PER starting address */
+        unsigned long end;              /* PER ending address */
+};
+
+/*
+ * PER event contains information about the cause of the last PER exception.
+ */
+struct per_event {
+        unsigned short cause;           /* PER code, ATMID and AI */
+        unsigned long address;          /* PER address */
+        unsigned char paid;             /* PER access identification */
+};
+
+/*
+ * Simplified per_info structure used to decode the ptrace user space ABI.
+ */
+struct per_struct_kernel {
+        unsigned long cr9;              /* PER control bits */
+        unsigned long cr10;             /* PER starting address */
+        unsigned long cr11;             /* PER ending address */
+        unsigned long bits;             /* Obsolete software bits */
+        unsigned long starting_addr;    /* User specified start address */
+        unsigned long ending_addr;      /* User specified end address */
+        unsigned short perc_atmid;      /* PER trap ATMID */
+        unsigned long address;          /* PER trap instruction address */
+        unsigned char access_id;        /* PER trap access identification */
+};
+
+#define PER_EVENT_MASK                  0xE9000000UL
+
+#define PER_EVENT_BRANCH                0x80000000UL
+#define PER_EVENT_IFETCH                0x40000000UL
+#define PER_EVENT_STORE                 0x20000000UL
+#define PER_EVENT_STORE_REAL            0x08000000UL
+#define PER_EVENT_NULLIFICATION         0x01000000UL
+
+#define PER_CONTROL_MASK                0x00a00000UL
+
+#define PER_CONTROL_BRANCH_ADDRESS      0x00800000UL
+#define PER_CONTROL_ALTERATION          0x00200000UL
+
 #endif
 
 /*
- * Now for the program event recording (trace) definitions.
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface, don't
+ * touch or even look at it if you don't want to modify the user-space
+ * ptrace interface. In particular stay away from it for in-kernel PER.
  */
 typedef struct
 {
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 4ab2779fdb0..6710b0eac16 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,6 +20,7 @@
 struct task_struct;
 
 extern struct task_struct *__switch_to(void *, void *);
+extern void update_per_regs(struct task_struct *task);
 
 static inline void save_fp_regs(s390_fp_regs *fpregs)
 {
@@ -93,6 +94,7 @@ static inline void restore_access_regs(unsigned int *acrs)
         if (next->mm) {                                         \
                 restore_fp_regs(&next->thread.fp_regs);         \
                 restore_access_regs(&next->thread.acrs[0]);     \
+                update_per_regs(next);                          \
         }                                                       \
         prev = __switch_to(prev,next);                          \
 } while (0)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 81452021794..ebc77091466 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -88,7 +88,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING          2       /* signal pending */
 #define TIF_NEED_RESCHED        3       /* rescheduling necessary */
 #define TIF_RESTART_SVC         4       /* restart svc with new svc number */
-#define TIF_SINGLE_STEP         6       /* deliver sigtrap on return to user */
+#define TIF_PER_TRAP            6       /* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING        7       /* machine check handling is pending */
 #define TIF_SYSCALL_TRACE       8       /* syscall trace active */
 #define TIF_SYSCALL_AUDIT       9       /* syscall auditing active */
@@ -99,14 +99,15 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_31BIT               17      /* 32bit process */
 #define TIF_MEMDIE              18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK     19      /* restore signal mask in do_signal() */
-#define TIF_FREEZE              20      /* thread is freezing for suspend */
+#define TIF_SINGLE_STEP         20      /* This task is single stepped */
+#define TIF_FREEZE              21      /* thread is freezing for suspend */
 
 #define _TIF_NOTIFY_RESUME      (1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK    (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING         (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED       (1<<TIF_NEED_RESCHED)
 #define _TIF_RESTART_SVC        (1<<TIF_RESTART_SVC)
-#define _TIF_SINGLE_STEP        (1<<TIF_SINGLE_STEP)
+#define _TIF_PER_TRAP           (1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING       (1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE      (1<<TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT      (1<<TIF_SYSCALL_AUDIT)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG     (1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT              (1<<TIF_31BIT)
+#define _TIF_SINGLE_STEP        (1<<TIF_FREEZE)
 #define _TIF_FREEZE             (1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 33982e7ce04..fe03c140002 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,14 +23,16 @@ int main(void)
 {
         DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
         DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-        DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
         DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
         BLANK();
         DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
         BLANK();
-        DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid));
-        DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address));
-        DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id));
+        DEFINE(__THREAD_per_cause,
+               offsetof(struct task_struct, thread.per_event.cause));
+        DEFINE(__THREAD_per_address,
+               offsetof(struct task_struct, thread.per_event.address));
+        DEFINE(__THREAD_per_paid,
+               offsetof(struct task_struct, thread.per_event.paid));
         BLANK();
         DEFINE(__TI_task, offsetof(struct thread_info, task));
         DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -85,9 +87,9 @@ int main(void)
         DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
         DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
         DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
-        DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
+        DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
         DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
-        DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
+        DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
         DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
         DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
         DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 3141025724f..12b82383351 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -4,40 +4,19 @@
 #include <asm/ptrace.h>        /* needed for NUM_CR_WORDS */
 #include "compat_linux.h"      /* needed for psw_compat_t */
 
-typedef struct {
-        __u32 cr[NUM_CR_WORDS];
-} per_cr_words32;
-
-typedef struct {
-        __u16 perc_atmid;       /* 0x096 */
-        __u32 address;          /* 0x098 */
-        __u8  access_id;        /* 0x0a1 */
-} per_lowcore_words32;
-
-typedef struct {
-        union {
-                per_cr_words32 words;
-        } control_regs;
-        /*
-         * Use these flags instead of setting em_instruction_fetch
-         * directly they are used so that single stepping can be
-         * switched on & off while not affecting other tracing
-         */
-        unsigned single_step       : 1;
-        unsigned instruction_fetch : 1;
-        unsigned                   : 30;
-        /*
-         * These addresses are copied into cr10 & cr11 if single
-         * stepping is switched off
-         */
-        __u32 starting_addr;
-        __u32 ending_addr;
-        union {
-                per_lowcore_words32 words;
-        } lowcore;
-} per_struct32;
+struct compat_per_struct_kernel {
+        __u32 cr9;              /* PER control bits */
+        __u32 cr10;             /* PER starting address */
+        __u32 cr11;             /* PER ending address */
+        __u32 bits;             /* Obsolete software bits */
+        __u32 starting_addr;    /* User specified start address */
+        __u32 ending_addr;      /* User specified end address */
+        __u16 perc_atmid;       /* PER trap ATMID */
+        __u32 address;          /* PER trap instruction address */
+        __u8  access_id;        /* PER trap access identification */
+};
 
-struct user_regs_struct32
+struct compat_user_regs_struct
 {
         psw_compat_t psw;
         u32 gprs[NUM_GPRS];
@@ -50,14 +29,14 @@ struct user_regs_struct32
  * itself as there is no "official" ptrace interface for hardware
  * watchpoints. This is the way intel does it.
  */
-        per_struct32 per_info;
+        struct compat_per_struct_kernel per_info;
         u32 ieee_instruction_pointer;   /* obsolete, always 0 */
 };
 
-struct user32 {
+struct compat_user {
         /* We start with the registers, to mimic the way that "memory"
            is returned from the ptrace(3,...) function. */
-        struct user_regs_struct32 regs; /* Where the registers are actually stored */
+        struct compat_user_regs_struct regs;
         /* The rest of this junk is to help gdb figure out what goes where */
         u32 u_tsize;            /* Text segment size (pages). */
         u32 u_dsize;            /* Data segment size (pages). */
@@ -79,6 +58,6 @@ typedef struct
         __u32 len;
         __u32 kernel_addr;
         __u32 process_addr;
-} ptrace_area_emu31;
+} compat_ptrace_area;
 
 #endif /* _PTRACE32_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index af8bd3b90a2..648f64239a9 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -48,7 +48,7 @@ SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
 SP_SIZE      = STACK_FRAME_OVERHEAD + __PT_SIZE
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                  _TIF_MCCK_PENDING)
 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -200,31 +200,21 @@ STACK_SIZE = 1 << STACK_SHIFT
         .globl  __switch_to
 __switch_to:
         basr    %r1,0
-__switch_to_base:
-        tm      __THREAD_per(%r3),0xe8          # new process is using per ?
-        bz      __switch_to_noper-__switch_to_base(%r1)  # if not we're fine
-        stctl   %c9,%c11,__SF_EMPTY(%r15)       # We are using per stuff
-        clc     __THREAD_per(12,%r3),__SF_EMPTY(%r15)
-        be      __switch_to_noper-__switch_to_base(%r1)  # we got away w/o bashing TLB's
-        lctl    %c9,%c11,__THREAD_per(%r3)      # Nope we didn't
-__switch_to_noper:
-        l       %r4,__THREAD_info(%r2)          # get thread_info of prev
+0:      l       %r4,__THREAD_info(%r2)          # get thread_info of prev
+        l       %r5,__THREAD_info(%r3)          # get thread_info of next
         tm      __TI_flags+3(%r4),_TIF_MCCK_PENDING     # machine check pending?
-        bz      __switch_to_no_mcck-__switch_to_base(%r1)
+        bz      1f-0b(%r1)
         ni      __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-        l       %r4,__THREAD_info(%r3)          # get thread_info of next
-        oi      __TI_flags+3(%r4),_TIF_MCCK_PENDING     # set it in next
-__switch_to_no_mcck:
-        stm     %r6,%r15,__SF_GPRS(%r15)        # store __switch_to registers of prev task
-        st      %r15,__THREAD_ksp(%r2)          # store kernel stack to prev->tss.ksp
-        l       %r15,__THREAD_ksp(%r3)          # load kernel stack from next->tss.ksp
-        lm      %r6,%r15,__SF_GPRS(%r15)        # load __switch_to registers of next task
-        st      %r3,__LC_CURRENT                # __LC_CURRENT = current task struct
-        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
-        l       %r3,__THREAD_info(%r3)          # load thread_info from task struct
-        st      %r3,__LC_THREAD_INFO
-        ahi     %r3,STACK_SIZE
-        st      %r3,__LC_KERNEL_STACK           # __LC_KERNEL_STACK = new kernel stack
+        oi      __TI_flags+3(%r5),_TIF_MCCK_PENDING     # set it in next
+1:      stm     %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
+        st      %r15,__THREAD_ksp(%r2)          # store kernel stack of prev
+        l       %r15,__THREAD_ksp(%r3)          # load kernel stack of next
+        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
+        lm      %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
+        st      %r3,__LC_CURRENT                # store task struct of next
+        st      %r5,__LC_THREAD_INFO            # store thread info of next
+        ahi     %r5,STACK_SIZE                  # end of kernel stack of next
+        st      %r5,__LC_KERNEL_STACK           # store end of kernel stack
         br      %r14
 
 __critical_start:
@@ -297,7 +287,7 @@ sysc_work_tif:
         bo      BASED(sysc_notify_resume)
         tm      __TI_flags+3(%r12),_TIF_RESTART_SVC
         bo      BASED(sysc_restart)
-        tm      __TI_flags+3(%r12),_TIF_SINGLE_STEP
+        tm      __TI_flags+3(%r12),_TIF_PER_TRAP
         bo      BASED(sysc_singlestep)
         b       BASED(sysc_return)      # beware of critical section cleanup
 
@@ -321,13 +311,13 @@ sysc_mcck_pending:
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
-        ni      __TI_flags+3(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+        ni      __TI_flags+3(%r12),255-_TIF_PER_TRAP    # clear TIF_PER_TRAP
         la      %r2,SP_PTREGS(%r15)     # load pt_regs
         l       %r1,BASED(.Ldo_signal)
         basr    %r14,%r1                # call do_signal
         tm      __TI_flags+3(%r12),_TIF_RESTART_SVC
         bo      BASED(sysc_restart)
-        tm      __TI_flags+3(%r12),_TIF_SINGLE_STEP
+        tm      __TI_flags+3(%r12),_TIF_PER_TRAP
         bo      BASED(sysc_singlestep)
         b       BASED(sysc_return)
 
@@ -353,15 +343,15 @@ sysc_restart:
         b       BASED(sysc_nr_ok)       # restart svc
 
 #
-# _TIF_SINGLE_STEP is set, call do_single_step
+# _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
-        ni      __TI_flags+3(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+        ni      __TI_flags+3(%r12),255-_TIF_PER_TRAP    # clear TIF_PER_TRAP
         xc      SP_SVCNR(2,%r15),SP_SVCNR(%r15)         # clear svc number
         la      %r2,SP_PTREGS(%r15)     # address of register-save area
         l       %r1,BASED(.Lhandle_per) # load adr. of per handler
         la      %r14,BASED(sysc_return) # load adr. of system return
-        br      %r1                     # branch to do_single_step
+        br      %r1                     # branch to do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -520,10 +510,10 @@ pgm_no_vtime2:
         l       %r1,__TI_task(%r12)
         tm      SP_PSW+1(%r15),0x01     # kernel per event ?
         bz      BASED(kernel_per)
-        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-        mvc     __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
-        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
-        oi      __TI_flags+3(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+        mvc     __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+        mvc     __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
+        mvc     __THREAD_per_paid(1,%r1),__LC_PER_PAID
+        oi      __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
         l       %r3,__LC_PGM_ILC        # load program interruption code
         l       %r4,__LC_TRANS_EXC_CODE
         REENABLE_IRQS
@@ -551,10 +541,10 @@ pgm_svcper:
         UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
         mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
         l       %r8,__TI_task(%r12)
-        mvc     __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
-        mvc     __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
-        mvc     __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
-        oi      __TI_flags+3(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+        mvc     __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
+        mvc     __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
+        mvc     __THREAD_per_paid(1,%r8),__LC_PER_PAID
+        oi      __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
         stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
         lm      %r2,%r6,SP_R2(%r15)     # load svc arguments
         b       BASED(sysc_do_svc)
@@ -1056,7 +1046,7 @@ cleanup_io_restore_insn:
 .Ldo_signal:    .long   do_signal
 .Ldo_notify_resume:
                 .long   do_notify_resume
-.Lhandle_per:   .long   do_single_step
+.Lhandle_per:   .long   do_per_trap
 .Ldo_execve:    .long   do_execve
 .Lexecve_tail:  .long   execve_tail
 .Ljump_table:   .long   pgm_check_table
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 95c1dfc4ef3..17a6f83a2d6 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -12,7 +12,7 @@ pgm_check_handler_t do_dat_exception;
 
 extern int sysctl_userprocess_debug;
 
-void do_single_step(struct pt_regs *regs);
+void do_per_trap(struct pt_regs *regs);
 void syscall_trace(struct pt_regs *regs, int entryexit);
 void kernel_stack_overflow(struct pt_regs * regs);
 void do_signal(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 1c0dce58933..9d3603d6c51 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                  _TIF_MCCK_PENDING)
 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -208,30 +208,21 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
  */
         .globl  __switch_to
 __switch_to:
-        tm      __THREAD_per+4(%r3),0xe8        # is the new process using per ?
-        jz      __switch_to_noper               # if not we're fine
-        stctg   %c9,%c11,__SF_EMPTY(%r15)       # We are using per stuff
-        clc     __THREAD_per(24,%r3),__SF_EMPTY(%r15)
-        je      __switch_to_noper               # we got away without bashing TLB's
-        lctlg   %c9,%c11,__THREAD_per(%r3)      # Nope we didn't
-__switch_to_noper:
-        lg      %r4,__THREAD_info(%r2)          # get thread_info of prev
+        lg      %r4,__THREAD_info(%r2)          # get thread_info of prev
+        lg      %r5,__THREAD_info(%r3)          # get thread_info of next
         tm      __TI_flags+7(%r4),_TIF_MCCK_PENDING     # machine check pending?
-        jz      __switch_to_no_mcck
+        jz      0f
         ni      __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-        lg      %r4,__THREAD_info(%r3)          # get thread_info of next
-        oi      __TI_flags+7(%r4),_TIF_MCCK_PENDING     # set it in next
-__switch_to_no_mcck:
-        stmg    %r6,%r15,__SF_GPRS(%r15)        # store __switch_to registers of prev task
-        stg     %r15,__THREAD_ksp(%r2)          # store kernel stack to prev->tss.ksp
-        lg      %r15,__THREAD_ksp(%r3)          # load kernel stack from next->tss.ksp
-        lmg     %r6,%r15,__SF_GPRS(%r15)        # load __switch_to registers of next task
-        stg     %r3,__LC_CURRENT                # __LC_CURRENT = current task struct
-        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
-        lg      %r3,__THREAD_info(%r3)          # load thread_info from task struct
-        stg     %r3,__LC_THREAD_INFO
-        aghi    %r3,STACK_SIZE
-        stg     %r3,__LC_KERNEL_STACK           # __LC_KERNEL_STACK = new kernel stack
+        oi      __TI_flags+7(%r5),_TIF_MCCK_PENDING     # set it in next
+0:      stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
+        stg     %r15,__THREAD_ksp(%r2)          # store kernel stack of prev
+        lg      %r15,__THREAD_ksp(%r3)          # load kernel stack of next
+        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
+        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
+        stg     %r3,__LC_CURRENT                # store task struct of next
+        stg     %r5,__LC_THREAD_INFO            # store thread info of next
+        aghi    %r5,STACK_SIZE                  # end of kernel stack of next
+        stg     %r5,__LC_KERNEL_STACK           # store end of kernel stack
         br      %r14
 
 __critical_start:
@@ -311,7 +302,7 @@ sysc_work_tif:
         jo      sysc_notify_resume
         tm      __TI_flags+7(%r12),_TIF_RESTART_SVC
         jo      sysc_restart
-        tm      __TI_flags+7(%r12),_TIF_SINGLE_STEP
+        tm      __TI_flags+7(%r12),_TIF_PER_TRAP
         jo      sysc_singlestep
         j       sysc_return             # beware of critical section cleanup
 
@@ -333,12 +324,12 @@ sysc_mcck_pending:
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
-        ni      __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+        ni      __TI_flags+7(%r12),255-_TIF_PER_TRAP    # clear TIF_PER_TRAP
         la      %r2,SP_PTREGS(%r15)     # load pt_regs
         brasl   %r14,do_signal          # call do_signal
         tm      __TI_flags+7(%r12),_TIF_RESTART_SVC
         jo      sysc_restart
-        tm      __TI_flags+7(%r12),_TIF_SINGLE_STEP
+        tm      __TI_flags+7(%r12),_TIF_PER_TRAP
         jo      sysc_singlestep
         j       sysc_return
 
@@ -363,14 +354,14 @@ sysc_restart:
         j       sysc_nr_ok              # restart svc
 
 #
-# _TIF_SINGLE_STEP is set, call do_single_step
+# _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
-        ni      __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+        ni      __TI_flags+7(%r12),255-_TIF_PER_TRAP    # clear TIF_PER_TRAP
         xc      SP_SVCNR(2,%r15),SP_SVCNR(%r15)         # clear svc number
         la      %r2,SP_PTREGS(%r15)     # address of register-save area
         larl    %r14,sysc_return        # load adr. of system return
-        jg      do_single_step          # branch to do_sigtrap
+        jg      do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -526,10 +517,10 @@ pgm_no_vtime2:
         lg      %r1,__TI_task(%r12)
         tm      SP_PSW+1(%r15),0x01     # kernel per event ?
         jz      kernel_per
-        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-        mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
-        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
-        oi      __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+        mvc     __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+        mvc     __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
+        mvc     __THREAD_per_paid(1,%r1),__LC_PER_PAID
+        oi      __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
         lgf     %r3,__LC_PGM_ILC        # load program interruption code
         lg      %r4,__LC_TRANS_EXC_CODE
         REENABLE_IRQS
@@ -558,10 +549,10 @@ pgm_svcper:
         mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
         LAST_BREAK
         lg      %r8,__TI_task(%r12)
-        mvc     __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
-        mvc     __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
-        mvc     __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
-        oi      __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+        mvc     __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
+        mvc     __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
+        mvc     __THREAD_per_paid(1,%r8),__LC_PER_PAID
+        oi      __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
         stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
         lmg     %r2,%r6,SP_R2(%r15)     # load svc arguments
         j       sysc_do_svc
@@ -573,7 +564,7 @@ kernel_per:
         REENABLE_IRQS
         xc      SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
         la      %r2,SP_PTREGS(%r15)     # address of register-save area
-        brasl   %r14,do_single_step
+        brasl   %r14,do_per_trap
         j       pgm_exit
 
 /*
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index f227f52ce91..1d05d669107 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -175,13 +175,12 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
                                    struct pt_regs *regs,
                                    unsigned long ip)
 {
-        per_cr_bits kprobe_per_regs[1];
+        struct per_regs per_kprobe;
 
-        /* Set up the per control reg info, will pass to lctl */
-        memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
-        kprobe_per_regs[0].em_instruction_fetch = 1;
-        kprobe_per_regs[0].starting_addr = ip;
-        kprobe_per_regs[0].ending_addr = ip;
+        /* Set up the PER control registers %cr9-%cr11 */
+        per_kprobe.control = PER_EVENT_IFETCH;
+        per_kprobe.start = ip;
+        per_kprobe.end = ip;
 
         /* Save control regs and psw mask */
         __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
@@ -189,7 +188,7 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
                 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
 
         /* Set PER control regs, turns on single step for the given address */
-        __ctl_load(kprobe_per_regs, 9, 11);
+        __ctl_load(per_kprobe, 9, 11);
         regs->psw.mask |= PSW_MASK_PER;
         regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
         regs->psw.addr = ip | PSW_ADDR_AMODE;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index c2fffb57d72..6ba42222b54 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -213,8 +213,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
         /* start new process with ar4 pointing to the correct address space */
         p->thread.mm_segment = get_fs();
         /* Don't copy debug registers */
-        memset(&p->thread.per_info, 0, sizeof(p->thread.per_info));
+        memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
+        memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
         clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+        clear_tsk_thread_flag(p, TIF_PER_TRAP);
         /* Initialize per thread user and system timer values */
         ti = task_thread_info(p);
         ti->user_timer = 0;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 019bb714db4..ef86ad24398 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1,25 +1,9 @@
 /*
- * arch/s390/kernel/ptrace.c
+ *  Ptrace user space interface.
  *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Denis Joseph Barrow
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- *  Based on PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Derived from "arch/m68k/kernel/ptrace.c"
- *  Copyright (C) 1994 by Hamish Macdonald
- *  Taken from linux/kernel/ptrace.c and modified for M680x0.
- *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- *  Modified by Cort Dougan (cort@cs.nmt.edu)
- *
- *
- *  This file is subject to the terms and conditions of the GNU General
- *  Public License. See the file README.legal in the main directory of
- *  this archive for more details.
  */
 
 #include <linux/kernel.h>
@@ -61,76 +45,58 @@ enum s390_regset {
         REGSET_GENERAL_EXTENDED,
 };
 
-static void
-FixPerRegisters(struct task_struct *task)
+void update_per_regs(struct task_struct *task)
 {
-        struct pt_regs *regs;
-        per_struct *per_info;
-        per_cr_words cr_words;
-
-        regs = task_pt_regs(task);
-        per_info = (per_struct *) &task->thread.per_info;
-        per_info->control_regs.bits.em_instruction_fetch =
-                per_info->single_step | per_info->instruction_fetch;
-
-        if (per_info->single_step) {
-                per_info->control_regs.bits.starting_addr = 0;
-#ifdef CONFIG_COMPAT
-                if (is_compat_task())
-                        per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
-                else
-#endif
-                        per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
-        } else {
-                per_info->control_regs.bits.starting_addr =
-                        per_info->starting_addr;
-                per_info->control_regs.bits.ending_addr =
-                        per_info->ending_addr;
-        }
-        /*
-         * if any of the control reg tracing bits are on
-         * we switch on per in the psw
-         */
-        if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
-                regs->psw.mask |= PSW_MASK_PER;
-        else
+        static const struct per_regs per_single_step = {
+                .control = PER_EVENT_IFETCH,
+                .start   = 0,
+                .end     = PSW_ADDR_INSN,
+        };
+        struct pt_regs *regs = task_pt_regs(task);
+        struct thread_struct *thread = &task->thread;
+        const struct per_regs *new;
+        struct per_regs old;
+
+        /* TIF_SINGLE_STEP overrides the user specified PER registers. */
+        new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
+                &per_single_step : &thread->per_user;
+
+        /* Take care of the PER enablement bit in the PSW. */
+        if (!(new->control & PER_EVENT_MASK)) {
                 regs->psw.mask &= ~PSW_MASK_PER;
-
-        if (per_info->control_regs.bits.em_storage_alteration)
-                per_info->control_regs.bits.storage_alt_space_ctl = 1;
-        else
-                per_info->control_regs.bits.storage_alt_space_ctl = 0;
-
-        if (task == current) {
-                __ctl_store(cr_words, 9, 11);
-                if (memcmp(&cr_words, &per_info->control_regs.words,
-                           sizeof(cr_words)) != 0)
-                        __ctl_load(per_info->control_regs.words, 9, 11);
+                return;
         }
+        regs->psw.mask |= PSW_MASK_PER;
+        __ctl_store(old, 9, 11);
+        if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
+                __ctl_load(*new, 9, 11);
 }
 
 void user_enable_single_step(struct task_struct *task)
 {
-        task->thread.per_info.single_step = 1;
-        FixPerRegisters(task);
+        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
+        if (task == current)
+                update_per_regs(task);
 }
 
 void user_disable_single_step(struct task_struct *task)
 {
-        task->thread.per_info.single_step = 0;
-        FixPerRegisters(task);
+        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+        if (task == current)
+                update_per_regs(task);
 }
 
 /*
  * Called by kernel/ptrace.c when detaching..
  *
- * Make sure single step bits etc are not set.
+ * Clear all debugging related fields.
  */
-void
-ptrace_disable(struct task_struct *child)
+void ptrace_disable(struct task_struct *task)
 {
-        /* make sure the single step bit is not set. */
-        user_disable_single_step(child);
+        memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
+        memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
+        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+        clear_tsk_thread_flag(task, TIF_PER_TRAP);
 }
 
 #ifndef CONFIG_64BIT
@@ -139,6 +105,47 @@ ptrace_disable(struct task_struct *child)
 # define __ADDR_MASK 7
 #endif
 
+static inline unsigned long __peek_user_per(struct task_struct *child,
+                                            addr_t addr)
+{
+        struct per_struct_kernel *dummy = NULL;
+
+        if (addr == (addr_t) &dummy->cr9)
+                /* Control bits of the active per set. */
+                return test_thread_flag(TIF_SINGLE_STEP) ?
+                        PER_EVENT_IFETCH : child->thread.per_user.control;
+        else if (addr == (addr_t) &dummy->cr10)
+                /* Start address of the active per set. */
+                return test_thread_flag(TIF_SINGLE_STEP) ?
+                        0 : child->thread.per_user.start;
+        else if (addr == (addr_t) &dummy->cr11)
+                /* End address of the active per set. */
+                return test_thread_flag(TIF_SINGLE_STEP) ?
+                        PSW_ADDR_INSN : child->thread.per_user.end;
+        else if (addr == (addr_t) &dummy->bits)
+                /* Single-step bit. */
+                return test_thread_flag(TIF_SINGLE_STEP) ?
+                        (1UL << (BITS_PER_LONG - 1)) : 0;
+        else if (addr == (addr_t) &dummy->starting_addr)
+                /* Start address of the user specified per set. */
+                return child->thread.per_user.start;
+        else if (addr == (addr_t) &dummy->ending_addr)
+                /* End address of the user specified per set. */
+                return child->thread.per_user.end;
+        else if (addr == (addr_t) &dummy->perc_atmid)
+                /* PER code, ATMID and AI of the last PER trap */
+                return (unsigned long)
+                        child->thread.per_event.cause << (BITS_PER_LONG - 16);
+        else if (addr == (addr_t) &dummy->address)
+                /* Address of the last PER trap */
+                return child->thread.per_event.address;
+        else if (addr == (addr_t) &dummy->access_id)
+                /* Access id of the last PER trap */
+                return (unsigned long)
+                        child->thread.per_event.paid << (BITS_PER_LONG - 8);
+        return 0;
+}
+
 /*
  * Read the word at offset addr from the user area of a process. The
  * trouble here is that the information is littered over different
@@ -204,10 +211,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 
         } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                 /*
-                 * per_info is found in the thread structure
+                 * Handle access to the per_info structure.
                  */
-                offset = addr - (addr_t) &dummy->regs.per_info;
-                tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
+                addr -= (addr_t) &dummy->regs.per_info;
+                tmp = __peek_user_per(child, addr);
 
         } else
                 tmp = 0;
@@ -237,6 +244,35 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
         return put_user(tmp, (addr_t __user *) data);
 }
 
+static inline void __poke_user_per(struct task_struct *child,
+                                   addr_t addr, addr_t data)
+{
+        struct per_struct_kernel *dummy = NULL;
+
+        /*
+         * There are only three fields in the per_info struct that the
+         * debugger user can write to.
+         * 1) cr9: the debugger wants to set a new PER event mask
+         * 2) starting_addr: the debugger wants to set a new starting
+         *    address to use with the PER event mask.
+         * 3) ending_addr: the debugger wants to set a new ending
+         *    address to use with the PER event mask.
+         * The user specified PER event mask and the start and end
+         * addresses are used only if single stepping is not in effect.
+         * Writes to any other field in per_info are ignored.
+         */
+        if (addr == (addr_t) &dummy->cr9)
+                /* PER event mask of the user specified per set. */
+                child->thread.per_user.control =
+                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
+        else if (addr == (addr_t) &dummy->starting_addr)
+                /* Starting address of the user specified per set. */
+                child->thread.per_user.start = data;
+        else if (addr == (addr_t) &dummy->ending_addr)
+                /* Ending address of the user specified per set. */
+                child->thread.per_user.end = data;
+}
+
 /*
  * Write a word to the user area of a process at location addr. This
  * operation does have an additional problem compared to peek_user.
@@ -311,19 +347,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 
         } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                 /*
-                 * per_info is found in the thread structure
+                 * Handle access to the per_info structure.
                  */
-                offset = addr - (addr_t) &dummy->regs.per_info;
-                *(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+                addr -= (addr_t) &dummy->regs.per_info;
+                __poke_user_per(child, addr, data);
 
         }
 
-        FixPerRegisters(child);
         return 0;
 }
 
-static int
-poke_user(struct task_struct *child, addr_t addr, addr_t data)
+static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
 {
         addr_t mask;
 
@@ -410,12 +444,53 @@ long arch_ptrace(struct task_struct *child, long request,
  */
 
 /*
+ * Same as peek_user_per but for a 31 bit program.
+ */
+static inline __u32 __peek_user_per_compat(struct task_struct *child,
+                                           addr_t addr)
+{
+        struct compat_per_struct_kernel *dummy32 = NULL;
+
+        if (addr == (addr_t) &dummy32->cr9)
+                /* Control bits of the active per set. */
+                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+                        PER_EVENT_IFETCH : child->thread.per_user.control;
+        else if (addr == (addr_t) &dummy32->cr10)
+                /* Start address of the active per set. */
+                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+                        0 : child->thread.per_user.start;
+        else if (addr == (addr_t) &dummy32->cr11)
+                /* End address of the active per set. */
+                return test_thread_flag(TIF_SINGLE_STEP) ?
+                        PSW32_ADDR_INSN : child->thread.per_user.end;
+        else if (addr == (addr_t) &dummy32->bits)
+                /* Single-step bit. */
+                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+                        0x80000000 : 0;
+        else if (addr == (addr_t) &dummy32->starting_addr)
+                /* Start address of the user specified per set. */
+                return (__u32) child->thread.per_user.start;
+        else if (addr == (addr_t) &dummy32->ending_addr)
+                /* End address of the user specified per set. */
+                return (__u32) child->thread.per_user.end;
+        else if (addr == (addr_t) &dummy32->perc_atmid)
+                /* PER code, ATMID and AI of the last PER trap */
+                return (__u32) child->thread.per_event.cause << 16;
+        else if (addr == (addr_t) &dummy32->address)
+                /* Address of the last PER trap */
+                return (__u32) child->thread.per_event.address;
+        else if (addr == (addr_t) &dummy32->access_id)
+                /* Access id of the last PER trap */
+                return (__u32) child->thread.per_event.paid << 24;
+        return 0;
+}
+
+/*
  * Same as peek_user but for a 31 bit program.
  */
 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 {
-        struct user32 *dummy32 = NULL;
-        per_struct32 *dummy_per32 = NULL;
+        struct compat_user *dummy32 = NULL;
         addr_t offset;
         __u32 tmp;
 
@@ -465,19 +540,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 
         } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                 /*
-                 * per_info is found in the thread structure
+                 * Handle access to the per_info structure.
                  */
-                offset = addr - (addr_t) &dummy32->regs.per_info;
-                /* This is magic. See per_struct and per_struct32. */
-                if ((offset >= (addr_t) &dummy_per32->control_regs &&
-                     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-                    (offset >= (addr_t) &dummy_per32->starting_addr &&
-                     offset <= (addr_t) &dummy_per32->ending_addr) ||
-                    offset == (addr_t) &dummy_per32->lowcore.words.address)
-                        offset = offset*2 + 4;
-                else
-                        offset = offset*2;
-                tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
+                addr -= (addr_t) &dummy32->regs.per_info;
+                tmp = __peek_user_per_compat(child, addr);
 
         } else
                 tmp = 0;
@@ -498,13 +564,32 @@ static int peek_user_compat(struct task_struct *child,
 }
 
 /*
+ * Same as poke_user_per but for a 31 bit program.
+ */
+static inline void __poke_user_per_compat(struct task_struct *child,
+                                          addr_t addr, __u32 data)
+{
+        struct compat_per_struct_kernel *dummy32 = NULL;
+
+        if (addr == (addr_t) &dummy32->cr9)
+                /* PER event mask of the user specified per set. */
+                child->thread.per_user.control =
+                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
+        else if (addr == (addr_t) &dummy32->starting_addr)
+                /* Starting address of the user specified per set. */
+                child->thread.per_user.start = data;
+        else if (addr == (addr_t) &dummy32->ending_addr)
+                /* Ending address of the user specified per set. */
+                child->thread.per_user.end = data;
+}
+
+/*
  * Same as poke_user but for a 31 bit program.
  */
 static int __poke_user_compat(struct task_struct *child,
                               addr_t addr, addr_t data)
 {
-        struct user32 *dummy32 = NULL;
-        per_struct32 *dummy_per32 = NULL;
+        struct compat_user *dummy32 = NULL;
         __u32 tmp = (__u32) data;
         addr_t offset;
 
@@ -561,37 +646,20 @@ static int __poke_user_compat(struct task_struct *child,
 
         } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                 /*
-                 * per_info is found in the thread structure.
-                 */
-                offset = addr - (addr_t) &dummy32->regs.per_info;
-                /*
-                 * This is magic. See per_struct and per_struct32.
-                 * By incident the offsets in per_struct are exactly
-                 * twice the offsets in per_struct32 for all fields.
-                 * The 8 byte fields need special handling though,
-                 * because the second half (bytes 4-7) is needed and
-                 * not the first half.
+                 * Handle access to the per_info structure.
                  */
-                if ((offset >= (addr_t) &dummy_per32->control_regs &&
-                     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-                    (offset >= (addr_t) &dummy_per32->starting_addr &&
-                     offset <= (addr_t) &dummy_per32->ending_addr) ||
-                    offset == (addr_t) &dummy_per32->lowcore.words.address)
-                        offset = offset*2 + 4;
-                else
-                        offset = offset*2;
-                *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
-
+                addr -= (addr_t) &dummy32->regs.per_info;
+                __poke_user_per_compat(child, addr, data);
         }
 
-        FixPerRegisters(child);
         return 0;
 }
 
 static int poke_user_compat(struct task_struct *child,
                             addr_t addr, addr_t data)
 {
-        if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
+        if (!is_compat_task() || (addr & 3) ||
+            addr > sizeof(struct compat_user) - 3)
                 return -EIO;
 
         return __poke_user_compat(child, addr, data);
@@ -602,7 +670,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 {
         unsigned long addr = caddr;
         unsigned long data = cdata;
-        ptrace_area_emu31 parea;
+        compat_ptrace_area parea;
         int copied, ret;
 
         switch (request) {
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index ee7ac8b1178..abbb3c3c7aa 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs)
                  * Let tracing know that we've done the handler setup.
                  */
                 tracehook_signal_handler(signr, &info, &ka, regs,
-                                         current->thread.per_info.single_step);
+                                         test_thread_flag(TIF_SINGLE_STEP));
         }
         return;
 }
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4f0cecb4f9e..5eb78dd584c 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -365,12 +365,10 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
                 ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
 }
 
-void __kprobes do_single_step(struct pt_regs *regs)
+void __kprobes do_per_trap(struct pt_regs *regs)
 {
-        if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
-                                        SIGTRAP) == NOTIFY_STOP){
+        if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
                 return;
-        }
         if (tracehook_consider_fatal_signal(current, SIGTRAP))
                 force_sig(SIGTRAP, current);
 }
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index dccb85d9153..2c57806c085 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -235,13 +235,13 @@ static noinline int signal_return(struct pt_regs *regs, long int_code,
         rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
 
         if (!rc && instruction == 0x0a77) {
-                clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+                clear_tsk_thread_flag(current, TIF_PER_TRAP);
                 if (is_compat_task())
                         sys32_sigreturn();
                 else
                         sys_sigreturn();
         } else if (!rc && instruction == 0x0aad) {
-                clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+                clear_tsk_thread_flag(current, TIF_PER_TRAP);
                 if (is_compat_task())
                         sys32_rt_sigreturn();
                 else
@@ -379,7 +379,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
          * The instruction that caused the program check will
          * be repeated. Don't signal single step via SIGTRAP.
          */
-        clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+        clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
         fault = 0;
 out_up:
         up_read(&mm->mmap_sem);