about summary refs log tree commit diff stats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorMichael Neuling <mikey@neuling.org>2008-01-17 23:50:30 -0500
committerPaul Mackerras <paulus@samba.org>2008-01-25 06:52:50 -0500
commitc3b75bd7bbf4a0438dc140033b80657995fd30ed (patch)
tree4621ebbf620d6262fccf61811824386b9ba50915 /arch/powerpc
parentcaa34c9e9cab6afb243a3da406cce272805c48c5 (diff)
[POWERPC] Make setjmp/longjmp code usable outside of xmon
This makes the setjmp/longjmp code used by xmon generically available to other code. It also removes the requirement for debugger hooks to be only called on 0x300 (data storage) exceptions.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/kernel/misc.S65
-rw-r--r--arch/powerpc/mm/fault.c6
-rw-r--r--arch/powerpc/xmon/setjmp.S61
-rw-r--r--arch/powerpc/xmon/xmon.c6
4 files changed, 68 insertions, 70 deletions
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 74ce0c7a7b1e..7b9160220698 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -8,6 +8,8 @@
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) 8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10 * 10 *
11 * setjmp/longjmp code by Paul Mackerras.
12 *
11 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 14 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 15 * as published by the Free Software Foundation; either version
@@ -15,6 +17,8 @@
15 */ 17 */
16#include <asm/ppc_asm.h> 18#include <asm/ppc_asm.h>
17#include <asm/unistd.h> 19#include <asm/unistd.h>
20#include <asm/asm-compat.h>
21#include <asm/asm-offsets.h>
18 22
19 .text 23 .text
20 24
@@ -51,3 +55,64 @@ _GLOBAL(kernel_execve)
51 bnslr 55 bnslr
52 neg r3,r3 56 neg r3,r3
53 blr 57 blr
58
59_GLOBAL(setjmp)
60 mflr r0
61 PPC_STL r0,0(r3)
62 PPC_STL r1,SZL(r3)
63 PPC_STL r2,2*SZL(r3)
64 mfcr r0
65 PPC_STL r0,3*SZL(r3)
66 PPC_STL r13,4*SZL(r3)
67 PPC_STL r14,5*SZL(r3)
68 PPC_STL r15,6*SZL(r3)
69 PPC_STL r16,7*SZL(r3)
70 PPC_STL r17,8*SZL(r3)
71 PPC_STL r18,9*SZL(r3)
72 PPC_STL r19,10*SZL(r3)
73 PPC_STL r20,11*SZL(r3)
74 PPC_STL r21,12*SZL(r3)
75 PPC_STL r22,13*SZL(r3)
76 PPC_STL r23,14*SZL(r3)
77 PPC_STL r24,15*SZL(r3)
78 PPC_STL r25,16*SZL(r3)
79 PPC_STL r26,17*SZL(r3)
80 PPC_STL r27,18*SZL(r3)
81 PPC_STL r28,19*SZL(r3)
82 PPC_STL r29,20*SZL(r3)
83 PPC_STL r30,21*SZL(r3)
84 PPC_STL r31,22*SZL(r3)
85 li r3,0
86 blr
87
88_GLOBAL(longjmp)
89 PPC_LCMPI r4,0
90 bne 1f
91 li r4,1
921: PPC_LL r13,4*SZL(r3)
93 PPC_LL r14,5*SZL(r3)
94 PPC_LL r15,6*SZL(r3)
95 PPC_LL r16,7*SZL(r3)
96 PPC_LL r17,8*SZL(r3)
97 PPC_LL r18,9*SZL(r3)
98 PPC_LL r19,10*SZL(r3)
99 PPC_LL r20,11*SZL(r3)
100 PPC_LL r21,12*SZL(r3)
101 PPC_LL r22,13*SZL(r3)
102 PPC_LL r23,14*SZL(r3)
103 PPC_LL r24,15*SZL(r3)
104 PPC_LL r25,16*SZL(r3)
105 PPC_LL r26,17*SZL(r3)
106 PPC_LL r27,18*SZL(r3)
107 PPC_LL r28,19*SZL(r3)
108 PPC_LL r29,20*SZL(r3)
109 PPC_LL r30,21*SZL(r3)
110 PPC_LL r31,22*SZL(r3)
111 PPC_LL r0,3*SZL(r3)
112 mtcrf 0x38,r0
113 PPC_LL r0,0(r3)
114 PPC_LL r1,SZL(r3)
115 PPC_LL r2,2*SZL(r3)
116 mtlr r0
117 mr r3,r4
118 blr
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 10dda224a361..7b2510799266 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -167,10 +167,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
167 if (notify_page_fault(regs)) 167 if (notify_page_fault(regs))
168 return 0; 168 return 0;
169 169
170 if (trap == 0x300) { 170 if (unlikely(debugger_fault_handler(regs)))
171 if (debugger_fault_handler(regs)) 171 return 0;
172 return 0;
173 }
174 172
175 /* On a kernel SLB miss we can only check for a valid exception entry */ 173 /* On a kernel SLB miss we can only check for a valid exception entry */
176 if (!user_mode(regs) && (address >= TASK_SIZE)) 174 if (!user_mode(regs) && (address >= TASK_SIZE))
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/xmon/setjmp.S
index 96a91f10e2ec..04c0b305ad4a 100644
--- a/arch/powerpc/xmon/setjmp.S
+++ b/arch/powerpc/xmon/setjmp.S
@@ -12,67 +12,6 @@
12#include <asm/ppc_asm.h> 12#include <asm/ppc_asm.h>
13#include <asm/asm-offsets.h> 13#include <asm/asm-offsets.h>
14 14
15_GLOBAL(xmon_setjmp)
16 mflr r0
17 PPC_STL r0,0(r3)
18 PPC_STL r1,SZL(r3)
19 PPC_STL r2,2*SZL(r3)
20 mfcr r0
21 PPC_STL r0,3*SZL(r3)
22 PPC_STL r13,4*SZL(r3)
23 PPC_STL r14,5*SZL(r3)
24 PPC_STL r15,6*SZL(r3)
25 PPC_STL r16,7*SZL(r3)
26 PPC_STL r17,8*SZL(r3)
27 PPC_STL r18,9*SZL(r3)
28 PPC_STL r19,10*SZL(r3)
29 PPC_STL r20,11*SZL(r3)
30 PPC_STL r21,12*SZL(r3)
31 PPC_STL r22,13*SZL(r3)
32 PPC_STL r23,14*SZL(r3)
33 PPC_STL r24,15*SZL(r3)
34 PPC_STL r25,16*SZL(r3)
35 PPC_STL r26,17*SZL(r3)
36 PPC_STL r27,18*SZL(r3)
37 PPC_STL r28,19*SZL(r3)
38 PPC_STL r29,20*SZL(r3)
39 PPC_STL r30,21*SZL(r3)
40 PPC_STL r31,22*SZL(r3)
41 li r3,0
42 blr
43
44_GLOBAL(xmon_longjmp)
45 PPC_LCMPI r4,0
46 bne 1f
47 li r4,1
481: PPC_LL r13,4*SZL(r3)
49 PPC_LL r14,5*SZL(r3)
50 PPC_LL r15,6*SZL(r3)
51 PPC_LL r16,7*SZL(r3)
52 PPC_LL r17,8*SZL(r3)
53 PPC_LL r18,9*SZL(r3)
54 PPC_LL r19,10*SZL(r3)
55 PPC_LL r20,11*SZL(r3)
56 PPC_LL r21,12*SZL(r3)
57 PPC_LL r22,13*SZL(r3)
58 PPC_LL r23,14*SZL(r3)
59 PPC_LL r24,15*SZL(r3)
60 PPC_LL r25,16*SZL(r3)
61 PPC_LL r26,17*SZL(r3)
62 PPC_LL r27,18*SZL(r3)
63 PPC_LL r28,19*SZL(r3)
64 PPC_LL r29,20*SZL(r3)
65 PPC_LL r30,21*SZL(r3)
66 PPC_LL r31,22*SZL(r3)
67 PPC_LL r0,3*SZL(r3)
68 mtcrf 0x38,r0
69 PPC_LL r0,0(r3)
70 PPC_LL r1,SZL(r3)
71 PPC_LL r2,2*SZL(r3)
72 mtlr r0
73 mr r3,r4
74 blr
75
76/* 15/*
77 * Grab the register values as they are now. 16 * Grab the register values as they are now.
78 * This won't do a particularily good job because we really 17 * This won't do a particularily good job because we really
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 865e36751f21..a34172ddc468 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -40,6 +40,7 @@
40#include <asm/spu.h> 40#include <asm/spu.h>
41#include <asm/spu_priv1.h> 41#include <asm/spu_priv1.h>
42#include <asm/firmware.h> 42#include <asm/firmware.h>
43#include <asm/setjmp.h>
43 44
44#ifdef CONFIG_PPC64 45#ifdef CONFIG_PPC64
45#include <asm/hvcall.h> 46#include <asm/hvcall.h>
@@ -71,12 +72,9 @@ static unsigned long ncsum = 4096;
71static int termch; 72static int termch;
72static char tmpstr[128]; 73static char tmpstr[128];
73 74
74#define JMP_BUF_LEN 23
75static long bus_error_jmp[JMP_BUF_LEN]; 75static long bus_error_jmp[JMP_BUF_LEN];
76static int catch_memory_errors; 76static int catch_memory_errors;
77static long *xmon_fault_jmp[NR_CPUS]; 77static long *xmon_fault_jmp[NR_CPUS];
78#define setjmp xmon_setjmp
79#define longjmp xmon_longjmp
80 78
81/* Breakpoint stuff */ 79/* Breakpoint stuff */
82struct bpt { 80struct bpt {
@@ -162,8 +160,6 @@ int xmon_no_auto_backtrace;
162extern void xmon_enter(void); 160extern void xmon_enter(void);
163extern void xmon_leave(void); 161extern void xmon_leave(void);
164 162
165extern long setjmp(long *);
166extern void longjmp(long *, long);
167extern void xmon_save_regs(struct pt_regs *); 163extern void xmon_save_regs(struct pt_regs *);
168 164
169#ifdef CONFIG_PPC64 165#ifdef CONFIG_PPC64