author    Chris Zankel <chris@zankel.net>  2008-02-12 16:17:07 -0500
committer Chris Zankel <chris@zankel.net>  2008-02-13 20:41:43 -0500
commit    c658eac628aa8df040dfe614556d95e6da3a9ffb (patch)
tree      e2211e1d5c894c29e92d4c744f504b38410efe41 /arch/xtensa
parent    71d28e6c285548106f551fde13ca6d589433d843 (diff)
[XTENSA] Add support for configurable registers and coprocessors
The Xtensa architecture allows the definition of custom instructions and registers. Registers that are bound to a coprocessor are only accessible when the corresponding enable bit is set, which allows a 'lazy' context-switch mechanism to be implemented. Other registers need to be saved and restored at context-switch time or during interrupt handling.

This patch adds support for these additional states:
- save and restore registers used by the compiler on interrupt entry and exit
- context-switch additional registers that are not bound to any coprocessor
- 'lazy' context switch of registers bound to a coprocessor
- ptrace interface to provide access to the additional registers
- update configuration files in include/asm-xtensa/variant-fsf

Signed-off-by: Chris Zankel <chris@zankel.net>
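For orientation, here is a minimal C sketch of the 'lazy' switch idea described above: on a coprocessor-disabled exception the kernel re-enables the coprocessor, saves the registers of the previous owner (if any), reloads the faulting thread's saved state, and records the new owner. The names mirror the coprocessor_owner[] table and cpenable field introduced by this patch, but the code is illustrative only; the real handler is fast_coprocessor in arch/xtensa/kernel/coprocessor.S and is written in assembly.

/* Illustrative sketch only -- not the actual exception path. */

struct thread_info;                            /* per-thread kernel state        */

extern struct thread_info *cp_owner[8];        /* last user of each coprocessor  */

extern void cp_save(struct thread_info *ti, int cp);   /* dump CP regs into ti   */
extern void cp_load(struct thread_info *ti, int cp);   /* reload CP regs from ti */
extern void cp_hw_enable(int cp);                      /* set bit in CPENABLE    */

/* Called on an EXCCAUSE_COPROCESSORn_DISABLED exception. */
void lazy_coprocessor_switch(struct thread_info *current_ti, int cp)
{
	cp_hw_enable(cp);                 /* let the faulting instruction retry */

	if (cp_owner[cp] == current_ti)
		return;                   /* registers are already ours */

	if (cp_owner[cp])
		cp_save(cp_owner[cp], cp);        /* spill the previous owner   */

	cp_load(current_ti, cp);                  /* restore our saved state    */
	cp_owner[cp] = current_ti;                /* we own the coprocessor now */
}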
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c  |  16
-rw-r--r--  arch/xtensa/kernel/coprocessor.S  | 443
-rw-r--r--  arch/xtensa/kernel/entry.S        | 295
-rw-r--r--  arch/xtensa/kernel/process.c      | 261
-rw-r--r--  arch/xtensa/kernel/ptrace.c       | 347
-rw-r--r--  arch/xtensa/kernel/signal.c       |  65
-rw-r--r--  arch/xtensa/kernel/traps.c        |  16
7 files changed, 713 insertions(+), 730 deletions(-)
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 5d9ef515ca1e..ef63adadf7f4 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -63,6 +63,8 @@ int main(void)
63 DEFINE(PT_SIZE, sizeof(struct pt_regs)); 63 DEFINE(PT_SIZE, sizeof(struct pt_regs));
64 DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS])); 64 DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
65 DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS])); 65 DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
66 DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
67 DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));
66 68
67 /* struct task_struct */ 69 /* struct task_struct */
68 DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace)); 70 DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
@@ -76,7 +78,19 @@ int main(void)
76 /* struct thread_info (offset from start_struct) */ 78 /* struct thread_info (offset from start_struct) */
77 DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); 79 DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
78 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); 80 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
79 DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save)); 81 DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
82#if XTENSA_HAVE_COPROCESSORS
83 DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
84 DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
85 DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
86 DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
87 DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
88 DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
89 DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
90 DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
91#endif
92 DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
93 DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
80 DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds)); 94 DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
81 95
82 /* struct mm_struct */ 96 /* struct mm_struct */
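The asm-offsets.c hunk above only emits numeric constants: each DEFINE() turns a C struct offset or size into an assembler symbol so that entry.S and coprocessor.S can address fields such as thread_info's cpenable and xtregs_cp without knowing the C layout. A rough sketch of the mechanism follows; the exact DEFINE macro lives in the kernel build machinery and may differ in detail, and thread_info_example is a stand-in, not the real structure.

#include <stddef.h>

/* Emit "->NAME value" markers into the generated assembly; the build
 * post-processes them into "#define NAME value" in asm-offsets.h.
 * (Sketch of the usual Kbuild pattern; the in-tree macro may differ.) */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct thread_info_example {             /* stand-in for the real struct */
	unsigned long flags;
	unsigned long cpenable;
	unsigned char xtregs_cp[256];
};

int main(void)
{
	DEFINE(THREAD_CPENABLE,
	       offsetof(struct thread_info_example, cpenable));
	DEFINE(THREAD_XTREGS_CP0,
	       offsetof(struct thread_info_example, xtregs_cp));
	return 0;
}

Compiling such a file with -S and scanning for the "->" markers is what produces the constants that the assembly sources then include.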
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 01bcb9fcfcbd..2bc1e145c0a4 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -8,193 +8,328 @@
8 * License. See the file "COPYING" in the main directory of this archive 8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details. 9 * for more details.
10 * 10 *
11 * Copyright (C) 2003 - 2005 Tensilica Inc. 11 * Copyright (C) 2003 - 2007 Tensilica Inc.
12 *
13 * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
14 */ 12 */
15 13
16/*
17 * This module contains a table that describes the layout of the various
18 * custom registers and states associated with each coprocessor, as well
19 * as those not associated with any coprocessor ("extra state").
20 * This table is included with core dumps and is available via the ptrace
21 * interface, allowing the layout of such register/state information to
22 * be modified in the kernel without affecting the debugger. Each
23 * register or state is identified using a 32-bit "libdb target number"
24 * assigned when the Xtensa processor is generated.
25 */
26 14
27#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <asm/asm-offsets.h>
28#include <asm/processor.h> 17#include <asm/processor.h>
18#include <asm/coprocessor.h>
19#include <asm/thread_info.h>
20#include <asm/uaccess.h>
21#include <asm/unistd.h>
22#include <asm/ptrace.h>
23#include <asm/current.h>
24#include <asm/pgtable.h>
25#include <asm/page.h>
26#include <asm/signal.h>
27#include <asm/tlbflush.h>
29 28
30#if XCHAL_HAVE_CP 29/*
30 * Entry condition:
31 *
32 * a0: trashed, original value saved on stack (PT_AREG0)
33 * a1: a1
34 * a2: new stack pointer, original in DEPC
35 * a3: dispatch table
36 * depc: a2, original value saved on stack (PT_DEPC)
37 * excsave_1: a3
38 *
39 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
40 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
41 */
31 42
32#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE) 43/* IO protection is currently unsupported. */
33 44
34ENTRY(release_coprocessors) 45ENTRY(fast_io_protect)
46 wsr a0, EXCSAVE_1
47 movi a0, unrecoverable_exception
48 callx0 a0
35 49
36 entry a1, 16 50#if XTENSA_HAVE_COPROCESSORS
37 # a2: task
38 movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
39 movi a4, coprocessor_info+CP_LAST # a4: owner-table
40 # a5: tmp
41 movi a6, 0 # a6: 0
42 rsil a7, LOCKLEVEL # a7: PS
43 51
441: /* Check if task is coprocessor owner of coprocessor[i]. */ 52/*
53 * Macros for lazy context switch.
54 */
45 55
46 l32i a5, a4, COPROCESSOR_INFO_OWNER 56#define SAVE_CP_REGS(x) \
47 srli a3, a3, 1 57 .align 4; \
48 beqz a3, 1f 58 .Lsave_cp_regs_cp##x: \
49 addi a4, a4, -8 59 .if XTENSA_HAVE_COPROCESSOR(x); \
50 beq a2, a5, 1b 60 xchal_cp##x##_store a2 a4 a5 a6 a7; \
61 .endif; \
62 jx a0
51 63
52 /* Found an entry: Clear entry CPENABLE bit to disable CP. */ 64#define SAVE_CP_REGS_TAB(x) \
65 .if XTENSA_HAVE_COPROCESSOR(x); \
66 .long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table; \
67 .else; \
68 .long 0; \
69 .endif; \
70 .long THREAD_XTREGS_CP##x
53 71
54 rsr a5, CPENABLE
55 s32i a6, a4, COPROCESSOR_INFO_OWNER
56 xor a5, a3, a5
57 wsr a5, CPENABLE
58 72
59 bnez a3, 1b 73#define LOAD_CP_REGS(x) \
74 .align 4; \
75 .Lload_cp_regs_cp##x: \
76 .if XTENSA_HAVE_COPROCESSOR(x); \
77 xchal_cp##x##_load a2 a4 a5 a6 a7; \
78 .endif; \
79 jx a0
60 80
611: wsr a7, PS 81#define LOAD_CP_REGS_TAB(x) \
62 rsync 82 .if XTENSA_HAVE_COPROCESSOR(x); \
63 retw 83 .long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \
84 .else; \
85 .long 0; \
86 .endif; \
87 .long THREAD_XTREGS_CP##x
64 88
89 SAVE_CP_REGS(0)
90 SAVE_CP_REGS(1)
91 SAVE_CP_REGS(2)
92 SAVE_CP_REGS(3)
93 SAVE_CP_REGS(4)
94 SAVE_CP_REGS(5)
95 SAVE_CP_REGS(6)
96 SAVE_CP_REGS(7)
65 97
66ENTRY(disable_coprocessor) 98 LOAD_CP_REGS(0)
67 entry sp, 16 99 LOAD_CP_REGS(1)
68 rsil a7, LOCKLEVEL 100 LOAD_CP_REGS(2)
69 rsr a3, CPENABLE 101 LOAD_CP_REGS(3)
70 movi a4, 1 102 LOAD_CP_REGS(4)
71 ssl a2 103 LOAD_CP_REGS(5)
72 sll a4, a4 104 LOAD_CP_REGS(6)
73 and a4, a3, a4 105 LOAD_CP_REGS(7)
74 xor a3, a3, a4
75 wsr a3, CPENABLE
76 wsr a7, PS
77 rsync
78 retw
79 106
80ENTRY(enable_coprocessor) 107 .align 4
81 entry sp, 16 108.Lsave_cp_regs_jump_table:
82 rsil a7, LOCKLEVEL 109 SAVE_CP_REGS_TAB(0)
83 rsr a3, CPENABLE 110 SAVE_CP_REGS_TAB(1)
84 movi a4, 1 111 SAVE_CP_REGS_TAB(2)
85 ssl a2 112 SAVE_CP_REGS_TAB(3)
86 sll a4, a4 113 SAVE_CP_REGS_TAB(4)
87 or a3, a3, a4 114 SAVE_CP_REGS_TAB(5)
88 wsr a3, CPENABLE 115 SAVE_CP_REGS_TAB(6)
89 wsr a7, PS 116 SAVE_CP_REGS_TAB(7)
90 rsync
91 retw
92 117
118.Lload_cp_regs_jump_table:
119 LOAD_CP_REGS_TAB(0)
120 LOAD_CP_REGS_TAB(1)
121 LOAD_CP_REGS_TAB(2)
122 LOAD_CP_REGS_TAB(3)
123 LOAD_CP_REGS_TAB(4)
124 LOAD_CP_REGS_TAB(5)
125 LOAD_CP_REGS_TAB(6)
126 LOAD_CP_REGS_TAB(7)
93 127
94ENTRY(save_coprocessor_extra) 128/*
95 entry sp, 16 129 * coprocessor_save(buffer, index)
96 xchal_extra_store_funcbody 130 * a2 a3
97 retw 131 * coprocessor_load(buffer, index)
132 * a2 a3
133 *
134 * Save or load coprocessor registers for coprocessor 'index'.
135 * The register values are saved to or loaded from them 'buffer' address.
136 *
137 * Note that these functions don't update the coprocessor_owner information!
138 *
139 */
98 140
99ENTRY(restore_coprocessor_extra) 141ENTRY(coprocessor_save)
100 entry sp, 16 142 entry a1, 32
101 xchal_extra_load_funcbody 143 s32i a0, a1, 0
144 movi a0, .Lsave_cp_regs_jump_table
145 addx8 a3, a3, a0
146 l32i a3, a3, 0
147 beqz a3, 1f
148 add a0, a0, a3
149 callx0 a0
1501: l32i a0, a1, 0
102 retw 151 retw
103 152
104ENTRY(save_coprocessor_registers) 153ENTRY(coprocessor_load)
105 entry sp, 16 154 entry a1, 32
106 xchal_cpi_store_funcbody 155 s32i a0, a1, 0
156 movi a0, .Lload_cp_regs_jump_table
157 addx4 a3, a3, a0
158 l32i a3, a3, 0
159 beqz a3, 1f
160 add a0, a0, a3
161 callx0 a0
1621: l32i a0, a1, 0
107 retw 163 retw
108 164
109ENTRY(restore_coprocessor_registers) 165/*
110 entry sp, 16 166 * coprocessor_flush(struct task_info*, index)
111 xchal_cpi_load_funcbody 167 * a2 a3
168 * coprocessor_restore(struct task_info*, index)
169 * a2 a3
170 *
171 * Save or load coprocessor registers for coprocessor 'index'.
172 * The register values are saved to or loaded from the coprocessor area
173 * inside the task_info structure.
174 *
175 * Note that these functions don't update the coprocessor_owner information!
176 *
177 */
178
179
180ENTRY(coprocessor_flush)
181 entry a1, 32
182 s32i a0, a1, 0
183 movi a0, .Lsave_cp_regs_jump_table
184 addx8 a3, a3, a0
185 l32i a4, a3, 4
186 l32i a3, a3, 0
187 add a2, a2, a4
188 beqz a3, 1f
189 add a0, a0, a3
190 callx0 a0
1911: l32i a0, a1, 0
112 retw 192 retw
113 193
194ENTRY(coprocessor_restore)
195 entry a1, 32
196 s32i a0, a1, 0
197 movi a0, .Lload_cp_regs_jump_table
198 addx4 a3, a3, a0
199 l32i a4, a3, 4
200 l32i a3, a3, 0
201 add a2, a2, a4
202 beqz a3, 1f
203 add a0, a0, a3
204 callx0 a0
2051: l32i a0, a1, 0
206 retw
114 207
115/* 208/*
116 * The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros 209 * Entry condition:
117 * describe the contents of coprocessor & extra save areas in terms of
118 * undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
119 * latter macros here; they expand into a table of the format we want.
120 * The general format is:
121 * 210 *
122 * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum, 211 * a0: trashed, original value saved on stack (PT_AREG0)
123 * bitmask, rsv2, rsv3) 212 * a1: a1
124 * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum, 213 * a2: new stack pointer, original in DEPC
125 * bitmask, rsv2, rsv3) 214 * a3: dispatch table
126 * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, 215 * depc: a2, original value saved on stack (PT_DEPC)
127 * numentries, contentsize, regname_base, 216 * excsave_1: a3
128 * regfile_name, rsv2, rsv3)
129 * 217 *
130 * For this table, we only care about the <libdbnum>, <offset> and <size> 218 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
131 * fields. 219 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
132 */ 220 */
133 221
134/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */ 222ENTRY(fast_coprocessor_double)
135 223 wsr a0, EXCSAVE_1
136#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \ 224 movi a0, unrecoverable_exception
137 bitmask, rsv2, rsv3) \ 225 callx0 a0
138 reg_entry libdbnum, offset, size ;
139#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
140 bitmask, rsv2, rsv3) \
141 reg_entry libdbnum, offset, size ;
142#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
143 numentries, contentsize, regname_base, \
144 regfile_name, rsv2, rsv3) \
145 reg_entry libdbnum, offset, size ;
146
147/* A single table entry: */
148 .macro reg_entry libdbnum, offset, size
149 .ifne (__last_offset-(__last_group_offset+\offset))
150 /* padding entry */
151 .word (0xFC000000+__last_offset-(__last_group_offset+\offset))
152 .endif
153 .word \libdbnum /* actual entry */
154 .set __last_offset, __last_group_offset+\offset+\size
155 .endm /* reg_entry */
156
157
158/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
159 .macro reg_group cpnum, num_entries, align
160 .set __last_group_offset, (__last_offset + \align- 1) & -\align
161 .ifne \num_entries
162 .word 0xFD000000+(\cpnum<<16)+\num_entries
163 .endif
164 .endm /* reg_group */
165 226
166/*
167 * Register info tables.
168 */
169 227
170 .section .rodata, "a" 228ENTRY(fast_coprocessor)
171 .globl _xtensa_reginfo_tables 229
172 .globl _xtensa_reginfo_table_size 230 /* Save remaining registers a1-a3 and SAR */
173 .align 4 231
174_xtensa_reginfo_table_size: 232 xsr a3, EXCSAVE_1
175 .word _xtensa_reginfo_table_end - _xtensa_reginfo_tables 233 s32i a3, a2, PT_AREG3
176 234 rsr a3, SAR
177_xtensa_reginfo_tables: 235 s32i a1, a2, PT_AREG1
178 .set __last_offset, 0 236 s32i a3, a2, PT_SAR
179 reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN 237 mov a1, a2
180 XCHAL_EXTRA_SA_CONTENTS_LIBDB 238 rsr a2, DEPC
181 reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN 239 s32i a2, a1, PT_AREG2
182 XCHAL_CP0_SA_CONTENTS_LIBDB 240
183 reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN 241 /*
184 XCHAL_CP1_SA_CONTENTS_LIBDB 242 * The hal macros require up to 4 temporary registers. We use a3..a6.
185 reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN 243 */
186 XCHAL_CP2_SA_CONTENTS_LIBDB 244
187 reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN 245 s32i a4, a1, PT_AREG4
188 XCHAL_CP3_SA_CONTENTS_LIBDB 246 s32i a5, a1, PT_AREG5
189 reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN 247 s32i a6, a1, PT_AREG6
190 XCHAL_CP4_SA_CONTENTS_LIBDB 248
191 reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN 249 /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
192 XCHAL_CP5_SA_CONTENTS_LIBDB 250
193 reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN 251 rsr a3, EXCCAUSE
194 XCHAL_CP6_SA_CONTENTS_LIBDB 252 addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
195 reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN 253
196 XCHAL_CP7_SA_CONTENTS_LIBDB 254 /* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
197 .word 0xFC000000 /* invalid register number,marks end of table*/ 255
198_xtensa_reginfo_table_end: 256 ssl a3 # SAR: 32 - coprocessor_number
199#endif 257 movi a2, 1
258 rsr a0, CPENABLE
259 sll a2, a2
260 or a0, a0, a2
261 wsr a0, CPENABLE
262 rsync
263
264 /* Retrieve previous owner. (a3 still holds CP number) */
265
266 movi a0, coprocessor_owner # list of owners
267 addx4 a0, a3, a0 # entry for CP
268 l32i a4, a0, 0
269
270 beqz a4, 1f # skip 'save' if no previous owner
271
272 /* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
273
274 l32i a5, a4, THREAD_CPENABLE
275 xor a5, a5, a2 # (1 << cp-id) still in a2
276 s32i a5, a4, THREAD_CPENABLE
277
278 /*
279 * Get context save area and 'call' save routine.
280 * (a4 still holds previous owner (thread_info), a3 CP number)
281 */
282
283 movi a5, .Lsave_cp_regs_jump_table
284 movi a0, 2f # a0: 'return' address
285 addx8 a3, a3, a5 # a3: coprocessor number
286 l32i a2, a3, 4 # a2: xtregs offset
287 l32i a3, a3, 0 # a3: jump offset
288 add a2, a2, a4
289 add a4, a3, a5 # a4: address of save routine
290 jx a4
291
292 /* Note that only a0 and a1 were preserved. */
293
2942: rsr a3, EXCCAUSE
295 addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
296 movi a0, coprocessor_owner
297 addx4 a0, a3, a0
298
299 /* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
300
3011: GET_THREAD_INFO (a4, a1)
302 s32i a4, a0, 0
303
304 /* Get context save area and 'call' load routine. */
305
306 movi a5, .Lload_cp_regs_jump_table
307 movi a0, 1f
308 addx8 a3, a3, a5
309 l32i a2, a3, 4 # a2: xtregs offset
310 l32i a3, a3, 0 # a3: jump offset
311 add a2, a2, a4
312 add a4, a3, a5
313 jx a4
314
315 /* Restore all registers and return from exception handler. */
316
3171: l32i a6, a1, PT_AREG6
318 l32i a5, a1, PT_AREG5
319 l32i a4, a1, PT_AREG4
320
321 l32i a0, a1, PT_SAR
322 l32i a3, a1, PT_AREG3
323 l32i a2, a1, PT_AREG2
324 wsr a0, SAR
325 l32i a0, a1, PT_AREG0
326 l32i a1, a1, PT_AREG1
327
328 rfe
329
330 .data
331ENTRY(coprocessor_owner)
332 .fill XCHAL_CP_MAX, 4, 0
333
334#endif /* XTENSA_HAVE_COPROCESSORS */
200 335
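The new coprocessor.S code above dispatches through small per-coprocessor tables: each SAVE_CP_REGS_TAB/LOAD_CP_REGS_TAB entry stores the relative address of an xchal_cpN_store/load stub (or 0 if that coprocessor is not configured) plus the offset of that coprocessor's save area inside thread_info. A C rendition of the same dispatch, with illustrative names rather than the kernel's, might look like this sketch.

#include <stddef.h>

struct thread_info;                        /* opaque here */

/* One table entry per coprocessor: a handler (NULL if the processor
 * variant has no such CP) and the offset of its save area inside
 * struct thread_info (THREAD_XTREGS_CPn). */
struct cp_table_entry {
	void (*handler)(void *save_area);  /* xchal_cpN_store/load stub */
	size_t xtregs_offset;
};

extern struct cp_table_entry cp_save_table[8];
extern struct cp_table_entry cp_load_table[8];

/* Rough equivalent of coprocessor_flush(): save coprocessor 'cp' of
 * thread 'ti' into the per-thread save area, doing nothing if the
 * coprocessor is not configured. */
void cp_flush(struct thread_info *ti, int cp)
{
	struct cp_table_entry *e = &cp_save_table[cp];

	if (e->handler)
		e->handler((char *)ti + e->xtregs_offset);
}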
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index b51ddb0dcf28..24770b6a5e4c 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -25,6 +25,7 @@
25#include <asm/page.h> 25#include <asm/page.h>
26#include <asm/signal.h> 26#include <asm/signal.h>
27#include <asm/tlbflush.h> 27#include <asm/tlbflush.h>
28#include <asm/variant/tie-asm.h>
28 29
29/* Unimplemented features. */ 30/* Unimplemented features. */
30 31
@@ -213,19 +214,7 @@ _user_exception:
213 214
214 /* We are back to the original stack pointer (a1) */ 215 /* We are back to the original stack pointer (a1) */
215 216
2162: 2172: /* Now, jump to the common exception handler. */
217#if XCHAL_EXTRA_SA_SIZE
218
219 /* For user exceptions, save the extra state into the user's TCB.
220 * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
221 */
222
223 GET_CURRENT(a2,a1)
224 addi a2, a2, THREAD_CP_SAVE
225 xchal_extra_store_funcbody
226#endif
227
228 /* Now, jump to the common exception handler. */
229 218
230 j common_exception 219 j common_exception
231 220
@@ -381,6 +370,10 @@ common_exception:
381 s32i a2, a1, PT_LBEG 370 s32i a2, a1, PT_LBEG
382 s32i a3, a1, PT_LEND 371 s32i a3, a1, PT_LEND
383 372
373 /* Save optional registers. */
374
375 save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
376
384 /* Go to second-level dispatcher. Set up parameters to pass to the 377 /* Go to second-level dispatcher. Set up parameters to pass to the
385 * exception handler and call the exception handler. 378 * exception handler and call the exception handler.
386 */ 379 */
@@ -452,22 +445,6 @@ common_exception_return:
452 445
4534: /* a2 holds GET_CURRENT(a2,a1) */ 4464: /* a2 holds GET_CURRENT(a2,a1) */
454 447
455#if XCHAL_EXTRA_SA_SIZE
456
457 /* For user exceptions, restore the extra state from the user's TCB. */
458
459 /* Note: a2 still contains GET_CURRENT(a2,a1) */
460 addi a2, a2, THREAD_CP_SAVE
461 xchal_extra_load_funcbody
462
463 /* We must assume that xchal_extra_store_funcbody destroys
464 * registers a2..a15. FIXME, this list can eventually be
465 * reduced once real register requirements of the macro are
466 * finalized. */
467
468#endif /* XCHAL_EXTRA_SA_SIZE */
469
470
471 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ 448 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
472 449
473 l32i a2, a1, PT_WINDOWBASE 450 l32i a2, a1, PT_WINDOWBASE
@@ -614,6 +591,12 @@ kernel_exception_exit:
614 591
615common_exception_exit: 592common_exception_exit:
616 593
594 /* Restore optional registers. */
595
596 load_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
597
598 /* Restore address registers. */
599
617 _bbsi.l a2, 1, 1f 600 _bbsi.l a2, 1, 1f
618 l32i a4, a1, PT_AREG4 601 l32i a4, a1, PT_AREG4
619 l32i a5, a1, PT_AREG5 602 l32i a5, a1, PT_AREG5
@@ -1146,7 +1129,6 @@ CATCH
1146 * excsave_1: a3 1129 * excsave_1: a3
1147 * 1130 *
1148 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. 1131 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
1149 * Note: We don't need to save a2 in depc (return value)
1150 */ 1132 */
1151 1133
1152ENTRY(fast_syscall_spill_registers) 1134ENTRY(fast_syscall_spill_registers)
@@ -1162,29 +1144,31 @@ ENTRY(fast_syscall_spill_registers)
1162 1144
1163 rsr a0, SAR 1145 rsr a0, SAR
1164 xsr a3, EXCSAVE_1 # restore a3 and excsave_1 1146 xsr a3, EXCSAVE_1 # restore a3 and excsave_1
1165 s32i a0, a2, PT_AREG4 # store SAR to PT_AREG4
1166 s32i a3, a2, PT_AREG3 1147 s32i a3, a2, PT_AREG3
1148 s32i a4, a2, PT_AREG4
1149 s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
1167 1150
1168 /* The spill routine might clobber a7, a11, and a15. */ 1151 /* The spill routine might clobber a7, a11, and a15. */
1169 1152
1170 s32i a7, a2, PT_AREG5 1153 s32i a7, a2, PT_AREG7
1171 s32i a11, a2, PT_AREG6 1154 s32i a11, a2, PT_AREG11
1172 s32i a15, a2, PT_AREG7 1155 s32i a15, a2, PT_AREG15
1173 1156
1174 call0 _spill_registers # destroys a3, DEPC, and SAR 1157 call0 _spill_registers # destroys a3, a4, and SAR
1175 1158
1176 /* Advance PC, restore registers and SAR, and return from exception. */ 1159 /* Advance PC, restore registers and SAR, and return from exception. */
1177 1160
1178 l32i a3, a2, PT_AREG4 1161 l32i a3, a2, PT_AREG5
1162 l32i a4, a2, PT_AREG4
1179 l32i a0, a2, PT_AREG0 1163 l32i a0, a2, PT_AREG0
1180 wsr a3, SAR 1164 wsr a3, SAR
1181 l32i a3, a2, PT_AREG3 1165 l32i a3, a2, PT_AREG3
1182 1166
1183 /* Restore clobbered registers. */ 1167 /* Restore clobbered registers. */
1184 1168
1185 l32i a7, a2, PT_AREG5 1169 l32i a7, a2, PT_AREG7
1186 l32i a11, a2, PT_AREG6 1170 l32i a11, a2, PT_AREG11
1187 l32i a15, a2, PT_AREG7 1171 l32i a15, a2, PT_AREG15
1188 1172
1189 movi a2, 0 1173 movi a2, 0
1190 rfe 1174 rfe
@@ -1257,9 +1241,9 @@ fast_syscall_spill_registers_fixup:
1257 1241
1258 movi a3, exc_table 1242 movi a3, exc_table
1259 rsr a0, EXCCAUSE 1243 rsr a0, EXCCAUSE
1260 addx4 a0, a0, a3 # find entry in table 1244 addx4 a0, a0, a3 # find entry in table
1261 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 1245 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
1262 jx a0 1246 jx a0
1263 1247
1264fast_syscall_spill_registers_fixup_return: 1248fast_syscall_spill_registers_fixup_return:
1265 1249
@@ -1297,7 +1281,7 @@ fast_syscall_spill_registers_fixup_return:
1297 * This is not a real function. The following conditions must be met: 1281 * This is not a real function. The following conditions must be met:
1298 * 1282 *
1299 * - must be called with call0. 1283 * - must be called with call0.
1300 * - uses DEPC, a3 and SAR. 1284 * - uses a3, a4 and SAR.
1301 * - the last 'valid' register of each frame are clobbered. 1285 * - the last 'valid' register of each frame are clobbered.
1302 * - the caller must have registered a fixup handler 1286 * - the caller must have registered a fixup handler
1303 * (or be inside a critical section) 1287 * (or be inside a critical section)
@@ -1309,41 +1293,39 @@ ENTRY(_spill_registers)
1309 /* 1293 /*
1310 * Rotate ws so that the current windowbase is at bit 0. 1294 * Rotate ws so that the current windowbase is at bit 0.
1311 * Assume ws = xxxwww1yy (www1 current window frame). 1295 * Assume ws = xxxwww1yy (www1 current window frame).
1312 * Rotate ws right so that a2 = yyxxxwww1. 1296 * Rotate ws right so that a4 = yyxxxwww1.
1313 */ 1297 */
1314 1298
1315 wsr a2, DEPC # preserve a2 1299 rsr a4, WINDOWBASE
1316 rsr a2, WINDOWBASE
1317 rsr a3, WINDOWSTART # a3 = xxxwww1yy 1300 rsr a3, WINDOWSTART # a3 = xxxwww1yy
1318 ssr a2 # holds WB 1301 ssr a4 # holds WB
1319 slli a2, a3, WSBITS 1302 slli a4, a3, WSBITS
1320 or a3, a3, a2 # a3 = xxxwww1yyxxxwww1yy 1303 or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
1321 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 1304 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
1322 1305
1323 /* We are done if there are no more than the current register frame. */ 1306 /* We are done if there are no more than the current register frame. */
1324 1307
1325 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww 1308 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
1326 movi a2, (1 << (WSBITS-1)) 1309 movi a4, (1 << (WSBITS-1))
1327 _beqz a3, .Lnospill # only one active frame? jump 1310 _beqz a3, .Lnospill # only one active frame? jump
1328 1311
1329 /* We want 1 at the top, so that we return to the current windowbase */ 1312 /* We want 1 at the top, so that we return to the current windowbase */
1330 1313
1331 or a3, a3, a2 # 1yyxxxwww 1314 or a3, a3, a4 # 1yyxxxwww
1332 1315
1333 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ 1316 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
1334 1317
1335 wsr a3, WINDOWSTART # save shifted windowstart 1318 wsr a3, WINDOWSTART # save shifted windowstart
1336 neg a2, a3 1319 neg a4, a3
1337 and a3, a2, a3 # first bit set from right: 000010000 1320 and a3, a4, a3 # first bit set from right: 000010000
1338 1321
1339 ffs_ws a2, a3 # a2: shifts to skip empty frames 1322 ffs_ws a4, a3 # a4: shifts to skip empty frames
1340 movi a3, WSBITS 1323 movi a3, WSBITS
1341 sub a2, a3, a2 # WSBITS-a2:number of 0-bits from right 1324 sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
1342 ssr a2 # save in SAR for later. 1325 ssr a4 # save in SAR for later.
1343 1326
1344 rsr a3, WINDOWBASE 1327 rsr a3, WINDOWBASE
1345 add a3, a3, a2 1328 add a3, a3, a4
1346 rsr a2, DEPC # restore a2
1347 wsr a3, WINDOWBASE 1329 wsr a3, WINDOWBASE
1348 rsync 1330 rsync
1349 1331
@@ -1373,7 +1355,6 @@ ENTRY(_spill_registers)
1373 j .Lc12c 1355 j .Lc12c
1374 1356
1375.Lnospill: 1357.Lnospill:
1376 rsr a2, DEPC
1377 ret 1358 ret
1378 1359
1379.Lloop: _bbsi.l a3, 1, .Lc4 1360.Lloop: _bbsi.l a3, 1, .Lc4
@@ -1810,154 +1791,6 @@ ENTRY(fast_store_prohibited)
18101: j _user_exception 17911: j _user_exception
1811 1792
1812 1793
1813#if XCHAL_EXTRA_SA_SIZE
1814
1815#warning fast_coprocessor untested
1816
1817/*
1818 * Entry condition:
1819 *
1820 * a0: trashed, original value saved on stack (PT_AREG0)
1821 * a1: a1
1822 * a2: new stack pointer, original in DEPC
1823 * a3: dispatch table
1824 * depc: a2, original value saved on stack (PT_DEPC)
1825 * excsave_1: a3
1826 *
1827 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1828 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1829 */
1830
1831ENTRY(fast_coprocessor_double)
1832 wsr a0, EXCSAVE_1
1833 movi a0, unrecoverable_exception
1834 callx0 a0
1835
1836ENTRY(fast_coprocessor)
1837
1838 /* Fatal if we are in a double exception. */
1839
1840 l32i a0, a2, PT_DEPC
1841 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
1842
1843 /* Save some registers a1, a3, a4, SAR */
1844
1845 xsr a3, EXCSAVE_1
1846 s32i a3, a2, PT_AREG3
1847 rsr a3, SAR
1848 s32i a4, a2, PT_AREG4
1849 s32i a1, a2, PT_AREG1
1850 s32i a5, a1, PT_AREG5
1851 s32i a3, a2, PT_SAR
1852 mov a1, a2
1853
1854 /* Currently, the HAL macros only guarantee saving a0 and a1.
1855 * These can and will be refined in the future, but for now,
1856 * just save the remaining registers of a2...a15.
1857 */
1858 s32i a6, a1, PT_AREG6
1859 s32i a7, a1, PT_AREG7
1860 s32i a8, a1, PT_AREG8
1861 s32i a9, a1, PT_AREG9
1862 s32i a10, a1, PT_AREG10
1863 s32i a11, a1, PT_AREG11
1864 s32i a12, a1, PT_AREG12
1865 s32i a13, a1, PT_AREG13
1866 s32i a14, a1, PT_AREG14
1867 s32i a15, a1, PT_AREG15
1868
1869 /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
1870
1871 rsr a0, EXCCAUSE
1872 addi a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
1873
1874 /* Set corresponding CPENABLE bit */
1875
1876 movi a4, 1
1877 ssl a3 # SAR: 32 - coprocessor_number
1878 rsr a5, CPENABLE
1879 sll a4, a4
1880 or a4, a5, a4
1881 wsr a4, CPENABLE
1882 rsync
1883 movi a5, coprocessor_info # list of owner and offset into cp_save
1884 addx8 a0, a4, a5 # entry for CP
1885
1886 bne a4, a5, .Lload # bit wasn't set before, cp not in use
1887
1888 /* Now compare the current task with the owner of the coprocessor.
1889 * If they are the same, there is no reason to save or restore any
1890 * coprocessor state. Having already enabled the coprocessor,
1891 * branch ahead to return.
1892 */
1893 GET_CURRENT(a5,a1)
1894 l32i a4, a0, COPROCESSOR_INFO_OWNER # a4: current owner for this CP
1895 beq a4, a5, .Ldone
1896
1897 /* Find location to dump current coprocessor state:
1898 * task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
1899 *
1900 * Note: a0 pointer to the entry in the coprocessor owner table,
1901 * a3 coprocessor number,
1902 * a4 current owner of coprocessor.
1903 */
1904 l32i a5, a0, COPROCESSOR_INFO_OFFSET
1905 addi a2, a4, THREAD_CP_SAVE
1906 add a2, a2, a5
1907
1908 /* Store current coprocessor states. (a5 still has CP number) */
1909
1910 xchal_cpi_store_funcbody
1911
1912 /* The macro might have destroyed a3 (coprocessor number), but
1913 * SAR still has 32 - coprocessor_number!
1914 */
1915 movi a3, 32
1916 rsr a4, SAR
1917 sub a3, a3, a4
1918
1919.Lload: /* A new task now owns the corpocessors. Save its TCB pointer into
1920 * the coprocessor owner table.
1921 *
1922 * Note: a0 pointer to the entry in the coprocessor owner table,
1923 * a3 coprocessor number.
1924 */
1925 GET_CURRENT(a4,a1)
1926 s32i a4, a0, 0
1927
1928 /* Find location from where to restore the current coprocessor state.*/
1929
1930 l32i a5, a0, COPROCESSOR_INFO_OFFSET
1931 addi a2, a4, THREAD_CP_SAVE
1932 add a2, a2, a4
1933
1934 xchal_cpi_load_funcbody
1935
1936 /* We must assume that the xchal_cpi_store_funcbody macro destroyed
1937 * registers a2..a15.
1938 */
1939
1940.Ldone: l32i a15, a1, PT_AREG15
1941 l32i a14, a1, PT_AREG14
1942 l32i a13, a1, PT_AREG13
1943 l32i a12, a1, PT_AREG12
1944 l32i a11, a1, PT_AREG11
1945 l32i a10, a1, PT_AREG10
1946 l32i a9, a1, PT_AREG9
1947 l32i a8, a1, PT_AREG8
1948 l32i a7, a1, PT_AREG7
1949 l32i a6, a1, PT_AREG6
1950 l32i a5, a1, PT_AREG5
1951 l32i a4, a1, PT_AREG4
1952 l32i a3, a1, PT_AREG3
1953 l32i a2, a1, PT_AREG2
1954 l32i a0, a1, PT_AREG0
1955 l32i a1, a1, PT_AREG1
1956
1957 rfe
1958
1959#endif /* XCHAL_EXTRA_SA_SIZE */
1960
1961/* 1794/*
1962 * System Calls. 1795 * System Calls.
1963 * 1796 *
@@ -2066,20 +1899,36 @@ ENTRY(_switch_to)
2066 1899
2067 entry a1, 16 1900 entry a1, 16
2068 1901
2069 mov a4, a3 # preserve a3 1902 mov a12, a2 # preserve 'prev' (a2)
1903 mov a13, a3 # and 'next' (a3)
2070 1904
2071 s32i a0, a2, THREAD_RA # save return address 1905 l32i a4, a2, TASK_THREAD_INFO
2072 s32i a1, a2, THREAD_SP # save stack pointer 1906 l32i a5, a3, TASK_THREAD_INFO
2073 1907
2074 /* Disable ints while we manipulate the stack pointer; spill regs. */ 1908 save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
2075 1909
2076 movi a5, (1 << PS_EXCM_BIT) | LOCKLEVEL 1910 s32i a0, a12, THREAD_RA # save return address
2077 xsr a5, PS 1911 s32i a1, a12, THREAD_SP # save stack pointer
1912
1913 /* Disable ints while we manipulate the stack pointer. */
1914
1915 movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
1916 xsr a14, PS
2078 rsr a3, EXCSAVE_1 1917 rsr a3, EXCSAVE_1
2079 rsync 1918 rsync
2080 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ 1919 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
2081 1920
2082 call0 _spill_registers 1921 /* Switch CPENABLE */
1922
1923#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
1924 l32i a3, a5, THREAD_CPENABLE
1925 xsr a3, CPENABLE
1926 s32i a3, a4, THREAD_CPENABLE
1927#endif
1928
1929 /* Flush register file. */
1930
1931 call0 _spill_registers # destroys a3, a4, and SAR
2083 1932
2084 /* Set kernel stack (and leave critical section) 1933 /* Set kernel stack (and leave critical section)
2085 * Note: It's save to set it here. The stack will not be overwritten 1934 * Note: It's save to set it here. The stack will not be overwritten
@@ -2087,19 +1936,21 @@ ENTRY(_switch_to)
2087 * we return from kernel space. 1936 * we return from kernel space.
2088 */ 1937 */
2089 1938
2090 l32i a0, a4, TASK_THREAD_INFO
2091 rsr a3, EXCSAVE_1 # exc_table 1939 rsr a3, EXCSAVE_1 # exc_table
2092 movi a1, 0 1940 movi a6, 0
2093 addi a0, a0, PT_REGS_OFFSET 1941 addi a7, a5, PT_REGS_OFFSET
2094 s32i a1, a3, EXC_TABLE_FIXUP 1942 s32i a6, a3, EXC_TABLE_FIXUP
2095 s32i a0, a3, EXC_TABLE_KSTK 1943 s32i a7, a3, EXC_TABLE_KSTK
2096 1944
2097 /* restore context of the task that 'next' addresses */ 1945 /* restore context of the task that 'next' addresses */
2098 1946
2099 l32i a0, a4, THREAD_RA /* restore return address */ 1947 l32i a0, a13, THREAD_RA # restore return address
2100 l32i a1, a4, THREAD_SP /* restore stack pointer */ 1948 l32i a1, a13, THREAD_SP # restore stack pointer
1949
1950 load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
2101 1951
2102 wsr a5, PS 1952 wsr a14, PS
1953 mov a2, a12 # return 'prev'
2103 rsync 1954 rsync
2104 1955
2105 retw 1956 retw
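One detail worth calling out in the _switch_to() changes above is the CPENABLE handoff: the next thread's cached cpenable value is written to the hardware register and the previous hardware value is stored back into the outgoing thread's thread_info, so each thread keeps only the enable bits it has earned through lazy coprocessor faults. In C, with hypothetical accessors standing in for the single xsr (exchange) instruction the real code uses, the exchange looks roughly like this.

struct thread_info {
	unsigned long cpenable;     /* cached CPENABLE for this thread */
	/* ... */
};

/* Hypothetical wrappers around the CPENABLE special register. */
extern unsigned long read_cpenable(void);
extern void write_cpenable(unsigned long val);

static void switch_cpenable(struct thread_info *prev, struct thread_info *next)
{
	unsigned long old = read_cpenable();  /* bits owned by 'prev'  */

	write_cpenable(next->cpenable);       /* install 'next's bits  */
	prev->cpenable = old;                 /* remember 'prev's bits */
}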
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 026138d641a4..9185597eb6a0 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -52,6 +52,55 @@ void (*pm_power_off)(void) = NULL;
52EXPORT_SYMBOL(pm_power_off); 52EXPORT_SYMBOL(pm_power_off);
53 53
54 54
55#if XTENSA_HAVE_COPROCESSORS
56
57void coprocessor_release_all(struct thread_info *ti)
58{
59 unsigned long cpenable;
60 int i;
61
62 /* Make sure we don't switch tasks during this operation. */
63
64 preempt_disable();
65
66 /* Walk through all cp owners and release it for the requested one. */
67
68 cpenable = ti->cpenable;
69
70 for (i = 0; i < XCHAL_CP_MAX; i++) {
71 if (coprocessor_owner[i] == ti) {
72 coprocessor_owner[i] = 0;
73 cpenable &= ~(1 << i);
74 }
75 }
76
77 ti->cpenable = cpenable;
78 coprocessor_clear_cpenable();
79
80 preempt_enable();
81}
82
83void coprocessor_flush_all(struct thread_info *ti)
84{
85 unsigned long cpenable;
86 int i;
87
88 preempt_disable();
89
90 cpenable = ti->cpenable;
91
92 for (i = 0; i < XCHAL_CP_MAX; i++) {
93 if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
94 coprocessor_flush(ti, i);
95 cpenable >>= 1;
96 }
97
98 preempt_enable();
99}
100
101#endif
102
103
55/* 104/*
56 * Powermanagement idle function, if any is provided by the platform. 105 * Powermanagement idle function, if any is provided by the platform.
57 */ 106 */
@@ -71,15 +120,36 @@ void cpu_idle(void)
71} 120}
72 121
73/* 122/*
74 * Free current thread data structures etc.. 123 * This is called when the thread calls exit().
75 */ 124 */
76
77void exit_thread(void) 125void exit_thread(void)
78{ 126{
127#if XTENSA_HAVE_COPROCESSORS
128 coprocessor_release_all(current_thread_info());
129#endif
79} 130}
80 131
132/*
133 * Flush thread state. This is called when a thread does an execve()
134 * Note that we flush coprocessor registers for the case execve fails.
135 */
81void flush_thread(void) 136void flush_thread(void)
82{ 137{
138#if XTENSA_HAVE_COPROCESSORS
139 struct thread_info *ti = current_thread_info();
140 coprocessor_flush_all(ti);
141 coprocessor_release_all(ti);
142#endif
143}
144
145/*
146 * This is called before the thread is copied.
147 */
148void prepare_to_copy(struct task_struct *tsk)
149{
150#if XTENSA_HAVE_COPROCESSORS
151 coprocessor_flush_all(task_thread_info(tsk));
152#endif
83} 153}
84 154
85/* 155/*
@@ -107,6 +177,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
107 struct task_struct * p, struct pt_regs * regs) 177 struct task_struct * p, struct pt_regs * regs)
108{ 178{
109 struct pt_regs *childregs; 179 struct pt_regs *childregs;
180 struct thread_info *ti;
110 unsigned long tos; 181 unsigned long tos;
111 int user_mode = user_mode(regs); 182 int user_mode = user_mode(regs);
112 183
@@ -128,13 +199,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
128 p->set_child_tid = p->clear_child_tid = NULL; 199 p->set_child_tid = p->clear_child_tid = NULL;
129 p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1); 200 p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
130 p->thread.sp = (unsigned long)childregs; 201 p->thread.sp = (unsigned long)childregs;
202
131 if (user_mode(regs)) { 203 if (user_mode(regs)) {
132 204
133 int len = childregs->wmask & ~0xf; 205 int len = childregs->wmask & ~0xf;
134 childregs->areg[1] = usp; 206 childregs->areg[1] = usp;
135 memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4], 207 memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
136 &regs->areg[XCHAL_NUM_AREGS - len/4], len); 208 &regs->areg[XCHAL_NUM_AREGS - len/4], len);
137 209// FIXME: we need to set THREADPTR in thread_info...
138 if (clone_flags & CLONE_SETTLS) 210 if (clone_flags & CLONE_SETTLS)
139 childregs->areg[2] = childregs->areg[6]; 211 childregs->areg[2] = childregs->areg[6];
140 212
@@ -142,6 +214,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
142 /* In kernel space, we start a new thread with a new stack. */ 214 /* In kernel space, we start a new thread with a new stack. */
143 childregs->wmask = 1; 215 childregs->wmask = 1;
144 } 216 }
217
218#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
219 ti = task_thread_info(p);
220 ti->cpenable = 0;
221#endif
222
145 return 0; 223 return 0;
146} 224}
147 225
@@ -179,10 +257,6 @@ unsigned long get_wchan(struct task_struct *p)
179} 257}
180 258
181/* 259/*
182 * do_copy_regs() gathers information from 'struct pt_regs' and
183 * 'current->thread.areg[]' to fill in the xtensa_gregset_t
184 * structure.
185 *
186 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats 260 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
187 * of processor registers. Besides different ordering, 261 * of processor registers. Besides different ordering,
188 * xtensa_gregset_t contains non-live register information that 262 * xtensa_gregset_t contains non-live register information that
@@ -191,9 +265,20 @@ unsigned long get_wchan(struct task_struct *p)
191 * 265 *
192 */ 266 */
193 267
194void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs, 268void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
195 struct task_struct *tsk)
196{ 269{
270 unsigned long wb, ws, wm;
271 int live, last;
272
273 wb = regs->windowbase;
274 ws = regs->windowstart;
275 wm = regs->wmask;
276 ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
277
278 /* Don't leak any random bits. */
279
280 memset(elfregs, 0, sizeof (elfregs));
281
197 /* Note: PS.EXCM is not set while user task is running; its 282 /* Note: PS.EXCM is not set while user task is running; its
198 * being set in regs->ps is for exception handling convenience. 283 * being set in regs->ps is for exception handling convenience.
199 */ 284 */
@@ -204,159 +289,18 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
204 elfregs->lend = regs->lend; 289 elfregs->lend = regs->lend;
205 elfregs->lcount = regs->lcount; 290 elfregs->lcount = regs->lcount;
206 elfregs->sar = regs->sar; 291 elfregs->sar = regs->sar;
292 elfregs->windowstart = ws;
207 293
208 memcpy (elfregs->a, regs->areg, sizeof(elfregs->a)); 294 live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
209} 295 last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
210 296 memcpy(elfregs->a, regs->areg, live * 4);
211void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs) 297 memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
212{
213 do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
214}
215
216
217/* The inverse of do_copy_regs(). No error or sanity checking. */
218
219void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
220 struct task_struct *tsk)
221{
222 const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
223 unsigned long ps;
224
225 /* Note: PS.EXCM is not set while user task is running; it
226 * needs to be set in regs->ps is for exception handling convenience.
227 */
228
229 ps = (regs->ps & ~ps_mask) | (elfregs->ps & ps_mask) | (1<<PS_EXCM_BIT);
230 regs->ps = ps;
231 regs->pc = elfregs->pc;
232 regs->lbeg = elfregs->lbeg;
233 regs->lend = elfregs->lend;
234 regs->lcount = elfregs->lcount;
235 regs->sar = elfregs->sar;
236
237 memcpy (regs->areg, elfregs->a, sizeof(regs->areg));
238}
239
240/*
241 * do_save_fpregs() gathers information from 'struct pt_regs' and
242 * 'current->thread' to fill in the elf_fpregset_t structure.
243 *
244 * Core files and ptrace use elf_fpregset_t.
245 */
246
247void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
248 struct task_struct *tsk)
249{
250#if XCHAL_HAVE_CP
251
252 extern unsigned char _xtensa_reginfo_tables[];
253 extern unsigned _xtensa_reginfo_table_size;
254 int i;
255 unsigned long flags;
256
257 /* Before dumping coprocessor state from memory,
258 * ensure any live coprocessor contents for this
259 * task are first saved to memory:
260 */
261 local_irq_save(flags);
262
263 for (i = 0; i < XCHAL_CP_MAX; i++) {
264 if (tsk == coprocessor_info[i].owner) {
265 enable_coprocessor(i);
266 save_coprocessor_registers(
267 tsk->thread.cp_save+coprocessor_info[i].offset,i);
268 disable_coprocessor(i);
269 }
270 }
271
272 local_irq_restore(flags);
273
274 /* Now dump coprocessor & extra state: */
275 memcpy((unsigned char*)fpregs,
276 _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
277 memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
278 tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
279#endif
280} 298}
281 299
282/* 300int dump_fpu(void)
283 * The inverse of do_save_fpregs().
284 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
285 * Returns 0 on success, non-zero if layout doesn't match.
286 */
287
288int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
289 struct task_struct *tsk)
290{ 301{
291#if XCHAL_HAVE_CP
292
293 extern unsigned char _xtensa_reginfo_tables[];
294 extern unsigned _xtensa_reginfo_table_size;
295 int i;
296 unsigned long flags;
297
298 /* Make sure save area layouts match.
299 * FIXME: in the future we could allow restoring from
300 * a different layout of the same registers, by comparing
301 * fpregs' table with _xtensa_reginfo_tables and matching
302 * entries and copying registers one at a time.
303 * Not too sure yet whether that's very useful.
304 */
305
306 if( memcmp((unsigned char*)fpregs,
307 _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
308 return -1;
309 }
310
311 /* Before restoring coprocessor state from memory,
312 * ensure any live coprocessor contents for this
313 * task are first invalidated.
314 */
315
316 local_irq_save(flags);
317
318 for (i = 0; i < XCHAL_CP_MAX; i++) {
319 if (tsk == coprocessor_info[i].owner) {
320 enable_coprocessor(i);
321 save_coprocessor_registers(
322 tsk->thread.cp_save+coprocessor_info[i].offset,i);
323 coprocessor_info[i].owner = 0;
324 disable_coprocessor(i);
325 }
326 }
327
328 local_irq_restore(flags);
329
330 /* Now restore coprocessor & extra state: */
331
332 memcpy(tsk->thread.cp_save,
333 (unsigned char*)fpregs + _xtensa_reginfo_table_size,
334 XTENSA_CP_EXTRA_SIZE);
335#endif
336 return 0; 302 return 0;
337} 303}
338/*
339 * Fill in the CP structure for a core dump for a particular task.
340 */
341
342int
343dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
344{
345 return 0; /* no coprocessors active on this processor */
346}
347
348/*
349 * Fill in the CP structure for a core dump.
350 * This includes any FPU coprocessor.
351 * Here, we dump all coprocessors, and other ("extra") custom state.
352 *
353 * This function is called by elf_core_dump() in fs/binfmt_elf.c
354 * (in which case 'regs' comes from calls to do_coredump, see signals.c).
355 */
356int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
357{
358 return dump_task_fpu(regs, current, r);
359}
360 304
361asmlinkage 305asmlinkage
362long xtensa_clone(unsigned long clone_flags, unsigned long newsp, 306long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
@@ -370,8 +314,8 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
370} 314}
371 315
372/* 316/*
373 * * xtensa_execve() executes a new program. 317 * xtensa_execve() executes a new program.
374 * */ 318 */
375 319
376asmlinkage 320asmlinkage
377long xtensa_execve(char __user *name, char __user * __user *argv, 321long xtensa_execve(char __user *name, char __user * __user *argv,
@@ -386,7 +330,6 @@ long xtensa_execve(char __user *name, char __user * __user *argv,
386 error = PTR_ERR(filename); 330 error = PTR_ERR(filename);
387 if (IS_ERR(filename)) 331 if (IS_ERR(filename))
388 goto out; 332 goto out;
389 // FIXME: release coprocessor??
390 error = do_execve(filename, argv, envp, regs); 333 error = do_execve(filename, argv, envp, regs);
391 if (error == 0) { 334 if (error == 0) {
392 task_lock(current); 335 task_lock(current);
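The new xtensa_elf_core_copy_regs() above (and the matching ptrace code below) decodes pt_regs->wmask to copy only the address registers that are actually valid: the low bits say how many registers of the current call frames are live at the start of areg[], and the upper bits count the 4-register caller frames saved at the end of the array. A commented C restatement of that computation, reusing the patch's live/last expressions, is shown here.

#include <string.h>

#define XCHAL_NUM_AREGS 64               /* typical configuration */

/* Copy the valid parts of areg[] into dst[], as the patch does.
 * wm is regs->wmask: bits 1..3 encode how many registers at the start
 * of areg[] are live (4, 8, 12 or 16), and bits 4+ count 4-register
 * caller frames stored at the end of the array. */
static void copy_live_aregs(unsigned long *dst, const unsigned long *areg,
			    unsigned long wm)
{
	int live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
	int last = XCHAL_NUM_AREGS - (wm >> 4) * 4;

	memcpy(dst, areg, live * sizeof(*areg));         /* leading live regs      */
	memcpy(dst + last, areg + last,                  /* trailing caller frames */
	       (wm >> 4) * 4 * sizeof(*areg));
}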
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 5533c7850d53..f6669d605125 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -4,7 +4,7 @@
4 * License. See the file "COPYING" in the main directory of this archive 4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details. 5 * for more details.
6 * 6 *
7 * Copyright (C) 2001 - 2005 Tensilica Inc. 7 * Copyright (C) 2001 - 2007 Tensilica Inc.
8 * 8 *
9 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> 9 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
10 * Chris Zankel <chris@zankel.net> 10 * Chris Zankel <chris@zankel.net>
@@ -28,14 +28,10 @@
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/ptrace.h> 29#include <asm/ptrace.h>
30#include <asm/elf.h> 30#include <asm/elf.h>
31 31#include <asm/coprocessor.h>
32#define TEST_KERNEL // verify kernel operations FIXME: remove
33
34 32
35/* 33/*
36 * Called by kernel/ptrace.c when detaching.. 34 * Called by kernel/ptrace.c when detaching to disable single stepping.
37 *
38 * Make sure single step bits etc are not set.
39 */ 35 */
40 36
41void ptrace_disable(struct task_struct *child) 37void ptrace_disable(struct task_struct *child)
@@ -43,136 +39,233 @@ void ptrace_disable(struct task_struct *child)
43 /* Nothing to do.. */ 39 /* Nothing to do.. */
44} 40}
45 41
46long arch_ptrace(struct task_struct *child, long request, long addr, long data) 42int ptrace_getregs(struct task_struct *child, void __user *uregs)
47{ 43{
48 int ret = -EPERM; 44 struct pt_regs *regs = task_pt_regs(child);
45 xtensa_gregset_t __user *gregset = uregs;
46 unsigned long wb = regs->windowbase;
47 unsigned long ws = regs->windowstart;
48 unsigned long wm = regs->wmask;
49 int ret = 0;
50 int live, last;
51
52 if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
53 return -EIO;
54
55 /* Norm windowstart to a windowbase of 0. */
56
57 ws = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
58
59 ret |= __put_user(regs->pc, &gregset->pc);
60 ret |= __put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps);
61 ret |= __put_user(regs->lbeg, &gregset->lbeg);
62 ret |= __put_user(regs->lend, &gregset->lend);
63 ret |= __put_user(regs->lcount, &gregset->lcount);
64 ret |= __put_user(ws, &gregset->windowstart);
65
66 live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
67 last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
68 ret |= __copy_to_user(gregset->a, regs->areg, live * 4);
69 ret |= __copy_to_user(gregset->a + last, regs->areg + last, (wm>>4)*16);
70
71 return ret ? -EFAULT : 0;
72}
49 73
50 switch (request) { 74int ptrace_setregs(struct task_struct *child, void __user *uregs)
51 case PTRACE_PEEKTEXT: /* read word at location addr. */ 75{
52 case PTRACE_PEEKDATA: 76 struct pt_regs *regs = task_pt_regs(child);
53 ret = generic_ptrace_peekdata(child, addr, data); 77 xtensa_gregset_t *gregset = uregs;
54 goto out; 78 const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
79 unsigned long wm = regs->wmask;
80 unsigned long ps;
81 int ret = 0;
82 int live, last;
83
84 if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
85 return -EIO;
86
87 ret |= __get_user(regs->pc, &gregset->pc);
88 ret |= __get_user(ps, &gregset->ps);
89 ret |= __get_user(regs->lbeg, &gregset->lbeg);
90 ret |= __get_user(regs->lend, &gregset->lend);
91 ret |= __get_user(regs->lcount, &gregset->lcount);
92
93 regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
94
95 live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
96 last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
97 ret |= __copy_from_user(regs->areg, gregset->a, live * 4);
98 ret |= __copy_from_user(regs->areg+last, gregset->a+last, (wm>>4)*16);
99
100 return ret ? -EFAULT : 0;
101}
55 102
56 /* Read the word at location addr in the USER area. */
57 103
58 case PTRACE_PEEKUSR: 104int ptrace_getxregs(struct task_struct *child, void __user *uregs)
59 { 105{
60 struct pt_regs *regs; 106 struct pt_regs *regs = task_pt_regs(child);
61 unsigned long tmp; 107 struct thread_info *ti = task_thread_info(child);
108 elf_xtregs_t __user *xtregs = uregs;
109 int ret = 0;
110
111 if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
112 return -EIO;
113
114#if XTENSA_HAVE_COPROCESSORS
115 /* Flush all coprocessor registers to memory. */
116 coprocessor_flush_all(ti);
117 ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
118 sizeof(xtregs_coprocessor_t));
119#endif
120 ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
121 sizeof(xtregs->opt));
122 ret |= __copy_to_user(&xtregs->user,&ti->xtregs_user,
123 sizeof(xtregs->user));
62 124
63 regs = task_pt_regs(child); 125 return ret ? -EFAULT : 0;
64 tmp = 0; /* Default return value. */ 126}
127
128int ptrace_setxregs(struct task_struct *child, void __user *uregs)
129{
130 struct thread_info *ti = task_thread_info(child);
131 struct pt_regs *regs = task_pt_regs(child);
132 elf_xtregs_t *xtregs = uregs;
133 int ret = 0;
134
135#if XTENSA_HAVE_COPROCESSORS
136 /* Flush all coprocessors before we overwrite them. */
137 coprocessor_flush_all(ti);
138 coprocessor_release_all(ti);
139
140 ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
141 sizeof(xtregs_coprocessor_t));
142#endif
143 ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
144 sizeof(xtregs->opt));
145 ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
146 sizeof(xtregs->user));
147
148 return ret ? -EFAULT : 0;
149}
150
151int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret)
152{
153 struct pt_regs *regs;
154 unsigned long tmp;
155
156 regs = task_pt_regs(child);
157 tmp = 0; /* Default return value. */
65 158
66 switch(addr) { 159 switch(regno) {
67 160
68 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1: 161 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
69 { 162 tmp = regs->areg[regno - REG_AR_BASE];
70 int ar = addr - REG_AR_BASE - regs->windowbase * 4;
71 ar &= (XCHAL_NUM_AREGS - 1);
72 if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
73 tmp = regs->areg[ar];
74 else
75 ret = -EIO;
76 break; 163 break;
77 } 164
78 case REG_A_BASE ... REG_A_BASE + 15: 165 case REG_A_BASE ... REG_A_BASE + 15:
79 tmp = regs->areg[addr - REG_A_BASE]; 166 tmp = regs->areg[regno - REG_A_BASE];
80 break; 167 break;
168
81 case REG_PC: 169 case REG_PC:
82 tmp = regs->pc; 170 tmp = regs->pc;
83 break; 171 break;
172
84 case REG_PS: 173 case REG_PS:
85 /* Note: PS.EXCM is not set while user task is running; 174 /* Note: PS.EXCM is not set while user task is running;
86 * its being set in regs is for exception handling 175 * its being set in regs is for exception handling
87 * convenience. */ 176 * convenience. */
88 tmp = (regs->ps & ~(1 << PS_EXCM_BIT)); 177 tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
89 break; 178 break;
179
90 case REG_WB: 180 case REG_WB:
91 tmp = regs->windowbase; 181 break; /* tmp = 0 */
92 break; 182
93 case REG_WS: 183 case REG_WS:
94 tmp = regs->windowstart; 184 {
185 unsigned long wb = regs->windowbase;
186 unsigned long ws = regs->windowstart;
187 tmp = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
95 break; 188 break;
189 }
96 case REG_LBEG: 190 case REG_LBEG:
97 tmp = regs->lbeg; 191 tmp = regs->lbeg;
98 break; 192 break;
193
99 case REG_LEND: 194 case REG_LEND:
100 tmp = regs->lend; 195 tmp = regs->lend;
101 break; 196 break;
197
102 case REG_LCOUNT: 198 case REG_LCOUNT:
103 tmp = regs->lcount; 199 tmp = regs->lcount;
104 break; 200 break;
201
105 case REG_SAR: 202 case REG_SAR:
106 tmp = regs->sar; 203 tmp = regs->sar;
107 break; 204 break;
108 case REG_DEPC: 205
109 tmp = regs->depc;
110 break;
111 case REG_EXCCAUSE:
112 tmp = regs->exccause;
113 break;
114 case REG_EXCVADDR:
115 tmp = regs->excvaddr;
116 break;
117 case SYSCALL_NR: 206 case SYSCALL_NR:
118 tmp = regs->syscall; 207 tmp = regs->syscall;
119 break; 208 break;
120 default:
121 tmp = 0;
122 ret = -EIO;
123 goto out;
124 }
125 ret = put_user(tmp, (unsigned long *) data);
126 goto out;
127 }
128 209
129 case PTRACE_POKETEXT: /* write the word at location addr. */ 210 default:
130 case PTRACE_POKEDATA: 211 return -EIO;
131 ret = generic_ptrace_pokedata(child, addr, data); 212 }
132 goto out; 213 return put_user(tmp, ret);
214}
133 215
134 case PTRACE_POKEUSR: 216int ptrace_pokeusr(struct task_struct *child, long regno, long val)
135 { 217{
136 struct pt_regs *regs; 218 struct pt_regs *regs;
137 regs = task_pt_regs(child); 219 regs = task_pt_regs(child);
138 220
139 switch (addr) { 221 switch (regno) {
140 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1: 222 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
141 { 223 regs->areg[regno - REG_AR_BASE] = val;
142 int ar = addr - REG_AR_BASE - regs->windowbase * 4;
143 if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
144 regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
145 else
146 ret = -EIO;
147 break; 224 break;
148 } 225
149 case REG_A_BASE ... REG_A_BASE + 15: 226 case REG_A_BASE ... REG_A_BASE + 15:
150 regs->areg[addr - REG_A_BASE] = data; 227 regs->areg[regno - REG_A_BASE] = val;
151 break; 228 break;
229
152 case REG_PC: 230 case REG_PC:
153 regs->pc = data; 231 regs->pc = val;
154 break; 232 break;
233
155 case SYSCALL_NR: 234 case SYSCALL_NR:
156 regs->syscall = data; 235 regs->syscall = val;
157 break;
158#ifdef TEST_KERNEL
159 case REG_WB:
160 regs->windowbase = data;
161 break; 236 break;
162 case REG_WS:
163 regs->windowstart = data;
164 break;
165#endif
166 237
167 default: 238 default:
168 /* The rest are not allowed. */ 239 return -EIO;
169 ret = -EIO; 240 }
170 break; 241 return 0;
171 } 242}
243
244long arch_ptrace(struct task_struct *child, long request, long addr, long data)
245{
246 int ret = -EPERM;
247
248 switch (request) {
249 case PTRACE_PEEKTEXT: /* read word at location addr. */
250 case PTRACE_PEEKDATA:
251 ret = generic_ptrace_peekdata(child, addr, data);
252 break;
253
254 case PTRACE_PEEKUSR: /* read register specified by addr. */
255 ret = ptrace_peekusr(child, addr, (void __user *) data);
256 break;
257
258 case PTRACE_POKETEXT: /* write the word at location addr. */
259 case PTRACE_POKEDATA:
260 ret = generic_ptrace_pokedata(child, addr, data);
261 break;
262
263 case PTRACE_POKEUSR: /* write register specified by addr. */
264 ret = ptrace_pokeusr(child, addr, data);
172 break; 265 break;
173 }
174 266
175 /* continue and stop at next (return from) syscall */ 267 /* continue and stop at next (return from) syscall */
268
176 case PTRACE_SYSCALL: 269 case PTRACE_SYSCALL:
177 case PTRACE_CONT: /* restart after signal. */ 270 case PTRACE_CONT: /* restart after signal. */
178 { 271 {
@@ -217,98 +310,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
217 break; 310 break;
218 311
219 case PTRACE_GETREGS: 312 case PTRACE_GETREGS:
220 { 313 ret = ptrace_getregs(child, (void __user *) data);
221 /* 'data' points to user memory in which to write.
222 * Mainly due to the non-live register values, we
223 * reformat the register values into something more
224 * standard. For convenience, we use the handy
225 * elf_gregset_t format. */
226
227 xtensa_gregset_t format;
228 struct pt_regs *regs = task_pt_regs(child);
229
230 do_copy_regs (&format, regs, child);
231
232 /* Now, copy to user space nice and easy... */
233 ret = 0;
234 if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
235 ret = -EFAULT;
236 break; 314 break;
237 }
238 315
239 case PTRACE_SETREGS: 316 case PTRACE_SETREGS:
240 { 317 ret = ptrace_setregs(child, (void __user *) data);
241 /* 'data' points to user memory that contains the new
242 * values in the elf_gregset_t format. */
243
244 xtensa_gregset_t format;
245 struct pt_regs *regs = task_pt_regs(child);
246
247 if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
248 ret = -EFAULT;
249 break;
250 }
251
252 /* FIXME: Perhaps we want some sanity checks on
253 * these user-space values? See ARM version. Are
254 * debuggers a security concern? */
255
256 do_restore_regs (&format, regs, child);
257
258 ret = 0;
259 break;
260 }
261
262 case PTRACE_GETFPREGS:
263 {
264 /* 'data' points to user memory in which to write.
265 * For convenience, we use the handy
266 * elf_fpregset_t format. */
267
268 elf_fpregset_t fpregs;
269 struct pt_regs *regs = task_pt_regs(child);
270
271 do_save_fpregs (&fpregs, regs, child);
272
273 /* Now, copy to user space nice and easy... */
274 ret = 0;
275 if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
276 ret = -EFAULT;
277
278 break; 318 break;
279 }
280
281 case PTRACE_SETFPREGS:
282 {
283 /* 'data' points to user memory that contains the new
284 * values in the elf_fpregset_t format.
285 */
286 elf_fpregset_t fpregs;
287 struct pt_regs *regs = task_pt_regs(child);
288 319
289 ret = 0; 320 case PTRACE_GETXTREGS:
290 if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) { 321 ret = ptrace_getxregs(child, (void __user *) data);
291 ret = -EFAULT;
292 break;
293 }
294
295 if (do_restore_fpregs (&fpregs, regs, child))
296 ret = -EIO;
297 break; 322 break;
298 }
299 323
300 case PTRACE_GETFPREGSIZE: 324 case PTRACE_SETXTREGS:
301 /* 'data' points to 'unsigned long' set to the size 325 ret = ptrace_setxregs(child, (void __user *) data);
302 * of elf_fpregset_t
303 */
304 ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
305 break; 326 break;
306 327
307 default: 328 default:
308 ret = ptrace_request(child, request, addr, data); 329 ret = ptrace_request(child, request, addr, data);
309 goto out; 330 break;
310 } 331 }
311 out: 332
312 return ret; 333 return ret;
313} 334}
314 335
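Note on the ptrace.c change above: arch_ptrace() is reduced to a dispatcher over small helpers (ptrace_peekusr, ptrace_pokeusr, ptrace_getregs, ptrace_setregs, ptrace_getxregs, ptrace_setxregs). A minimal user-space sketch of the PTRACE_PEEKUSR path follows; it only makes sense on xtensa, REG_PC is the xtensa-specific register index from asm/ptrace.h, and the fallback value below is an assumption for the sketch, not the real definition.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef REG_PC
#define REG_PC 0x0020	/* assumed index for the sketch; the real value lives in asm/ptrace.h */
#endif

int main(void)
{
	pid_t pid = fork();
	long pc;

	if (pid == 0) {
		/* Child: request tracing, then exec; it stops with SIGTRAP after execve(). */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		return 1;
	}

	waitpid(pid, NULL, 0);

	/* Read the program counter; in the kernel this goes through the new
	 * ptrace_peekusr() helper (the C library spells the request PTRACE_PEEKUSER,
	 * the kernel PTRACE_PEEKUSR). */
	pc = ptrace(PTRACE_PEEKUSER, pid, (void *)REG_PC, NULL);
	printf("child pc = 0x%lx\n", pc);

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}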
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 42d9fd8a4225..299be42d116b 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -35,13 +35,17 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
35 35
36extern struct task_struct *coproc_owners[]; 36extern struct task_struct *coproc_owners[];
37 37
38extern void release_all_cp (struct task_struct *);
39
40struct rt_sigframe 38struct rt_sigframe
41{ 39{
42 struct siginfo info; 40 struct siginfo info;
43 struct ucontext uc; 41 struct ucontext uc;
44 cp_state_t cpstate; 42 struct {
43 xtregs_opt_t opt;
44 xtregs_user_t user;
45#if XTENSA_HAVE_COPROCESSORS
46 xtregs_coprocessor_t cp;
47#endif
48 } xtregs;
45 unsigned char retcode[6]; 49 unsigned char retcode[6];
46 unsigned int window[4]; 50 unsigned int window[4];
47}; 51};
@@ -132,9 +136,10 @@ errout:
132 */ 136 */
133 137
134static int 138static int
135setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate, 139setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
136 struct pt_regs *regs)
137{ 140{
141 struct sigcontext __user *sc = &frame->uc.uc_mcontext;
142 struct thread_info *ti = current_thread_info();
138 int err = 0; 143 int err = 0;
139 144
140#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) 145#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
@@ -148,21 +153,32 @@ setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate,
148 153
149 err |= flush_window_regs_user(regs); 154 err |= flush_window_regs_user(regs);
150 err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4); 155 err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
156 err |= __put_user(0, &sc->sc_xtregs);
151 157
152 // err |= __copy_to_user (sc->sc_a, regs->areg, XCHAL_NUM_AREGS * 4) 158 if (err)
159 return err;
153 160
154#if XCHAL_HAVE_CP 161#if XTENSA_HAVE_COPROCESSORS
155# error Coprocessors unsupported 162 coprocessor_flush_all(ti);
156 err |= save_cpextra(cpstate); 163 coprocessor_release_all(ti);
157 err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate); 164 err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
165 sizeof (frame->xtregs.cp));
158#endif 166#endif
167 err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
168 sizeof (xtregs_opt_t));
169 err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
170 sizeof (xtregs_user_t));
171
172 err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
159 173
160 return err; 174 return err;
161} 175}
162 176
163static int 177static int
164restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) 178restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
165{ 179{
180 struct sigcontext __user *sc = &frame->uc.uc_mcontext;
181 struct thread_info *ti = current_thread_info();
166 unsigned int err = 0; 182 unsigned int err = 0;
167 unsigned long ps; 183 unsigned long ps;
168 184
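Read linearly, the save path added to setup_sigcontext() in the hunk above is: flush live coprocessor state back into the thread_info, release the coprocessors so the lazy-switch code reloads them later, copy the cp/opt/user register blocks into the frame, and only then publish the sc_xtregs pointer. The sketch below restates that order as one helper; it assumes kernel context, the helper name is invented, and it is a condensation of the diff rather than a drop-in replacement.

/* Condensed restatement of the save path above (kernel context only). */
static int save_xtregs_to_frame(struct rt_sigframe __user *frame,
				struct pt_regs *regs,
				struct thread_info *ti)
{
	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
	int err = 0;

#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(ti);	/* write live CP registers back into ti->xtregs_cp */
	coprocessor_release_all(ti);	/* drop ownership so CP state is reloaded lazily later */
	err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
			      sizeof(frame->xtregs.cp));
#endif
	err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
			      sizeof(xtregs_opt_t));
	err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
			      sizeof(xtregs_user_t));

	/* Publish the pointer only when everything above was written out. */
	err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
	return err;
}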
@@ -180,6 +196,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
180 regs->windowbase = 0; 196 regs->windowbase = 0;
181 regs->windowstart = 1; 197 regs->windowstart = 1;
182 198
199 regs->syscall = -1; /* disable syscall checks */
200
183 /* For PS, restore only PS.CALLINC. 201 /* For PS, restore only PS.CALLINC.
184 * Assume that all other bits are either the same as for the signal 202 * Assume that all other bits are either the same as for the signal
185 * handler, or the user mode value doesn't matter (e.g. PS.OWB). 203 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
@@ -195,8 +213,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
195 213
196 err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4); 214 err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
197 215
198#if XCHAL_HAVE_CP 216 if (err)
199# error Coprocessors unsupported 217 return err;
218
200 /* The signal handler may have used coprocessors in which 219 /* The signal handler may have used coprocessors in which
201 * case they are still enabled. We disable them to force a 220 * case they are still enabled. We disable them to force a
202 * reloading of the original task's CP state by the lazy 221 * reloading of the original task's CP state by the lazy
@@ -204,20 +223,20 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
204 * Also, we essentially discard any coprocessor state that the 223 * Also, we essentially discard any coprocessor state that the
205 * signal handler created. */ 224 * signal handler created. */
206 225
207 if (!err) { 226#if XTENSA_HAVE_COPROCESSORS
208 struct task_struct *tsk = current; 227 coprocessor_release_all(ti);
209 release_all_cp(tsk); 228 err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
210 err |= __copy_from_user(tsk->thread.cpextra, sc->sc_cpstate, 229 sizeof (frame->xtregs.cp));
211 XTENSA_CP_EXTRA_SIZE);
212 }
213#endif 230#endif
231 err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
232 sizeof (xtregs_user_t));
233 err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
234 sizeof (xtregs_opt_t));
214 235
215 regs->syscall = -1; /* disable syscall checks */
216 return err; 236 return err;
217} 237}
218 238
219 239
220
221/* 240/*
222 * Do a signal return; undo the signal stack. 241 * Do a signal return; undo the signal stack.
223 */ 242 */
@@ -246,7 +265,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
246 recalc_sigpending(); 265 recalc_sigpending();
247 spin_unlock_irq(&current->sighand->siglock); 266 spin_unlock_irq(&current->sighand->siglock);
248 267
249 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 268 if (restore_sigcontext(regs, frame))
250 goto badframe; 269 goto badframe;
251 270
252 ret = regs->areg[2]; 271 ret = regs->areg[2];
@@ -359,7 +378,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
359 err |= __put_user(sas_ss_flags(regs->areg[1]), 378 err |= __put_user(sas_ss_flags(regs->areg[1]),
360 &frame->uc.uc_stack.ss_flags); 379 &frame->uc.uc_stack.ss_flags);
361 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); 380 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
362 err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate, regs); 381 err |= setup_sigcontext(frame, regs);
363 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 382 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
364 383
365 /* Create sys_rt_sigreturn syscall in stack frame */ 384 /* Create sys_rt_sigreturn syscall in stack frame */
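Because the extended state now lives in the signal frame itself, user space can tell whether it was saved by checking the sc_xtregs pointer that setup_sigcontext() publishes (it stays NULL if any copy failed). A hedged sketch, assuming the C library exposes the kernel's struct sigcontext as uc_mcontext (as uClibc does on xtensa); sc_xtregs is xtensa-specific and this does not build on other architectures.

#include <signal.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;

	(void)sig;
	(void)info;

	/* sc_xtregs points into the rt_sigframe's xtregs block, or is NULL
	 * if the kernel could not write the block out. */
	if (uc->uc_mcontext.sc_xtregs)
		write(1, "xtregs saved\n", 13);
	else
		write(1, "xtregs not saved\n", 17);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}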
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 6f722f91ba92..c7a021d9f696 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -118,28 +118,28 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
118{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault }, 118{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
119{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault }, 119{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
120/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */ 120/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
121#if (XCHAL_CP_MASK & 1) 121#if XTENSA_HAVE_COPROCESSOR(0)
122COPROCESSOR(0), 122COPROCESSOR(0),
123#endif 123#endif
124#if (XCHAL_CP_MASK & 2) 124#if XTENSA_HAVE_COPROCESSOR(1)
125COPROCESSOR(1), 125COPROCESSOR(1),
126#endif 126#endif
127#if (XCHAL_CP_MASK & 4) 127#if XTENSA_HAVE_COPROCESSOR(2)
128COPROCESSOR(2), 128COPROCESSOR(2),
129#endif 129#endif
130#if (XCHAL_CP_MASK & 8) 130#if XTENSA_HAVE_COPROCESSOR(3)
131COPROCESSOR(3), 131COPROCESSOR(3),
132#endif 132#endif
133#if (XCHAL_CP_MASK & 16) 133#if XTENSA_HAVE_COPROCESSOR(4)
134COPROCESSOR(4), 134COPROCESSOR(4),
135#endif 135#endif
136#if (XCHAL_CP_MASK & 32) 136#if XTENSA_HAVE_COPROCESSOR(5)
137COPROCESSOR(5), 137COPROCESSOR(5),
138#endif 138#endif
139#if (XCHAL_CP_MASK & 64) 139#if XTENSA_HAVE_COPROCESSOR(6)
140COPROCESSOR(6), 140COPROCESSOR(6),
141#endif 141#endif
142#if (XCHAL_CP_MASK & 128) 142#if XTENSA_HAVE_COPROCESSOR(7)
143COPROCESSOR(7), 143COPROCESSOR(7),
144#endif 144#endif
145{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug }, 145{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
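The traps.c hunk replaces the hand-written XCHAL_CP_MASK bit tests with the XTENSA_HAVE_COPROCESSOR(n) helper, presumably provided by the updated coprocessor header, so the dispatch table entries read the same for every coprocessor index. The toy program below only illustrates the per-index bit test that such a helper wraps; CP_MASK and HAVE_CP are made-up names for the sketch, not the kernel's actual definitions.

#include <stdio.h>

/* Stand-in configuration mask: coprocessors 0 and 1 present. */
#define CP_MASK		0x03
#define HAVE_CP(n)	((CP_MASK >> (n)) & 1)

int main(void)
{
	int i;

	/* Mirrors the shape of the dispatch table: one entry per configured coprocessor. */
	for (i = 0; i < 8; i++)
		printf("coprocessor %d: %s\n", i,
		       HAVE_CP(i) ? "handler registered" : "skipped");
	return 0;
}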