-rw-r--r--  arch/xtensa/kernel/Makefile        |   18
-rw-r--r--  arch/xtensa/kernel/align.S         |  459
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c   |   94
-rw-r--r--  arch/xtensa/kernel/coprocessor.S   |  201
-rw-r--r--  arch/xtensa/kernel/entry.S         | 1996
-rw-r--r--  arch/xtensa/kernel/head.S          |  237
-rw-r--r--  arch/xtensa/kernel/irq.c           |  192
-rw-r--r--  arch/xtensa/kernel/module.c        |   78
-rw-r--r--  arch/xtensa/kernel/pci-dma.c       |   73
-rw-r--r--  arch/xtensa/kernel/pci.c           |  563
-rw-r--r--  arch/xtensa/kernel/platform.c      |   49
-rw-r--r--  arch/xtensa/kernel/process.c       |  482
-rw-r--r--  arch/xtensa/kernel/ptrace.c        |  407
-rw-r--r--  arch/xtensa/kernel/semaphore.c     |  226
-rw-r--r--  arch/xtensa/kernel/setup.c         |  520
-rw-r--r--  arch/xtensa/kernel/signal.c        |  713
-rw-r--r--  arch/xtensa/kernel/syscalls.c      |  418
-rw-r--r--  arch/xtensa/kernel/syscalls.h      |  248
-rw-r--r--  arch/xtensa/kernel/time.c          |  227
-rw-r--r--  arch/xtensa/kernel/traps.c         |  498
-rw-r--r--  arch/xtensa/kernel/vectors.S       |  464
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S   |  341
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c  |  123
23 files changed, 8627 insertions, 0 deletions
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
new file mode 100644
index 000000000000..d573017a5dde
--- /dev/null
+++ b/arch/xtensa/kernel/Makefile
@@ -0,0 +1,18 @@
#
# Makefile for the Linux/Xtensa kernel.
#

extra-y := head.o vmlinux.lds


obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
         setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \
         pci-dma.o

## windowspill.o

obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o

diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
new file mode 100644
index 000000000000..74b1e90ef08c
--- /dev/null
+++ b/arch/xtensa/kernel/align.S
@@ -0,0 +1,459 @@
/*
 * arch/xtensa/kernel/align.S
 *
 * Handle unalignment exceptions in kernel space.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica, Inc.
 *
 * Rewritten by Chris Zankel <chris@zankel.net>
 *
 * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION

/* First-level exception handler for unaligned exceptions.
 *
 * Note: This handler works only for kernel exceptions. Unaligned user
 * access should get a seg fault.
 */

/* Big and little endian 16-bit values are located in
 * different halves of a register. HWORD_START helps to
 * abstract the notion of extracting a 16-bit value from a
 * register.
 * We also have to define new shifting instructions because
 * lsb and msb are on 'opposite' ends in a register for
 * different endian machines.
 *
 * Assume a memory region in ascending address:
 *      0 1 2 3|4 5 6 7
 *
 * When loading one word into a register, the content of that register is:
 *  LE  3 2 1 0, 7 6 5 4
 *  BE  0 1 2 3, 4 5 6 7
 *
 * Masking the bits of the higher/lower address means:
 *  LE  X X 0 0, 0 0 X X
 *  BE  0 0 X X, X X 0 0
 *
 * Shifting to higher/lower addresses, means:
 *  LE  shift left / shift right
 *  BE  shift right / shift left
 *
 * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
 *  LE  mask 0 0 X X / shift left
 *  BE  shift left / mask 0 0 X X
 */
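
For illustration only, the little-endian load case described above can be
modeled in C (a hypothetical sketch, not part of this patch): two aligned
word loads combined with a funnel shift, which is what the __ssa8/__src_b
macros perform in hardware.

        #include <stdint.h>

        /* Hypothetical C model of the LE unaligned-load scheme above. */
        static uint32_t load_unaligned32(const uint8_t *addr)
        {
                const uint32_t *w =
                        (const uint32_t *)((uintptr_t)addr & ~(uintptr_t)3);
                unsigned int shift = ((uintptr_t)addr & 3) * 8;

                if (shift == 0)
                        return w[0];    /* already aligned */

                /* combine the two aligned words (__ssa8 + __src_b) */
                return (w[0] >> shift) | (w[1] << (32 - shift));
        }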

#define UNALIGNED_USER_EXCEPTION

#if XCHAL_HAVE_BE

#define HWORD_START 16
#define INSN_OP0 28
#define INSN_T 24
#define INSN_OP1 16

.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
.macro __ssa8 r; ssa8b \r; .endm
.macro __ssa8r r; ssa8l \r; .endm
.macro __sh r, s; srl \r, \s; .endm
.macro __sl r, s; sll \r, \s; .endm
.macro __exth r, s; extui \r, \s, 0, 16; .endm
.macro __extl r, s; slli \r, \s, 16; .endm

#else

#define HWORD_START 0
#define INSN_OP0 0
#define INSN_T 4
#define INSN_OP1 12

.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
.macro __ssa8 r; ssa8l \r; .endm
.macro __ssa8r r; ssa8b \r; .endm
.macro __sh r, s; sll \r, \s; .endm
.macro __sl r, s; srl \r, \s; .endm
.macro __exth r, s; slli \r, \s, 16; .endm
.macro __extl r, s; extui \r, \s, 0, 16; .endm

#endif


/*
 * xxxx xxxx = imm8 field
 * yyyy      = imm4 field
 * ssss      = s field
 * tttt      = t field
 *
 *         16                 0
 *         -------------------
 * L32I.N  yyyy ssss tttt 1000
 * S32I.N  yyyy ssss tttt 1001
 *
 *        23                           0
 *        -----------------------------
 *        res             0000    0010
 * L16UI  xxxx xxxx 0001 ssss tttt 0010
 * L32I   xxxx xxxx 0010 ssss tttt 0010
 * XXX              0011 ssss tttt 0010
 * XXX              0100 ssss tttt 0010
 * S16I   xxxx xxxx 0101 ssss tttt 0010
 * S32I   xxxx xxxx 0110 ssss tttt 0010
 * XXX              0111 ssss tttt 0010
 * XXX              1000 ssss tttt 0010
 * L16SI  xxxx xxxx 1001 ssss tttt 0010
 * XXX              1010           0010
 * **L32AI xxxx xxxx 1011 ssss tttt 0010  unsupported
 * XXX              1100           0010
 * XXX              1101           0010
 * XXX              1110           0010
 * **S32RI xxxx xxxx 1111 ssss tttt 0010  unsupported
 *        -----------------------------
 *                    ^    ^    ^
 *  sub-opcode (NIBBLE_R) -+    |    |
 *  t field (NIBBLE_T) ---------+    |
 *  major opcode (NIBBLE_OP0) -------+
 */

#define OP0_L32I_N 0x8          /* load immediate narrow */
#define OP0_S32I_N 0x9          /* store immediate narrow */
#define OP1_SI_MASK 0x4         /* OP1 bit set for stores */
#define OP1_SI_BIT 2            /* OP1 bit number for stores */

#define OP1_L32I 0x2
#define OP1_L16UI 0x1
#define OP1_L16SI 0x9
#define OP1_L32AI 0xb

#define OP1_S32I 0x6
#define OP1_S16I 0x5
#define OP1_S32RI 0xf

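As a rough C illustration of the field extraction the handler performs with
extui below (hypothetical helper; little-endian field positions INSN_OP0 = 0,
INSN_T = 4, INSN_OP1 = 12):

        /* Hypothetical decoder matching the nibble layout above (LE). */
        static void decode_insn(unsigned int insn, unsigned int *op0,
                                unsigned int *t, unsigned int *op1)
        {
                *op0 = (insn >> 0) & 0xf;       /* major opcode           */
                *t   = (insn >> 4) & 0xf;       /* target/source register */
                *op1 = (insn >> 12) & 0xf;      /* sub-opcode             */
        }
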
/*
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */


ENTRY(fast_unaligned)

        /* Note: We don't expect the address to be aligned on a word
         * boundary. After all, the processor generated that exception
         * and it would be a hardware fault.
         */

        /* Save some working registers */

        s32i a4, a2, PT_AREG4
        s32i a5, a2, PT_AREG5
        s32i a6, a2, PT_AREG6
        s32i a7, a2, PT_AREG7
        s32i a8, a2, PT_AREG8

        rsr a0, DEPC
        xsr a3, EXCSAVE_1
        s32i a0, a2, PT_AREG2
        s32i a3, a2, PT_AREG3

        /* Keep value of SAR in a0 */

        rsr a0, SAR
        rsr a8, EXCVADDR                # load unaligned memory address

        /* Now, identify one of the following load/store instructions.
         *
         * The only possible danger of a double exception on the
         * following l32i instructions is kernel code in vmalloc
         * memory. The processor was just executing at the EPC_1
         * address, and indeed, already fetched the instruction. That
         * guarantees a TLB mapping, which hasn't been replaced by
         * this unaligned exception handler that uses only static TLB
         * mappings. However, high-level interrupt handlers might
         * modify TLB entries, so for the generic case, we register a
         * TABLE_FIXUP handler here, too.
         */

        /* a3...a6 saved on stack, a2 = SP */

        /* Extract the instruction that caused the unaligned access. */

        rsr a7, EPC_1                   # load exception address
        movi a3, ~3
        and a3, a3, a7                  # mask lower bits

        l32i a4, a3, 0                  # load 2 words
        l32i a5, a3, 4

        __ssa8 a7
        __src_b a4, a4, a5              # a4 has the instruction

        /* Analyze the instruction (load or store?). */

        extui a5, a4, INSN_OP0, 4       # get insn.op0 nibble

#if XCHAL_HAVE_NARROW
        _beqi a5, OP0_L32I_N, .Lload    # L32I.N, jump
        addi a6, a5, -OP0_S32I_N
        _beqz a6, .Lstore               # S32I.N, do a store
#endif
        /* 'store indicator bit' not set, jump */
        _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload

        /* Store: Jump to table entry to get the value in the source register.*/

.Lstore:movi a5, .Lstore_table          # table
        extui a6, a4, INSN_T, 4         # get source register
        addx8 a5, a6, a5
        jx a5                           # jump into table

        /* Invalid instruction, CRITICAL! */
.Linvalid_instruction_load:
        j .Linvalid_instruction

        /* Load: Load memory address. */

.Lload: movi a3, ~3
        and a3, a3, a8                  # align memory address

        __ssa8 a8
#ifdef UNALIGNED_USER_EXCEPTION
        addi a3, a3, 8
        l32e a5, a3, -8
        l32e a6, a3, -4
#else
        l32i a5, a3, 0
        l32i a6, a3, 4
#endif
        __src_b a3, a5, a6              # a3 has the data word

#if XCHAL_HAVE_NARROW
        addi a7, a7, 2                  # increment PC (assume 16-bit insn)

        extui a5, a4, INSN_OP0, 4
        _beqi a5, OP0_L32I_N, 1f        # l32i.n: jump

        addi a7, a7, 1
#else
        addi a7, a7, 3
#endif

        extui a5, a4, INSN_OP1, 4
        _beqi a5, OP1_L32I, 1f          # l32i: jump

        extui a3, a3, 0, 16             # extract lower 16 bits
        _beqi a5, OP1_L16UI, 1f
        addi a5, a5, -OP1_L16SI
        _bnez a5, .Linvalid_instruction_load

        /* sign extend value */

        slli a3, a3, 16
        srai a3, a3, 16

        /* Set target register. */

1:

#if XCHAL_HAVE_LOOP
        rsr a3, LEND                    # check if we reached LEND
        bne a7, a3, 1f
        rsr a3, LCOUNT                  # and LCOUNT != 0
        beqz a3, 1f
        addi a3, a3, -1                 # decrement LCOUNT and set
        rsr a7, LBEG                    # set PC to LBEGIN
        wsr a3, LCOUNT
#endif

1:      wsr a7, EPC_1                   # skip load instruction
        extui a4, a4, INSN_T, 4         # extract target register
        movi a5, .Lload_table
        addx8 a4, a4, a5
        jx a4                           # jump to entry for target register

        .align 8
.Lload_table:
        s32i a3, a2, PT_AREG0; _j .Lexit; .align 8
        mov a1, a3; _j .Lexit; .align 8         # fishy??
        s32i a3, a2, PT_AREG2; _j .Lexit; .align 8
        s32i a3, a2, PT_AREG3; _j .Lexit; .align 8
        s32i a3, a2, PT_AREG4; _j .Lexit; .align 8
        s32i a3, a2, PT_AREG5; _j .Lexit; .align 8
        s32i a3, a2, PT_AREG6; _j .Lexit; .align 8
        s32i a3, a2, PT_AREG7; _j .Lexit; .align 8
        s32i a3, a2, PT_AREG8; _j .Lexit; .align 8
        mov a9, a3 ; _j .Lexit; .align 8
        mov a10, a3 ; _j .Lexit; .align 8
        mov a11, a3 ; _j .Lexit; .align 8
        mov a12, a3 ; _j .Lexit; .align 8
        mov a13, a3 ; _j .Lexit; .align 8
        mov a14, a3 ; _j .Lexit; .align 8
        mov a15, a3 ; _j .Lexit; .align 8

.Lstore_table:
        l32i a3, a2, PT_AREG0; _j 1f; .align 8
        mov a3, a1; _j 1f; .align 8             # fishy??
        l32i a3, a2, PT_AREG2; _j 1f; .align 8
        l32i a3, a2, PT_AREG3; _j 1f; .align 8
        l32i a3, a2, PT_AREG4; _j 1f; .align 8
        l32i a3, a2, PT_AREG5; _j 1f; .align 8
        l32i a3, a2, PT_AREG6; _j 1f; .align 8
        l32i a3, a2, PT_AREG7; _j 1f; .align 8
        l32i a3, a2, PT_AREG8; _j 1f; .align 8
        mov a3, a9 ; _j 1f; .align 8
        mov a3, a10 ; _j 1f; .align 8
        mov a3, a11 ; _j 1f; .align 8
        mov a3, a12 ; _j 1f; .align 8
        mov a3, a13 ; _j 1f; .align 8
        mov a3, a14 ; _j 1f; .align 8
        mov a3, a15 ; _j 1f; .align 8

1:      # a7: instruction pointer, a4: instruction, a3: value

        movi a6, 0                      # mask: ffffffff:00000000

#if XCHAL_HAVE_NARROW
        addi a7, a7, 2                  # incr. PC, assume 16-bit instruction

        extui a5, a4, INSN_OP0, 4       # extract OP0
        addi a5, a5, -OP0_S32I_N
        _beqz a5, 1f                    # s32i.n: jump

        addi a7, a7, 1                  # increment PC, 32-bit instruction
#else
        addi a7, a7, 3                  # increment PC, 32-bit instruction
#endif

        extui a5, a4, INSN_OP1, 4       # extract OP1
        _beqi a5, OP1_S32I, 1f          # jump if 32 bit store
        _bnei a5, OP1_S16I, .Linvalid_instruction_store

        movi a5, -1
        __extl a3, a3                   # get 16-bit value
        __exth a6, a5                   # get 16-bit mask ffffffff:ffff0000

        /* Get memory address */

1:
#if XCHAL_HAVE_LOOP
        rsr a3, LEND                    # check if we reached LEND
        bne a7, a3, 1f
        rsr a3, LCOUNT                  # and LCOUNT != 0
        beqz a3, 1f
        addi a3, a3, -1                 # decrement LCOUNT and set
        rsr a7, LBEG                    # set PC to LBEGIN
        wsr a3, LCOUNT
#endif

1:      wsr a7, EPC_1                   # skip store instruction
        movi a4, ~3
        and a4, a4, a8                  # align memory address

        /* Insert value into memory */

        movi a5, -1                     # mask: ffffffff:XXXX0000
#ifdef UNALIGNED_USER_EXCEPTION
        addi a4, a4, 8
#endif

        __ssa8r a8
        __src_b a7, a5, a6              # lo-mask F..F0..0 (BE) 0..0F..F (LE)
        __src_b a6, a6, a5              # hi-mask 0..0F..F (BE) F..F0..0 (LE)
#ifdef UNALIGNED_USER_EXCEPTION
        l32e a5, a4, -8
#else
        l32i a5, a4, 0                  # load lower address word
#endif
        and a5, a5, a7                  # mask
        __sh a7, a3                     # shift value
        or a5, a5, a7                   # or with original value
#ifdef UNALIGNED_USER_EXCEPTION
        s32e a5, a4, -8
        l32e a7, a4, -4
#else
        s32i a5, a4, 0                  # store
        l32i a7, a4, 4                  # same for upper address word
#endif
        __sl a5, a3
        and a6, a7, a6
        or a6, a6, a5
#ifdef UNALIGNED_USER_EXCEPTION
        s32e a6, a4, -4
#else
        s32i a6, a4, 4
#endif
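
For illustration, the read-modify-write sequence above amounts to the
following C (hypothetical sketch; little-endian case, kernel-address
variant without l32e/s32e):

        #include <stdint.h>

        /* Hypothetical C model of the LE unaligned 32-bit store above. */
        static void store_unaligned32(uint8_t *addr, uint32_t val)
        {
                uint32_t *w = (uint32_t *)((uintptr_t)addr & ~(uintptr_t)3);
                unsigned int shift = ((uintptr_t)addr & 3) * 8;
                uint32_t lo_keep = shift ? (~(uint32_t)0 >> (32 - shift)) : 0;

                /* merge the low part of val into the lower word */
                w[0] = (w[0] & lo_keep) | (val << shift);
                if (shift)      /* merge the spill-over into the upper word */
                        w[1] = (w[1] & ~(~(uint32_t)0 >> (32 - shift)))
                               | (val >> (32 - shift));
        }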

        /* Done. Restore stack and return */

.Lexit:
        movi a4, 0
        rsr a3, EXCSAVE_1
        s32i a4, a3, EXC_TABLE_FIXUP

        /* Restore working registers */

        l32i a7, a2, PT_AREG7
        l32i a6, a2, PT_AREG6
        l32i a5, a2, PT_AREG5
        l32i a4, a2, PT_AREG4
        l32i a3, a2, PT_AREG3

        /* restore SAR and return */

        wsr a0, SAR
        l32i a0, a2, PT_AREG0
        l32i a2, a2, PT_AREG2
        rfe

        /* We cannot handle this exception. */

        .extern _kernel_exception
.Linvalid_instruction_store:
.Linvalid_instruction:

        /* Restore a4...a8 and SAR, set SP, and jump to default exception. */

        l32i a8, a2, PT_AREG8
        l32i a7, a2, PT_AREG7
        l32i a6, a2, PT_AREG6
        l32i a5, a2, PT_AREG5
        l32i a4, a2, PT_AREG4
        wsr a0, SAR
        mov a1, a2

        rsr a0, PS
        bbsi.l a2, PS_UM_SHIFT, 1f      # jump if user mode

        movi a0, _kernel_exception
        jx a0

1:      movi a0, _user_exception
        jx a0


#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */

diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
new file mode 100644
index 000000000000..840cd9a1d3d2
--- /dev/null
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -0,0 +1,94 @@
/*
 * arch/xtensa/kernel/asm-offsets.c
 *
 * Generates definitions from C-type structures used by assembly sources.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
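
The "->" marker technique can be illustrated standalone (a hypothetical
sketch, not part of this file): the "i" constraint forces each value to
appear as a compile-time immediate in the generated assembly listing, and
the build greps the resulting "->SYM value" lines out of asm-offsets.s to
produce the constants that asm/offsets.h hands to assembly sources.

        #include <stddef.h>

        struct pt_regs_demo { unsigned long pc; unsigned long ps; };

        /* Same trick as DEFINE() above, under a hypothetical name. */
        #define DEFINE_DEMO(sym, val) \
                asm volatile("\n->" #sym " %0 " #val : : "i" (val))

        void emit_offsets_demo(void)
        {
                /* leaves "->PT_PC_DEMO 0 ..." in the generated .s file */
                DEFINE_DEMO(PT_PC_DEMO, offsetof(struct pt_regs_demo, pc));
                DEFINE_DEMO(PT_PS_DEMO, offsetof(struct pt_regs_demo, ps));
        }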

int main(void)
{
        /* struct pt_regs */
        DEFINE(PT_PC, offsetof (struct pt_regs, pc));
        DEFINE(PT_PS, offsetof (struct pt_regs, ps));
        DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
        DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
        DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
        DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
        DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
        DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
        DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
        DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
        DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
        DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
        DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
        DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
        DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
        DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
        DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
        DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
        DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
        DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
        DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
        DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
        DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
        DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
        DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
        DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
        DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
        DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
        DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
        DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
        DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
        DEFINE(PT_SIZE, sizeof(struct pt_regs));
        DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
        DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
        BLANK();

        /* struct task_struct */
        DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
        DEFINE(TASK_MM, offsetof (struct task_struct, mm));
        DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
        DEFINE(TASK_PID, offsetof (struct task_struct, pid));
        DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
        DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
        DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
        BLANK();

        /* struct thread_info (offset from start_struct) */
        DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
        DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
        DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
        DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
        BLANK();

        /* struct mm_struct */
        DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
        DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
        DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
        BLANK();
        DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
        return 0;
}


diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
new file mode 100644
index 000000000000..356192a4d39d
--- /dev/null
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -0,0 +1,201 @@
/*
 * arch/xtensa/kernel/coprocessor.S
 *
 * Xtensa processor configuration-specific table of coprocessor and
 * other custom register layout information.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 *
 * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
 */

/*
 * This module contains a table that describes the layout of the various
 * custom registers and states associated with each coprocessor, as well
 * as those not associated with any coprocessor ("extra state").
 * This table is included with core dumps and is available via the ptrace
 * interface, allowing the layout of such register/state information to
 * be modified in the kernel without affecting the debugger. Each
 * register or state is identified using a 32-bit "libdb target number"
 * assigned when the Xtensa processor is generated.
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/processor.h>

#if XCHAL_HAVE_CP

#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)

ENTRY(release_coprocessors)

        entry a1, 16
                                        # a2: task
        movi a3, 1 << XCHAL_CP_MAX      # a3: coprocessor-bit
        movi a4, coprocessor_info+CP_LAST # a4: owner-table
                                        # a5: tmp
        movi a6, 0                      # a6: 0
        rsil a7, LOCKLEVEL              # a7: PS

1:      /* Check if task is coprocessor owner of coprocessor[i]. */

        l32i a5, a4, COPROCESSOR_INFO_OWNER
        srli a3, a3, 1
        beqz a3, 1f
        addi a4, a4, -8
        beq a2, a5, 1b

        /* Found an entry: Clear entry CPENABLE bit to disable CP. */

        rsr a5, CPENABLE
        s32i a6, a4, COPROCESSOR_INFO_OWNER
        xor a5, a3, a5
        wsr a5, CPENABLE

        bnez a3, 1b

1:      wsr a7, PS
        rsync
        retw


ENTRY(disable_coprocessor)
        entry sp, 16
        rsil a7, LOCKLEVEL
        rsr a3, CPENABLE
        movi a4, 1
        ssl a2
        sll a4, a4
        and a4, a3, a4
        xor a3, a3, a4
        wsr a3, CPENABLE
        wsr a7, PS
        rsync
        retw

ENTRY(enable_coprocessor)
        entry sp, 16
        rsil a7, LOCKLEVEL
        rsr a3, CPENABLE
        movi a4, 1
        ssl a2
        sll a4, a4
        or a3, a3, a4
        wsr a3, CPENABLE
        wsr a7, PS
        rsync
        retw

#endif

ENTRY(save_coprocessor_extra)
        entry sp, 16
        xchal_extra_store_funcbody
        retw

ENTRY(restore_coprocessor_extra)
        entry sp, 16
        xchal_extra_load_funcbody
        retw

ENTRY(save_coprocessor_registers)
        entry sp, 16
        xchal_cpi_store_funcbody
        retw

ENTRY(restore_coprocessor_registers)
        entry sp, 16
        xchal_cpi_load_funcbody
        retw


/*
 * The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
 * describe the contents of coprocessor & extra save areas in terms of
 * undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
 * latter macros here; they expand into a table of the format we want.
 * The general format is:
 *
 *      CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
 *                          bitmask, rsv2, rsv3)
 *      CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
 *                          bitmask, rsv2, rsv3)
 *      CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
 *                          numentries, contentsize, regname_base,
 *                          regfile_name, rsv2, rsv3)
 *
 * For this table, we only care about the <libdbnum>, <offset> and <size>
 * fields.
 */

/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */

#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
                            bitmask, rsv2, rsv3) \
        reg_entry libdbnum, offset, size ;
#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
                            bitmask, rsv2, rsv3) \
        reg_entry libdbnum, offset, size ;
#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
                            numentries, contentsize, regname_base, \
                            regfile_name, rsv2, rsv3) \
        reg_entry libdbnum, offset, size ;

/* A single table entry: */
        .macro reg_entry libdbnum, offset, size
        .ifne (__last_offset-(__last_group_offset+\offset))
        /* padding entry */
        .word (0xFC000000+__last_offset-(__last_group_offset+\offset))
        .endif
        .word \libdbnum                 /* actual entry */
        .set __last_offset, __last_group_offset+\offset+\size
        .endm   /* reg_entry */


/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
        .macro reg_group cpnum, num_entries, align
        .set __last_group_offset, (__last_offset + \align - 1) & -\align
        .ifne \num_entries
        .word 0xFD000000+(\cpnum<<16)+\num_entries
        .endif
        .endm   /* reg_group */
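
A hypothetical C-side reader of the emitted table would classify each 32-bit
word as follows (sketch only; the encodings are those produced by the
reg_entry and reg_group macros above):

        /* Hypothetical classifier for one word of the reginfo table. */
        static const char *classify_word(unsigned int w)
        {
                if ((w & 0xff000000) == 0xfd000000)
                        return "group header: cp in bits 16..23, "
                               "entry count in bits 0..15";
                if (w == 0xfc000000)
                        return "end-of-table marker";
                if ((w & 0xff000000) == 0xfc000000)
                        return "padding entry";
                return "libdb register number";
        }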

/*
 * Register info tables.
 */

        .section .rodata, "a"
        .globl _xtensa_reginfo_tables
        .globl _xtensa_reginfo_table_size
        .align 4
_xtensa_reginfo_table_size:
        .word _xtensa_reginfo_table_end - _xtensa_reginfo_tables

_xtensa_reginfo_tables:
        .set __last_offset, 0
        reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
        XCHAL_EXTRA_SA_CONTENTS_LIBDB
        reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
        XCHAL_CP0_SA_CONTENTS_LIBDB
        reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
        XCHAL_CP1_SA_CONTENTS_LIBDB
        reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
        XCHAL_CP2_SA_CONTENTS_LIBDB
        reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
        XCHAL_CP3_SA_CONTENTS_LIBDB
        reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
        XCHAL_CP4_SA_CONTENTS_LIBDB
        reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
        XCHAL_CP5_SA_CONTENTS_LIBDB
        reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
        XCHAL_CP6_SA_CONTENTS_LIBDB
        reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
        XCHAL_CP7_SA_CONTENTS_LIBDB
        .word 0xFC000000        /* invalid register number, marks end of table */
_xtensa_reginfo_table_end:

diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
new file mode 100644
index 000000000000..c64a01f71de6
--- /dev/null
+++ b/arch/xtensa/kernel/entry.S
@@ -0,0 +1,1996 @@
/*
 * arch/xtensa/kernel/entry.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2005 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/offsets.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <xtensa/coreasm.h>

/* Unimplemented features. */

#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
#undef KERNEL_STACK_OVERFLOW_CHECK
#undef PREEMPTIBLE_KERNEL
#undef ALLOCA_EXCEPTION_IN_IRAM

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

        .macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
        nsau \bit, \mask                # 32-WSBITS ... 31 (32 iff 0)
        addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1
#else
        movi \bit, WSBITS
#if WSBITS > 16
        _bltui \mask, 0x10000, 99f
        addi \bit, \bit, -16
        extui \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:     _bltui \mask, 0x100, 99f
        addi \bit, \bit, -8
        srli \mask, \mask, 8
#endif
99:     _bltui \mask, 0x10, 99f
        addi \bit, \bit, -4
        srli \mask, \mask, 4
99:     _bltui \mask, 0x4, 99f
        addi \bit, \bit, -2
        srli \mask, \mask, 2
99:     _bltui \mask, 0x2, 99f
        addi \bit, \bit, -1
99:

#endif
        .endm
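
In C terms, the fallback path of this macro performs a binary search for the
most-significant set bit (a hypothetical model, not part of this patch):

        /* C model of the non-NSA ffs_ws path above: 1-based position of
         * the most-significant set bit, counted from the left of a
         * wsbits-wide mask. */
        static int ffs_ws_model(unsigned int mask, int wsbits)
        {
                int bit = wsbits;

                if (wsbits > 16 && mask >= 0x10000) { bit -= 16; mask >>= 16; }
                if (wsbits > 8 && mask >= 0x100) { bit -= 8; mask >>= 8; }
                if (mask >= 0x10) { bit -= 4; mask >>= 4; }
                if (mask >= 0x4) { bit -= 2; mask >>= 2; }
                if (mask >= 0x2) bit -= 1;
                return bit;
        }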

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original value in depc
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave1:  a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(user_exception)

        /* Save a2, a3, and depc, restore excsave_1 and set SP. */

        xsr a3, EXCSAVE_1
        rsr a0, DEPC
        s32i a1, a2, PT_AREG1
        s32i a0, a2, PT_AREG2
        s32i a3, a2, PT_AREG3
        mov a1, a2

        .globl _user_exception
_user_exception:

        /* Save SAR and turn off single stepping */

        movi a2, 0
        rsr a3, SAR
        wsr a2, ICOUNTLEVEL
        s32i a3, a1, PT_SAR

        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

        rsr a2, WINDOWBASE
        rsr a3, WINDOWSTART
        ssr a2
        s32i a2, a1, PT_WINDOWBASE
        s32i a3, a1, PT_WINDOWSTART
        slli a2, a3, 32-WSBITS
        src a2, a3, a2
        srli a2, a2, 32-WSBITS
        s32i a2, a1, PT_WMASK           # needed for restoring registers
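
In C, the rotation performed by the ssr/slli/src/srli sequence above can be
modeled as follows (hypothetical sketch; WSBITS is configuration-dependent):

        #define WSBITS_DEMO 16  /* assumption: number of WINDOWSTART bits */

        /* Rotate ws right by wb so the current frame's bit lands at bit 0. */
        static unsigned int rotate_ws(unsigned int ws, unsigned int wb)
        {
                unsigned int r = (ws >> wb) | (ws << (WSBITS_DEMO - wb));

                return r & ((1u << WSBITS_DEMO) - 1);
        }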

        /* Save only live registers. */

        _bbsi.l a2, 1, 1f
        s32i a4, a1, PT_AREG4
        s32i a5, a1, PT_AREG5
        s32i a6, a1, PT_AREG6
        s32i a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        s32i a8, a1, PT_AREG8
        s32i a9, a1, PT_AREG9
        s32i a10, a1, PT_AREG10
        s32i a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        s32i a12, a1, PT_AREG12
        s32i a13, a1, PT_AREG13
        s32i a14, a1, PT_AREG14
        s32i a15, a1, PT_AREG15
        _bnei a2, 1, 1f                 # only one valid frame?

        /* Only one valid frame, skip saving regs. */

        j 2f

        /* Save the remaining registers.
         * We have to save all registers up to the first '1' from
         * the right, except the current frame (bit 0).
         * Assume a2 is: 001001000110001
         * All register frames starting from the top field to the marked '1'
         * must be saved.
         */

1:      addi a3, a2, -1                 # eliminate '1' in bit 0: yyyyxxww0
        neg a3, a3                      # yyyyxxww0 -> YYYYXXWW1+1
        and a3, a3, a2                  # max. only one bit is set

        /* Find number of frames to save */

        ffs_ws a0, a3                   # number of frames to the '1' from left

        /* Store information into WMASK:
         * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
         * bits 4...: number of valid 4-register frames
         */

        slli a3, a0, 4                  # number of frames to save in bits 8..4
        extui a2, a2, 0, 4              # mask for the first 16 registers
        or a2, a3, a2
        s32i a2, a1, PT_WMASK           # needed when we restore the reg-file

        /* Save 4 registers at a time */

1:      rotw -1
        s32i a0, a5, PT_AREG_END - 16
        s32i a1, a5, PT_AREG_END - 12
        s32i a2, a5, PT_AREG_END - 8
        s32i a3, a5, PT_AREG_END - 4
        addi a0, a4, -1
        addi a1, a5, -16
        _bnez a0, 1b

        /* WINDOWBASE still in SAR! */

        rsr a2, SAR                     # original WINDOWBASE
        movi a3, 1
        ssl a2
        sll a3, a3
        wsr a3, WINDOWSTART             # set corresponding WINDOWSTART bit
        wsr a2, WINDOWBASE              # and WINDOWSTART
        rsync

        /* We are back to the original stack pointer (a1) */

2:
#if XCHAL_EXTRA_SA_SIZE

        /* For user exceptions, save the extra state into the user's TCB.
         * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
         */

        GET_CURRENT(a2,a1)
        addi a2, a2, THREAD_CP_SAVE
        xchal_extra_store_funcbody
#endif

        /* Now, jump to the common exception handler. */

        j common_exception


/*
 * First-level exit handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *       MOVSP here, as we do that when we return from the exception.
 *       (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

        /* Save a0, a2, a3, DEPC and set SP. */

        xsr a3, EXCSAVE_1               # restore a3, excsave_1
        rsr a0, DEPC                    # get a2
        s32i a1, a2, PT_AREG1
        s32i a0, a2, PT_AREG2
        s32i a3, a2, PT_AREG3
        mov a1, a2

        .globl _kernel_exception
_kernel_exception:

        /* Save SAR and turn off single stepping */

        movi a2, 0
        rsr a3, SAR
        wsr a2, ICOUNTLEVEL
        s32i a3, a1, PT_SAR

        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

        rsr a2, WINDOWBASE              # don't need to save these, we only
        rsr a3, WINDOWSTART             # need shifted windowstart: windowmask
        ssr a2
        slli a2, a3, 32-WSBITS
        src a2, a3, a2
        srli a2, a2, 32-WSBITS
        s32i a2, a1, PT_WMASK           # needed for kernel_exception_exit

        /* Save only the live window-frame */

        _bbsi.l a2, 1, 1f
        s32i a4, a1, PT_AREG4
        s32i a5, a1, PT_AREG5
        s32i a6, a1, PT_AREG6
        s32i a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        s32i a8, a1, PT_AREG8
        s32i a9, a1, PT_AREG9
        s32i a10, a1, PT_AREG10
        s32i a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        s32i a12, a1, PT_AREG12
        s32i a13, a1, PT_AREG13
        s32i a14, a1, PT_AREG14
        s32i a15, a1, PT_AREG15

1:

#ifdef KERNEL_STACK_OVERFLOW_CHECK

        /* Stack overflow check, for debugging */
        extui a2, a1, TASK_SIZE_BITS,XX
        movi a3, SIZE??
        _bge a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

        /* Save EXCVADDR, DEBUGCAUSE, and PC, and clear LCOUNT */

        rsr a2, DEBUGCAUSE
        rsr a3, EPC_1
        s32i a2, a1, PT_DEBUGCAUSE
        s32i a3, a1, PT_PC

        rsr a3, EXCVADDR
        movi a2, 0
        s32i a3, a1, PT_EXCVADDR
        xsr a2, LCOUNT
        s32i a2, a1, PT_LCOUNT

        /* It is now safe to restore the EXC_TABLE_FIXUP variable. */

        rsr a0, EXCCAUSE
        movi a3, 0
        rsr a2, EXCSAVE_1
        s32i a0, a1, PT_EXCCAUSE
        s32i a3, a2, EXC_TABLE_FIXUP

        /* All unrecoverable states are saved on stack, now, and a1 is valid,
         * so we can allow exceptions and interrupts (*) again.
         * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
         *
         * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
         *     (interrupts disabled) and if this exception is not an interrupt.
         */

        rsr a3, PS
        addi a0, a0, -4
        movi a2, 1
        extui a3, a3, 0, 1              # a3 = PS.INTLEVEL[0]
        moveqz a3, a2, a0               # a3 = 1 iff interrupt exception
        movi a2, PS_WOE_MASK
        or a3, a3, a2
        rsr a0, EXCCAUSE
        xsr a3, PS

        s32i a3, a1, PT_PS              # save ps

        /* Save LBEG, LEND */

        rsr a2, LBEG
        rsr a3, LEND
        s32i a2, a1, PT_LBEG
        s32i a3, a1, PT_LEND

        /* Go to second-level dispatcher. Set up parameters to pass to the
         * exception handler and call the exception handler.
         */

        movi a4, exc_table
        mov a6, a1                      # pass stack frame
        mov a7, a0                      # pass EXCCAUSE
        addx4 a4, a0, a4
        l32i a4, a4, EXC_TABLE_DEFAULT  # load handler

        /* Call the second-level handler */

        callx4 a4

        /* Jump here for exception exit */

common_exception_return:

        /* Jump if we are returning from kernel exceptions. */

1:      l32i a3, a1, PT_PS
        _bbsi.l a3, PS_UM_SHIFT, 2f
        j kernel_exception_exit

        /* Specific to a user exception exit:
         * We need to check some flags for signal handling and rescheduling,
         * and have to restore WB and WS, extra states, and all registers
         * in the register file that were in use in the user task.
         */

2:      wsr a3, PS                      /* disable interrupts */

        /* Check for signals (keep interrupts disabled while we read TI_FLAGS)
         * Note: PS.INTLEVEL = 0, PS.EXCM = 1
         */

        GET_THREAD_INFO(a2,a1)
        l32i a4, a2, TI_FLAGS

        /* Enable interrupts again.
         * Note: When we get here, we certainly have handled any interrupts.
         * (Hint: There is only one user exception frame on stack)
         */

        movi a3, PS_WOE_MASK

        _bbsi.l a4, TIF_NEED_RESCHED, 3f
        _bbci.l a4, TIF_SIGPENDING, 4f

#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
        l32i a4, a1, PT_DEPC
        bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
#endif

        /* Reenable interrupts and call do_signal() */

        wsr a3, PS
        movi a4, do_signal              # int do_signal(struct pt_regs*, sigset_t*)
        mov a6, a1
        movi a7, 0
        callx4 a4
        j 1b

3:      /* Reenable interrupts and reschedule */

        wsr a3, PS
        movi a4, schedule               # void schedule (void)
        callx4 a4
        j 1b

        /* Restore the state of the task and return from the exception. */


        /* If we are returning from a user exception, and the process
         * to run next has PT_SINGLESTEP set, we want to setup
         * ICOUNT and ICOUNTLEVEL to step one instruction.
         * PT_SINGLESTEP is set by sys_ptrace (ptrace.c)
         */

4:      /* a2 holds GET_CURRENT(a2,a1) */

        l32i a3, a2, TI_TASK
        l32i a3, a3, TASK_PTRACE
        bbci.l a3, PT_SINGLESTEP_BIT, 1f # jump if single-step flag is not set

        movi a3, -2                     # PT_SINGLESTEP flag is set,
        movi a4, 1                      # icountlevel of 1 means it won't
        wsr a3, ICOUNT                  # start counting until after rfe
        wsr a4, ICOUNTLEVEL             # so setup icount & icountlevel.
        isync

1:

#if XCHAL_EXTRA_SA_SIZE

        /* For user exceptions, restore the extra state from the user's TCB. */

        /* Note: a2 still contains GET_CURRENT(a2,a1) */
        addi a2, a2, THREAD_CP_SAVE
        xchal_extra_load_funcbody

        /* We must assume that xchal_extra_store_funcbody destroys
         * registers a2..a15. FIXME, this list can eventually be
         * reduced once real register requirements of the macro are
         * finalized. */

#endif /* XCHAL_EXTRA_SA_SIZE */


        /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

        l32i a2, a1, PT_WINDOWBASE
        l32i a3, a1, PT_WINDOWSTART
        wsr a1, DEPC                    # use DEPC as temp storage
        wsr a3, WINDOWSTART             # restore WINDOWSTART
        ssr a2                          # preserve user's WB in the SAR
        wsr a2, WINDOWBASE              # switch to user's saved WB
        rsync
        rsr a1, DEPC                    # restore stack pointer
        l32i a2, a1, PT_WMASK           # register frames saved (in bits 4...9)
        rotw -1                         # we restore a4..a7
        _bltui a6, 16, 1f               # only have to restore current window?

        /* The working registers are a0 and a3. We are restoring to
         * a4..a7. Be careful not to destroy what we have just restored.
         * Note: wmask has the format YYYYM:
         *       Y: number of registers saved in groups of 4
         *       M: 4 bit mask of first 16 registers
         */

        mov a2, a6
        mov a3, a5

2:      rotw -1                         # a0..a3 become a4..a7
        addi a3, a7, -4*4               # next iteration
        addi a2, a6, -16                # decrementing Y in WMASK
        l32i a4, a3, PT_AREG_END + 0
        l32i a5, a3, PT_AREG_END + 4
        l32i a6, a3, PT_AREG_END + 8
        l32i a7, a3, PT_AREG_END + 12
        _bgeui a2, 16, 2b

        /* Clear unrestored registers (don't leak anything to user-land) */

1:      rsr a0, WINDOWBASE
        rsr a3, SAR
        sub a3, a0, a3
        beqz a3, 2f
        extui a3, a3, 0, WBBITS

1:      rotw -1
        addi a3, a7, -1
        movi a4, 0
        movi a5, 0
        movi a6, 0
        movi a7, 0
        bgei a3, 1, 1b

        /* We are back where we were when we started.
         * Note: a2 still contains WMASK (if we've returned to the original
         *       frame where we had loaded a2), or at least the lower 4 bits
         *       (if we have restored WSBITS-1 frames).
         */

2:      j common_exception_exit

        /* This is the kernel exception exit.
         * We avoided doing a MOVSP when we entered the exception, but we
         * have to do it here.
         */

kernel_exception_exit:

        /* Disable interrupts (a3 holds PT_PS) */

        wsr a3, PS

#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

        /*
         * Note: We've just returned from a call4, so we have
         * at least 4 addt'l regs.
         */

        /* Check current_thread_info->preempt_count */

        GET_THREAD_INFO(a2)
        l32i a3, a2, TI_PREEMPT
        bnez a3, 1f

        l32i a2, a2, TI_FLAGS

1:

#endif

#endif

        /* Check if we have to do a movsp.
         *
         * We only have to do a movsp if the previous window-frame has
         * been spilled to the *temporary* exception stack instead of the
         * task's stack. This is the case if the corresponding bit in
         * WINDOWSTART for the previous window-frame was set before
         * (not spilled) but is zero now (spilled).
         * If this bit is zero, all other bits except the one for the
         * current window frame are also zero. So, we can use a simple test:
         * 'and' WINDOWSTART and WINDOWSTART-1:
         *
         *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
         *
         * The result is zero only if one bit was set.
         *
         * (Note: We might have gone through several task switches before
         *        we come back to the current task, so WINDOWBASE might be
         *        different from the time the exception occurred.)
         */

        /* Test WINDOWSTART before and after the exception.
         * We actually have WMASK, so we only have to test if it is 1 or not.
         */

        l32i a2, a1, PT_WMASK
        _beqi a2, 1, common_exception_exit # Spilled before exception, jump

        /* Test WINDOWSTART now. If spilled, do the movsp */

        rsr a3, WINDOWSTART
        addi a0, a3, -1
        and a3, a3, a0
        _bnez a3, common_exception_exit
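
The test above is the classic single-bit check; as a C model (hypothetical,
for illustration only):

        /* ws & (ws - 1) is nonzero if more than one frame is live;
         * zero means only the current frame's bit is set, i.e. the
         * previous frame was spilled and the MOVSP below is needed. */
        static int only_one_frame_live(unsigned int windowstart)
        {
                return (windowstart & (windowstart - 1)) == 0;
        }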

        /* Do a movsp (we returned from a call4, so we have at least a0..a7) */

        addi a0, a1, -16
        l32i a3, a0, 0
        l32i a4, a0, 4
        s32i a3, a1, PT_SIZE+0
        s32i a4, a1, PT_SIZE+4
        l32i a3, a0, 8
        l32i a4, a0, 12
        s32i a3, a1, PT_SIZE+8
        s32i a4, a1, PT_SIZE+12

        /* Common exception exit.
         * We restore the special register and the current window frame, and
         * return from the exception.
         *
         * Note: We expect a2 to hold PT_WMASK
         */

common_exception_exit:

        _bbsi.l a2, 1, 1f
        l32i a4, a1, PT_AREG4
        l32i a5, a1, PT_AREG5
        l32i a6, a1, PT_AREG6
        l32i a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        l32i a8, a1, PT_AREG8
        l32i a9, a1, PT_AREG9
        l32i a10, a1, PT_AREG10
        l32i a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        l32i a12, a1, PT_AREG12
        l32i a13, a1, PT_AREG13
        l32i a14, a1, PT_AREG14
        l32i a15, a1, PT_AREG15

        /* Restore PC, SAR */

1:      l32i a2, a1, PT_PC
        l32i a3, a1, PT_SAR
        wsr a2, EPC_1
        wsr a3, SAR

        /* Restore LBEG, LEND, LCOUNT */

        l32i a2, a1, PT_LBEG
        l32i a3, a1, PT_LEND
        wsr a2, LBEG
        l32i a2, a1, PT_LCOUNT
        wsr a3, LEND
        wsr a2, LCOUNT

        /* Check if it was double exception. */

        l32i a0, a1, PT_DEPC
        l32i a3, a1, PT_AREG3
        l32i a2, a1, PT_AREG2
        _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

        /* Restore a0...a3 and return */

        l32i a0, a1, PT_AREG0
        l32i a1, a1, PT_AREG1
        rfe

1:      wsr a0, DEPC
        l32i a0, a1, PT_AREG0
        l32i a1, a1, PT_AREG1
        rfde

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

        rsr a0, EPS + XCHAL_DEBUGLEVEL
        bbsi.l a0, PS_EXCM_SHIFT, 1f    # exception mode

        /* Set EPC_1 and EXCCAUSE */

        wsr a2, DEPC                    # save a2 temporarily
        rsr a2, EPC + XCHAL_DEBUGLEVEL
        wsr a2, EPC_1

        movi a2, EXCCAUSE_MAPPED_DEBUG
        wsr a2, EXCCAUSE

        /* Restore PS to the value before the debug exc but with PS.EXCM set.*/

        movi a2, 1 << PS_EXCM_SHIFT
        or a2, a0, a2
        movi a0, debug_exception        # restore a3, debug jump vector
        wsr a2, PS
        xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL

        /* Switch to kernel/user stack, restore jump vector, and save a0 */

        bbsi.l a2, PS_UM_SHIFT, 2f      # jump if user mode

        addi a2, a1, -16-PT_SIZE        # assume kernel stack
        s32i a0, a2, PT_AREG0
        movi a0, 0
        s32i a1, a2, PT_AREG1
        s32i a0, a2, PT_DEPC            # mark it as a regular exception
        xsr a0, DEPC
        s32i a3, a2, PT_AREG3
        s32i a0, a2, PT_AREG2
        mov a1, a2
        j _kernel_exception

2:      rsr a2, EXCSAVE_1
        l32i a2, a2, EXC_TABLE_KSTK     # load kernel stack pointer
        s32i a0, a2, PT_AREG0
        movi a0, 0
        s32i a1, a2, PT_AREG1
        s32i a0, a2, PT_DEPC
        xsr a0, DEPC
        s32i a3, a2, PT_AREG3
        s32i a0, a2, PT_AREG2
        mov a1, a2
        j _user_exception

        /* Debug exception while in exception mode. */
1:      j 1b                            // FIXME!!

746
747/*
748 * We get here in case of an unrecoverable exception.
749 * The only thing we can do is to be nice and print a panic message.
750 * We only produce a single stack frame for panic, so ???
751 *
752 *
753 * Entry conditions:
754 *
755 * - a0 contains the caller address; original value saved in excsave1.
756 * - the original a0 contains a valid return address (backtrace) or 0.
757 * - a2 contains a valid stackpointer
758 *
759 * Notes:
760 *
761 * - If the stack pointer could be invalid, the caller has to setup a
762 * dummy stack pointer (e.g. the stack of the init_task)
763 *
764 * - If the return address could be invalid, the caller has to set it
765 * to 0, so the backtrace would stop.
766 *
767 */
        .align 4
unrecoverable_text:
        .ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

        movi a0, 1
        movi a1, 0

        wsr a0, WINDOWSTART
        wsr a1, WINDOWBASE
        rsync

        movi a1, PS_WOE_MASK | 1
        wsr a1, PS
        rsync

        movi a1, init_task
        movi a0, 0
        addi a1, a1, PT_REGS_OFFSET

        movi a4, panic
        movi a6, unrecoverable_text

        callx4 a4

1:      j 1b


/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)    extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)    extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)    extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)    extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

        /* We shouldn't be in a double exception. */

        l32i a0, a2, PT_DEPC
        _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

        rsr a0, DEPC                    # get a2
        s32i a4, a2, PT_AREG4           # save a4 and
        s32i a0, a2, PT_AREG2           # a2 to stack

        /* Exit critical section. */

        movi a0, 0
        s32i a0, a3, EXC_TABLE_FIXUP

        /* Restore a3, excsave_1 */

        xsr a3, EXCSAVE_1               # make sure excsave_1 is valid for dbl.
        rsr a4, EPC_1                   # get exception address
        s32i a3, a2, PT_AREG3           # save a3 to stack

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error iram not supported
#else
        /* Note: l8ui not allowed in IRAM/IROM!! */
        l8ui a0, a4, 1                  # read as(src) from MOVSP instruction
#endif
        movi a3, .Lmovsp_src
        _EXTUI_MOVSP_SRC(a0)            # extract source register number
        addx8 a3, a0, a3
        jx a3

.Lunhandled_double:
        wsr a0, EXCSAVE_1
        movi a0, unrecoverable_exception
        callx0 a0

        .align 8
.Lmovsp_src:
        l32i a3, a2, PT_AREG0; _j 1f; .align 8
        mov a3, a1; _j 1f; .align 8
        l32i a3, a2, PT_AREG2; _j 1f; .align 8
        l32i a3, a2, PT_AREG3; _j 1f; .align 8
        l32i a3, a2, PT_AREG4; _j 1f; .align 8
        mov a3, a5; _j 1f; .align 8
        mov a3, a6; _j 1f; .align 8
        mov a3, a7; _j 1f; .align 8
        mov a3, a8; _j 1f; .align 8
        mov a3, a9; _j 1f; .align 8
        mov a3, a10; _j 1f; .align 8
        mov a3, a11; _j 1f; .align 8
        mov a3, a12; _j 1f; .align 8
        mov a3, a13; _j 1f; .align 8
        mov a3, a14; _j 1f; .align 8
        mov a3, a15; _j 1f; .align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error iram not supported
#else
        l8ui a0, a4, 0                  # read ar(dst) from MOVSP instruction
#endif
        addi a4, a4, 3                  # step over movsp
        _EXTUI_MOVSP_DST(a0)            # extract destination register
        wsr a4, EPC_1                   # save new epc_1

        _bnei a0, 1, 1f                 # no 'movsp a1, ax': jump

        /* Move the save area. This implies the use of the L32E
         * and S32E instructions, because this move must be done with
         * the user's PS.RING privilege levels, not with ring 0
         * (kernel's) privileges currently active with PS.EXCM
         * set. Note that we have still registered a fixup routine with the
         * double exception vector in case a double exception occurs.
         */

        /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

        l32e a0, a1, -16
        l32e a4, a1, -12
        s32e a0, a3, -16
        s32e a4, a3, -12
        l32e a0, a1, -8
        l32e a4, a1, -4
        s32e a0, a3, -8
        s32e a4, a3, -4

        /* Restore stack-pointer and all the other saved registers. */

        mov a1, a3

        l32i a4, a2, PT_AREG4
        l32i a3, a2, PT_AREG3
        l32i a0, a2, PT_AREG0
        l32i a2, a2, PT_AREG2
        rfe

        /* MOVSP <at>,<as> was invoked with <at> != a1.
         * Because the stack pointer is not being modified,
         * we should be able to just modify the pointer
         * without moving any save area.
         * The processor only traps these occurrences if the
         * caller window isn't live, so unfortunately we can't
         * use this as an alternate trap mechanism.
         * So we just do the move. This requires that we
         * resolve the destination register, not just the source,
         * so there's some extra work.
         * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
         */

        /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */

1:      movi a4, .Lmovsp_dst
        addx8 a4, a0, a4
        jx a4

        .align 8
.Lmovsp_dst:
        s32i a3, a2, PT_AREG0; _j 1f; .align 8
        mov a1, a3; _j 1f; .align 8
        s32i a3, a2, PT_AREG2; _j 1f; .align 8
        s32i a3, a2, PT_AREG3; _j 1f; .align 8
        s32i a3, a2, PT_AREG4; _j 1f; .align 8
        mov a5, a3; _j 1f; .align 8
        mov a6, a3; _j 1f; .align 8
        mov a7, a3; _j 1f; .align 8
        mov a8, a3; _j 1f; .align 8
        mov a9, a3; _j 1f; .align 8
        mov a10, a3; _j 1f; .align 8
        mov a11, a3; _j 1f; .align 8
        mov a12, a3; _j 1f; .align 8
        mov a13, a3; _j 1f; .align 8
        mov a14, a3; _j 1f; .align 8
        mov a15, a3; _j 1f; .align 8

1:      l32i a4, a2, PT_AREG4
        l32i a3, a2, PT_AREG3
        l32i a0, a2, PT_AREG0
        l32i a2, a2, PT_AREG2
        rfe


/*
 * fast system calls.
 *
 * WARNING: The kernel doesn't save the entire user context before
 * handling a fast system call. These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 */

ENTRY(fast_syscall_kernel)

        /* Skip syscall. */

        rsr a0, EPC_1
        addi a0, a0, 3
        wsr a0, EPC_1

        l32i a0, a2, PT_DEPC
        bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

        rsr a0, DEPC                    # get syscall-nr
        _beqz a0, fast_syscall_spill_registers

        addi a0, a0, -__NR_sysxtensa
        _beqz a0, fast_syscall_sysxtensa

        j kernel_exception


ENTRY(fast_syscall_user)

        /* Skip syscall. */

        rsr a0, EPC_1
        addi a0, a0, 3
        wsr a0, EPC_1

        l32i a0, a2, PT_DEPC
        bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

        rsr a0, DEPC                    # get syscall-nr
        _beqz a0, fast_syscall_spill_registers

        addi a0, a0, -__NR_sysxtensa
        _beqz a0, fast_syscall_sysxtensa

        j user_exception

ENTRY(fast_syscall_unrecoverable)

        /* Restore all states. */

        l32i a0, a2, PT_AREG0           # restore a0
        xsr a2, DEPC                    # restore a2, depc
        rsr a3, EXCSAVE_1

        wsr a0, EXCSAVE_1
        movi a0, unrecoverable_exception
        callx0 a0


/*
 * sysxtensa syscall handler
 *
 *     int sysxtensa (XTENSA_ATOMIC_SET,     ptr, val,    unused);
 *     int sysxtensa (XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 *     int sysxtensa (XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 *     int sysxtensa (XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *     a2             a6                     a3   a4      a5
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY   adds an entry to the __ex_table fixup table for the immediately
 *       following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *       statements and continues from there
 *
 * Usage  TRY    l32i    a0, a1, 0
 *        <other code>
 *        done:  rfe
 *        CATCH  <set return code>
 *               j done
 */
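
In C, the compare-and-swap case handled below behaves like the following
sketch (hypothetical model; the real handler runs with the __ex_table fixup
armed, so a faulting user pointer returns -EFAULT via .Leac):

        /* Hypothetical C model of SYSXTENSA_ATOMIC_CMP_SWP. */
        static int sysxtensa_cmp_swp(unsigned int *ptr,
                                     unsigned int oldval,
                                     unsigned int newval)
        {
                if (*ptr != oldval)     /* TRY-protected user-space read */
                        return 0;       /* mismatch: memory untouched */
                *ptr = newval;
                return 1;               /* swapped */
        }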

#define TRY                                                             \
        .section __ex_table, "a";                                       \
        .word   66f, 67f;                                               \
        .text;                                                          \
66:

#define CATCH                                                           \
67:
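
Each TRY thus emits one pair of words into __ex_table; conceptually
(hypothetical C rendering of the two .word entries above):

        /* One __ex_table record: where the fault happened and where
         * to resume execution (the CATCH label). */
        struct ex_table_entry {
                unsigned long insn;     /* address of TRY-protected insn */
                unsigned long fixup;    /* address to continue at on fault */
        };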

ENTRY(fast_syscall_sysxtensa)

        _beqz a6, 1f
        _blti a6, SYSXTENSA_COUNT, 2f

1:      j user_exception

2:      xsr a3, EXCSAVE_1               # restore a3, excsave1
        s32i a7, a2, PT_AREG7

        movi a7, 4                      # sizeof(unsigned int)
        verify_area a3, a7, a0, a2, .Leac

        _beqi a6, SYSXTENSA_ATOMIC_SET, .Lset
        _beqi a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
        _beqi a6, SYSXTENSA_ATOMIC_ADD, .Ladd

        /* Fall through for SYSXTENSA_ATOMIC_CMP_SWP */

.Lswp:  /* Atomic compare and swap */

TRY     l32i a7, a3, 0                  # read old value
        bne a7, a4, 1f                  # differs from old value? jump
        s32i a5, a3, 0                  # same: store the new value
        movi a7, 1                      # and return 1
        j .Lret

1:      movi a7, 0                      # values differ: return 0
        j .Lret

.Ladd:  /* Atomic add */
.Lexg:  /* Atomic (exchange) add */

TRY     l32i a7, a3, 0
        add a4, a4, a7
        s32i a4, a3, 0
        j .Lret

.Lset:  /* Atomic set */

TRY     l32i a7, a3, 0                  # read old value as return value
        s32i a4, a3, 0                  # write new value

.Lret:  mov a0, a2
        mov a2, a7
        l32i a7, a0, PT_AREG7
        l32i a3, a0, PT_AREG3
        l32i a0, a0, PT_AREG0
        rfe

CATCH
.Leac:  movi a7, -EFAULT
        j .Lret



/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 * Note: We don't need to save a2 in depc (return value)
 */

ENTRY(fast_syscall_spill_registers)

        /* Register a FIXUP handler (pass current wb as a parameter) */

        movi a0, fast_syscall_spill_registers_fixup
        s32i a0, a3, EXC_TABLE_FIXUP
        rsr a0, WINDOWBASE
        s32i a0, a3, EXC_TABLE_PARAM

        /* Save a3 and SAR on stack. */

        rsr a0, SAR
        xsr a3, EXCSAVE_1               # restore a3 and excsave_1
        s32i a0, a2, PT_AREG4           # store SAR to PT_AREG4
        s32i a3, a2, PT_AREG3

        /* The spill routine might clobber a7, a11, and a15. */

        s32i a7, a2, PT_AREG5
        s32i a11, a2, PT_AREG6
        s32i a15, a2, PT_AREG7

        call0 _spill_registers          # destroys a3, DEPC, and SAR

        /* Advance PC, restore registers and SAR, and return from exception. */

        l32i a3, a2, PT_AREG4
        l32i a0, a2, PT_AREG0
        wsr a3, SAR
        l32i a3, a2, PT_AREG3

        /* Restore clobbered registers. */

        l32i a7, a2, PT_AREG5
        l32i a11, a2, PT_AREG6
        l32i a15, a2, PT_AREG7

        movi a2, 0
        rfe

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */
1215
1216fast_syscall_spill_registers_fixup:
1217
1218 rsr a2, WINDOWBASE # get current windowbase (a2 is saved)
1219 xsr a0, DEPC # restore depc and a0
1220 ssl a2 # set shift (32 - WB)
1221
1222 /* We need to make sure the current registers (a0-a3) are preserved.
1223 * To do this, we simply set the bit for the current window frame
1224 * in WS, so that the exception handlers save them to the task stack.
1225 */
1226
1227 rsr a3, EXCSAVE_1 # get spill-mask
1228 slli a2, a3, 1 # shift left by one
1229
1230 slli a3, a2, 32-WSBITS
1231	src	a2, a2, a3		# a2 = xxwww1yyxxxwww1yy......
1232 wsr a2, WINDOWSTART # set corrected windowstart
1233
1234 movi a3, exc_table
1235 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
1236 l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task)
1237
1238 /* Return to the original (user task) WINDOWBASE.
1239 * We leave the following frame behind:
1240 * a0, a1, a2 same
1241 * a3: trashed (saved in excsave_1)
1242 * depc: depc (we have to return to that address)
1243 * excsave_1: a3
1244 */
1245
1246 wsr a3, WINDOWBASE
1247 rsync
1248
1249 /* We are now in the original frame when we entered _spill_registers:
1250 * a0: return address
1251 * a1: used, stack pointer
1252 * a2: kernel stack pointer
1253 * a3: available, saved in EXCSAVE_1
1254 * depc: exception address
1255 * excsave: a3
1256 * Note: This frame might be the same as above.
1257 */
1258
1259#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
1260	/* Restore the registers we saved earlier as a precaution.
1261	 * We now have the value of the 'right' a3.
1262 */
1263
1264 l32i a7, a2, PT_AREG5
1265 l32i a11, a2, PT_AREG6
1266 l32i a15, a2, PT_AREG7
1267#endif
1268
1269 /* Setup stack pointer. */
1270
1271 addi a2, a2, -PT_USER_SIZE
1272 s32i a0, a2, PT_AREG0
1273
1274 /* Make sure we return to this fixup handler. */
1275
1276 movi a3, fast_syscall_spill_registers_fixup_return
1277 s32i a3, a2, PT_DEPC # setup depc
1278
1279 /* Jump to the exception handler. */
1280
1281 movi a3, exc_table
1282 rsr a0, EXCCAUSE
1283 addx4 a0, a0, a3 # find entry in table
1284 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
1285 jx a0
1286
1287fast_syscall_spill_registers_fixup_return:
1288
1289 /* When we return here, all registers have been restored (a2: DEPC) */
1290
1291 wsr a2, DEPC # exception address
1292
1293 /* Restore fixup handler. */
1294
1295 xsr a3, EXCSAVE_1
1296 movi a2, fast_syscall_spill_registers_fixup
1297 s32i a2, a3, EXC_TABLE_FIXUP
1298 rsr a2, WINDOWBASE
1299 s32i a2, a3, EXC_TABLE_PARAM
1300 l32i a2, a3, EXC_TABLE_KSTK
1301
1302#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
1303 /* Save registers again that might be clobbered. */
1304
1305 s32i a7, a2, PT_AREG5
1306 s32i a11, a2, PT_AREG6
1307 s32i a15, a2, PT_AREG7
1308#endif
1309
1310 /* Load WB at the time the exception occurred. */
1311
1312 rsr a3, SAR # WB is still in SAR
1313 neg a3, a3
1314 wsr a3, WINDOWBASE
1315 rsync
1316
1317 /* Restore a3 and return. */
1318
1319 movi a3, exc_table
1320 xsr a3, EXCSAVE_1
1321
1322 rfde
1323
1324
1325/*
1326 * spill all registers.
1327 *
1328 * This is not a real function. The following conditions must be met:
1329 *
1330 * - must be called with call0.
1331 * - uses DEPC, a3 and SAR.
1332 *  - the last 'valid' register of each frame is clobbered.
1333 * - the caller must have registered a fixup handler
1334 * (or be inside a critical section)
1335 * - PS_EXCM must be set (PS_WOE cleared?)
1336 */
1337
1338ENTRY(_spill_registers)
1339
1340 /*
1341 * Rotate ws so that the current windowbase is at bit 0.
1342 * Assume ws = xxxwww1yy (www1 current window frame).
1343 * Rotate ws right so that a2 = yyxxxwww1.
1344 */
1345
1346 wsr a2, DEPC # preserve a2
1347 rsr a2, WINDOWBASE
1348 rsr a3, WINDOWSTART
1349 ssr a2 # holds WB
1350 slli a2, a3, WSBITS
1351	or	a3, a3, a2		# a3 = xxxwww1yyxxxwww1yy
1352 srl a3, a3
1353
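	/* For clarity, the rotation above in C (conceptual sketch; a
	 * WSBITS-wide rotate right of WINDOWSTART by WINDOWBASE, so the
	 * current frame's bit lands at bit 0):
	 *
	 *	doubled = ws | (ws << WSBITS);	(xxxwww1yyxxxwww1yy)
	 *	rotated = doubled >> wb;
	 */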
1354	/* We are done if there is no more than the current register frame. */
1355
1356 extui a3, a3, 1, WSBITS-2 # a3 = 0yyxxxwww
1357 movi a2, (1 << (WSBITS-1))
1358 _beqz a3, .Lnospill # only one active frame? jump
1359
1360 /* We want 1 at the top, so that we return to the current windowbase */
1361
1362 or a3, a3, a2 # 1yyxxxwww
1363
1364 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
1365
1366 wsr a3, WINDOWSTART # save shifted windowstart
1367 neg a2, a3
1368 and a3, a2, a3 # first bit set from right: 000010000
1369
1370 ffs_ws a2, a3 # a2: shifts to skip empty frames
1371 movi a3, WSBITS
1372 sub a2, a3, a2 # WSBITS-a2:number of 0-bits from right
1373 ssr a2 # save in SAR for later.
1374
1375 rsr a3, WINDOWBASE
1376 add a3, a3, a2
1377 rsr a2, DEPC # restore a2
1378 wsr a3, WINDOWBASE
1379 rsync
1380
1381 rsr a3, WINDOWSTART
1382 srl a3, a3 # shift windowstart
1383
1384 /* WB is now just one frame below the oldest frame in the register
1385 window. WS is shifted so the oldest frame is in bit 0, thus, WB
1386 and WS differ by one 4-register frame. */
1387
1388	/* Save frames. Depending on which call was used (call4, call8, or
1389	 * call12), we have to save 4, 8, or 12 registers.
1390 */
1391
1392 _bbsi.l a3, 1, .Lc4
1393 _bbsi.l a3, 2, .Lc8
1394
1395 /* Special case: we have a call12-frame starting at a4. */
1396
1397 _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
1398
1399 s32e a4, a1, -16 # a1 is valid with an empty spill area
1400 l32e a4, a5, -12
1401 s32e a8, a4, -48
1402 mov a8, a4
1403 l32e a4, a1, -16
1404 j .Lc12c
1405
1406.Lloop: _bbsi.l a3, 1, .Lc4
1407 _bbci.l a3, 2, .Lc12
1408
1409.Lc8: s32e a4, a13, -16
1410 l32e a4, a5, -12
1411 s32e a8, a4, -32
1412 s32e a5, a13, -12
1413 s32e a6, a13, -8
1414 s32e a7, a13, -4
1415 s32e a9, a4, -28
1416 s32e a10, a4, -24
1417 s32e a11, a4, -20
1418
1419	srli	a11, a3, 2		# shift windowstart by 2
1420 rotw 2
1421 _bnei a3, 1, .Lloop
1422
1423.Lexit: /* Done. Do the final rotation, set WS, and return. */
1424
1425 rotw 1
1426 rsr a3, WINDOWBASE
1427 ssl a3
1428 movi a3, 1
1429 sll a3, a3
1430 wsr a3, WINDOWSTART
1431
1432.Lnospill:
1433 jx a0
1434
1435.Lc4: s32e a4, a9, -16
1436 s32e a5, a9, -12
1437 s32e a6, a9, -8
1438 s32e a7, a9, -4
1439
1440 srli a7, a3, 1
1441 rotw 1
1442 _bnei a3, 1, .Lloop
1443 j .Lexit
1444
1445.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!
1446
1447 /* 12-register frame (call12) */
1448
1449 l32e a2, a5, -12
1450 s32e a8, a2, -48
1451 mov a8, a2
1452
1453.Lc12c: s32e a9, a8, -44
1454 s32e a10, a8, -40
1455 s32e a11, a8, -36
1456 s32e a12, a8, -32
1457 s32e a13, a8, -28
1458 s32e a14, a8, -24
1459 s32e a15, a8, -20
1460 srli a15, a3, 3
1461
1462 /* The stack pointer for a4..a7 is out of reach, so we rotate the
1463 * window, grab the stack pointer, and rotate back.
1464 * Alternatively, we could also use the following approach, but that
1465 * makes the fixup routine much more complicated:
1466 * rotw 1
1467 * s32e a0, a13, -16
1468 * ...
1469 * rotw 2
1470 */
1471
1472 rotw 1
1473 mov a5, a13
1474 rotw -1
1475
1476 s32e a4, a9, -16
1477 s32e a5, a9, -12
1478 s32e a6, a9, -8
1479 s32e a7, a9, -4
1480
1481 rotw 3
1482
1483 _beqi a3, 1, .Lexit
1484 j .Lloop
1485
1486.Linvalid_mask:
1487
1488	/* We get here because of an unrecoverable error in the window
1489	 * registers. If we are in user space, we kill the application;
1490	 * in kernel space, however, this condition is unrecoverable.
1491 */
1492
1493 rsr a0, PS
1494 _bbci.l a0, PS_UM_SHIFT, 1f
1495
1496 /* User space: Setup a dummy frame and kill application.
1497 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
1498 */
1499
1500 movi a0, 1
1501 movi a1, 0
1502
1503 wsr a0, WINDOWSTART
1504 wsr a1, WINDOWBASE
1505 rsync
1506
1507 movi a0, 0
1508
1509 movi a3, exc_table
1510 l32i a1, a3, EXC_TABLE_KSTK
1511 wsr a3, EXCSAVE_1
1512
1513 movi a4, PS_WOE_MASK | 1
1514 wsr a4, PS
1515 rsync
1516
1517 movi a6, SIGSEGV
1518 movi a4, do_exit
1519 callx4 a4
1520
15211: /* Kernel space: PANIC! */
1522
1523 wsr a0, EXCSAVE_1
1524 movi a0, unrecoverable_exception
1525 callx0 a0 # should not return
15261: j 1b
1527
1528/*
1529 * We should never get here. Bail out!
1530 */
1531
1532ENTRY(fast_second_level_miss_double_kernel)
1533
15341: movi a0, unrecoverable_exception
1535 callx0 a0 # should not return
15361: j 1b
1537
1538/* First-level entry handler for user, kernel, and double 2nd-level
1539 * TLB miss exceptions. Note that for now, user and kernel miss
1540 * exceptions share the same entry point and are handled identically.
1541 *
1542 * An old, less-efficient C version of this function used to exist.
1543 * We include it below, interleaved as comments, for reference.
1544 *
1545 * Entry condition:
1546 *
1547 * a0: trashed, original value saved on stack (PT_AREG0)
1548 * a1: a1
1549 * a2: new stack pointer, original in DEPC
1550 * a3: dispatch table
1551 * depc: a2, original value saved on stack (PT_DEPC)
1552 * excsave_1: a3
1553 *
1554 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1555 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1556 */
1557
1558ENTRY(fast_second_level_miss)
1559
1560 /* Save a1. Note: we don't expect a double exception. */
1561
1562 s32i a1, a2, PT_AREG1
1563
1564 /* We need to map the page of PTEs for the user task. Find
1565 * the pointer to that page. Also, it's possible for tsk->mm
1566 * to be NULL while tsk->active_mm is nonzero if we faulted on
1567 * a vmalloc address. In that rare case, we must use
1568 * active_mm instead to avoid a fault in this handler. See
1569 *
1570 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
1571 * (or search Internet on "mm vs. active_mm")
1572 *
1573 * if (!mm)
1574 * mm = tsk->active_mm;
1575 * pgd = pgd_offset (mm, regs->excvaddr);
1576 * pmd = pmd_offset (pgd, regs->excvaddr);
1577 * pmdval = *pmd;
1578 */
1579
1580 GET_CURRENT(a1,a2)
1581 l32i a0, a1, TASK_MM # tsk->mm
1582 beqz a0, 9f
1583
15848: rsr a1, EXCVADDR # fault address
1585 _PGD_OFFSET(a0, a1, a1)
1586 l32i a0, a0, 0 # read pmdval
1587 //beqi a0, _PAGE_USER, 2f
1588 beqz a0, 2f
1589
1590 /* Read ptevaddr and convert to top of page-table page.
1591 *
1592 * vpnval = read_ptevaddr_register() & PAGE_MASK;
1593 * vpnval += DTLB_WAY_PGTABLE;
1594 * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
1595 * write_dtlb_entry (pteval, vpnval);
1596 *
1597 * The messy computation for 'pteval' above really simplifies
1598 * into the following:
1599 *
1600 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
1601 */
1602
1603 movi a1, -PAGE_OFFSET
1604 add a0, a0, a1 # pmdval - PAGE_OFFSET
1605 extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
1606 xor a0, a0, a1
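	/* The extui/xor pair above computes 'x & PAGE_MASK' without loading
	 * a 32-bit mask constant: extui extracts low = x & (PAGE_SIZE - 1),
	 * and x ^ low then clears exactly those low bits.
	 */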
1607
1608
1609 movi a1, PAGE_DIRECTORY
1610 or a0, a0, a1 # ... | PAGE_DIRECTORY
1611
1612 rsr a1, PTEVADDR
1613 srli a1, a1, PAGE_SHIFT
1614 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
1615 addi a1, a1, DTLB_WAY_PGTABLE # ... + way_number
1616
1617 wdtlb a0, a1
1618 dsync
1619
1620 /* Exit critical section. */
1621
1622 movi a0, 0
1623 s32i a0, a3, EXC_TABLE_FIXUP
1624
1625 /* Restore the working registers, and return. */
1626
1627 l32i a0, a2, PT_AREG0
1628 l32i a1, a2, PT_AREG1
1629 l32i a2, a2, PT_DEPC
1630 xsr a3, EXCSAVE_1
1631
1632 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1633
1634 /* Restore excsave1 and return. */
1635
1636 rsr a2, DEPC
1637 rfe
1638
1639 /* Return from double exception. */
1640
16411: xsr a2, DEPC
1642 esync
1643 rfde
1644
16459: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
1646 j 8b
1647
16482: /* Invalid PGD, default exception handling */
1649
1650 rsr a1, DEPC
1651 xsr a3, EXCSAVE_1
1652 s32i a1, a2, PT_AREG2
1653 s32i a3, a2, PT_AREG3
1654 mov a1, a2
1655
1656 rsr a2, PS
1657 bbsi.l a2, PS_UM_SHIFT, 1f
1658 j _kernel_exception
16591: j _user_exception
1660
1661
1662/*
1663 * StoreProhibitedException
1664 *
1665 * Update the pte and invalidate the itlb mapping for this pte.
1666 *
1667 * Entry condition:
1668 *
1669 * a0: trashed, original value saved on stack (PT_AREG0)
1670 * a1: a1
1671 * a2: new stack pointer, original in DEPC
1672 * a3: dispatch table
1673 * depc: a2, original value saved on stack (PT_DEPC)
1674 * excsave_1: a3
1675 *
1676 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1677 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1678 */
1679
1680ENTRY(fast_store_prohibited)
1681
1682 /* Save a1 and a4. */
1683
1684 s32i a1, a2, PT_AREG1
1685 s32i a4, a2, PT_AREG4
1686
1687 GET_CURRENT(a1,a2)
1688 l32i a0, a1, TASK_MM # tsk->mm
1689 beqz a0, 9f
1690
16918: rsr a1, EXCVADDR # fault address
1692 _PGD_OFFSET(a0, a1, a4)
1693 l32i a0, a0, 0
1694 //beqi a0, _PAGE_USER, 2f # FIXME use _PAGE_INVALID
1695 beqz a0, 2f
1696
1697 _PTE_OFFSET(a0, a1, a4)
1698 l32i a4, a0, 0 # read pteval
1699 movi a1, _PAGE_VALID | _PAGE_RW
1700 bnall a4, a1, 2f
1701
1702 movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
1703 or a4, a4, a1
1704 rsr a1, EXCVADDR
1705 s32i a4, a0, 0
1706
1707 /* We need to flush the cache if we have page coloring. */
1708#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
1709 dhwb a0, 0
1710#endif
1711 pdtlb a0, a1
1712 beqz a0, 1f
1713 idtlb a0 // FIXME do we need this?
1714 wdtlb a4, a0
17151:
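	/* Roughly the same update in C (illustrative sketch; the helper
	 * name is an assumption, not this file's API):
	 *
	 *	pte = *ptep;
	 *	if ((pte & (_PAGE_VALID | _PAGE_RW)) == (_PAGE_VALID | _PAGE_RW)) {
	 *		pte |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE;
	 *		*ptep = pte;
	 *		update_dtlb(excvaddr, pte);	(probe, invalidate, rewrite)
	 *	} else {
	 *		(handle the fault in C, label 2 below)
	 *	}
	 */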
1716
1717 /* Exit critical section. */
1718
1719 movi a0, 0
1720 s32i a0, a3, EXC_TABLE_FIXUP
1721
1722 /* Restore the working registers, and return. */
1723
1724 l32i a4, a2, PT_AREG4
1725 l32i a1, a2, PT_AREG1
1726 l32i a0, a2, PT_AREG0
1727 l32i a2, a2, PT_DEPC
1728
1729 /* Restore excsave1 and a3. */
1730
1731 xsr a3, EXCSAVE_1
1732 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1733
1734 rsr a2, DEPC
1735 rfe
1736
1737 /* Double exception. Restore FIXUP handler and return. */
1738
17391: xsr a2, DEPC
1740 esync
1741 rfde
1742
17439: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
1744 j 8b
1745
17462: /* If there was a problem, handle fault in C */
1747
1748 rsr a4, DEPC # still holds a2
1749 xsr a3, EXCSAVE_1
1750 s32i a4, a2, PT_AREG2
1751 s32i a3, a2, PT_AREG3
1752 l32i a4, a2, PT_AREG4
1753 mov a1, a2
1754
1755 rsr a2, PS
1756 bbsi.l a2, PS_UM_SHIFT, 1f
1757 j _kernel_exception
17581: j _user_exception
1759
1760
1761#if XCHAL_EXTRA_SA_SIZE
1762
1763#warning fast_coprocessor untested
1764
1765/*
1766 * Entry condition:
1767 *
1768 * a0: trashed, original value saved on stack (PT_AREG0)
1769 * a1: a1
1770 * a2: new stack pointer, original in DEPC
1771 * a3: dispatch table
1772 * depc: a2, original value saved on stack (PT_DEPC)
1773 * excsave_1: a3
1774 *
1775 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1776 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1777 */
1778
1779ENTRY(fast_coprocessor_double)
1780 wsr a0, EXCSAVE_1
1781 movi a0, unrecoverable_exception
1782 callx0 a0
1783
1784ENTRY(fast_coprocessor)
1785
1786 /* Fatal if we are in a double exception. */
1787
1788 l32i a0, a2, PT_DEPC
1789 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
1790
1791 /* Save some registers a1, a3, a4, SAR */
1792
1793 xsr a3, EXCSAVE_1
1794 s32i a3, a2, PT_AREG3
1795 rsr a3, SAR
1796 s32i a4, a2, PT_AREG4
1797 s32i a1, a2, PT_AREG1
1798	s32i	a5, a2, PT_AREG5
1799 s32i a3, a2, PT_SAR
1800 mov a1, a2
1801
1802 /* Currently, the HAL macros only guarantee saving a0 and a1.
1803 * These can and will be refined in the future, but for now,
1804	 * just save the remaining registers a2...a15.
1805 */
1806 s32i a6, a1, PT_AREG6
1807 s32i a7, a1, PT_AREG7
1808 s32i a8, a1, PT_AREG8
1809 s32i a9, a1, PT_AREG9
1810 s32i a10, a1, PT_AREG10
1811 s32i a11, a1, PT_AREG11
1812 s32i a12, a1, PT_AREG12
1813 s32i a13, a1, PT_AREG13
1814 s32i a14, a1, PT_AREG14
1815 s32i a15, a1, PT_AREG15
1816
1817 /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
1818
1819 rsr a0, EXCCAUSE
1820 addi a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
1821
1822 /* Set corresponding CPENABLE bit */
1823
1824 movi a4, 1
1825 ssl a3 # SAR: 32 - coprocessor_number
1826 rsr a5, CPENABLE
1827 sll a4, a4
1828 or a4, a5, a4
1829 wsr a4, CPENABLE
1830 rsync
1831 movi a5, coprocessor_info # list of owner and offset into cp_save
1832 addx8 a0, a4, a5 # entry for CP
1833
1834 bne a4, a5, .Lload # bit wasn't set before, cp not in use
1835
1836 /* Now compare the current task with the owner of the coprocessor.
1837 * If they are the same, there is no reason to save or restore any
1838 * coprocessor state. Having already enabled the coprocessor,
1839 * branch ahead to return.
1840 */
1841 GET_CURRENT(a5,a1)
1842 l32i a4, a0, COPROCESSOR_INFO_OWNER # a4: current owner for this CP
1843 beq a4, a5, .Ldone
1844
1845 /* Find location to dump current coprocessor state:
1846 * task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
1847 *
1848 * Note: a0 pointer to the entry in the coprocessor owner table,
1849 * a3 coprocessor number,
1850 * a4 current owner of coprocessor.
1851 */
1852 l32i a5, a0, COPROCESSOR_INFO_OFFSET
1853 addi a2, a4, THREAD_CP_SAVE
1854 add a2, a2, a5
1855
1856 /* Store current coprocessor states. (a5 still has CP number) */
1857
1858 xchal_cpi_store_funcbody
1859
1860 /* The macro might have destroyed a3 (coprocessor number), but
1861 * SAR still has 32 - coprocessor_number!
1862 */
1863 movi a3, 32
1864 rsr a4, SAR
1865 sub a3, a3, a4
1866
1867.Lload:	/* A new task now owns the coprocessors. Save its TCB pointer into
1868 * the coprocessor owner table.
1869 *
1870 * Note: a0 pointer to the entry in the coprocessor owner table,
1871 * a3 coprocessor number.
1872 */
1873 GET_CURRENT(a4,a1)
1874 s32i a4, a0, 0
1875
1876 /* Find location from where to restore the current coprocessor state.*/
1877
1878 l32i a5, a0, COPROCESSOR_INFO_OFFSET
1879 addi a2, a4, THREAD_CP_SAVE
1880	add	a2, a2, a5		# a5 holds the offset loaded above
1881
1882 xchal_cpi_load_funcbody
1883
1884	/* We must assume that the xchal_cpi_load_funcbody macro destroyed
1885 * registers a2..a15.
1886 */
1887
1888.Ldone: l32i a15, a1, PT_AREG15
1889 l32i a14, a1, PT_AREG14
1890 l32i a13, a1, PT_AREG13
1891 l32i a12, a1, PT_AREG12
1892 l32i a11, a1, PT_AREG11
1893 l32i a10, a1, PT_AREG10
1894 l32i a9, a1, PT_AREG9
1895 l32i a8, a1, PT_AREG8
1896 l32i a7, a1, PT_AREG7
1897 l32i a6, a1, PT_AREG6
1898 l32i a5, a1, PT_AREG5
1899 l32i a4, a1, PT_AREG4
1900 l32i a3, a1, PT_AREG3
1901 l32i a2, a1, PT_AREG2
1902 l32i a0, a1, PT_AREG0
1903 l32i a1, a1, PT_AREG1
1904
1905 rfe
1906
1907#endif /* XCHAL_EXTRA_SA_SIZE */
1908
1909/*
1910 * Task switch.
1911 *
1912 * struct task* _switch_to (struct task* prev, struct task* next)
1913 * a2 a2 a3
1914 */
1915
1916ENTRY(_switch_to)
1917
1918 entry a1, 16
1919
1920 mov a4, a3 # preserve a3
1921
1922 s32i a0, a2, THREAD_RA # save return address
1923 s32i a1, a2, THREAD_SP # save stack pointer
1924
1925 /* Disable ints while we manipulate the stack pointer; spill regs. */
1926
1927 movi a5, PS_EXCM_MASK | LOCKLEVEL
1928 xsr a5, PS
1929 rsr a3, EXCSAVE_1
1930 rsync
1931 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
1932
1933 call0 _spill_registers
1934
1935 /* Set kernel stack (and leave critical section)
1936	 * Note: It's safe to set it here. The stack will not be overwritten
1937 * because the kernel stack will only be loaded again after
1938 * we return from kernel space.
1939 */
1940
1941 l32i a0, a4, TASK_THREAD_INFO
1942 rsr a3, EXCSAVE_1 # exc_table
1943 movi a1, 0
1944 addi a0, a0, PT_REGS_OFFSET
1945 s32i a1, a3, EXC_TABLE_FIXUP
1946 s32i a0, a3, EXC_TABLE_KSTK
1947
1948 /* restore context of the task that 'next' addresses */
1949
1950 l32i a0, a4, THREAD_RA /* restore return address */
1951 l32i a1, a4, THREAD_SP /* restore stack pointer */
1952
1953 wsr a5, PS
1954 rsync
1955
1956 retw
1957
1958
1959ENTRY(ret_from_fork)
1960
1961 /* void schedule_tail (struct task_struct *prev)
1962 * Note: prev is still in a6 (return value from fake call4 frame)
1963 */
1964 movi a4, schedule_tail
1965 callx4 a4
1966
1967 movi a4, do_syscall_trace
1968 callx4 a4
1969
1970 j common_exception_return
1971
1972
1973
1974/*
1975 * Table of syscalls
1976 */
1977
1978.data
1979.align 4
1980.global sys_call_table
1981sys_call_table:
1982
1983#define SYSCALL(call, narg) .word call
1984#include "syscalls.h"
1985
1986/*
1987 * Number of arguments of each syscall
1988 */
1989
1990.global sys_narg_table
1991sys_narg_table:
1992
1993#undef SYSCALL
1994#define SYSCALL(call, narg) .byte narg
1995#include "syscalls.h"
1996
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
new file mode 100644
index 000000000000..6e9b5225b8f6
--- /dev/null
+++ b/arch/xtensa/kernel/head.S
@@ -0,0 +1,237 @@
1/*
2 * arch/xtensa/kernel/head.S
3 *
4 * Xtensa Processor startup code.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
15 * Kevin Chea
16 */
17
18#include <xtensa/cacheasm.h>
19#include <linux/config.h>
20#include <asm/processor.h>
21#include <asm/page.h>
22
23/*
24 * This module contains the entry code for kernel images. It performs the
25 * minimal setup needed to call the generic C routines.
26 *
27 * Prerequisites:
28 *
29 *   - The kernel image has been loaded at the address for which it was
30 *     compiled.
31 * - a2 contains either 0 or a pointer to a list of boot parameters.
32 * (see setup.c for more details)
33 *
34 */
35
36 .macro iterate from, to , cmd
37 .ifeq ((\to - \from) & ~0xfff)
38 \cmd \from
39 iterate "(\from+1)", \to, \cmd
40 .endif
41 .endm
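/* Illustrative expansion: 'iterate from, to, cmd' behaves like the
 * compile-time loop below; the assembler unrolls it, so no runtime
 * loop is generated:
 *
 *	for (i = from; i <= to; i++)
 *		cmd(i);
 */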
42
43/*
44 * _start
45 *
46 * The bootloader passes a pointer to a list of boot parameters in a2.
47 */
48
49 /* The first bytes of the kernel image must be an instruction, so we
50 * manually allocate and define the literal constant we need for a jx
51 * instruction.
52 */
53
54 .section .head.text, "ax"
55 .globl _start
56_start: _j 2f
57 .align 4
581: .word _startup
592: l32r a0, 1b
60 jx a0
61
62 .text
63 .align 4
64_startup:
65
66 /* Disable interrupts and exceptions. */
67
68 movi a0, XCHAL_PS_EXCM_MASK
69 wsr a0, PS
70
71 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
72
73 wsr a2, EXCSAVE_1
74
75 /* Start with a fresh windowbase and windowstart. */
76
77 movi a1, 1
78 movi a0, 0
79 wsr a1, WINDOWSTART
80 wsr a0, WINDOWBASE
81 rsync
82
83 /* Set a0 to 0 for the remaining initialization. */
84
85 movi a0, 0
86
87 /* Clear debugging registers. */
88
89#if XCHAL_HAVE_DEBUG
90 wsr a0, IBREAKENABLE
91 wsr a0, ICOUNT
92 movi a1, 15
93 wsr a0, ICOUNTLEVEL
94
95 .macro reset_dbreak num
96 wsr a0, DBREAKC + \num
97 .endm
98
99	iterate	0, XCHAL_NUM_DBREAK-1, reset_dbreak
100#endif
101
102 /* Clear CCOUNT (not really necessary, but nice) */
103
104	wsr	a0, CCOUNT
105
106 /* Disable zero-loops. */
107
108#if XCHAL_HAVE_LOOPS
109 wsr a0, LCOUNT
110#endif
111
112 /* Disable all timers. */
113
114 .macro reset_timer num
115 wsr a0, CCOMPARE_0 + \num
116 .endm
117 iterate 0, XCHAL_NUM_TIMERS-1, reset_timer
118
119 /* Interrupt initialization. */
120
121 movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
122 wsr a0, INTENABLE
123 wsr a2, INTCLEAR
124
125 /* Disable coprocessors. */
126
127#if XCHAL_CP_NUM > 0
128 wsr a0, CPENABLE
129#endif
130
131 /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
132 *
133 * Note: PS.EXCM must be cleared before using any loop
134 * instructions; otherwise, they are silently disabled, and
135 * at most one iteration of the loop is executed.
136 */
137
138 movi a1, 1
139 wsr a1, PS
140 rsync
141
142 /* Initialize the caches.
143 * Does not include flushing writeback d-cache.
144 * a6, a7 are just working registers (clobbered).
145 */
146
147 icache_reset a2, a3
148 dcache_reset a2, a3
149
150 /* Unpack data sections
151 *
152 * The linker script used to build the Linux kernel image
153 * creates a table located at __boot_reloc_table_start
154	 * that contains information about which data needs to be unpacked.
155 *
156 * Uses a2-a7.
157 */
158
159 movi a2, __boot_reloc_table_start
160 movi a3, __boot_reloc_table_end
161
1621: beq a2, a3, 3f # no more entries?
163 l32i a4, a2, 0 # start destination (in RAM)
164	l32i	a5, a2, 4	# end destination (in RAM)
165 l32i a6, a2, 8 # start source (in ROM)
166 addi a2, a2, 12 # next entry
167 beq a4, a5, 1b # skip, empty entry
168 beq a4, a6, 1b # skip, source and dest. are the same
169
1702: l32i a7, a6, 0 # load word
171 addi a6, a6, 4
172 s32i a7, a4, 0 # store word
173 addi a4, a4, 4
174 bltu a4, a5, 2b
175 j 1b
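	/* The unpack loop above, as C (each table entry is three words:
	 * destination start, destination end, source start):
	 *
	 *	for (e = __boot_reloc_table_start; e < __boot_reloc_table_end; e += 3) {
	 *		dst = e[0]; end = e[1]; src = e[2];
	 *		if (dst == end || dst == src)
	 *			continue;
	 *		while (dst < end)
	 *			*dst++ = *src++;
	 *	}
	 */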
176
1773:
178 /* All code and initialized data segments have been copied.
179 * Now clear the BSS segment.
180 */
181
182 movi a2, _bss_start # start of BSS
183 movi a3, _bss_end # end of BSS
184
1851:	s32i	a0, a2, 0
186	addi	a2, a2, 4
187	blt	a2, a3, 1b
188
189#if XCHAL_DCACHE_IS_WRITEBACK
190
191 /* After unpacking, flush the writeback cache to memory so the
192 * instructions/data are available.
193 */
194
195 dcache_writeback_all a2, a3
196#endif
197
198 /* Setup stack and enable window exceptions (keep irqs disabled) */
199
200 movi a1, init_thread_union
201 addi a1, a1, KERNEL_STACK_SIZE
202
203 movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
204 wsr a2, PS # (enable reg-windows; progmode stack)
205 rsync
206
207 /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
208
209 movi a2, debug_exception
210 wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
211
212 /* Set up EXCSAVE[1] to point to the exc_table. */
213
214 movi a6, exc_table
215 xsr a6, EXCSAVE_1
216
217 /* init_arch kick-starts the linux kernel */
218
219 movi a4, init_arch
220 callx4 a4
221
222 movi a4, start_kernel
223 callx4 a4
224
225should_never_return:
226 j should_never_return
227
228 /* Define some common data structures here. We define them
229 * here in this assembly file due to their unusual alignment
230 * requirements.
231 */
232
233 .comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
234 .comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
235 .comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
236 .comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
237
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
new file mode 100644
index 000000000000..4cbf6d91571f
--- /dev/null
+++ b/arch/xtensa/kernel/irq.c
@@ -0,0 +1,192 @@
1/*
2 * linux/arch/xtensa/kernel/irq.c
3 *
4 * Xtensa built-in interrupt controller and some generic functions copied
5 * from i386.
6 *
7 * Copyright (C) 2002 - 2005 Tensilica, Inc.
8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
9 *
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Kevin Chea
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/seq_file.h>
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/kernel_stat.h>
21
22#include <asm/uaccess.h>
23#include <asm/platform.h>
24
25static void enable_xtensa_irq(unsigned int irq);
26static void disable_xtensa_irq(unsigned int irq);
27static void mask_and_ack_xtensa(unsigned int irq);
28static void end_xtensa_irq(unsigned int irq);
29
30static unsigned int cached_irq_mask;
31
32atomic_t irq_err_count;
33
34/*
35 * 'What should we do if we get a hw irq event on an illegal vector?'
36 * Each architecture has to answer this itself.
37 */
38void ack_bad_irq(unsigned int irq)
39{
40 printk("unexpected IRQ trap at vector %02x\n", irq);
41}
42
43/*
44 * do_IRQ handles all normal device IRQ's (the special
45 * SMP cross-CPU interrupts have their own specific
46 * handlers).
47 */
48
49unsigned int do_IRQ(int irq, struct pt_regs *regs)
50{
51 irq_enter();
52
53#ifdef CONFIG_DEBUG_STACKOVERFLOW
54 /* Debugging check for stack overflow: is there less than 1KB free? */
55 {
56 unsigned long sp;
57
58 __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
59 sp &= THREAD_SIZE - 1;
60
61		if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
62 printk("Stack overflow in do_IRQ: %ld\n",
63 sp - sizeof(struct thread_info));
64 }
65#endif
66
67 __do_IRQ(irq, regs);
68
69 irq_exit();
70
71 return 1;
72}
73
74/*
75 * Generic, controller-independent functions:
76 */
77
78int show_interrupts(struct seq_file *p, void *v)
79{
80 int i = *(loff_t *) v, j;
81 struct irqaction * action;
82 unsigned long flags;
83
84 if (i == 0) {
85 seq_printf(p, " ");
86 for (j=0; j<NR_CPUS; j++)
87 if (cpu_online(j))
88 seq_printf(p, "CPU%d ",j);
89 seq_putc(p, '\n');
90 }
91
92 if (i < NR_IRQS) {
93 spin_lock_irqsave(&irq_desc[i].lock, flags);
94 action = irq_desc[i].action;
95 if (!action)
96 goto skip;
97 seq_printf(p, "%3d: ",i);
98#ifndef CONFIG_SMP
99 seq_printf(p, "%10u ", kstat_irqs(i));
100#else
101 for (j = 0; j < NR_CPUS; j++)
102 if (cpu_online(j))
103 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
104#endif
105 seq_printf(p, " %14s", irq_desc[i].handler->typename);
106 seq_printf(p, " %s", action->name);
107
108 for (action=action->next; action; action = action->next)
109 seq_printf(p, ", %s", action->name);
110
111 seq_putc(p, '\n');
112skip:
113 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
114 } else if (i == NR_IRQS) {
115 seq_printf(p, "NMI: ");
116 for (j = 0; j < NR_CPUS; j++)
117 if (cpu_online(j))
118 seq_printf(p, "%10u ", nmi_count(j));
119 seq_putc(p, '\n');
120 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
121 }
122 return 0;
123}
124/* shutdown is same as "disable" */
125#define shutdown_xtensa_irq disable_xtensa_irq
126
127static unsigned int startup_xtensa_irq(unsigned int irq)
128{
129 enable_xtensa_irq(irq);
130 return 0; /* never anything pending */
131}
132
133static struct hw_interrupt_type xtensa_irq_type = {
134 "Xtensa-IRQ",
135 startup_xtensa_irq,
136 shutdown_xtensa_irq,
137 enable_xtensa_irq,
138 disable_xtensa_irq,
139 mask_and_ack_xtensa,
140 end_xtensa_irq
141};
142
143static inline void mask_irq(unsigned int irq)
144{
145 cached_irq_mask &= ~(1 << irq);
146 set_sr (cached_irq_mask, INTENABLE);
147}
148
149static inline void unmask_irq(unsigned int irq)
150{
151 cached_irq_mask |= 1 << irq;
152 set_sr (cached_irq_mask, INTENABLE);
153}
154
155static void disable_xtensa_irq(unsigned int irq)
156{
157 unsigned long flags;
158 local_save_flags(flags);
159 mask_irq(irq);
160 local_irq_restore(flags);
161}
162
163static void enable_xtensa_irq(unsigned int irq)
164{
165 unsigned long flags;
166 local_save_flags(flags);
167 unmask_irq(irq);
168 local_irq_restore(flags);
169}
170
171static void mask_and_ack_xtensa(unsigned int irq)
172{
173 disable_xtensa_irq(irq);
174}
175
176static void end_xtensa_irq(unsigned int irq)
177{
178 enable_xtensa_irq(irq);
179}
180
181
182void __init init_IRQ(void)
183{
184 int i;
185
186 for (i=0; i < XTENSA_NR_IRQS; i++)
187 irq_desc[i].handler = &xtensa_irq_type;
188
189 cached_irq_mask = 0;
190
191 platform_init_irq();
192}
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
new file mode 100644
index 000000000000..d1683cfa19a2
--- /dev/null
+++ b/arch/xtensa/kernel/module.c
@@ -0,0 +1,78 @@
1/*
2 * arch/xtensa/kernel/platform.c
3 *
4 * Module support.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/moduleloader.h>
18#include <linux/elf.h>
19#include <linux/vmalloc.h>
20#include <linux/fs.h>
21#include <linux/string.h>
22#include <linux/kernel.h>
23#include <linux/cache.h>
24
25LIST_HEAD(module_buf_list);
26
27void *module_alloc(unsigned long size)
28{
29 panic("module_alloc not implemented");
30}
31
32void module_free(struct module *mod, void *module_region)
33{
34 panic("module_free not implemented");
35}
36
37int module_frob_arch_sections(Elf32_Ehdr *hdr,
38 Elf32_Shdr *sechdrs,
39 char *secstrings,
40 struct module *me)
41{
42 panic("module_frob_arch_sections not implemented");
43}
44
45int apply_relocate(Elf32_Shdr *sechdrs,
46 const char *strtab,
47 unsigned int symindex,
48 unsigned int relsec,
49 struct module *module)
50{
51 panic ("apply_relocate not implemented");
52}
53
54int apply_relocate_add(Elf32_Shdr *sechdrs,
55 const char *strtab,
56 unsigned int symindex,
57 unsigned int relsec,
58 struct module *module)
59{
60 panic("apply_relocate_add not implemented");
61}
62
63int module_finalize(const Elf_Ehdr *hdr,
64 const Elf_Shdr *sechdrs,
65 struct module *me)
66{
67 panic ("module_finalize not implemented");
68}
69
70void module_arch_cleanup(struct module *mod)
71{
72 panic("module_arch_cleanup not implemented");
73}
74
75struct bug_entry *module_find_bug(unsigned long bugaddr)
76{
77 panic("module_find_bug not implemented");
78}
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
new file mode 100644
index 000000000000..84fde258cf85
--- /dev/null
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -0,0 +1,73 @@
1/*
2 * arch/xtensa/pci-dma.c
3 *
4 * DMA coherent memory allocation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Copyright (C) 2002 - 2005 Tensilica Inc.
12 *
13 * Based on version for i386.
14 *
15 * Chris Zankel <chris@zankel.net>
16 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
17 */
18
19#include <linux/types.h>
20#include <linux/mm.h>
21#include <linux/string.h>
22#include <linux/pci.h>
23#include <asm/io.h>
24#include <asm/cacheflush.h>
25
26/*
27 * Note: We assume that the full memory space is always mapped to 'kseg'.
28 * Otherwise we have to use page attributes (not implemented).
29 */
30
31void *
32dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
33{
34 void *ret;
35
36	/* ignore region specifiers */
37 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
38
39 if (dev == NULL || (*dev->dma_mask < 0xffffffff))
40 gfp |= GFP_DMA;
41 ret = (void *)__get_free_pages(gfp, get_order(size));
42
43 if (ret != NULL) {
44 memset(ret, 0, size);
45 *handle = virt_to_bus(ret);
46 }
47 return (void*) BYPASS_ADDR((unsigned long)ret);
48}
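/*
 * Typical driver-side usage of the allocator above (sketch; 'dev' and
 * the size are hypothetical): the CPU uses the returned uncached
 * (bypass) pointer, while the device is programmed with the bus address.
 *
 *	dma_addr_t bus;
 *	u32 *ring = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 */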
49
50void dma_free_coherent(struct device *hwdev, size_t size,
51 void *vaddr, dma_addr_t dma_handle)
52{
53 free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
54}
55
56
57void consistent_sync(void *vaddr, size_t size, int direction)
58{
59 switch (direction) {
60 case PCI_DMA_NONE:
61 BUG();
62 case PCI_DMA_FROMDEVICE: /* invalidate only */
63 __invalidate_dcache_range((unsigned long)vaddr,
64 (unsigned long)size);
65 break;
66
67 case PCI_DMA_TODEVICE: /* writeback only */
68 case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
69 __flush_invalidate_dcache_range((unsigned long)vaddr,
70 (unsigned long)size);
71 break;
72 }
73}
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
new file mode 100644
index 000000000000..d29a81648637
--- /dev/null
+++ b/arch/xtensa/kernel/pci.c
@@ -0,0 +1,563 @@
1/*
2 * arch/xtensa/pcibios.c
3 *
4 * PCI bios-type initialisation for PCI machines
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Copyright (C) 2001-2005 Tensilica Inc.
12 *
13 * Based largely on work from Cort (ppc/kernel/pci.c)
14 * IO functions copied from sparc.
15 *
16 * Chris Zankel <chris@zankel.net>
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include <linux/string.h>
25#include <linux/init.h>
26#include <linux/sched.h>
27#include <linux/errno.h>
28#include <linux/bootmem.h>
29
30#include <asm/pci-bridge.h>
31#include <asm/platform.h>
32
33#undef DEBUG
34
35#ifdef DEBUG
36#define DBG(x...) printk(x)
37#else
38#define DBG(x...)
39#endif
40
41/* PCI Controller */
42
43
44/*
45 * pcibios_alloc_controller
46 * pcibios_enable_device
47 * pcibios_fixups
48 * pcibios_align_resource
49 * pcibios_fixup_bus
50 * pcibios_setup
51 * pci_bus_add_device
52 * pci_mmap_page_range
53 */
54
55struct pci_controller* pci_ctrl_head;
56struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;
57
58static int pci_bus_count;
59
60static void pcibios_fixup_resources(struct pci_dev* dev);
61
62#if 0 // FIXME
63struct pci_fixup pcibios_fixups[] = {
64 { DECLARE_PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources },
65 { 0 }
66};
67#endif
68
69void
70pcibios_update_resource(struct pci_dev *dev, struct resource *root,
71 struct resource *res, int resource)
72{
73 u32 new, check, mask;
74 int reg;
75 struct pci_controller* pci_ctrl = dev->sysdata;
76
77 new = res->start;
78 if (pci_ctrl && res->flags & IORESOURCE_IO) {
79 new -= pci_ctrl->io_space.base;
80 }
81 new |= (res->flags & PCI_REGION_FLAG_MASK);
82 if (resource < 6) {
83 reg = PCI_BASE_ADDRESS_0 + 4*resource;
84 } else if (resource == PCI_ROM_RESOURCE) {
85 res->flags |= PCI_ROM_ADDRESS_ENABLE;
86 reg = dev->rom_base_reg;
87 } else {
88		/* Somebody might have asked for allocation of a non-standard resource */
89 return;
90 }
91
92 pci_write_config_dword(dev, reg, new);
93 pci_read_config_dword(dev, reg, &check);
94 mask = (new & PCI_BASE_ADDRESS_SPACE_IO) ?
95 PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK;
96
97 if ((new ^ check) & mask) {
98 printk(KERN_ERR "PCI: Error while updating region "
99 "%s/%d (%08x != %08x)\n", dev->slot_name, resource,
100 new, check);
101 }
102}
103
104/*
105 * We need to avoid collisions with `mirrored' VGA ports
106 * and other strange ISA hardware, so we always want the
107 * addresses to be allocated in the 0x000-0x0ff region
108 * modulo 0x400.
109 *
110 * Why? Because some silly external IO cards only decode
111 * the low 10 bits of the IO address. The 0x00-0xff region
112 * is reserved for motherboard devices that decode all 16
113 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
114 * but we want to try to avoid allocating at 0x2900-0x2bff
115 * which might be mirrored at 0x0100-0x03ff.
116 */
117void
118pcibios_align_resource(void *data, struct resource *res, unsigned long size,
119 unsigned long align)
120{
121 struct pci_dev *dev = data;
122
123 if (res->flags & IORESOURCE_IO) {
124 unsigned long start = res->start;
125
126 if (size > 0x100) {
127 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
128 " (%ld bytes)\n", dev->slot_name,
129 dev->resource - res, size);
130 }
131
132 if (start & 0x300) {
133 start = (start + 0x3ff) & ~0x3ff;
134 res->start = start;
135 }
136 }
137}
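/*
 * Worked example for the rounding above: an I/O region granted at 0x2910
 * falls in the mirror-prone window (0x2910 & 0x300 != 0), so it is moved
 * up to the next 1KB boundary: (0x2910 + 0x3ff) & ~0x3ff == 0x2c00, which
 * decodes to 0x000-0x0ff in the low 10 bits.
 */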
138
139int
140pcibios_enable_resources(struct pci_dev *dev, int mask)
141{
142 u16 cmd, old_cmd;
143 int idx;
144 struct resource *r;
145
146 pci_read_config_word(dev, PCI_COMMAND, &cmd);
147 old_cmd = cmd;
148 for(idx=0; idx<6; idx++) {
149 r = &dev->resource[idx];
150 if (!r->start && r->end) {
151 printk (KERN_ERR "PCI: Device %s not available because "
152 "of resource collisions\n", dev->slot_name);
153 return -EINVAL;
154 }
155 if (r->flags & IORESOURCE_IO)
156 cmd |= PCI_COMMAND_IO;
157 if (r->flags & IORESOURCE_MEM)
158 cmd |= PCI_COMMAND_MEMORY;
159 }
160 if (dev->resource[PCI_ROM_RESOURCE].start)
161 cmd |= PCI_COMMAND_MEMORY;
162 if (cmd != old_cmd) {
163 printk("PCI: Enabling device %s (%04x -> %04x)\n",
164 dev->slot_name, old_cmd, cmd);
165 pci_write_config_word(dev, PCI_COMMAND, cmd);
166 }
167 return 0;
168}
169
170struct pci_controller * __init pcibios_alloc_controller(void)
171{
172 struct pci_controller *pci_ctrl;
173
174 pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
175 memset(pci_ctrl, 0, sizeof(struct pci_controller));
176
177 *pci_ctrl_tail = pci_ctrl;
178 pci_ctrl_tail = &pci_ctrl->next;
179
180 return pci_ctrl;
181}
182
183static int __init pcibios_init(void)
184{
185 struct pci_controller *pci_ctrl;
186 struct pci_bus *bus;
187 int next_busno = 0, i;
188
189 printk("PCI: Probing PCI hardware\n");
190
191 /* Scan all of the recorded PCI controllers. */
192 for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
193 pci_ctrl->last_busno = 0xff;
194 bus = pci_scan_bus(pci_ctrl->first_busno, pci_ctrl->ops,
195 pci_ctrl);
196 if (pci_ctrl->io_resource.flags) {
197 unsigned long offs;
198
199 offs = (unsigned long)pci_ctrl->io_space.base;
200 pci_ctrl->io_resource.start += offs;
201 pci_ctrl->io_resource.end += offs;
202 bus->resource[0] = &pci_ctrl->io_resource;
203 }
204 for (i = 0; i < 3; ++i)
205 if (pci_ctrl->mem_resources[i].flags)
206				bus->resource[i+1] = &pci_ctrl->mem_resources[i];
207 pci_ctrl->bus = bus;
208 pci_ctrl->last_busno = bus->subordinate;
209 if (next_busno <= pci_ctrl->last_busno)
210 next_busno = pci_ctrl->last_busno+1;
211 }
212 pci_bus_count = next_busno;
213
214 return platform_pcibios_fixup();
215}
216
217subsys_initcall(pcibios_init);
218
219void __init pcibios_fixup_bus(struct pci_bus *bus)
220{
221 struct pci_controller *pci_ctrl = bus->sysdata;
222 struct resource *res;
223 unsigned long io_offset;
224 int i;
225
226 io_offset = (unsigned long)pci_ctrl->io_space.base;
227 if (bus->parent == NULL) {
228 /* this is a host bridge - fill in its resources */
229 pci_ctrl->bus = bus;
230
231 bus->resource[0] = res = &pci_ctrl->io_resource;
232 if (!res->flags) {
233 if (io_offset)
234 printk (KERN_ERR "I/O resource not set for host"
235 " bridge %d\n", pci_ctrl->index);
236 res->start = 0;
237 res->end = IO_SPACE_LIMIT;
238 res->flags = IORESOURCE_IO;
239 }
240 res->start += io_offset;
241 res->end += io_offset;
242
243 for (i = 0; i < 3; i++) {
244 res = &pci_ctrl->mem_resources[i];
245 if (!res->flags) {
246 if (i > 0)
247 continue;
248 printk(KERN_ERR "Memory resource not set for "
249 "host bridge %d\n", pci_ctrl->index);
250 res->start = 0;
251 res->end = ~0U;
252 res->flags = IORESOURCE_MEM;
253 }
254 bus->resource[i+1] = res;
255 }
256 } else {
257 /* This is a subordinate bridge */
258 pci_read_bridge_bases(bus);
259
260 for (i = 0; i < 4; i++) {
261 if ((res = bus->resource[i]) == NULL || !res->flags)
262 continue;
263 if (io_offset && (res->flags & IORESOURCE_IO)) {
264 res->start += io_offset;
265 res->end += io_offset;
266 }
267 }
268 }
269}
270
271char __init *pcibios_setup(char *str)
272{
273 return str;
274}
275
276/* the next one is stolen from the alpha port... */
277
278void __init
279pcibios_update_irq(struct pci_dev *dev, int irq)
280{
281 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
282}
283
284int pcibios_enable_device(struct pci_dev *dev, int mask)
285{
286 u16 cmd, old_cmd;
287 int idx;
288 struct resource *r;
289
290 pci_read_config_word(dev, PCI_COMMAND, &cmd);
291 old_cmd = cmd;
292 for (idx=0; idx<6; idx++) {
293 r = &dev->resource[idx];
294 if (!r->start && r->end) {
295 printk(KERN_ERR "PCI: Device %s not available because "
296 "of resource collisions\n", dev->slot_name);
297 return -EINVAL;
298 }
299 if (r->flags & IORESOURCE_IO)
300 cmd |= PCI_COMMAND_IO;
301 if (r->flags & IORESOURCE_MEM)
302 cmd |= PCI_COMMAND_MEMORY;
303 }
304 if (cmd != old_cmd) {
305 printk("PCI: Enabling device %s (%04x -> %04x)\n",
306 dev->slot_name, old_cmd, cmd);
307 pci_write_config_word(dev, PCI_COMMAND, cmd);
308 }
309
310 return 0;
311}
312
313#ifdef CONFIG_PROC_FS
314
315/*
316 * Return the index of the PCI controller for device pdev.
317 */
318
319int
320pci_controller_num(struct pci_dev *dev)
321{
322 struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
323 return pci_ctrl->index;
324}
325
326#endif /* CONFIG_PROC_FS */
327
328
329static void
330pcibios_fixup_resources(struct pci_dev *dev)
331{
332 struct pci_controller* pci_ctrl = (struct pci_controller *)dev->sysdata;
333 int i;
334 unsigned long offset;
335
336 if (!pci_ctrl) {
337 printk(KERN_ERR "No pci_ctrl for PCI dev %s!\n",dev->slot_name);
338 return;
339 }
340 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
341 struct resource *res = dev->resource + i;
342 if (!res->start || !res->flags)
343 continue;
344 if (res->end == 0xffffffff) {
345 DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
346 dev->slot_name, i, res->start, res->end);
347 res->end -= res->start;
348 res->start = 0;
349 continue;
350 }
351 offset = 0;
352 if (res->flags & IORESOURCE_IO)
353 offset = (unsigned long) pci_ctrl->io_space.base;
354 else if (res->flags & IORESOURCE_MEM)
355 offset = (unsigned long) pci_ctrl->mem_space.base;
356
357 if (offset != 0) {
358 res->start += offset;
359 res->end += offset;
360#ifdef DEBUG
361 printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
362 i, res->flags, dev->slot_name,
363 res->start - offset, res->start);
364#endif
365 }
366 }
367}
368
369/*
370 * Platform support for /proc/bus/pci/X/Y mmap()s,
371 * modelled on the sparc64 implementation by Dave Miller.
372 * -- paulus.
373 */
374
375/*
376 * Adjust vm_pgoff of VMA such that it is the physical page offset
377 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
378 *
379 * Basically, the user finds the base address of the device they wish
380 * to mmap. They read the 32-bit value from the config space base register,
381 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
382 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
383 *
384 * Returns negative error code on failure, zero on success.
385 */
386static __inline__ int
387__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
388 enum pci_mmap_state mmap_state)
389{
390 struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
391 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
392 unsigned long io_offset = 0;
393 int i, res_bit;
394
395 if (pci_ctrl == 0)
396 return -EINVAL; /* should never happen */
397
398 /* If memory, add on the PCI bridge address offset */
399 if (mmap_state == pci_mmap_mem) {
400 res_bit = IORESOURCE_MEM;
401 } else {
402 io_offset = (unsigned long)pci_ctrl->io_space.base;
403 offset += io_offset;
404 res_bit = IORESOURCE_IO;
405 }
406
407 /*
408 * Check that the offset requested corresponds to one of the
409 * resources of the device.
410 */
411 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
412 struct resource *rp = &dev->resource[i];
413 int flags = rp->flags;
414
415 /* treat ROM as memory (should be already) */
416 if (i == PCI_ROM_RESOURCE)
417 flags |= IORESOURCE_MEM;
418
419 /* Active and same type? */
420 if ((flags & res_bit) == 0)
421 continue;
422
423 /* In the range of this resource? */
424 if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
425 continue;
426
427 /* found it! construct the final physical address */
428 if (mmap_state == pci_mmap_io)
429 offset += pci_ctrl->io_space.start - io_offset;
430 vma->vm_pgoff = offset >> PAGE_SHIFT;
431 return 0;
432 }
433
434 return -EINVAL;
435}
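/*
 * Hypothetical user-space counterpart to the check above (illustrative;
 * 'bar' is the 32-bit value read from the device's config-space base
 * register, 'fd' an open /proc/bus/pci/XXX file):
 *
 *	off_t off = (bar & PAGE_MASK) + n * PAGE_SIZE;
 *	void *p = mmap(0, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, off);
 */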
436
437/*
438 * Set vm_flags of VMA, as appropriate for this architecture, for a pci device
439 * mapping.
440 */
441static __inline__ void
442__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
443 enum pci_mmap_state mmap_state)
444{
445 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
446}
447
448/*
449 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
450 * device mapping.
451 */
452static __inline__ void
453__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
454 enum pci_mmap_state mmap_state, int write_combine)
455{
456 int prot = pgprot_val(vma->vm_page_prot);
457
458 /* Set to write-through */
459 prot &= ~_PAGE_NO_CACHE;
460#if 0
461 if (!write_combine)
462 prot |= _PAGE_WRITETHRU;
463#endif
464 vma->vm_page_prot = __pgprot(prot);
465}
466
467/*
468 * Perform the actual remap of the pages for a PCI device mapping, as
469 * appropriate for this architecture. The region in the process to map
470 * is described by vm_start and vm_end members of VMA, the base physical
471 * address is found in vm_pgoff.
472 * The pci device structure is provided so that architectures may make mapping
473 * decisions on a per-device or per-bus basis.
474 *
475 * Returns a negative error code on failure, zero on success.
476 */
477int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
478 enum pci_mmap_state mmap_state,
479 int write_combine)
480{
481 int ret;
482
483 ret = __pci_mmap_make_offset(dev, vma, mmap_state);
484 if (ret < 0)
485 return ret;
486
487 __pci_mmap_set_flags(dev, vma, mmap_state);
488 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
489
490 ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<<PAGE_SHIFT,
491 vma->vm_end - vma->vm_start, vma->vm_page_prot);
492
493 return ret;
494}
495
496/*
497 * This probably belongs here rather than ioport.c because
498 * we do not want this crud linked into SBus kernels.
499 * Also, think for a moment about the likes of floppy.c that
500 * include architecture specific parts. They may want to redefine ins/outs.
501 *
502 * We do not use horrible macros here because we want to
503 * advance pointer by sizeof(size).
504 */
505void outsb(unsigned long addr, const void *src, unsigned long count) {
506 while (count) {
507 count -= 1;
508 writeb(*(const char *)src, addr);
509 src += 1;
510 addr += 1;
511 }
512}
513
514void outsw(unsigned long addr, const void *src, unsigned long count) {
515 while (count) {
516 count -= 2;
517 writew(*(const short *)src, addr);
518 src += 2;
519 addr += 2;
520 }
521}
522
523void outsl(unsigned long addr, const void *src, unsigned long count) {
524 while (count) {
525 count -= 4;
526 writel(*(const long *)src, addr);
527 src += 4;
528 addr += 4;
529 }
530}
531
532void insb(unsigned long addr, void *dst, unsigned long count) {
533 while (count) {
534 count -= 1;
535 *(unsigned char *)dst = readb(addr);
536 dst += 1;
537 addr += 1;
538 }
539}
540
541void insw(unsigned long addr, void *dst, unsigned long count) {
542 while (count) {
543 count -= 2;
544 *(unsigned short *)dst = readw(addr);
545 dst += 2;
546 addr += 2;
547 }
548}
549
550void insl(unsigned long addr, void *dst, unsigned long count) {
551 while (count) {
552 count -= 4;
553 /*
554 * XXX I am sure we are in for an unaligned trap here.
555 */
556 *(unsigned long *)dst = readl(addr);
557 dst += 4;
558 addr += 4;
559 }
560}
561
562
563
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
new file mode 100644
index 000000000000..cf1362784443
--- /dev/null
+++ b/arch/xtensa/kernel/platform.c
@@ -0,0 +1,49 @@
1/*
2 * arch/xtensa/kernel/platform.c
3 *
4 * Default platform functions.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15#include <linux/config.h>
16#include <linux/types.h>
17#include <linux/pci.h>
18#include <linux/time.h>
19#include <asm/platform.h>
20#include <asm/timex.h>
21
22#define _F(r,f,a,b) \
23 r __platform_##f a b; \
24 r platform_##f a __attribute__((weak, alias("__platform_"#f)))
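/*
 * For reference, _F(void, setup, (char** cmd), { }) expands to roughly:
 *
 *	void __platform_setup (char** cmd) { };
 *	void platform_setup (char** cmd)
 *		__attribute__((weak, alias("__platform_setup")));
 *
 * so a platform may override the weak platform_setup() with its own
 * strong definition.
 */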
25
26/*
27 * Default functions that are used if no platform specific function is defined.
28 * (Please, refer to include/asm-xtensa/platform.h for more information)
29 */
30
31_F(void, setup, (char** cmd), { });
32_F(void, init_irq, (void), { });
33_F(void, restart, (void), { while(1); });
34_F(void, halt, (void), { while(1); });
35_F(void, power_off, (void), { while(1); });
36_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
37_F(void, heartbeat, (void), { });
38_F(int, pcibios_fixup, (void), { return 0; });
39_F(int, get_rtc_time, (time_t* t), { return 0; });
40_F(int, set_rtc_time, (time_t t), { return 0; });
41
42#if CONFIG_XTENSA_CALIBRATE_CCOUNT
43_F(void, calibrate_ccount, (void),
44{
45 printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
46 ccount_per_jiffy = 100 * (1000000UL/HZ);
47});
48#endif
49
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
new file mode 100644
index 000000000000..4099703b14be
--- /dev/null
+++ b/arch/xtensa/kernel/process.c
@@ -0,0 +1,482 @@
1// TODO verify coprocessor handling
2/*
3 * arch/xtensa/kernel/process.c
4 *
5 * Xtensa Processor version.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2001 - 2005 Tensilica Inc.
12 *
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Chris Zankel <chris@zankel.net>
15 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
16 * Kevin Chea
17 */
18
19#include <linux/config.h>
20#include <linux/errno.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/stddef.h>
27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h>
30#include <linux/elf.h>
31#include <linux/init.h>
32#include <linux/prctl.h>
33#include <linux/init_task.h>
34#include <linux/module.h>
35#include <linux/mqueue.h>
36
37#include <asm/pgtable.h>
38#include <asm/uaccess.h>
39#include <asm/system.h>
40#include <asm/io.h>
41#include <asm/processor.h>
42#include <asm/platform.h>
43#include <asm/mmu.h>
44#include <asm/irq.h>
45#include <asm/atomic.h>
46#include <asm/offsets.h>
47#include <asm/coprocessor.h>
48
49extern void ret_from_fork(void);
50
51static struct fs_struct init_fs = INIT_FS;
52static struct files_struct init_files = INIT_FILES;
53static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
54static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
55struct mm_struct init_mm = INIT_MM(init_mm);
56EXPORT_SYMBOL(init_mm);
57
58union thread_union init_thread_union
59 __attribute__((__section__(".data.init_task"))) =
60{ INIT_THREAD_INFO(init_task) };
61
62struct task_struct init_task = INIT_TASK(init_task);
63EXPORT_SYMBOL(init_task);
64
65struct task_struct *current_set[NR_CPUS] = {&init_task, };
66
67
68#if XCHAL_CP_NUM > 0
69
70/*
71 * Coprocessor ownership.
72 */
73
74coprocessor_info_t coprocessor_info[] = {
75 { 0, XTENSA_CPE_CP0_OFFSET },
76 { 0, XTENSA_CPE_CP1_OFFSET },
77 { 0, XTENSA_CPE_CP2_OFFSET },
78 { 0, XTENSA_CPE_CP3_OFFSET },
79 { 0, XTENSA_CPE_CP4_OFFSET },
80 { 0, XTENSA_CPE_CP5_OFFSET },
81 { 0, XTENSA_CPE_CP6_OFFSET },
82 { 0, XTENSA_CPE_CP7_OFFSET },
83};
84
85#endif
86
87/*
88 * Power-management idle function, if any is provided by the platform.
89 */
90
91void cpu_idle(void)
92{
93 local_irq_enable();
94
95 /* endless idle loop with no priority at all */
96 while (1) {
97 while (!need_resched())
98 platform_idle();
99 preempt_enable();
100 schedule();
101 }
102}
103
104/*
105 * Free current thread data structures etc..
106 */
107
108void exit_thread(void)
109{
110 release_coprocessors(current); /* Empty macro if no CPs are defined */
111}
112
113void flush_thread(void)
114{
115 release_coprocessors(current); /* Empty macro if no CPs are defined */
116}
117
118/*
119 * Copy thread.
120 *
121 * The stack layout for the new thread looks like this:
122 *
123 * +------------------------+ <- sp in childregs (= tos)
124 * | childregs |
125 * +------------------------+ <- thread.sp = sp in dummy-frame
126 * | dummy-frame | (saved in dummy-frame spill-area)
127 * +------------------------+
128 *
129 * We create a dummy frame to return to ret_from_fork:
130 * a0 points to ret_from_fork (simulating a call4)
131 * sp points to itself (thread.sp)
132 * a2, a3 are unused.
133 *
134 * Note: This is a pristine frame, so we don't need any spill region on top of
135 * childregs.
136 */
137
138int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
139 unsigned long unused,
140 struct task_struct * p, struct pt_regs * regs)
141{
142 struct pt_regs *childregs;
143 unsigned long tos;
144 int user_mode = user_mode(regs);
145
146 /* Set up new TSS. */
147 tos = (unsigned long)p->thread_info + THREAD_SIZE;
148 if (user_mode)
149 childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
150 else
151 childregs = (struct pt_regs*)tos - 1;
152
153 *childregs = *regs;
154
155 /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
156 *((int*)childregs - 3) = (unsigned long)childregs;
157 *((int*)childregs - 4) = 0;
158
159 childregs->areg[1] = tos;
160 childregs->areg[2] = 0;
161 p->set_child_tid = p->clear_child_tid = NULL;
162 p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
163 p->thread.sp = (unsigned long)childregs;
164	if (user_mode) {
165
166 int len = childregs->wmask & ~0xf;
167 childregs->areg[1] = usp;
168 memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
169 &regs->areg[XCHAL_NUM_AREGS - len/4], len);
170
171 if (clone_flags & CLONE_SETTLS)
172 childregs->areg[2] = childregs->areg[6];
173
174 } else {
175 /* In kernel space, we start a new thread with a new stack. */
176 childregs->wmask = 1;
177 }
178 return 0;
179}
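
/*
 * Aside (illustrative sketch, not part of the original code): the
 * MAKE_RA_FOR_CALL() value stored above is a windowed return address.
 * On Xtensa, the top two bits of a return address carry the caller's
 * window increment (1 for call4, 2 for call8, 3 for call12) and only
 * the low 30 bits address the code.  Assuming the 0x1 above means
 * "call4", the encoding is simply:
 */
static inline unsigned long make_ra_sketch(unsigned long addr,
					   unsigned long callinc)
{
	return (addr & 0x3fffffffUL) | (callinc << 30);
}
/* make_ra_sketch(x, 1) yields the same (x & 0x3fffffff) | 0x40000000
 * pattern used for the signal-handler frame in signal.c. */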
180
181
182/*
183 * Create a kernel thread
184 */
185
186int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
187{
188 long retval;
189 __asm__ __volatile__
190 ("mov a5, %4\n\t" /* preserve fn in a5 */
191 "mov a6, %3\n\t" /* preserve and setup arg in a6 */
192 "movi a2, %1\n\t" /* load __NR_clone for syscall*/
193 "mov a3, sp\n\t" /* sp check and sys_clone */
194 "mov a4, %5\n\t" /* load flags for syscall */
195 "syscall\n\t"
196 "beq a3, sp, 1f\n\t" /* branch if parent */
197 "callx4 a5\n\t" /* call fn */
198 "movi a2, %2\n\t" /* load __NR_exit for syscall */
199 "mov a3, a6\n\t" /* load fn return value */
200 "syscall\n"
201 "1:\n\t"
202	 "mov %0, a2\n\t" /* parent: return value of clone (child PID) */
203 :"=r" (retval)
204 :"i" (__NR_clone), "i" (__NR_exit),
205 "r" (arg), "r" (fn),
206 "r" (flags | CLONE_VM)
207 : "a2", "a3", "a4", "a5", "a6" );
208 return retval;
209}
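
/*
 * For reference (illustrative only), the asm above is morally:
 *
 *	pid = clone(flags | CLONE_VM, sp);
 *	if (in_child)
 *		exit(fn(arg));
 *	return pid;
 *
 * It must stay in asm because the child returns from the syscall on a
 * different stack and may not unwind this function's C frame.
 */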
210
211
212/*
213 * These bracket the sleeping functions..
214 */
215
216unsigned long get_wchan(struct task_struct *p)
217{
218 unsigned long sp, pc;
219 unsigned long stack_page = (unsigned long) p->thread_info;
220 int count = 0;
221
222 if (!p || p == current || p->state == TASK_RUNNING)
223 return 0;
224
225 sp = p->thread.sp;
226 pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
227
228 do {
229 if (sp < stack_page + sizeof(struct task_struct) ||
230 sp >= (stack_page + THREAD_SIZE) ||
231 pc == 0)
232 return 0;
233 if (!in_sched_functions(pc))
234 return pc;
235
236 /* Stack layout: sp-4: ra, sp-3: sp' */
237
238		pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
239		sp = *((unsigned long *)sp - 3);
240 } while (count++ < 16);
241 return 0;
242}
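
/*
 * The two loads at the bottom of the loop above can be read as a
 * spill-slot accessor (hypothetical helper; the walk relies on the
 * a0/a1 save area that the window-overflow handlers keep at sp-16):
 *
 *	#define SPILL_SLOT(sp, n) (((unsigned long *)(sp))[(n) - 4])
 *
 * so ra is SPILL_SLOT(sp, 0) and the caller's sp is SPILL_SLOT(sp, 1).
 */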
243
244/*
245 * do_copy_regs() gathers information from 'struct pt_regs' and
246 * 'current->thread.areg[]' to fill in the xtensa_gregset_t
247 * structure.
248 *
249 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
250 * of processor registers. Besides different ordering,
251 * xtensa_gregset_t contains non-live register information that
252 * 'struct pt_regs' does not. Exception handling (primarily) uses
253 * 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
254 *
255 */
256
257void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
258 struct task_struct *tsk)
259{
260 int i, n, wb_offset;
261
262 elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
263 elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;
264
265 __asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
266 elfregs->cpux = i;
267 __asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
268 elfregs->cpuy = i;
269
270 /* Note: PS.EXCM is not set while user task is running; its
271 * being set in regs->ps is for exception handling convenience.
272 */
273
274 elfregs->pc = regs->pc;
275 elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
276 elfregs->exccause = regs->exccause;
277 elfregs->excvaddr = regs->excvaddr;
278 elfregs->windowbase = regs->windowbase;
279 elfregs->windowstart = regs->windowstart;
280 elfregs->lbeg = regs->lbeg;
281 elfregs->lend = regs->lend;
282 elfregs->lcount = regs->lcount;
283 elfregs->sar = regs->sar;
284 elfregs->syscall = regs->syscall;
285
286 /* Copy register file.
287 * The layout looks like this:
288 *
289 * | a0 ... a15 | Z ... Z | arX ... arY |
290 * current window unused saved frames
291 */
292
293 memset (elfregs->ar, 0, sizeof(elfregs->ar));
294
295 wb_offset = regs->windowbase * 4;
296 n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
297
298 for (i = 0; i < n; i++)
299 elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
300
301 n = (regs->wmask >> 4) * 4;
302
303 for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
304 elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
305}
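
/*
 * Worked example of the rotation above: with XCHAL_NUM_AREGS == 64 and
 * regs->windowbase == 3 (wb_offset == 12), a live window of four
 * registers (wmask & 1) puts areg[0..3] into ar[12..15], and one saved
 * frame (wmask >> 4 == 1, so n == 4) copies areg[60..63] into
 * ar[8..11], i.e. directly below the live window.
 */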
306
307void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
308{
309 do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
310}
311
312
313/* The inverse of do_copy_regs(). No error or sanity checking. */
314
315void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
316 struct task_struct *tsk)
317{
318 int i, n, wb_offset;
319
320	/* Note: PS.EXCM is not set while the user task is running; it
321	 * needs to be set in regs->ps for exception handling convenience.
322 */
323
324 regs->pc = elfregs->pc;
325 regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
326 regs->exccause = elfregs->exccause;
327 regs->excvaddr = elfregs->excvaddr;
328 regs->windowbase = elfregs->windowbase;
329 regs->windowstart = elfregs->windowstart;
330 regs->lbeg = elfregs->lbeg;
331 regs->lend = elfregs->lend;
332 regs->lcount = elfregs->lcount;
333 regs->sar = elfregs->sar;
334 regs->syscall = elfregs->syscall;
335
336 /* Clear everything. */
337
338 memset (regs->areg, 0, sizeof(regs->areg));
339
340 /* Copy regs from live window frame. */
341
342 wb_offset = regs->windowbase * 4;
343 n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
344
345 for (i = 0; i < n; i++)
346 regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
347
348 n = (regs->wmask >> 4) * 4;
349
350 for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
351 regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
352}
353
354/*
355 * do_save_fpregs() gathers information from 'struct pt_regs' and
356 * 'current->thread' to fill in the elf_fpregset_t structure.
357 *
358 * Core files and ptrace use elf_fpregset_t.
359 */
360
361void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
362 struct task_struct *tsk)
363{
364#if XCHAL_HAVE_CP
365
366 extern unsigned char _xtensa_reginfo_tables[];
367 extern unsigned _xtensa_reginfo_table_size;
368 int i;
369 unsigned long flags;
370
371 /* Before dumping coprocessor state from memory,
372 * ensure any live coprocessor contents for this
373 * task are first saved to memory:
374 */
375 local_irq_save(flags);
376
377 for (i = 0; i < XCHAL_CP_MAX; i++) {
378 if (tsk == coprocessor_info[i].owner) {
379 enable_coprocessor(i);
380 save_coprocessor_registers(
381 tsk->thread.cp_save+coprocessor_info[i].offset,i);
382 disable_coprocessor(i);
383 }
384 }
385
386 local_irq_restore(flags);
387
388 /* Now dump coprocessor & extra state: */
389 memcpy((unsigned char*)fpregs,
390 _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
391 memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
392 tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
393#endif
394}
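
/*
 * The two memcpy() calls above give elf_fpregset_t this layout:
 *
 *	[0 .. table_size)		register-info tables (format key)
 *	[table_size .. +CP_EXTRA)	raw coprocessor/extra save area
 *
 * with table_size = _xtensa_reginfo_table_size and CP_EXTRA =
 * XTENSA_CP_EXTRA_SIZE.  do_restore_fpregs() below depends on exactly
 * this layout when it memcmp()s the table portion.
 */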
395
396/*
397 * The inverse of do_save_fpregs().
398 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
399 * Returns 0 on success, non-zero if layout doesn't match.
400 */
401
402int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
403 struct task_struct *tsk)
404{
405#if XCHAL_HAVE_CP
406
407 extern unsigned char _xtensa_reginfo_tables[];
408 extern unsigned _xtensa_reginfo_table_size;
409 int i;
410 unsigned long flags;
411
412 /* Make sure save area layouts match.
413 * FIXME: in the future we could allow restoring from
414 * a different layout of the same registers, by comparing
415 * fpregs' table with _xtensa_reginfo_tables and matching
416 * entries and copying registers one at a time.
417 * Not too sure yet whether that's very useful.
418 */
419
420	if (memcmp((unsigned char*)fpregs,
421 _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
422 return -1;
423 }
424
425 /* Before restoring coprocessor state from memory,
426 * ensure any live coprocessor contents for this
427 * task are first invalidated.
428 */
429
430 local_irq_save(flags);
431
432 for (i = 0; i < XCHAL_CP_MAX; i++) {
433 if (tsk == coprocessor_info[i].owner) {
434 enable_coprocessor(i);
435 save_coprocessor_registers(
436 tsk->thread.cp_save+coprocessor_info[i].offset,i);
437 coprocessor_info[i].owner = 0;
438 disable_coprocessor(i);
439 }
440 }
441
442 local_irq_restore(flags);
443
444 /* Now restore coprocessor & extra state: */
445
446 memcpy(tsk->thread.cp_save,
447 (unsigned char*)fpregs + _xtensa_reginfo_table_size,
448 XTENSA_CP_EXTRA_SIZE);
449#endif
450 return 0;
451}
452/*
453 * Fill in the CP structure for a core dump for a particular task.
454 */
455
456int
457dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
458{
459/* see asm/coprocessor.h for this magic number 16 */
460#if TOTAL_CPEXTRA_SIZE > 16
461 do_save_fpregs (r, regs, task);
462
463 /* For now, bit 16 means some extra state may be present: */
464// FIXME!! need to track to return more accurate mask
465 return 0x10000 | XCHAL_CP_MASK;
466#else
467 return 0; /* no coprocessors active on this processor */
468#endif
469}
470
471/*
472 * Fill in the CP structure for a core dump.
473 * This includes any FPU coprocessor.
474 * Here, we dump all coprocessors, and other ("extra") custom state.
475 *
476 * This function is called by elf_core_dump() in fs/binfmt_elf.c
477 * (in which case 'regs' comes from calls to do_coredump, see signal.c).
478 */
479int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
480{
481 return dump_task_fpu(regs, current, r);
482}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
new file mode 100644
index 000000000000..9ef07a4dd2a2
--- /dev/null
+++ b/arch/xtensa/kernel/ptrace.c
@@ -0,0 +1,407 @@
1// TODO some minor issues
2/*
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (C) 2001 - 2005 Tensilica Inc.
8 *
9 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
10 * Chris Zankel <chris@zankel.net>
11 * Scott Foehner <sfoehner@yahoo.com>
12 * Kevin Chea
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 */
15
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/mm.h>
20#include <linux/errno.h>
21#include <linux/ptrace.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/security.h>
25
26#include <asm/pgtable.h>
27#include <asm/page.h>
28#include <asm/system.h>
29#include <asm/uaccess.h>
30#include <asm/ptrace.h>
31#include <asm/elf.h>
32
33#define TEST_KERNEL // verify kernel operations FIXME: remove
34
35
36/*
37 * Called by kernel/ptrace.c when detaching..
38 *
39 * Make sure single step bits etc are not set.
40 */
41
42void ptrace_disable(struct task_struct *child)
43{
44 /* Nothing to do.. */
45}
46
47int sys_ptrace(long request, long pid, long addr, long data)
48{
49 struct task_struct *child;
50 int ret = -EPERM;
51
52 lock_kernel();
53
54#if 0
55 if ((int)request != 1)
56 printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
57 (int) request, (int) pid, (unsigned long) addr,
58 (unsigned long) data);
59#endif
60
61 if (request == PTRACE_TRACEME) {
62
63 /* Are we already being traced? */
64
65 if (current->ptrace & PT_PTRACED)
66 goto out;
67
68 if ((ret = security_ptrace(current->parent, current)))
69 goto out;
70
71 /* Set the ptrace bit in the process flags. */
72
73 current->ptrace |= PT_PTRACED;
74 ret = 0;
75 goto out;
76 }
77
78 ret = -ESRCH;
79 read_lock(&tasklist_lock);
80 child = find_task_by_pid(pid);
81 if (child)
82 get_task_struct(child);
83 read_unlock(&tasklist_lock);
84 if (!child)
85 goto out;
86
87 ret = -EPERM;
88 if (pid == 1) /* you may not mess with init */
89		goto out_tsk;
90
91 if (request == PTRACE_ATTACH) {
92 ret = ptrace_attach(child);
93 goto out_tsk;
94 }
95
96 if ((ret = ptrace_check_attach(child, request == PTRACE_KILL)) < 0)
97 goto out_tsk;
98
99 switch (request) {
100 case PTRACE_PEEKTEXT: /* read word at location addr. */
101 case PTRACE_PEEKDATA:
102 {
103 unsigned long tmp;
104 int copied;
105
106 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
107 ret = -EIO;
108 if (copied != sizeof(tmp))
109 break;
110 ret = put_user(tmp,(unsigned long *) data);
111
112		goto out_tsk;
113 }
114
115 /* Read the word at location addr in the USER area. */
116
117 case PTRACE_PEEKUSR:
118 {
119 struct pt_regs *regs;
120 unsigned long tmp;
121
122 regs = xtensa_pt_regs(child);
123 tmp = 0; /* Default return value. */
124
125 switch(addr) {
126
127 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
128 {
129 int ar = addr - REG_AR_BASE - regs->windowbase * 4;
130 ar &= (XCHAL_NUM_AREGS - 1);
131 if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
132 tmp = regs->areg[ar];
133 else
134 ret = -EIO;
135 break;
136 }
137 case REG_A_BASE ... REG_A_BASE + 15:
138 tmp = regs->areg[addr - REG_A_BASE];
139 break;
140 case REG_PC:
141 tmp = regs->pc;
142 break;
143 case REG_PS:
144 /* Note: PS.EXCM is not set while user task is running;
145 * its being set in regs is for exception handling
146 * convenience. */
147 tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
148 break;
149 case REG_WB:
150 tmp = regs->windowbase;
151 break;
152 case REG_WS:
153 tmp = regs->windowstart;
154 break;
155 case REG_LBEG:
156 tmp = regs->lbeg;
157 break;
158 case REG_LEND:
159 tmp = regs->lend;
160 break;
161 case REG_LCOUNT:
162 tmp = regs->lcount;
163 break;
164 case REG_SAR:
165 tmp = regs->sar;
166 break;
167 case REG_DEPC:
168 tmp = regs->depc;
169 break;
170 case REG_EXCCAUSE:
171 tmp = regs->exccause;
172 break;
173 case REG_EXCVADDR:
174 tmp = regs->excvaddr;
175 break;
176 case SYSCALL_NR:
177 tmp = regs->syscall;
178 break;
179 default:
180 tmp = 0;
181 ret = -EIO;
182			goto out_tsk;
183 }
184 ret = put_user(tmp, (unsigned long *) data);
185		goto out_tsk;
186 }
187
188 case PTRACE_POKETEXT: /* write the word at location addr. */
189 case PTRACE_POKEDATA:
190 if (access_process_vm(child, addr, &data, sizeof(data), 1)
191 == sizeof(data))
192 break;
193 ret = -EIO;
194		goto out_tsk;
195
196 case PTRACE_POKEUSR:
197 {
198 struct pt_regs *regs;
199 regs = xtensa_pt_regs(child);
200
201 switch (addr) {
202 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
203 {
204 int ar = addr - REG_AR_BASE - regs->windowbase * 4;
205 if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
206 regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
207 else
208 ret = -EIO;
209 break;
210 }
211 case REG_A_BASE ... REG_A_BASE + 15:
212 regs->areg[addr - REG_A_BASE] = data;
213 break;
214 case REG_PC:
215 regs->pc = data;
216 break;
217 case SYSCALL_NR:
218 regs->syscall = data;
219 break;
220#ifdef TEST_KERNEL
221 case REG_WB:
222 regs->windowbase = data;
223 break;
224 case REG_WS:
225 regs->windowstart = data;
226 break;
227#endif
228
229 default:
230 /* The rest are not allowed. */
231 ret = -EIO;
232 break;
233 }
234 break;
235 }
236
237 /* continue and stop at next (return from) syscall */
238 case PTRACE_SYSCALL:
239 case PTRACE_CONT: /* restart after signal. */
240 {
241 ret = -EIO;
242 if ((unsigned long) data > _NSIG)
243 break;
244 if (request == PTRACE_SYSCALL)
245 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
246 else
247 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
248 child->exit_code = data;
249 /* Make sure the single step bit is not set. */
250 child->ptrace &= ~PT_SINGLESTEP;
251 wake_up_process(child);
252 ret = 0;
253 break;
254 }
255
256 /*
257 * make the child exit. Best I can do is send it a sigkill.
258 * perhaps it should be put in the status that it wants to
259 * exit.
260 */
261 case PTRACE_KILL:
262 ret = 0;
263		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
264 break;
265 child->exit_code = SIGKILL;
266 child->ptrace &= ~PT_SINGLESTEP;
267 wake_up_process(child);
268 break;
269
270 case PTRACE_SINGLESTEP:
271 ret = -EIO;
272 if ((unsigned long) data > _NSIG)
273 break;
274 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
275 child->ptrace |= PT_SINGLESTEP;
276 child->exit_code = data;
277 wake_up_process(child);
278 ret = 0;
279 break;
280
281 case PTRACE_GETREGS:
282 {
283 /* 'data' points to user memory in which to write.
284 * Mainly due to the non-live register values, we
285 * reformat the register values into something more
286 * standard. For convenience, we use the handy
287 * elf_gregset_t format. */
288
289 xtensa_gregset_t format;
290 struct pt_regs *regs = xtensa_pt_regs(child);
291
292 do_copy_regs (&format, regs, child);
293
294 /* Now, copy to user space nice and easy... */
295 ret = 0;
296 if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
297 ret = -EFAULT;
298 break;
299 }
300
301 case PTRACE_SETREGS:
302 {
303 /* 'data' points to user memory that contains the new
304 * values in the elf_gregset_t format. */
305
306 xtensa_gregset_t format;
307 struct pt_regs *regs = xtensa_pt_regs(child);
308
309 if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
310 ret = -EFAULT;
311 break;
312 }
313
314 /* FIXME: Perhaps we want some sanity checks on
315 * these user-space values? See ARM version. Are
316 * debuggers a security concern? */
317
318 do_restore_regs (&format, regs, child);
319
320 ret = 0;
321 break;
322 }
323
324 case PTRACE_GETFPREGS:
325 {
326 /* 'data' points to user memory in which to write.
327 * For convenience, we use the handy
328 * elf_fpregset_t format. */
329
330 elf_fpregset_t fpregs;
331 struct pt_regs *regs = xtensa_pt_regs(child);
332
333 do_save_fpregs (&fpregs, regs, child);
334
335 /* Now, copy to user space nice and easy... */
336 ret = 0;
337 if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
338 ret = -EFAULT;
339
340 break;
341 }
342
343 case PTRACE_SETFPREGS:
344 {
345 /* 'data' points to user memory that contains the new
346 * values in the elf_fpregset_t format.
347 */
348 elf_fpregset_t fpregs;
349 struct pt_regs *regs = xtensa_pt_regs(child);
350
351 ret = 0;
352 if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
353 ret = -EFAULT;
354 break;
355 }
356
357 if (do_restore_fpregs (&fpregs, regs, child))
358 ret = -EIO;
359 break;
360 }
361
362 case PTRACE_GETFPREGSIZE:
363 /* 'data' points to 'unsigned long' set to the size
364 * of elf_fpregset_t
365 */
366 ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
367 break;
368
369 case PTRACE_DETACH: /* detach a process that was attached. */
370 ret = ptrace_detach(child, data);
371 break;
372
373 default:
374 ret = ptrace_request(child, request, addr, data);
375		goto out_tsk;
376 }
377out_tsk:
378 put_task_struct(child);
379out:
380 unlock_kernel();
381 return ret;
382}
383
384void do_syscall_trace(void)
385{
386 if (!test_thread_flag(TIF_SYSCALL_TRACE))
387 return;
388
389 if (!(current->ptrace & PT_PTRACED))
390 return;
391
392 /*
393 * The 0x80 provides a way for the tracing parent to distinguish
394 * between a syscall stop and SIGTRAP delivery
395 */
396 ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
397
398 /*
399 * this isn't the same as continuing with a signal, but it will do
400 * for normal use. strace only continues with a signal if the
401 * stopping signal is not SIGTRAP. -brl
402 */
403 if (current->exit_code) {
404 send_sig(current->exit_code, current, 1);
405 current->exit_code = 0;
406 }
407}
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
new file mode 100644
index 000000000000..d40f4b1b75ac
--- /dev/null
+++ b/arch/xtensa/kernel/semaphore.c
@@ -0,0 +1,226 @@
1/*
2 * arch/xtensa/kernel/semaphore.c
3 *
4 * Generic semaphore code. Buyer beware. Do your own specific changes
5 * in <asm/semaphore-helper.h>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2001 - 2005 Tensilica Inc.
12 *
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Chris Zankel <chris@zankel.net>
15 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
16 * Kevin Chea
17 */
18
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <linux/init.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * These two _must_ execute atomically wrt each other.
27 */
28
29static __inline__ void wake_one_more(struct semaphore * sem)
30{
31 atomic_inc((atomic_t *)&sem->sleepers);
32}
33
34static __inline__ int waking_non_zero(struct semaphore *sem)
35{
36 unsigned long flags;
37 int ret = 0;
38
39 spin_lock_irqsave(&semaphore_wake_lock, flags);
40 if (sem->sleepers > 0) {
41 sem->sleepers--;
42 ret = 1;
43 }
44 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
45 return ret;
46}
47
48/*
49 * waking_non_zero_interruptible:
50 * 1 got the lock
51 * 0 go to sleep
52 * -EINTR interrupted
53 *
54 * We must undo the sem->count down_interruptible() increment while we are
55 * protected by the spinlock in order to make atomic this atomic_inc() with the
56 * atomic_read() in wake_one_more(), otherwise we can race. -arca
57 */
58
59static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
60 struct task_struct *tsk)
61{
62 unsigned long flags;
63 int ret = 0;
64
65 spin_lock_irqsave(&semaphore_wake_lock, flags);
66 if (sem->sleepers > 0) {
67 sem->sleepers--;
68 ret = 1;
69 } else if (signal_pending(tsk)) {
70 atomic_inc(&sem->count);
71 ret = -EINTR;
72 }
73 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
74 return ret;
75}
76
77/*
78 * waking_non_zero_trylock:
79 * 1 failed to lock
80 * 0 got the lock
81 *
82 * We must undo the sem->count down_trylock() increment while we are
83 * protected by the spinlock in order to make atomic this atomic_inc() with the
84 * atomic_read() in wake_one_more(), otherwise we can race. -arca
85 */
86
87static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
88{
89 unsigned long flags;
90 int ret = 1;
91
92 spin_lock_irqsave(&semaphore_wake_lock, flags);
93 if (sem->sleepers <= 0)
94 atomic_inc(&sem->count);
95 else {
96 sem->sleepers--;
97 ret = 0;
98 }
99 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
100 return ret;
101}
102
103spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED;
104
105/*
106 * Semaphores are implemented using a two-way counter:
107 * The "count" variable is decremented for each process
108 * that tries to sleep, while the "waking" variable is
109 * incremented when the "up()" code goes to wake up waiting
110 * processes.
111 *
112 * Notably, the inline "up()" and "down()" functions can
113 * efficiently test if they need to do any extra work (up
114 * needs to do something only if count was negative before
115 * the increment operation).
116 *
117 * waking_non_zero() (from asm/semaphore.h) must execute
118 * atomically.
119 *
120 * When __up() is called, the count was negative before
121 * incrementing it, and we need to wake up somebody.
122 *
123 * This routine adds one to the count of processes that need to
124 * wake up and exit. ALL waiting processes actually wake up but
125 * only the one that gets to the "waking" field first will gate
126 * through and acquire the semaphore. The others will go back
127 * to sleep.
128 *
129 * Note that these functions are only called when there is
130 * contention on the lock, and as such all this is the
131 * "non-critical" part of the whole semaphore business. The
132 * critical part is the inline stuff in <asm/semaphore.h>
133 * where we want to avoid any extra jumps and calls.
134 */
135
136void __up(struct semaphore *sem)
137{
138 wake_one_more(sem);
139 wake_up(&sem->wait);
140}
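
/*
 * Sketch of the inline fast paths that fall back to the slow paths in
 * this file (the real versions live in asm/semaphore.h; this C
 * rendition is illustrative only):
 */
void __sched __down(struct semaphore *sem);	/* slow path, below */

static inline void up_sketch(struct semaphore *sem)
{
	/* up() has real work to do only if count was negative,
	 * i.e. somebody is asleep on the semaphore. */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

static inline void down_sketch(struct semaphore *sem)
{
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}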
141
142/*
143 * Perform the "down" function. Return zero if the semaphore was
144 * acquired, negative if the task was signalled out of the wait.
145 *
146 * If called from __down, the return is ignored and the wait loop is
147 * not interruptible. This means that a task waiting on a semaphore
148 * using "down()" cannot be killed until someone does an "up()" on
149 * the semaphore.
150 *
151 * If called from __down_interruptible, the return value gets checked
152 * upon return. If the return value is negative then the task continues
153 * with the negative value in the return register (it can be tested by
154 * the caller).
155 *
156 * Either form may be used in conjunction with "up()".
157 *
158 */
159
160#define DOWN_VAR \
161 struct task_struct *tsk = current; \
162 wait_queue_t wait; \
163 init_waitqueue_entry(&wait, tsk);
164
165#define DOWN_HEAD(task_state) \
166 \
167 \
168 tsk->state = (task_state); \
169 add_wait_queue(&sem->wait, &wait); \
170 \
171 /* \
172 * Ok, we're set up. sem->count is known to be less than zero \
173 * so we must wait. \
174 * \
175 * We can let go the lock for purposes of waiting. \
176 * We re-acquire it after awaking so as to protect \
177 * all semaphore operations. \
178 * \
179 * If "up()" is called before we call waking_non_zero() then \
180 * we will catch it right away. If it is called later then \
181 * we will have to go through a wakeup cycle to catch it. \
182 * \
183 * Multiple waiters contend for the semaphore lock to see \
184 * who gets to gate through and who has to wait some more. \
185 */ \
186 for (;;) {
187
188#define DOWN_TAIL(task_state) \
189 tsk->state = (task_state); \
190 } \
191 tsk->state = TASK_RUNNING; \
192 remove_wait_queue(&sem->wait, &wait);
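
/*
 * Hand-expanded for readability (illustrative), __down() below becomes:
 *
 *	struct task_struct *tsk = current;
 *	wait_queue_t wait;
 *	init_waitqueue_entry(&wait, tsk);
 *	tsk->state = TASK_UNINTERRUPTIBLE;
 *	add_wait_queue(&sem->wait, &wait);
 *	for (;;) {
 *		if (waking_non_zero(sem))
 *			break;
 *		schedule();
 *		tsk->state = TASK_UNINTERRUPTIBLE;
 *	}
 *	tsk->state = TASK_RUNNING;
 *	remove_wait_queue(&sem->wait, &wait);
 */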
193
194void __sched __down(struct semaphore * sem)
195{
196 DOWN_VAR
197 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
198 if (waking_non_zero(sem))
199 break;
200 schedule();
201 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
202}
203
204int __sched __down_interruptible(struct semaphore * sem)
205{
206 int ret = 0;
207 DOWN_VAR
208 DOWN_HEAD(TASK_INTERRUPTIBLE)
209
210 ret = waking_non_zero_interruptible(sem, tsk);
211 if (ret)
212 {
213 if (ret == 1)
214 /* ret != 0 only if we get interrupted -arca */
215 ret = 0;
216 break;
217 }
218 schedule();
219 DOWN_TAIL(TASK_INTERRUPTIBLE)
220 return ret;
221}
222
223int __down_trylock(struct semaphore * sem)
224{
225 return waking_non_zero_trylock(sem);
226}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
new file mode 100644
index 000000000000..1f5bf5d624e4
--- /dev/null
+++ b/arch/xtensa/kernel/setup.c
@@ -0,0 +1,520 @@
1/*
2 * arch/xtensa/kernel/setup.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995 Linus Torvalds
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 * Kevin Chea
14 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
15 */
16
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/proc_fs.h>
21#include <linux/tty.h>
22#include <linux/bootmem.h>
23#include <linux/kernel.h>
24
25#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
26# include <linux/console.h>
27#endif
28
29#ifdef CONFIG_RTC
30# include <linux/timex.h>
31#endif
32
33#ifdef CONFIG_PROC_FS
34# include <linux/seq_file.h>
35#endif
36
37#include <asm/system.h>
38#include <asm/bootparam.h>
39#include <asm/pgtable.h>
40#include <asm/processor.h>
41#include <asm/timex.h>
42#include <asm/platform.h>
43#include <asm/page.h>
44#include <asm/setup.h>
45
46#include <xtensa/config/system.h>
47
48#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
49struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
50#endif
51
52#ifdef CONFIG_BLK_DEV_FD
53extern struct fd_ops no_fd_ops;
54struct fd_ops *fd_ops;
55#endif
56
57#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
58extern struct ide_ops no_ide_ops;
59struct ide_ops *ide_ops;
60#endif
61
62extern struct rtc_ops no_rtc_ops;
63struct rtc_ops *rtc_ops;
64
65#ifdef CONFIG_PC_KEYB
66extern struct kbd_ops no_kbd_ops;
67struct kbd_ops *kbd_ops;
68#endif
69
70#ifdef CONFIG_BLK_DEV_INITRD
71extern void *initrd_start;
72extern void *initrd_end;
73extern void *__initrd_start;
74extern void *__initrd_end;
75int initrd_is_mapped = 0;
76extern int initrd_below_start_ok;
77#endif
78
79unsigned char aux_device_present;
80extern unsigned long loops_per_jiffy;
81
82/* Command line specified as configuration option. */
83
84static char command_line[COMMAND_LINE_SIZE];
85
86#ifdef CONFIG_CMDLINE_BOOL
87static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
88#endif
89
90sysmem_info_t __initdata sysmem;
91
96extern void init_mmu(void);
97
98/*
99 * Boot parameter parsing.
100 *
101 * The Xtensa port uses a list of variable-sized tags to pass data to
102 * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
103 * to be recognised. The list is terminated with a zero-sized
104 * BP_TAG_LAST tag.
105 */
106
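/*
 * Assumed shape of one tag, per asm/bootparam.h (sketch; the walking
 * code below relies only on id, size, and data):
 *
 *	typedef struct bp_tag {
 *		unsigned short id;	 tag identifier (BP_TAG_*)
 *		unsigned short size;	 payload size in bytes
 *		unsigned long  data[0];	 payload; the next tag follows it
 *	} bp_tag_t;
 */
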
107typedef struct tagtable {
108 u32 tag;
109 int (*parse)(const bp_tag_t*);
110} tagtable_t;
111
112#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
113 __attribute__((unused, __section__(".taglist"))) = { tag, fn }
114
115/* parse current tag */
116
117static int __init parse_tag_mem(const bp_tag_t *tag)
118{
119 meminfo_t *mi = (meminfo_t*)(tag->data);
120
121 if (mi->type != MEMORY_TYPE_CONVENTIONAL)
122 return -1;
123
124 if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
125 printk(KERN_WARNING
126		       "Ignoring memory bank 0x%08lx size %luKB\n",
127		       (unsigned long)mi->start,
128		       ((unsigned long)mi->end - (unsigned long)mi->start) >> 10);
129 return -EINVAL;
130 }
131 sysmem.bank[sysmem.nr_banks].type = mi->type;
132 sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
133	sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_MASK;
134 sysmem.nr_banks++;
135
136 return 0;
137}
138
139__tagtable(BP_TAG_MEMORY, parse_tag_mem);
140
141#ifdef CONFIG_BLK_DEV_INITRD
142
143static int __init parse_tag_initrd(const bp_tag_t* tag)
144{
145 meminfo_t* mi;
146 mi = (meminfo_t*)(tag->data);
147 initrd_start = (void*)(mi->start);
148 initrd_end = (void*)(mi->end);
149
150 return 0;
151}
152
153__tagtable(BP_TAG_INITRD, parse_tag_initrd);
154
155#endif /* CONFIG_BLK_DEV_INITRD */
156
157static int __init parse_tag_cmdline(const bp_tag_t* tag)
158{
159 strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE);
160 command_line[COMMAND_LINE_SIZE - 1] = '\0';
161 return 0;
162}
163
164__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
165
166static int __init parse_bootparam(const bp_tag_t* tag)
167{
168 extern tagtable_t __tagtable_begin, __tagtable_end;
169 tagtable_t *t;
170
171 /* Boot parameters must start with a BP_TAG_FIRST tag. */
172
173 if (tag->id != BP_TAG_FIRST) {
174 printk(KERN_WARNING "Invalid boot parameters!\n");
175 return 0;
176 }
177
178 tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);
179
180 /* Parse all tags. */
181
182 while (tag != NULL && tag->id != BP_TAG_LAST) {
183 for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
184 if (tag->id == t->tag) {
185 t->parse(tag);
186 break;
187 }
188 }
189 if (t == &__tagtable_end)
190 printk(KERN_WARNING "Ignoring tag "
191 "0x%08x\n", tag->id);
192 tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
193 }
194
195 return 0;
196}
197
198/*
199 * Initialize architecture. (Early stage)
200 */
201
202void __init init_arch(bp_tag_t *bp_start)
203{
204
205#ifdef CONFIG_BLK_DEV_INITRD
206 initrd_start = &__initrd_start;
207 initrd_end = &__initrd_end;
208#endif
209
210 sysmem.nr_banks = 0;
211
212#ifdef CONFIG_CMDLINE_BOOL
213 strcpy(command_line, default_command_line);
214#endif
215
216 /* Parse boot parameters */
217
218 if (bp_start)
219 parse_bootparam(bp_start);
220
221 if (sysmem.nr_banks == 0) {
222 sysmem.nr_banks = 1;
223 sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
224 sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
225 + PLATFORM_DEFAULT_MEM_SIZE;
226 }
227
228 /* Early hook for platforms */
229
230 platform_init(bp_start);
231
232 /* Initialize MMU. */
233
234 init_mmu();
235}
236
237/*
238 * Initialize system. Setup memory and reserve regions.
239 */
240
241extern char _end;
242extern char _stext;
243extern char _WindowVectors_text_start;
244extern char _WindowVectors_text_end;
245extern char _DebugInterruptVector_literal_start;
246extern char _DebugInterruptVector_text_end;
247extern char _KernelExceptionVector_literal_start;
248extern char _KernelExceptionVector_text_end;
249extern char _UserExceptionVector_literal_start;
250extern char _UserExceptionVector_text_end;
251extern char _DoubleExceptionVector_literal_start;
252extern char _DoubleExceptionVector_text_end;
253
254void __init setup_arch(char **cmdline_p)
255{
256 extern int mem_reserve(unsigned long, unsigned long, int);
257 extern void bootmem_init(void);
258
259 memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
260 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
261 *cmdline_p = command_line;
262
263 /* Reserve some memory regions */
264
265#ifdef CONFIG_BLK_DEV_INITRD
266 if (initrd_start < initrd_end) {
267 initrd_is_mapped = mem_reserve(__pa(initrd_start),
268 __pa(initrd_end), 0);
269 initrd_below_start_ok = 1;
270 } else {
271 initrd_start = 0;
272 }
273#endif
274
275 mem_reserve(__pa(&_stext),__pa(&_end), 1);
276
277 mem_reserve(__pa(&_WindowVectors_text_start),
278 __pa(&_WindowVectors_text_end), 0);
279
280 mem_reserve(__pa(&_DebugInterruptVector_literal_start),
281 __pa(&_DebugInterruptVector_text_end), 0);
282
283 mem_reserve(__pa(&_KernelExceptionVector_literal_start),
284 __pa(&_KernelExceptionVector_text_end), 0);
285
286 mem_reserve(__pa(&_UserExceptionVector_literal_start),
287 __pa(&_UserExceptionVector_text_end), 0);
288
289 mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
290 __pa(&_DoubleExceptionVector_text_end), 0);
291
292 bootmem_init();
293
294 platform_setup(cmdline_p);
295
296
297 paging_init();
298
299#ifdef CONFIG_VT
300# if defined(CONFIG_VGA_CONSOLE)
301 conswitchp = &vga_con;
302# elif defined(CONFIG_DUMMY_CONSOLE)
303 conswitchp = &dummy_con;
304# endif
305#endif
306
307#ifdef CONFIG_PCI
308 platform_pcibios_init();
309#endif
310}
311
312void machine_restart(char * cmd)
313{
314 platform_restart();
315}
316
317void machine_halt(void)
318{
319 platform_halt();
320 while (1);
321}
322
323void machine_power_off(void)
324{
325 platform_power_off();
326 while (1);
327}
328#ifdef CONFIG_PROC_FS
329
330/*
331 * Display some core information through /proc/cpuinfo.
332 */
333
334static int
335c_show(struct seq_file *f, void *slot)
336{
337 /* high-level stuff */
338 seq_printf(f,"processor\t: 0\n"
339 "vendor_id\t: Tensilica\n"
340 "model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n"
341 "core ID\t\t: " XCHAL_CORE_ID "\n"
342 "build ID\t: 0x%x\n"
343 "byte order\t: %s\n"
344 "cpu MHz\t\t: %lu.%02lu\n"
345 "bogomips\t: %lu.%02lu\n",
346 XCHAL_BUILD_UNIQUE_ID,
347 XCHAL_HAVE_BE ? "big" : "little",
348 CCOUNT_PER_JIFFY/(1000000/HZ),
349 (CCOUNT_PER_JIFFY/(10000/HZ)) % 100,
350 loops_per_jiffy/(500000/HZ),
351 (loops_per_jiffy/(5000/HZ)) % 100);
352
353 seq_printf(f,"flags\t\t: "
354#if XCHAL_HAVE_NMI
355 "nmi "
356#endif
357#if XCHAL_HAVE_DEBUG
358 "debug "
359# if XCHAL_HAVE_OCD
360 "ocd "
361# endif
362#endif
363#if XCHAL_HAVE_DENSITY
364 "density "
365#endif
366#if XCHAL_HAVE_BOOLEANS
367 "boolean "
368#endif
369#if XCHAL_HAVE_LOOPS
370 "loop "
371#endif
372#if XCHAL_HAVE_NSA
373 "nsa "
374#endif
375#if XCHAL_HAVE_MINMAX
376 "minmax "
377#endif
378#if XCHAL_HAVE_SEXT
379 "sext "
380#endif
381#if XCHAL_HAVE_CLAMPS
382 "clamps "
383#endif
384#if XCHAL_HAVE_MAC16
385 "mac16 "
386#endif
387#if XCHAL_HAVE_MUL16
388 "mul16 "
389#endif
390#if XCHAL_HAVE_MUL32
391 "mul32 "
392#endif
393#if XCHAL_HAVE_MUL32_HIGH
394 "mul32h "
395#endif
396#if XCHAL_HAVE_FP
397 "fpu "
398#endif
399 "\n");
400
401 /* Registers. */
402 seq_printf(f,"physical aregs\t: %d\n"
403 "misc regs\t: %d\n"
404 "ibreak\t\t: %d\n"
405 "dbreak\t\t: %d\n",
406 XCHAL_NUM_AREGS,
407 XCHAL_NUM_MISC_REGS,
408 XCHAL_NUM_IBREAK,
409 XCHAL_NUM_DBREAK);
410
411
412 /* Interrupt. */
413 seq_printf(f,"num ints\t: %d\n"
414 "ext ints\t: %d\n"
415 "int levels\t: %d\n"
416 "timers\t\t: %d\n"
417 "debug level\t: %d\n",
418 XCHAL_NUM_INTERRUPTS,
419 XCHAL_NUM_EXTINTERRUPTS,
420 XCHAL_NUM_INTLEVELS,
421 XCHAL_NUM_TIMERS,
422 XCHAL_DEBUGLEVEL);
423
424 /* Coprocessors */
425#if XCHAL_HAVE_CP
426 seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
427#else
428 seq_printf(f, "coprocessors\t: none\n");
429#endif
430
431 /* {I,D}{RAM,ROM} and XLMI */
432 seq_printf(f,"inst ROMs\t: %d\n"
433 "inst RAMs\t: %d\n"
434 "data ROMs\t: %d\n"
435 "data RAMs\t: %d\n"
436 "XLMI ports\t: %d\n",
437 XCHAL_NUM_IROM,
438 XCHAL_NUM_IRAM,
439 XCHAL_NUM_DROM,
440 XCHAL_NUM_DRAM,
441 XCHAL_NUM_XLMI);
442
443 /* Cache */
444 seq_printf(f,"icache line size: %d\n"
445 "icache ways\t: %d\n"
446 "icache size\t: %d\n"
447 "icache flags\t: "
448#if XCHAL_ICACHE_LINE_LOCKABLE
449 "lock"
450#endif
451 "\n"
452 "dcache line size: %d\n"
453 "dcache ways\t: %d\n"
454 "dcache size\t: %d\n"
455 "dcache flags\t: "
456#if XCHAL_DCACHE_IS_WRITEBACK
457 "writeback"
458#endif
459#if XCHAL_DCACHE_LINE_LOCKABLE
460 "lock"
461#endif
462 "\n",
463 XCHAL_ICACHE_LINESIZE,
464 XCHAL_ICACHE_WAYS,
465 XCHAL_ICACHE_SIZE,
466 XCHAL_DCACHE_LINESIZE,
467 XCHAL_DCACHE_WAYS,
468 XCHAL_DCACHE_SIZE);
469
470 /* MMU */
471 seq_printf(f,"ASID bits\t: %d\n"
472 "ASID invalid\t: %d\n"
473 "ASID kernel\t: %d\n"
474 "rings\t\t: %d\n"
475 "itlb ways\t: %d\n"
476 "itlb AR ways\t: %d\n"
477 "dtlb ways\t: %d\n"
478 "dtlb AR ways\t: %d\n",
479 XCHAL_MMU_ASID_BITS,
480 XCHAL_MMU_ASID_INVALID,
481 XCHAL_MMU_ASID_KERNEL,
482 XCHAL_MMU_RINGS,
483 XCHAL_ITLB_WAYS,
484 XCHAL_ITLB_ARF_WAYS,
485 XCHAL_DTLB_WAYS,
486 XCHAL_DTLB_ARF_WAYS);
487
488 return 0;
489}
490
491/*
492 * We show only CPU #0 info.
493 */
494static void *
495c_start(struct seq_file *f, loff_t *pos)
496{
497 return (void *) ((*pos == 0) ? (void *)1 : NULL);
498}
499
500static void *
501c_next(struct seq_file *f, void *v, loff_t *pos)
502{
503 return NULL;
504}
505
506static void
507c_stop(struct seq_file *f, void *v)
508{
509}
510
511struct seq_operations cpuinfo_op =
512{
513	.start	= c_start,
514	.next	= c_next,
515	.stop	= c_stop,
516	.show	= c_show,
517};
518
519#endif /* CONFIG_PROC_FS */
520
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
new file mode 100644
index 000000000000..df6e1e17b096
--- /dev/null
+++ b/arch/xtensa/kernel/signal.c
@@ -0,0 +1,713 @@
1// TODO coprocessor stuff
2/*
3 * linux/arch/xtensa/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * Joe Taylor <joe@tensilica.com>
9 * Chris Zankel <chris@zankel.net>
10 *
11 *
12 *
13 */
14
15#include <xtensa/config/core.h>
16#include <xtensa/hal.h>
17#include <linux/sched.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/kernel.h>
22#include <linux/signal.h>
23#include <linux/errno.h>
24#include <linux/wait.h>
25#include <linux/ptrace.h>
26#include <linux/unistd.h>
27#include <linux/stddef.h>
28#include <linux/personality.h>
29#include <asm/ucontext.h>
30#include <asm/uaccess.h>
31#include <asm/pgtable.h>
32#include <asm/cacheflush.h>
33
34#define DEBUG_SIG 0
35
36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
37
38asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options,
39 struct rusage * ru);
40asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
41
42extern struct task_struct *coproc_owners[];
43
44
45/*
46 * Atomically swap in the new signal mask, and wait for a signal.
47 */
48
49int sys_sigsuspend(struct pt_regs *regs)
50{
51 old_sigset_t mask = (old_sigset_t) regs->areg[3];
52 sigset_t saveset;
53
54 mask &= _BLOCKABLE;
55 spin_lock_irq(&current->sighand->siglock);
56 saveset = current->blocked;
57 siginitset(&current->blocked, mask);
58 recalc_sigpending();
59 spin_unlock_irq(&current->sighand->siglock);
60
61 regs->areg[2] = -EINTR;
62 while (1) {
63 current->state = TASK_INTERRUPTIBLE;
64 schedule();
65 if (do_signal(regs, &saveset))
66 return -EINTR;
67 }
68}
69
70asmlinkage int
71sys_rt_sigsuspend(struct pt_regs *regs)
72{
73 sigset_t *unewset = (sigset_t *) regs->areg[4];
74 size_t sigsetsize = (size_t) regs->areg[3];
75 sigset_t saveset, newset;
76 /* XXX: Don't preclude handling different sized sigset_t's. */
77 if (sigsetsize != sizeof(sigset_t))
78 return -EINVAL;
79
80 if (copy_from_user(&newset, unewset, sizeof(newset)))
81 return -EFAULT;
82 sigdelsetmask(&newset, ~_BLOCKABLE);
83 spin_lock_irq(&current->sighand->siglock);
84 saveset = current->blocked;
85 current->blocked = newset;
86 recalc_sigpending();
87 spin_unlock_irq(&current->sighand->siglock);
88
89 regs->areg[2] = -EINTR;
90 while (1) {
91 current->state = TASK_INTERRUPTIBLE;
92 schedule();
93 if (do_signal(regs, &saveset))
94 return -EINTR;
95 }
96}
97
98asmlinkage int
99sys_sigaction(int sig, const struct old_sigaction *act,
100 struct old_sigaction *oact)
101{
102 struct k_sigaction new_ka, old_ka;
103 int ret;
104
105 if (act) {
106 old_sigset_t mask;
107 if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
108 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
109 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
110 return -EFAULT;
111 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
112 __get_user(mask, &act->sa_mask);
113 siginitset(&new_ka.sa.sa_mask, mask);
114 }
115
116 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
117
118 if (!ret && oact) {
119 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
120 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
121 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
122 return -EFAULT;
123 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
124 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
125 }
126
127 return ret;
128}
129
130asmlinkage int
131sys_sigaltstack(struct pt_regs *regs)
132{
133 const stack_t *uss = (stack_t *) regs->areg[4];
134 stack_t *uoss = (stack_t *) regs->areg[3];
135
136 if (regs->depc > 64)
137		panic("Double exception sys_sigaltstack\n");
138
139
140 return do_sigaltstack(uss, uoss, regs->areg[1]);
141}
142
143
144/*
145 * Do a signal return; undo the signal stack.
146 */
147
148struct sigframe
149{
150 struct sigcontext sc;
151 struct _cpstate cpstate;
152 unsigned long extramask[_NSIG_WORDS-1];
153 unsigned char retcode[6];
154 unsigned int reserved[4]; /* Reserved area for chaining */
155 unsigned int window[4]; /* Window of 4 registers for initial context */
156};
157
158struct rt_sigframe
159{
160 struct siginfo info;
161 struct ucontext uc;
162 struct _cpstate cpstate;
163 unsigned char retcode[6];
164 unsigned int reserved[4]; /* Reserved area for chaining */
165 unsigned int window[4]; /* Window of 4 registers for initial context */
166};
167
168extern void release_all_cp (struct task_struct *);
169
170
171// FIXME restore_cpextra
172static inline int
173restore_cpextra (struct _cpstate *buf)
174{
175#if 0
176 /* The signal handler may have used coprocessors in which
177 * case they are still enabled. We disable them to force a
178 * reloading of the original task's CP state by the lazy
179 * context-switching mechanisms of CP exception handling.
180 * Also, we essentially discard any coprocessor state that the
181 * signal handler created. */
182
183 struct task_struct *tsk = current;
184 release_all_cp(tsk);
185 return __copy_from_user(tsk->thread.cpextra, buf, TOTAL_CPEXTRA_SIZE);
186#endif
187 return 0;
188}
189
190/* Note: We don't copy the double-exception 'tregs'; the double exception must be finished first, before we return to the signal handler. That handler might cause another double exception, but we should be fine: the situation is the same as if we had returned to the signal handler and taken an interrupt immediately...
191 */
192
193
194static int
195restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
196{
197 struct thread_struct *thread;
198 unsigned int err = 0;
199 unsigned long ps;
200 struct _cpstate *buf;
201
202#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
203 COPY(pc);
204 COPY(depc);
205 COPY(wmask);
206 COPY(lbeg);
207 COPY(lend);
208 COPY(lcount);
209 COPY(sar);
210 COPY(windowbase);
211 COPY(windowstart);
212#undef COPY
213
214 /* For PS, restore only PS.CALLINC.
215 * Assume that all other bits are either the same as for the signal
216 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
217 */
218 err |= __get_user(ps, &sc->sc_ps);
219 regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK)
220 | (ps & XCHAL_PS_CALLINC_MASK);
221
222 /* Additional corruption checks */
223
224 if ((regs->windowbase >= (XCHAL_NUM_AREGS/4))
225 || ((regs->windowstart & ~((1<<(XCHAL_NUM_AREGS/4)) - 1)) != 0) )
226 err = 1;
227 if ((regs->lcount > 0)
228 && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
229 err = 1;
230
231 /* Restore extended register state.
232 * See struct thread_struct in processor.h.
233 */
234 thread = &current->thread;
235
236 err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
237 err |= __get_user(buf, &sc->sc_cpstate);
238 if (buf) {
239 if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
240 goto badframe;
241 err |= restore_cpextra(buf);
242 }
243
244 regs->syscall = -1; /* disable syscall checks */
245 return err;
246
247badframe:
248 return 1;
249}
250
251static inline void
252flush_my_cpstate(struct task_struct *tsk)
253{
254 unsigned long flags;
255 local_irq_save(flags);
256
257#if 0 // FIXME
258 for (i = 0; i < XCHAL_CP_NUM; i++) {
259 if (tsk == coproc_owners[i]) {
260 xthal_validate_cp(i);
261 xthal_save_cpregs(tsk->thread.cpregs_ptr[i], i);
262
263 /* Invalidate and "disown" the cp to allow
264 * callers the chance to reset cp state in the
265 * task_struct. */
266
267 xthal_invalidate_cp(i);
268 coproc_owners[i] = 0;
269 }
270 }
271#endif
272 local_irq_restore(flags);
273}
274
275/* Return codes:
276 0: nothing saved
277 1: stuff to save, successful
278 -1: stuff to save, error happened
279*/
280static int
281save_cpextra (struct _cpstate *buf)
282{
283#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0)
284 return 0;
285#else
286
287 /* FIXME: If a task has never used a coprocessor, there is
288 * no need to save and restore anything. Tracking this
289 * information would allow us to optimize this section.
290 * Perhaps we can use current->used_math or (current->flags &
291 * PF_USEDFPU) or define a new field in the thread
292 * structure. */
293
294 /* We flush any live, task-owned cp state to the task_struct,
295 * then copy it all to the sigframe. Then we clear all
296 * cp/extra state in the task_struct, effectively
297 * clearing/resetting all cp/extra state for the signal
298 * handler (cp-exception handling will load these new values
299 * into the cp/extra registers.) This step is important for
300 * things like a floating-point cp, where the OS must reset
301 * the FCR to the default rounding mode. */
302
303 int err = 0;
304 struct task_struct *tsk = current;
305
306 flush_my_cpstate(tsk);
307 /* Note that we just copy everything: 'extra' and 'cp' state together.*/
308 err |= __copy_to_user(buf, tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
309 memset(tsk->thread.cp_save, 0, XTENSA_CP_EXTRA_SIZE);
310
311#if (XTENSA_CP_EXTRA_SIZE == 0)
312#error Sanity check on memset above, cpextra_size should not be zero.
313#endif
314
315 return err ? -1 : 1;
316#endif
317}
318
319static int
320setup_sigcontext(struct sigcontext *sc, struct _cpstate *cpstate,
321 struct pt_regs *regs, unsigned long mask)
322{
323 struct thread_struct *thread;
324 int err = 0;
325
326//printk("setup_sigcontext\n");
327#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
328 COPY(pc);
329 COPY(ps);
330 COPY(depc);
331 COPY(wmask);
332 COPY(lbeg);
333 COPY(lend);
334 COPY(lcount);
335 COPY(sar);
336 COPY(windowbase);
337 COPY(windowstart);
338#undef COPY
339
340 /* Save extended register state.
341 * See struct thread_struct in processor.h.
342 */
343 thread = &current->thread;
344 err |= __copy_to_user (sc->sc_areg, regs->areg, XCHAL_NUM_AREGS * 4);
345 err |= save_cpextra(cpstate);
346 err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
347 /* non-iBCS2 extensions.. */
348 err |= __put_user(mask, &sc->oldmask);
349
350 return err;
351}
352
353asmlinkage int sys_sigreturn(struct pt_regs *regs)
354{
355 struct sigframe *frame = (struct sigframe *)regs->areg[1];
356 sigset_t set;
357 if (regs->depc > 64)
358 panic ("Double exception sys_sigreturn\n");
359
360 if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
361 goto badframe;
362
363 if (__get_user(set.sig[0], &frame->sc.oldmask)
364 || (_NSIG_WORDS > 1
365 && __copy_from_user(&set.sig[1], &frame->extramask,
366 sizeof(frame->extramask))))
367 goto badframe;
368
369 sigdelsetmask(&set, ~_BLOCKABLE);
370
371 spin_lock_irq(&current->sighand->siglock);
372 current->blocked = set;
373 recalc_sigpending();
374 spin_unlock_irq(&current->sighand->siglock);
375
376 if (restore_sigcontext(regs, &frame->sc))
377 goto badframe;
378 return regs->areg[2];
379
380badframe:
381 force_sig(SIGSEGV, current);
382 return 0;
383}
384
385asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
386{
387 struct rt_sigframe *frame = (struct rt_sigframe *)regs->areg[1];
388 sigset_t set;
389 stack_t st;
390 int ret;
391 if (regs->depc > 64)
392 {
393 printk("!!!!!!! DEPC !!!!!!!\n");
394 return 0;
395 }
396
397 if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
398 goto badframe;
399
400 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
401 goto badframe;
402
403 sigdelsetmask(&set, ~_BLOCKABLE);
404 spin_lock_irq(&current->sighand->siglock);
405 current->blocked = set;
406 recalc_sigpending();
407 spin_unlock_irq(&current->sighand->siglock);
408
409 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
410 goto badframe;
411 ret = regs->areg[2];
412
413 if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
414 goto badframe;
415 /* It is more difficult to avoid calling this function than to
416 call it and ignore errors. */
417 do_sigaltstack(&st, NULL, regs->areg[1]);
418
419 return ret;
420
421badframe:
422 force_sig(SIGSEGV, current);
423 return 0;
424}
425
426/*
427 * Set up a signal frame.
428 */
429
430/*
431 * Determine which stack to use..
432 */
433static inline void *
434get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
435{
436	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp))
437 sp = current->sas_ss_sp + current->sas_ss_size;
438
439	return (void *)((sp - frame_size) & -16ul);	/* 16-byte aligned */
440}
441
442#define USE_SIGRETURN 0
443#define USE_RT_SIGRETURN 1
444
445static int
446gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
447{
448 unsigned int retcall;
449 int err = 0;
450
451#if 0
452 /* Ignoring SA_RESTORER for now; it's supposed to be obsolete,
453 * and the xtensa glibc doesn't use it.
454 */
455 if (ka->sa.sa_flags & SA_RESTORER) {
456 regs->pr = (unsigned long) ka->sa.sa_restorer;
457 } else
458#endif /* 0 */
459 {
460
461#if (__NR_sigreturn > 255) || (__NR_rt_sigreturn > 255)
462
463/* The 12-bit immediate is really split up within the 24-bit MOVI
464 * instruction. As long as the above system call numbers fit within
465 * 8-bits, the following code works fine. See the Xtensa ISA for
466 * details.
467 */
468
469#error Generating the MOVI instruction below breaks!
470#endif
471
472 retcall = use_rt_sigreturn ? __NR_rt_sigreturn : __NR_sigreturn;
473
474#ifdef __XTENSA_EB__ /* Big Endian version */
475 /* Generate instruction: MOVI a2, retcall */
476 err |= __put_user(0x22, &codemem[0]);
477 err |= __put_user(0x0a, &codemem[1]);
478 err |= __put_user(retcall, &codemem[2]);
479 /* Generate instruction: SYSCALL */
480 err |= __put_user(0x00, &codemem[3]);
481 err |= __put_user(0x05, &codemem[4]);
482 err |= __put_user(0x00, &codemem[5]);
483
484#elif defined __XTENSA_EL__ /* Little Endian version */
485 /* Generate instruction: MOVI a2, retcall */
486 err |= __put_user(0x22, &codemem[0]);
487 err |= __put_user(0xa0, &codemem[1]);
488 err |= __put_user(retcall, &codemem[2]);
489 /* Generate instruction: SYSCALL */
490 err |= __put_user(0x00, &codemem[3]);
491 err |= __put_user(0x50, &codemem[4]);
492 err |= __put_user(0x00, &codemem[5]);
493#else
494#error Must use compiler for Xtensa processors.
495#endif
496 }
497
498 /* Flush generated code out of the data cache */
499
500 if (err == 0)
501 __flush_invalidate_cache_range((unsigned long)codemem, 6UL);
502
503 return err;
504}
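
/*
 * Worked example (little-endian build; suppose __NR_sigreturn were
 * 0x77): the six bytes emitted above would be
 *
 *	22 a0 77	movi	a2, 0x77
 *	00 50 00	syscall
 *
 * Only imm[7:0] of the MOVI immediate is emitted (third byte), which
 * is why the #error above trips for syscall numbers over 255.
 */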
505
506static void
507set_thread_state(struct pt_regs *regs, void *stack, unsigned char *retaddr,
508 void *handler, unsigned long arg1, void *arg2, void *arg3)
509{
510 /* Set up registers for signal handler */
511 start_thread(regs, (unsigned long) handler, (unsigned long) stack);
512
513 /* Set up a stack frame for a call4
514 * Note: PS.CALLINC is set to one by start_thread
515 */
516 regs->areg[4] = (((unsigned long) retaddr) & 0x3fffffff) | 0x40000000;
517 regs->areg[6] = arg1;
518 regs->areg[7] = (unsigned long) arg2;
519 regs->areg[8] = (unsigned long) arg3;
520}
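
/*
 * Note on areg[4] above: ORing in 0x40000000 tags the return address
 * as a call4 return (window increment 1 in the top two bits), the same
 * encoding MAKE_RA_FOR_CALL(..., 0x1) produces in process.c, so the
 * handler's RETW lands in the generated retcode stub.
 */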
521
522static void setup_frame(int sig, struct k_sigaction *ka,
523 sigset_t *set, struct pt_regs *regs)
524{
525 struct sigframe *frame;
526 int err = 0;
527 int signal;
528
529 frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
530 if (regs->depc > 64)
531 {
532 printk("!!!!!!! DEPC !!!!!!!\n");
533 return;
534 }
535
536
537 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
538 goto give_sigsegv;
539
540 signal = current_thread_info()->exec_domain
541 && current_thread_info()->exec_domain->signal_invmap
542 && sig < 32
543 ? current_thread_info()->exec_domain->signal_invmap[sig]
544 : sig;
545
546 err |= setup_sigcontext(&frame->sc, &frame->cpstate, regs, set->sig[0]);
547
548 if (_NSIG_WORDS > 1) {
549 err |= __copy_to_user(frame->extramask, &set->sig[1],
550 sizeof(frame->extramask));
551 }
552
553 /* Create sys_sigreturn syscall in stack frame */
554 err |= gen_return_code(frame->retcode, USE_SIGRETURN);
555
556 if (err)
557 goto give_sigsegv;
558
559 /* Create signal handler execution context.
560 * Return context not modified until this point.
561 */
562 set_thread_state(regs, frame, frame->retcode,
563 ka->sa.sa_handler, signal, &frame->sc, NULL);
564
565	/* Set the access mode to USER_DS. The name is a historical
566	 * leftover, but the mode is still checked in uaccess.h.
567 */
568 set_fs(USER_DS);
569
570
571#if DEBUG_SIG
572 printk("SIG deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
573 current->comm, current->pid, signal, frame, regs->pc);
574#endif
575
576 return;
577
578give_sigsegv:
579 if (sig == SIGSEGV)
580 ka->sa.sa_handler = SIG_DFL;
581 force_sig(SIGSEGV, current);
582}
583
584static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
585 sigset_t *set, struct pt_regs *regs)
586{
587 struct rt_sigframe *frame;
588 int err = 0;
589 int signal;
590
591 frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
592 if (regs->depc > 64)
593		panic("Double exception setup_rt_frame\n");
594
595 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
596 goto give_sigsegv;
597
598 signal = current_thread_info()->exec_domain
599 && current_thread_info()->exec_domain->signal_invmap
600 && sig < 32
601 ? current_thread_info()->exec_domain->signal_invmap[sig]
602 : sig;
603
604 err |= copy_siginfo_to_user(&frame->info, info);
605
606 /* Create the ucontext. */
607 err |= __put_user(0, &frame->uc.uc_flags);
608 err |= __put_user(0, &frame->uc.uc_link);
609 err |= __put_user((void *)current->sas_ss_sp,
610 &frame->uc.uc_stack.ss_sp);
611 err |= __put_user(sas_ss_flags(regs->areg[1]),
612 &frame->uc.uc_stack.ss_flags);
613 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
614 err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate,
615 regs, set->sig[0]);
616 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
617
618 /* Create sys_rt_sigreturn syscall in stack frame */
619 err |= gen_return_code(frame->retcode, USE_RT_SIGRETURN);
620
621 if (err)
622 goto give_sigsegv;
623
624 /* Create signal handler execution context.
625 * Return context not modified until this point.
626 */
627 set_thread_state(regs, frame, frame->retcode,
628 ka->sa.sa_handler, signal, &frame->info, &frame->uc);
629
630	/* Set the access mode to USER_DS. The name is a historical
631	 * leftover, but the mode is still checked in uaccess.h.
632 */
633 set_fs(USER_DS);
634
635#if DEBUG_SIG
636 printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
637 current->comm, current->pid, signal, frame, regs->pc);
638#endif
639
640 return;
641
642give_sigsegv:
643 if (sig == SIGSEGV)
644 ka->sa.sa_handler = SIG_DFL;
645 force_sig(SIGSEGV, current);
646}
647
648
649
650/*
651 * Note that 'init' is a special process: it doesn't get signals it doesn't
652 * want to handle. Thus you cannot kill init with SIGKILL, not even by
653 * mistake.
654 *
655 * Note that we go through the signals twice: once to check the signals that
656 * the kernel can handle, and then we build all the user-level signal handling
657 * stack-frames in one go after that.
658 */
659int do_signal(struct pt_regs *regs, sigset_t *oldset)
660{
661 siginfo_t info;
662 int signr;
663 struct k_sigaction ka;
664
665 if (!oldset)
666 oldset = &current->blocked;
667
668 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
669
670 /* Are we from a system call? */
671 if (regs->syscall >= 0) {
672 /* If so, check system call restarting.. */
673 switch (regs->areg[2]) {
674 case ERESTARTNOHAND:
675 case ERESTART_RESTARTBLOCK:
676 regs->areg[2] = -EINTR;
677 break;
678
679 case ERESTARTSYS:
680 if (!(ka.sa.sa_flags & SA_RESTART)) {
681 regs->areg[2] = -EINTR;
682 break;
683 }
684 /* fallthrough */
685 case ERESTARTNOINTR:
686 regs->areg[2] = regs->syscall;
687			regs->pc -= 3;	/* rewind over the 3-byte syscall insn */
688 }
689 }
690
691 if (signr == 0)
692 return 0; /* no signals delivered */
693
694 /* Whee! Actually deliver the signal. */
695
696 /* Set up the stack frame */
697 if (ka.sa.sa_flags & SA_SIGINFO)
698 setup_rt_frame(signr, &ka, &info, oldset, regs);
699 else
700 setup_frame(signr, &ka, oldset, regs);
701
702 if (ka.sa.sa_flags & SA_ONESHOT)
703 ka.sa.sa_handler = SIG_DFL;
704
705 if (!(ka.sa.sa_flags & SA_NODEFER)) {
706 spin_lock_irq(&current->sighand->siglock);
707 sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
708 sigaddset(&current->blocked, signr);
709 recalc_sigpending();
710 spin_unlock_irq(&current->sighand->siglock);
711 }
712 return 1;
713}
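For clarity, the restart policy above reduces to a small predicate; a minimal
sketch (hypothetical helper, mirroring the switch in do_signal above):

	/* Restart the interrupted syscall? Mirrors the cases above:
	 * ERESTARTNOINTR always restarts, ERESTARTSYS only if the
	 * handler asked for SA_RESTART, everything else becomes -EINTR. */
	static int syscall_should_restart(unsigned long a2,
					  unsigned long sa_flags)
	{
		switch (a2) {
		case ERESTARTNOINTR:
			return 1;
		case ERESTARTSYS:
			return (sa_flags & SA_RESTART) != 0;
		default:
			return 0;
		}
	}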
diff --git a/arch/xtensa/kernel/syscalls.c b/arch/xtensa/kernel/syscalls.c
new file mode 100644
index 000000000000..abc8ed6c7026
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls.c
@@ -0,0 +1,418 @@
1/*
2 * arch/xtensa/kernel/syscalls.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 * Copyright (C) 2000 Silicon Graphics, Inc.
10 * Copyright (C) 1995 - 2000 by Ralf Baechle
11 *
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Chris Zankel <chris@zankel.net>
15 * Kevin Chea
16 *
17 */
18
19#define DEBUG 0
20
21#include <linux/config.h>
22#include <linux/linkage.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/mman.h>
27#include <linux/sched.h>
28#include <linux/file.h>
29#include <linux/slab.h>
30#include <linux/utsname.h>
31#include <linux/unistd.h>
32#include <linux/stringify.h>
33#include <linux/syscalls.h>
34#include <linux/sem.h>
35#include <linux/msg.h>
36#include <linux/shm.h>
37#include <linux/errno.h>
38#include <asm/ptrace.h>
39#include <asm/signal.h>
40#include <asm/uaccess.h>
41#include <asm/hardirq.h>
42#include <asm/mman.h>
43#include <asm/shmparam.h>
44#include <asm/page.h>
45#include <asm/ipc.h>
46
47extern void do_syscall_trace(void);
48typedef int (*syscall_t)(void *a0,...);
49extern int (*do_syscalls)(struct pt_regs *regs, syscall_t fun,
50 int narg);
51extern syscall_t sys_call_table[];
52extern unsigned char sys_narg_table[];
53
54/*
55 * sys_pipe() is the normal C calling standard for creating a pipe. It's not
56 * the way Unix traditionally does this, though.
57 */
58
59int sys_pipe(int __user *userfds)
60{
61 int fd[2];
62 int error;
63
64 error = do_pipe(fd);
65 if (!error) {
66 if (copy_to_user(userfds, fd, 2 * sizeof(int)))
67 error = -EFAULT;
68 }
69 return error;
70}
71
72/*
73 * Common code for old and new mmaps.
74 */
75
76static inline long do_mmap2(unsigned long addr, unsigned long len,
77 unsigned long prot, unsigned long flags,
78 unsigned long fd, unsigned long pgoff)
79{
80 int error = -EBADF;
81 struct file * file = NULL;
82
83 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
84 if (!(flags & MAP_ANONYMOUS)) {
85 file = fget(fd);
86 if (!file)
87 goto out;
88 }
89
90 down_write(&current->mm->mmap_sem);
91 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
92 up_write(&current->mm->mmap_sem);
93
94 if (file)
95 fput(file);
96out:
97 return error;
98}
99
100unsigned long old_mmap(unsigned long addr, size_t len, int prot,
101 int flags, int fd, off_t offset)
102{
103 return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
104}
105
106long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
107 unsigned long flags, unsigned long fd, unsigned long pgoff)
108{
109 return do_mmap2(addr, len, prot, flags, fd, pgoff);
110}
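The only difference between the two entry points is the unit of the file
offset; a standalone check of the conversion, assuming 4 KiB pages
(PAGE_SHIFT == 12) purely for illustration:

	#include <assert.h>

	/* old_mmap() takes the offset in bytes, sys_mmap2() in pages:
	 * a 1 MiB byte offset becomes page offset 256 with 4 KiB pages. */
	int main(void)
	{
		unsigned long byte_off = 1UL << 20;
		assert((byte_off >> 12) == 256);
		return 0;
	}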
111
112int sys_fork(struct pt_regs *regs)
113{
114 return do_fork(SIGCHLD, regs->areg[1], regs, 0, NULL, NULL);
115}
116
117int sys_vfork(struct pt_regs *regs)
118{
119 return do_fork(CLONE_VFORK|CLONE_VM|SIGCHLD, regs->areg[1],
120 regs, 0, NULL, NULL);
121}
122
123int sys_clone(struct pt_regs *regs)
124{
125 unsigned long clone_flags;
126 unsigned long newsp;
127 int __user *parent_tidptr, *child_tidptr;
128 clone_flags = regs->areg[4];
129 newsp = regs->areg[3];
130 parent_tidptr = (int __user *)regs->areg[5];
131 child_tidptr = (int __user *)regs->areg[6];
132 if (!newsp)
133 newsp = regs->areg[1];
134 return do_fork(clone_flags,newsp,regs,0,parent_tidptr,child_tidptr);
135}
136
137/*
138 * sys_execve() executes a new program.
139 */
140
141int sys_execve(struct pt_regs *regs)
142{
143 int error;
144 char * filename;
145
146 filename = getname((char *) (long)regs->areg[5]);
147 error = PTR_ERR(filename);
148 if (IS_ERR(filename))
149 goto out;
150 error = do_execve(filename, (char **) (long)regs->areg[3],
151 (char **) (long)regs->areg[4], regs);
152 putname(filename);
153
154out:
155 return error;
156}
157
158int sys_uname(struct old_utsname * name)
159{
160 if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
161 return 0;
162 return -EFAULT;
163}
164
165int sys_olduname(struct oldold_utsname * name)
166{
167 int error;
168
169 if (!name)
170 return -EFAULT;
171 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
172 return -EFAULT;
173
174 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
175 error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
176 error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
177 error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
178 error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
179 error -= __put_user(0,name->release+__OLD_UTS_LEN);
180 error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
181 error -= __put_user(0,name->version+__OLD_UTS_LEN);
182 error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
183 error -= __put_user(0,name->machine+__OLD_UTS_LEN);
184
185 return error ? -EFAULT : 0;
186}
187
188
189/*
190 * Build the string table for the builtin "poor man's strace".
191 */
192
193#if DEBUG
194#define SYSCALL(fun, narg) #fun,
195static char *sfnames[] = {
196#include "syscalls.h"
197};
198#undef SYSCALL
199#endif
200
201void system_call (struct pt_regs *regs)
202{
203 syscall_t syscall;
204 unsigned long parm0, parm1, parm2, parm3, parm4, parm5;
205 int nargs, res;
206 unsigned int syscallnr;
207 int ps;
208
209#if DEBUG
210 int i;
211 unsigned long parms[6];
212 char *sysname;
213#endif
214
215 regs->syscall = regs->areg[2];
216
217 do_syscall_trace();
218
219 /* Have to load after syscall_trace because strace
220 * sometimes changes regs->syscall.
221 */
222 syscallnr = regs->syscall;
223
224 parm0 = parm1 = parm2 = parm3 = parm4 = parm5 = 0;
225
226 /* Restore interrupt level to syscall invoker's.
227 * If this were in assembly, we wouldn't disable
228 * interrupts in the first place:
229 */
230 local_save_flags (ps);
231 local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
232 (regs->ps & XCHAL_PS_INTLEVEL_MASK) );
233
234 if (syscallnr > __NR_Linux_syscalls) {
235 regs->areg[2] = -ENOSYS;
236 return;
237 }
238
239 syscall = sys_call_table[syscallnr];
240 nargs = sys_narg_table[syscallnr];
241
242 if (syscall == NULL) {
243 regs->areg[2] = -ENOSYS;
244 return;
245 }
246
247 /* There shouldn't be more than six arguments in the table! */
248
249 if (nargs > 6)
250 panic("Internal error - too many syscall arguments (%d)!\n",
251 nargs);
252
253 /* Linux takes system-call arguments in registers. The ABI
254 * and Xtensa software conventions require the system-call
255 * number in a2. If an argument exists in a2, we move it to
256 * the next available register. Note that for improved
257 * efficiency, we do NOT shift all parameters down one
258 * register to maintain the original order.
259 *
260 * At best case (zero arguments), we just write the syscall
261 * number to a2. At worst case (1 to 6 arguments), we move
262 * the argument in a2 to the next available register, then
263 * write the syscall number to a2.
264 *
265 * For clarity, the following truth table enumerates all
266 * possibilities.
267 *
268 * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5
269 * --------- -------------- ----------------------------------
270 * 0 a2
271 * 1 a2 a3
272 * 2 a2 a4, a3
273 * 3 a2 a5, a3, a4
274 * 4 a2 a6, a3, a4, a5
275 * 5 a2 a7, a3, a4, a5, a6
276 * 6 a2 a8, a3, a4, a5, a6, a7
277 */
278 if (nargs) {
279 parm0 = regs->areg[nargs+2];
280 parm1 = regs->areg[3];
281 parm2 = regs->areg[4];
282 parm3 = regs->areg[5];
283 parm4 = regs->areg[6];
284 parm5 = regs->areg[7];
285 } else /* nargs == 0 */
286 parm0 = (unsigned long) regs;
287
288#if DEBUG
289 parms[0] = parm0;
290 parms[1] = parm1;
291 parms[2] = parm2;
292 parms[3] = parm3;
293 parms[4] = parm4;
294 parms[5] = parm5;
295
296 sysname = sfnames[syscallnr];
297 if (strncmp(sysname, "sys_", 4) == 0)
298 sysname = sysname + 4;
299
300 printk("\017SYSCALL:I:%x:%d:%s %s(", regs->pc, current->pid,
301 current->comm, sysname);
302 for (i = 0; i < nargs; i++)
303 printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
304 printk(")\n");
305#endif
306
307 res = syscall((void *)parm0, parm1, parm2, parm3, parm4, parm5);
308
309#if DEBUG
310 printk("\017SYSCALL:O:%d:%s %s(",current->pid, current->comm, sysname);
311 for (i = 0; i < nargs; i++)
312 printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
313 if (res < 4096)
314 printk(") = %d\n", res);
315 else
316 printk(") = %#x\n", res);
317#endif /* DEBUG */
318
319 regs->areg[2] = res;
320 do_syscall_trace();
321}
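The table above collapses to a simple rule: for nargs > 0 the first argument
was moved from a2 into a(nargs + 2), while a3..a7 keep arguments 1..5. A
hedged restatement of the parm0 selection (illustrative helper, not part of
the patch):

	/* Where does arg0 live for an nargs-argument syscall? */
	static unsigned long syscall_arg0(struct pt_regs *regs, int nargs)
	{
		/* zero-argument entries receive the pt_regs pointer instead */
		return nargs ? regs->areg[nargs + 2] : (unsigned long) regs;
	}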
322
323/*
324 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
325 *
326 * This is really horribly ugly.
327 */
328
329int sys_ipc (uint call, int first, int second,
330 int third, void __user *ptr, long fifth)
331{
332 int version, ret;
333
334 version = call >> 16; /* hack for backward compatibility */
335 call &= 0xffff;
336 ret = -ENOSYS;
337
338 switch (call) {
339 case SEMOP:
340 ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
341 second, NULL);
342 break;
343
344 case SEMTIMEDOP:
345 ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
346 second, (const struct timespec *) fifth);
347 break;
348
349 case SEMGET:
350 ret = sys_semget (first, second, third);
351 break;
352
353 case SEMCTL: {
354 union semun fourth;
355
356 if (ptr && !get_user(fourth.__pad, (void *__user *) ptr))
357 ret = sys_semctl (first, second, third, fourth);
358 break;
359 }
360
361 case MSGSND:
362 ret = sys_msgsnd (first, (struct msgbuf __user*) ptr,
363 second, third);
364 break;
365
366 case MSGRCV:
367 switch (version) {
368 case 0: {
369 struct ipc_kludge tmp;
370
371 if (ptr && !copy_from_user(&tmp,
372 (struct ipc_kludge *) ptr,
373 sizeof (tmp)))
374 ret = sys_msgrcv (first, tmp.msgp, second,
375 tmp.msgtyp, third);
376 break;
377 }
378
379 default:
380 ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
381 second, 0, third);
382 break;
383 }
384 break;
385
386 case MSGGET:
387 ret = sys_msgget ((key_t) first, second);
388 break;
389
390 case MSGCTL:
391 ret = sys_msgctl (first, second, (struct msqid_ds __user*) ptr);
392 break;
393
394 case SHMAT: {
395 ulong raddr;
396 ret = do_shmat (first, (char __user *) ptr, second, &raddr);
397
398 if (!ret)
399 ret = put_user (raddr, (ulong __user *) third);
400
401 break;
402 }
403
404 case SHMDT:
405 ret = sys_shmdt ((char __user *)ptr);
406 break;
407
408 case SHMGET:
409 ret = sys_shmget (first, second, third);
410 break;
411
412 case SHMCTL:
413 ret = sys_shmctl (first, second, (struct shmid_ds __user*) ptr);
414 break;
415 }
416 return ret;
417}
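This demultiplexing mirrors how a C library funnels the SysV IPC wrappers
through the single ipc(2) entry point; a hypothetical userspace sketch (the
wrapper name and selector macro are assumptions, the selector value matching
SEMGET in <linux/ipc.h>):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <sys/types.h>

	#define MY_SEMGET 2	/* call selector, as SEMGET in <linux/ipc.h> */

	/* Hypothetical libc-style wrapper built on the ipc(2) multiplexer. */
	int my_semget(key_t key, int nsems, int semflg)
	{
		return syscall(SYS_ipc, MY_SEMGET, key, nsems, semflg,
			       (void *) 0, 0L);
	}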
418
diff --git a/arch/xtensa/kernel/syscalls.h b/arch/xtensa/kernel/syscalls.h
new file mode 100644
index 000000000000..5b3f75f50feb
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls.h
@@ -0,0 +1,248 @@
1/*
2 * arch/xtensa/kernel/syscalls.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Changes by Joe Taylor <joe@tensilica.com>
12 */
13
14/*
15 * This file is being included twice - once to build a list of all
16 * syscalls and once to build a table of how many arguments each syscall
17 * accepts. Syscalls that receive a pointer to the saved registers are
18 * marked as having zero arguments.
19 *
20 * The binary compatibility calls are in a separate list.
21 *
22 * Entry '0' used to be system_call. It's removed to disable indirect
23 * system calls for now so user tasks can't recurse. See mips'
24 * sys_syscall for a comparable example.
25 */
26
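A minimal sketch of the two expansions (the exact table definitions live
elsewhere in the architecture code and are assumed here; syscalls.c shows the
same pattern for its debug name table):

	/* First inclusion: build the handler table. */
	#define SYSCALL(fun, narg) (syscall_t) fun,
	syscall_t sys_call_table[] = {
	#include "syscalls.h"
	};
	#undef SYSCALL

	/* Second inclusion: build the argument-count table. */
	#define SYSCALL(fun, narg) narg,
	unsigned char sys_narg_table[] = {
	#include "syscalls.h"
	};
	#undef SYSCALL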
27SYSCALL(0, 0) /* 00 */
28
29SYSCALL(sys_exit, 1)
30SYSCALL(sys_fork, 0)
31SYSCALL(sys_read, 3)
32SYSCALL(sys_write, 3)
33SYSCALL(sys_open, 3) /* 05 */
34SYSCALL(sys_close, 1)
35SYSCALL(sys_waitpid, 3)
36SYSCALL(sys_creat, 2)
37SYSCALL(sys_link, 2)
38SYSCALL(sys_unlink, 1) /* 10 */
39SYSCALL(sys_execve, 0)
40SYSCALL(sys_chdir, 1)
41SYSCALL(sys_time, 1)
42SYSCALL(sys_mknod, 3)
43SYSCALL(sys_chmod, 2) /* 15 */
44SYSCALL(sys_lchown, 3)
45SYSCALL(sys_ni_syscall, 0)
46SYSCALL(sys_stat, 2)
47SYSCALL(sys_lseek, 3)
48SYSCALL(sys_getpid, 0) /* 20 */
49SYSCALL(sys_mount, 5)
50SYSCALL(sys_oldumount, 1)
51SYSCALL(sys_setuid, 1)
52SYSCALL(sys_getuid, 0)
53SYSCALL(sys_stime, 1) /* 25 */
54SYSCALL(sys_ptrace, 4)
55SYSCALL(sys_alarm, 1)
56SYSCALL(sys_fstat, 2)
57SYSCALL(sys_pause, 0)
58SYSCALL(sys_utime, 2) /* 30 */
59SYSCALL(sys_ni_syscall, 0)
60SYSCALL(sys_ni_syscall, 0)
61SYSCALL(sys_access, 2)
62SYSCALL(sys_nice, 1)
63SYSCALL(sys_ni_syscall, 0) /* 35 */
64SYSCALL(sys_sync, 0)
65SYSCALL(sys_kill, 2)
66SYSCALL(sys_rename, 2)
67SYSCALL(sys_mkdir, 2)
68SYSCALL(sys_rmdir, 1) /* 40 */
69SYSCALL(sys_dup, 1)
70SYSCALL(sys_pipe, 1)
71SYSCALL(sys_times, 1)
72SYSCALL(sys_ni_syscall, 0)
73SYSCALL(sys_brk, 1) /* 45 */
74SYSCALL(sys_setgid, 1)
75SYSCALL(sys_getgid, 0)
76SYSCALL(sys_ni_syscall, 0) /* was signal(2) */
77SYSCALL(sys_geteuid, 0)
78SYSCALL(sys_getegid, 0) /* 50 */
79SYSCALL(sys_acct, 1)
80SYSCALL(sys_umount, 2)
81SYSCALL(sys_ni_syscall, 0)
82SYSCALL(sys_ioctl, 3)
83SYSCALL(sys_fcntl, 3) /* 55 */
84SYSCALL(sys_ni_syscall, 2)
85SYSCALL(sys_setpgid, 2)
86SYSCALL(sys_ni_syscall, 0)
87SYSCALL(sys_olduname, 1)
88SYSCALL(sys_umask, 1) /* 60 */
89SYSCALL(sys_chroot, 1)
90SYSCALL(sys_ustat, 2)
91SYSCALL(sys_dup2, 2)
92SYSCALL(sys_getppid, 0)
93SYSCALL(sys_getpgrp, 0) /* 65 */
94SYSCALL(sys_setsid, 0)
95SYSCALL(sys_sigaction, 3)
96SYSCALL(sys_sgetmask, 0)
97SYSCALL(sys_ssetmask, 1)
98SYSCALL(sys_setreuid, 2) /* 70 */
99SYSCALL(sys_setregid, 2)
100SYSCALL(sys_sigsuspend, 0)
101SYSCALL(sys_sigpending, 1)
102SYSCALL(sys_sethostname, 2)
103SYSCALL(sys_setrlimit, 2) /* 75 */
104SYSCALL(sys_getrlimit, 2)
105SYSCALL(sys_getrusage, 2)
106SYSCALL(sys_gettimeofday, 2)
107SYSCALL(sys_settimeofday, 2)
108SYSCALL(sys_getgroups, 2) /* 80 */
109SYSCALL(sys_setgroups, 2)
110SYSCALL(sys_ni_syscall, 0) /* old_select */
111SYSCALL(sys_symlink, 2)
112SYSCALL(sys_lstat, 2)
113SYSCALL(sys_readlink, 3) /* 85 */
114SYSCALL(sys_uselib, 1)
115SYSCALL(sys_swapon, 2)
116SYSCALL(sys_reboot, 3)
117SYSCALL(old_readdir, 3)
118SYSCALL(old_mmap, 6) /* 90 */
119SYSCALL(sys_munmap, 2)
120SYSCALL(sys_truncate, 2)
121SYSCALL(sys_ftruncate, 2)
122SYSCALL(sys_fchmod, 2)
123SYSCALL(sys_fchown, 3) /* 95 */
124SYSCALL(sys_getpriority, 2)
125SYSCALL(sys_setpriority, 3)
126SYSCALL(sys_ni_syscall, 0)
127SYSCALL(sys_statfs, 2)
128SYSCALL(sys_fstatfs, 2) /* 100 */
129SYSCALL(sys_ni_syscall, 3)
130SYSCALL(sys_socketcall, 2)
131SYSCALL(sys_syslog, 3)
132SYSCALL(sys_setitimer, 3)
133SYSCALL(sys_getitimer, 2) /* 105 */
134SYSCALL(sys_newstat, 2)
135SYSCALL(sys_newlstat, 2)
136SYSCALL(sys_newfstat, 2)
137SYSCALL(sys_uname, 1)
138SYSCALL(sys_ni_syscall, 0) /* 110 */
139SYSCALL(sys_vhangup, 0)
140SYSCALL(sys_ni_syscall, 0) /* was sys_idle() */
141SYSCALL(sys_ni_syscall, 0)
142SYSCALL(sys_wait4, 4)
143SYSCALL(sys_swapoff, 1) /* 115 */
144SYSCALL(sys_sysinfo, 1)
145SYSCALL(sys_ipc, 5) /* 6 really, but glibc uses only 5 */
146SYSCALL(sys_fsync, 1)
147SYSCALL(sys_sigreturn, 0)
148SYSCALL(sys_clone, 0) /* 120 */
149SYSCALL(sys_setdomainname, 2)
150SYSCALL(sys_newuname, 1)
151SYSCALL(sys_ni_syscall, 0) /* sys_modify_ldt */
152SYSCALL(sys_adjtimex, 1)
153SYSCALL(sys_mprotect, 3) /* 125 */
154SYSCALL(sys_sigprocmask, 3)
155SYSCALL(sys_ni_syscall, 2) /* old sys_create_module */
156SYSCALL(sys_init_module, 2)
157SYSCALL(sys_delete_module, 1)
158SYSCALL(sys_ni_syscall, 1) /* old sys_get_kernel_syms */ /* 130 */
159SYSCALL(sys_quotactl, 0)
160SYSCALL(sys_getpgid, 1)
161SYSCALL(sys_fchdir, 1)
162SYSCALL(sys_bdflush, 2)
163SYSCALL(sys_sysfs, 3) /* 135 */
164SYSCALL(sys_personality, 1)
165SYSCALL(sys_ni_syscall, 0) /* for afs_syscall */
166SYSCALL(sys_setfsuid, 1)
167SYSCALL(sys_setfsgid, 1)
168SYSCALL(sys_llseek, 5) /* 140 */
169SYSCALL(sys_getdents, 3)
170SYSCALL(sys_select, 5)
171SYSCALL(sys_flock, 2)
172SYSCALL(sys_msync, 3)
173SYSCALL(sys_readv, 3) /* 145 */
174SYSCALL(sys_writev, 3)
175SYSCALL(sys_ni_syscall, 3)
176SYSCALL(sys_ni_syscall, 3)
177SYSCALL(sys_ni_syscall, 4) /* handled in fast syscall handler. */
178SYSCALL(sys_ni_syscall, 0) /* 150 */
179SYSCALL(sys_getsid, 1)
180SYSCALL(sys_fdatasync, 1)
181SYSCALL(sys_sysctl, 1)
182SYSCALL(sys_mlock, 2)
183SYSCALL(sys_munlock, 2) /* 155 */
184SYSCALL(sys_mlockall, 1)
185SYSCALL(sys_munlockall, 0)
186SYSCALL(sys_sched_setparam,2)
187SYSCALL(sys_sched_getparam,2)
188SYSCALL(sys_sched_setscheduler,3) /* 160 */
189SYSCALL(sys_sched_getscheduler,1)
190SYSCALL(sys_sched_yield,0)
191SYSCALL(sys_sched_get_priority_max,1)
192SYSCALL(sys_sched_get_priority_min,1)
193SYSCALL(sys_sched_rr_get_interval,2) /* 165 */
194SYSCALL(sys_nanosleep,2)
195SYSCALL(sys_mremap,4)
196SYSCALL(sys_accept, 3)
197SYSCALL(sys_bind, 3)
198SYSCALL(sys_connect, 3) /* 170 */
199SYSCALL(sys_getpeername, 3)
200SYSCALL(sys_getsockname, 3)
201SYSCALL(sys_getsockopt, 5)
202SYSCALL(sys_listen, 2)
203SYSCALL(sys_recv, 4) /* 175 */
204SYSCALL(sys_recvfrom, 6)
205SYSCALL(sys_recvmsg, 3)
206SYSCALL(sys_send, 4)
207SYSCALL(sys_sendmsg, 3)
208SYSCALL(sys_sendto, 6) /* 180 */
209SYSCALL(sys_setsockopt, 5)
210SYSCALL(sys_shutdown, 2)
211SYSCALL(sys_socket, 3)
212SYSCALL(sys_socketpair, 4)
213SYSCALL(sys_setresuid, 3) /* 185 */
214SYSCALL(sys_getresuid, 3)
215SYSCALL(sys_ni_syscall, 5) /* old sys_query_module */
216SYSCALL(sys_poll, 3)
217SYSCALL(sys_nfsservctl, 3)
218SYSCALL(sys_setresgid, 3) /* 190 */
219SYSCALL(sys_getresgid, 3)
220SYSCALL(sys_prctl, 5)
221SYSCALL(sys_rt_sigreturn, 0)
222SYSCALL(sys_rt_sigaction, 4)
223SYSCALL(sys_rt_sigprocmask, 4) /* 195 */
224SYSCALL(sys_rt_sigpending, 2)
225SYSCALL(sys_rt_sigtimedwait, 4)
226SYSCALL(sys_rt_sigqueueinfo, 3)
227SYSCALL(sys_rt_sigsuspend, 0)
228SYSCALL(sys_pread64, 5) /* 200 */
229SYSCALL(sys_pwrite64, 5)
230SYSCALL(sys_chown, 3)
231SYSCALL(sys_getcwd, 2)
232SYSCALL(sys_capget, 2)
233SYSCALL(sys_capset, 2) /* 205 */
234SYSCALL(sys_sigaltstack, 0)
235SYSCALL(sys_sendfile, 4)
236SYSCALL(sys_ni_syscall, 0)
237SYSCALL(sys_ni_syscall, 0)
238SYSCALL(sys_mmap2, 6) /* 210 */
239SYSCALL(sys_truncate64, 2)
240SYSCALL(sys_ftruncate64, 2)
241SYSCALL(sys_stat64, 2)
242SYSCALL(sys_lstat64, 2)
243SYSCALL(sys_fstat64, 2) /* 215 */
244SYSCALL(sys_pivot_root, 2)
245SYSCALL(sys_mincore, 3)
246SYSCALL(sys_madvise, 3)
247SYSCALL(sys_getdents64, 3)
248SYSCALL(sys_vfork, 0) /* 220 */
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
new file mode 100644
index 000000000000..e07287db5a40
--- /dev/null
+++ b/arch/xtensa/kernel/time.c
@@ -0,0 +1,227 @@
1/*
2 * arch/xtensa/kernel/time.c
3 *
4 * Timer and clock support.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15#include <linux/config.h>
16#include <linux/errno.h>
17#include <linux/time.h>
18#include <linux/timex.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/irq.h>
23#include <linux/profile.h>
24#include <linux/delay.h>
25
26#include <asm/timex.h>
27#include <asm/platform.h>
28
29
30extern volatile unsigned long wall_jiffies;
31
32u64 jiffies_64 = INITIAL_JIFFIES;
33EXPORT_SYMBOL(jiffies_64);
34
35spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
36EXPORT_SYMBOL(rtc_lock);
37
38
39#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
40unsigned long ccount_per_jiffy; /* per 1/HZ */
41unsigned long ccount_nsec; /* nsec per ccount increment */
42#endif
43
44unsigned int last_ccount_stamp;
45static long last_rtc_update = 0;
46
47/*
48 * Scheduler clock - returns current time in nanosec units.
49 */
50
51unsigned long long sched_clock(void)
52{
53 return (unsigned long long)jiffies * (1000000000 / HZ);
54}
55
56static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
57static struct irqaction timer_irqaction = {
58 .handler = timer_interrupt,
59 .flags = SA_INTERRUPT,
60 .name = "timer",
61};
62
63void __init time_init(void)
64{
65 time_t sec_o, sec_n = 0;
66
67	/* The platform must provide a function to calibrate the processor
68	 * speed when CONFIG_XTENSA_CALIBRATE_CCOUNT is configured.
69	 */
70
71#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
72 printk("Calibrating CPU frequency ");
73 platform_calibrate_ccount();
74 printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
75 (int)(ccount_per_jiffy/(10000/HZ))%100);
76#endif
77
78 /* Set time from RTC (if provided) */
79
80 if (platform_get_rtc_time(&sec_o) == 0)
81 while (platform_get_rtc_time(&sec_n))
82 if (sec_o != sec_n)
83 break;
84
85 xtime.tv_nsec = 0;
86 last_rtc_update = xtime.tv_sec = sec_n;
87 last_ccount_stamp = get_ccount();
88
89 set_normalized_timespec(&wall_to_monotonic,
90 -xtime.tv_sec, -xtime.tv_nsec);
91
92 /* Initialize the linux timer interrupt. */
93
94 setup_irq(LINUX_TIMER_INT, &timer_irqaction);
95 set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
96}
97
98
99int do_settimeofday(struct timespec *tv)
100{
101 time_t wtm_sec, sec = tv->tv_sec;
102 long wtm_nsec, nsec = tv->tv_nsec;
103 unsigned long ccount;
104
105 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
106 return -EINVAL;
107
108 write_seqlock_irq(&xtime_lock);
109
110 /* This is revolting. We need to set "xtime" correctly. However, the
111 * value in this location is the value at the most recent update of
112 * wall time. Discover what correction gettimeofday() would have
113 * made, and then undo it!
114 */
115 ccount = get_ccount();
116 nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
117 nsec -= (jiffies - wall_jiffies) * CCOUNT_PER_JIFFY * CCOUNT_NSEC;
118
119 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
120 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
121
122 set_normalized_timespec(&xtime, sec, nsec);
123 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
124
125 time_adjust = 0; /* stop active adjtime() */
126 time_status |= STA_UNSYNC;
127 time_maxerror = NTP_PHASE_LIMIT;
128 time_esterror = NTP_PHASE_LIMIT;
129 write_sequnlock_irq(&xtime_lock);
130 return 0;
131}
132
133EXPORT_SYMBOL(do_settimeofday);
134
135
136void do_gettimeofday(struct timeval *tv)
137{
138 unsigned long flags;
139 unsigned long sec, usec, delta, lost, seq;
140
141 do {
142 seq = read_seqbegin_irqsave(&xtime_lock, flags);
143
144 delta = get_ccount() - last_ccount_stamp;
145 sec = xtime.tv_sec;
146 usec = (xtime.tv_nsec / NSEC_PER_USEC);
147
148 lost = jiffies - wall_jiffies;
149
150 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
151
152 usec += lost * (1000000UL/HZ) + (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
153 for (; usec >= 1000000; sec++, usec -= 1000000)
154 ;
155
156 tv->tv_sec = sec;
157 tv->tv_usec = usec;
158}
159
160EXPORT_SYMBOL(do_gettimeofday);
161
162/*
163 * The timer interrupt is called HZ times per second.
164 */
165
166irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
167{
168
169 unsigned long next;
170
171 next = get_linux_timer();
172
173again:
174 while ((signed long)(get_ccount() - next) > 0) {
175
176 profile_tick(CPU_PROFILING, regs);
177#ifndef CONFIG_SMP
178 update_process_times(user_mode(regs));
179#endif
180
181 write_seqlock(&xtime_lock);
182
183 last_ccount_stamp = next;
184 next += CCOUNT_PER_JIFFY;
185 do_timer (regs); /* Linux handler in kernel/timer.c */
186
187 if ((time_status & STA_UNSYNC) == 0 &&
188 xtime.tv_sec - last_rtc_update >= 659 &&
189 abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
190 jiffies - wall_jiffies == 1) {
191
192 if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
193 last_rtc_update = xtime.tv_sec+1;
194 else
195 /* Do it again in 60 s */
196 last_rtc_update += 60;
197 }
198 write_sequnlock(&xtime_lock);
199 }
200
201 /* NOTE: writing CCOMPAREn clears the interrupt. */
202
203 set_linux_timer (next);
204
205 /* Make sure we didn't miss any tick... */
206
207 if ((signed long)(get_ccount() - next) > 0)
208 goto again;
209
210	/* Allow the platform to do something useful (e.g. kick a watchdog). */
211
212 platform_heartbeat();
213
214 return IRQ_HANDLED;
215}
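Both the tick loop and the final re-check rely on a wraparound-safe comparison
of the free-running 32-bit cycle counter; the idiom in isolation (illustrative
sketch):

	/* True if cycle count 'a' is after 'b'; correct across 32-bit
	 * wraparound as long as the two are under 2^31 cycles apart. */
	static inline int ccount_after(unsigned long a, unsigned long b)
	{
		return (signed long) (a - b) > 0;
	}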
216
217#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
218void __devinit calibrate_delay(void)
219{
220 loops_per_jiffy = CCOUNT_PER_JIFFY;
221 printk("Calibrating delay loop (skipped)... "
222 "%lu.%02lu BogoMIPS preset\n",
223 loops_per_jiffy/(1000000/HZ),
224 (loops_per_jiffy/(10000/HZ)) % 100);
225}
226#endif
227
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
new file mode 100644
index 000000000000..804246e743b1
--- /dev/null
+++ b/arch/xtensa/kernel/traps.c
@@ -0,0 +1,498 @@
1/*
2 * arch/xtensa/kernel/traps.c
3 *
4 * Exception handling.
5 *
6 * Derived from code with the following copyrights:
7 * Copyright (C) 1994 - 1999 by Ralf Baechle
8 * Modified for R3000 by Paul M. Antoine, 1995, 1996
9 * Complete output from die() by Ulf Carlsson, 1998
10 * Copyright (C) 1999 Silicon Graphics, Inc.
11 *
12 * Essentially rewritten for the Xtensa architecture port.
13 *
14 * Copyright (C) 2001 - 2005 Tensilica Inc.
15 *
16 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
17 * Chris Zankel <chris@zankel.net>
18 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
19 * Kevin Chea
20 *
21 * This file is subject to the terms and conditions of the GNU General Public
22 * License. See the file "COPYING" in the main directory of this archive
23 * for more details.
24 */
25
26#include <linux/kernel.h>
27#include <linux/sched.h>
28#include <linux/init.h>
29#include <linux/module.h>
30#include <linux/stringify.h>
31#include <linux/kallsyms.h>
32
33#include <asm/ptrace.h>
34#include <asm/timex.h>
35#include <asm/uaccess.h>
36#include <asm/pgtable.h>
37#include <asm/processor.h>
38
39#ifdef CONFIG_KGDB
40extern int gdb_enter;
41extern int return_from_debug_flag;
42#endif
43
44/*
45 * Machine specific interrupt handlers
46 */
47
48extern void kernel_exception(void);
49extern void user_exception(void);
50
51extern void fast_syscall_kernel(void);
52extern void fast_syscall_user(void);
53extern void fast_alloca(void);
54extern void fast_unaligned(void);
55extern void fast_second_level_miss(void);
56extern void fast_store_prohibited(void);
57extern void fast_coprocessor(void);
58
59extern void do_illegal_instruction (struct pt_regs*);
60extern void do_interrupt (struct pt_regs*);
61extern void do_unaligned_user (struct pt_regs*);
62extern void do_multihit (struct pt_regs*, unsigned long);
63extern void do_page_fault (struct pt_regs*, unsigned long);
64extern void do_debug (struct pt_regs*);
65extern void system_call (struct pt_regs*);
66
67/*
68 * The vector table must be preceded by a save area (which
69 * implies it must be in RAM, unless one places RAM immediately
70 * before a ROM and puts the vector at the start of the ROM (!))
71 */
72
73#define KRNL 0x01
74#define USER 0x02
75
76#define COPROCESSOR(x) \
77{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
78
79typedef struct {
80 int cause;
81 int fast;
82 void* handler;
83} dispatch_init_table_t;
84
85dispatch_init_table_t __init dispatch_init_table[] = {
86
87{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
88{ XCHAL_EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
89{ XCHAL_EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
90{ XCHAL_EXCCAUSE_SYSTEM_CALL, 0, system_call },
91/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */
92/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled */
93{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
94{ XCHAL_EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
95/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
96/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */
97#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
98#ifdef CONFIG_UNALIGNED_USER
99{ XCHAL_EXCCAUSE_UNALIGNED, USER, fast_unaligned },
100#else
101{ XCHAL_EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
102#endif
103{ XCHAL_EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
104#endif
105{ XCHAL_EXCCAUSE_ITLB_MISS, 0, do_page_fault },
106{ XCHAL_EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
107{ XCHAL_EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
108{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
109/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */
110{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
111{ XCHAL_EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
112{ XCHAL_EXCCAUSE_DTLB_MISS, 0, do_page_fault },
113{ XCHAL_EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
114{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
115/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
116{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
117{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
118{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
119/* XCHAL_EXCCAUSE_FLOATING_POINT unhandled */
120#if (XCHAL_CP_MASK & 1)
121COPROCESSOR(0),
122#endif
123#if (XCHAL_CP_MASK & 2)
124COPROCESSOR(1),
125#endif
126#if (XCHAL_CP_MASK & 4)
127COPROCESSOR(2),
128#endif
129#if (XCHAL_CP_MASK & 8)
130COPROCESSOR(3),
131#endif
132#if (XCHAL_CP_MASK & 16)
133COPROCESSOR(4),
134#endif
135#if (XCHAL_CP_MASK & 32)
136COPROCESSOR(5),
137#endif
138#if (XCHAL_CP_MASK & 64)
139COPROCESSOR(6),
140#endif
141#if (XCHAL_CP_MASK & 128)
142COPROCESSOR(7),
143#endif
144{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
145{ -1, -1, 0 }
146
147};
148
149/* The exception table <exc_table> serves two functions:
150 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
151 * 2. it is a temporary memory buffer for the exception handlers.
152 */
153
154unsigned long exc_table[EXC_TABLE_SIZE/4];
155
156void die(const char*, struct pt_regs*, long);
157
158static inline void
159__die_if_kernel(const char *str, struct pt_regs *regs, long err)
160{
161 if (!user_mode(regs))
162 die(str, regs, err);
163}
164
165/*
166 * Unhandled Exceptions. Kill user task or panic if in kernel space.
167 */
168
169void do_unhandled(struct pt_regs *regs, unsigned long exccause)
170{
171 __die_if_kernel("Caught unhandled exception - should not happen",
172 regs, SIGKILL);
173
174 /* If in user mode, send SIGILL signal to current process */
175 printk("Caught unhandled exception in '%s' "
176 "(pid = %d, pc = %#010lx) - should not happen\n"
177 "\tEXCCAUSE is %ld\n",
178 current->comm, current->pid, regs->pc, exccause);
179 force_sig(SIGILL, current);
180}
181
182/*
183 * Multi-hit exception. This is fatal!
184 */
185
186void do_multihit(struct pt_regs *regs, unsigned long exccause)
187{
188 die("Caught multihit exception", regs, SIGKILL);
189}
190
191/*
192 * Level-1 interrupt.
193 * We currently have no priority encoding.
194 */
195
196unsigned long ignored_level1_interrupts;
197extern void do_IRQ(int, struct pt_regs *);
198
199void do_interrupt (struct pt_regs *regs)
200{
201 unsigned long intread = get_sr (INTREAD);
202 unsigned long intenable = get_sr (INTENABLE);
203 int i, mask;
204
205 /* Handle all interrupts (no priorities).
206 * (Clear the interrupt before processing, in case it's
207 * edge-triggered or software-generated)
208 */
209
210 for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
211 if (mask & (intread & intenable)) {
212 set_sr (mask, INTCLEAR);
213 do_IRQ (i,regs);
214 }
215 }
216}
217
218/*
219 * Illegal instruction. Fatal if in kernel space.
220 */
221
222void
223do_illegal_instruction(struct pt_regs *regs)
224{
225 __die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);
226
227 /* If in user mode, send SIGILL signal to current process. */
228
229 printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
230 current->comm, current->pid, regs->pc);
231 force_sig(SIGILL, current);
232}
233
234
235/*
236 * Handle unaligned memory accesses from user space. Kill task.
237 *
238 * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
239 * accesses caused from user space.
240 */
241
242#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
243#ifndef CONFIG_UNALIGNED_USER
244void
245do_unaligned_user (struct pt_regs *regs)
246{
247 siginfo_t info;
248
249 __die_if_kernel("Unhandled unaligned exception in kernel",
250 regs, SIGKILL);
251
252 current->thread.bad_vaddr = regs->excvaddr;
253 current->thread.error_code = -3;
254 printk("Unaligned memory access to %08lx in '%s' "
255 "(pid = %d, pc = %#010lx)\n",
256 regs->excvaddr, current->comm, current->pid, regs->pc);
257 info.si_signo = SIGBUS;
258 info.si_errno = 0;
259 info.si_code = BUS_ADRALN;
260 info.si_addr = (void *) regs->excvaddr;
261	force_sig_info(SIGBUS, &info, current);	/* matches info.si_signo */
262
263}
264#endif
265#endif
266
267void
268do_debug(struct pt_regs *regs)
269{
270#ifdef CONFIG_KGDB
271 /* If remote debugging is configured AND enabled, we give control to
272 * kgdb. Otherwise, we fall through, perhaps giving control to the
273 * native debugger.
274 */
275
276 if (gdb_enter) {
277 extern void gdb_handle_exception(struct pt_regs *);
278 gdb_handle_exception(regs);
279 return_from_debug_flag = 1;
280 return;
281 }
282#endif
283
284 __die_if_kernel("Breakpoint in kernel", regs, SIGKILL);
285
286 /* If in user mode, send SIGTRAP signal to current process */
287
288 force_sig(SIGTRAP, current);
289}
290
291
292/*
293 * Initialize dispatch tables.
294 *
295 * The exception vectors are stored compressed in the __init section in the
296 * dispatch_init_table. This function initializes the following three tables
297 * from that compressed table:
298 * - fast user first dispatch table for user exceptions
299 * - fast kernel first dispatch table for kernel exceptions
300 * - default C-handler C-handler called by the default fast handler.
301 *
302 * See vectors.S for more details.
303 */
304
305#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
306
307void trap_init(void)
308{
309 int i;
310
311 /* Setup default vectors. */
312
313 for(i = 0; i < 64; i++) {
314 set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception);
315 set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
316 set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
317 }
318
319 /* Setup specific handlers. */
320
321 for(i = 0; dispatch_init_table[i].cause >= 0; i++) {
322
323 int fast = dispatch_init_table[i].fast;
324 int cause = dispatch_init_table[i].cause;
325 void *handler = dispatch_init_table[i].handler;
326
327 if (fast == 0)
328 set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
329 if (fast && fast & USER)
330 set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
331 if (fast && fast & KRNL)
332 set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
333 }
334
335 /* Initialize EXCSAVE_1 to hold the address of the exception table. */
336
337 i = (unsigned long)exc_table;
338 __asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
339}
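The same set_handler() macro could serve code that wants to claim one of the
causes left unhandled above; a hypothetical sketch (my_priv_handler is an
assumed example handler, not part of this patch):

	extern void my_priv_handler(struct pt_regs *regs);

	/* Route the privileged-instruction cause to a custom slow-path
	 * C handler via the default dispatch table. */
	static void register_priv_handler(void)
	{
		set_handler(EXC_TABLE_DEFAULT/4 + XCHAL_EXCCAUSE_PRIVILEGED,
			    my_priv_handler);
	}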
340
341/*
342 * This function dumps the current valid window frame and other base registers.
343 */
344
345void show_regs(struct pt_regs * regs)
346{
347 int i, wmask;
348
349 wmask = regs->wmask & ~1;
350
351 for (i = 0; i < 32; i++) {
352 if (wmask & (1 << (i / 4)))
353 break;
354 if ((i % 8) == 0)
355 printk ("\n" KERN_INFO "a%02d: ", i);
356 printk("%08lx ", regs->areg[i]);
357 }
358 printk("\n");
359
360 printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
361 regs->pc, regs->ps, regs->depc, regs->excvaddr);
362 printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
363 regs->lbeg, regs->lend, regs->lcount, regs->sar);
364 if (user_mode(regs))
365 printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
366 regs->windowbase, regs->windowstart, regs->wmask,
367 regs->syscall);
368}
369
370void show_trace(struct task_struct *task, unsigned long *sp)
371{
372 unsigned long a0, a1, pc;
373 unsigned long sp_start, sp_end;
374
375 a1 = (unsigned long)sp;
376
377 if (a1 == 0)
378 __asm__ __volatile__ ("mov %0, a1\n" : "=a"(a1));
379
380
381 sp_start = a1 & ~(THREAD_SIZE-1);
382 sp_end = sp_start + THREAD_SIZE;
383
384 printk("Call Trace:");
385#ifdef CONFIG_KALLSYMS
386 printk("\n");
387#endif
388 spill_registers();
389
390 while (a1 > sp_start && a1 < sp_end) {
391 sp = (unsigned long*)a1;
392
393 a0 = *(sp - 4);
394 a1 = *(sp - 3);
395
396 if (a1 <= (unsigned long) sp)
397 break;
398
399 pc = MAKE_PC_FROM_RA(a0, a1);
400
401 if (kernel_text_address(pc)) {
402 printk(" [<%08lx>] ", pc);
403 print_symbol("%s\n", pc);
404 }
405 }
406 printk("\n");
407}
408
409/*
410 * This routine abuses get_user()/put_user() to reference pointers
411 * with at least a bit of error checking ...
412 */
413
414static int kstack_depth_to_print = 24;
415
416void show_stack(struct task_struct *task, unsigned long *sp)
417{
418 int i = 0;
419 unsigned long *stack;
420
421 if (sp == 0)
422 __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
423
424 stack = sp;
425
426 printk("\nStack: ");
427
428 for (i = 0; i < kstack_depth_to_print; i++) {
429 if (kstack_end(sp))
430 break;
431 if (i && ((i % 8) == 0))
432 printk("\n ");
433 printk("%08lx ", *sp++);
434 }
435 printk("\n");
436 show_trace(task, stack);
437}
438
439void dump_stack(void)
440{
441 show_stack(current, NULL);
442}
443
444EXPORT_SYMBOL(dump_stack);
445
446
447void show_code(unsigned int *pc)
448{
449 long i;
450
451 printk("\nCode:");
452
453 for(i = -3 ; i < 6 ; i++) {
454 unsigned long insn;
455 if (__get_user(insn, pc + i)) {
456 printk(" (Bad address in pc)\n");
457 break;
458 }
459		printk("%c%08lx%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
460 }
461}
462
463spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
464
465void die(const char * str, struct pt_regs * regs, long err)
466{
467 static int die_counter;
468 int nl = 0;
469
470 console_verbose();
471 spin_lock_irq(&die_lock);
472
473 printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
474#ifdef CONFIG_PREEMPT
475 printk("PREEMPT ");
476 nl = 1;
477#endif
478 if (nl)
479 printk("\n");
480 show_regs(regs);
481 if (!user_mode(regs))
482 show_stack(NULL, (unsigned long*)regs->areg[1]);
483
484 spin_unlock_irq(&die_lock);
485
486 if (in_interrupt())
487 panic("Fatal exception in interrupt");
488
489 if (panic_on_oops) {
490 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
491 set_current_state(TASK_UNINTERRUPTIBLE);
492 schedule_timeout(5 * HZ);
493 panic("Fatal exception");
494 }
495 do_exit(err);
496}
497
498
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
new file mode 100644
index 000000000000..81808f0c6742
--- /dev/null
+++ b/arch/xtensa/kernel/vectors.S
@@ -0,0 +1,464 @@
1/*
2 * arch/xtensa/kernel/vectors.S
3 *
4 * This file contains all exception vectors (user, kernel, and double),
5 * as well as the window vectors (overflow and underflow), and the debug
6 * vector. These are the primary vectors executed by the processor if an
7 * exception occurs.
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file "COPYING" in the main directory of
11 * this archive for more details.
12 *
13 * Copyright (C) 2005 Tensilica, Inc.
14 *
15 * Chris Zankel <chris@zankel.net>
16 *
17 */
18
19/*
20 * We use a two-level table approach. The user and kernel exception vectors
21 * use a first-level dispatch table to dispatch the exception to a registered
22 * fast handler or the default handler, if no fast handler was registered.
23 * The default handler sets up a C-stack and dispatches the exception to a
24 * registered C handler in the second-level dispatch table.
25 *
26 * Fast handler entry condition:
27 *
28 * a0: trashed, original value saved on stack (PT_AREG0)
29 * a1: a1
30 * a2: new stack pointer, original value in depc
31 * a3: dispatch table
32 * depc: a2, original value saved on stack (PT_DEPC)
33 * excsave_1: a3
34 *
35 * The value for PT_DEPC saved to stack also functions as a boolean to
36 * indicate that the exception is either a double or a regular exception:
37 *
38 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception
39 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
40 *
41 * Note: Neither the kernel nor the user exception handler generate literals.
42 *
43 */
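The PT_DEPC convention can be restated in C for clarity (a sketch only; the
real tests are done in assembly):

	/* Was this exception frame built by the double exception vector? */
	static inline int frame_from_double_exception(struct pt_regs *regs)
	{
		return regs->depc >= VALID_DOUBLE_EXCEPTION_ADDRESS;
	}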
44
45#include <linux/linkage.h>
46#include <asm/ptrace.h>
47#include <asm/ptrace.h>
48#include <asm/current.h>
49#include <asm/offsets.h>
50#include <asm/pgtable.h>
51#include <asm/processor.h>
52#include <asm/page.h>
53#include <asm/thread_info.h>
54#include <asm/processor.h>
55
56
57/*
58 * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
59 *
60 * We get here when an exception occurred while we were in userland.
61 * We switch to the kernel stack and jump to the first level handler
62 * associated to the exception cause.
63 *
64 * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already
65 * decremented by PT_USER_SIZE.
66 */
67
68 .section .UserExceptionVector.text, "ax"
69
70ENTRY(_UserExceptionVector)
71
72 xsr a3, EXCSAVE_1 # save a3 and get dispatch table
73 wsr a2, DEPC # save a2
74 l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
75 s32i a0, a2, PT_AREG0 # save a0 to ESF
76 rsr a0, EXCCAUSE # retrieve exception cause
77 s32i a0, a2, PT_DEPC # mark it as a regular exception
78 addx4 a0, a0, a3 # find entry in table
79 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
80 jx a0
81
82/*
83 * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
84 *
85 * We get this exception when we were already in kernel space.
86 * We decrement the current stack pointer (kernel) by PT_SIZE and
87 * jump to the first-level handler associated with the exception cause.
88 *
89 * Note: we need to preserve space for the spill region.
90 */
91
92 .section .KernelExceptionVector.text, "ax"
93
94ENTRY(_KernelExceptionVector)
95
96 xsr a3, EXCSAVE_1 # save a3, and get dispatch table
97 wsr a2, DEPC # save a2
98 addi a2, a1, -16-PT_SIZE # adjust stack pointer
99 s32i a0, a2, PT_AREG0 # save a0 to ESF
100 rsr a0, EXCCAUSE # retrieve exception cause
101 s32i a0, a2, PT_DEPC # mark it as a regular exception
102 addx4 a0, a0, a3 # find entry in table
103 l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
104 jx a0
105
106
107/*
108 * Double exception vector (Exceptions with PS.EXCM == 1)
109 * We get this exception when another exception occurs while we are
110 * already in an exception, such as window overflow/underflow exception,
111 * or 'expected' exceptions, for example memory exception when we were trying
112 * to read data from an invalid address in user space.
113 *
114 * Note that this vector is never invoked for level-1 interrupts, because such
115 * interrupts are disabled (masked) when PS.EXCM is set.
116 *
117 * We decode the exception and take the appropriate action. However, the
118 * double exception vector is much more careful, because a lot more error
119 * cases go through the double exception vector than through the user and
120 * kernel exception vectors.
121 *
122 * Occasionally, the kernel expects a double exception to occur. This usually
123 * happens when accessing user-space memory with the user's permissions
124 * (l32e/s32e instructions). The kernel state, though, is not always suitable
125 * for immediate transfer of control to handle_double, where "normal" exception
126 * processing occurs. Also in kernel mode, TLB misses can occur if accessing
127 * vmalloc memory, possibly requiring repair in a double exception handler.
128 *
129 * The variable at the EXC_TABLE_FIXUP offset from the pointer in EXCSAVE_1
130 * doubles as a boolean and as a pointer to a fixup routine. If the variable
131 * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of
132 * zero indicates to use the default kernel/user exception handler.
133 * There is only one exception, when the value is identical to the exc_table
134 * label, the kernel is in trouble. This mechanism is used to protect critical
135 * sections, mainly when the handler writes to the stack to assert the stack
136 * pointer is valid. Once the fixup/default handler leaves that area, the
137 * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero.
138 *
139 * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to the
140 * nonzero address of a fixup routine before they could cause a double
141 * exception and reset it before they return.
142 *
143 * Some other things to take care of when a fast exception handler doesn't
144 * specify a particular fixup handler but wants to use the default handlers:
145 *
146 * - The original stack pointer (in a1) must not be modified. The fast
147 * exception handler should only use a2 as the stack pointer.
148 *
149 * - If the fast handler manipulates the stack pointer (in a2), it has to
150 * register a valid fixup handler and cannot use the default handlers.
151 *
152 * - The handler can use any other generic register from a3 to a15, but it
153 * must save the content of these registers to stack (PT_AREG3...PT_AREGx)
154 *
155 * - These registers must be saved before a double exception can occur.
156 *
157 * - (*) If we ever implement handling signals while in double exceptions,
158 *   the number of registers a fast handler has saved (excluding a0 and a1)
159 *   must be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4, etc.)
160 *
161 * The fixup handlers are special handlers:
162 *
163 * - Fixup entry conditions differ from regular exceptions:
164 *
165 * a0: DEPC
166 * a1: a1
167 * a2: trashed, original value in EXC_TABLE_DOUBLE_A2
168 * a3: exctable
169 * depc: a0
170 * excsave_1: a3
171 *
172 * - When the kernel enters the fixup handler, it still assumes it is in a
173 * critical section, so EXC_TABLE_FIXUP variable is set to exc_table.
174 * The fixup handler, therefore, has to re-register itself as the fixup
175 * handler before it returns from the double exception.
176 *
177 * - Fixup handler can share the same exception frame with the fast handler.
178 * The kernel stack pointer is not changed when entering the fixup handler.
179 *
180 * - Fixup handlers can jump to the default kernel and user exception
181 *   handlers. Before it jumps, though, it has to set up an exception frame
182 *   on the stack. Because the default handler resets the fixup handler,
183 *   the fixup handler must make sure that the default handler returns to
184 *   it instead of to the exception address, so it can re-register itself as
185 * the fixup handler.
186 *
187 * In case of a critical condition where the kernel cannot recover, we jump
188 * to unrecoverable_exception with the following entry conditions.
189 * All registers a0...a15 are unchanged from the last exception, except:
190 *
191 * a0: last address before we jumped to the unrecoverable_exception.
192 * excsave_1: a0
193 *
194 *
195 * See the handle_alloca_user and spill_registers routines for example clients.
196 *
197 * FIXME: Note: we currently don't allow signal handling coming from a double
198 * exception, so the item marked with (*) is not required.
199 */
200
201 .section .DoubleExceptionVector.text, "ax"
202 .begin literal_prefix .DoubleExceptionVector
203
204ENTRY(_DoubleExceptionVector)
205
206	/* Deliberately destroy excsave (don't assume its value was valid). */
207
208 wsr a3, EXCSAVE_1 # save a3
209
210 /* Check for kernel double exception (usually fatal). */
211
212 rsr a3, PS
213 _bbci.l a3, PS_UM_SHIFT, .Lksp
214
215 /* Check if we are currently handling a window exception. */
216 /* Note: We don't need to indicate that we enter a critical section. */
217
218 xsr a0, DEPC # get DEPC, save a0
219
220 movi a3, XCHAL_WINDOW_VECTORS_VADDR
221 _bltu a0, a3, .Lfixup
222 addi a3, a3, XSHAL_WINDOW_VECTORS_SIZE
223 _bgeu a0, a3, .Lfixup
224
225 /* Window overflow/underflow exception. Get stack pointer. */
226
227 mov a3, a2
228 movi a2, exc_table
229 l32i a2, a2, EXC_TABLE_KSTK
230
231 /* Check for overflow/underflow exception, jump if overflow. */
232
233 _bbci.l a0, 6, .Lovfl
234
235 /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
236
237 /* Restart window underflow exception.
238 * We return to the instruction in user space that caused the window
239 * underflow exception. Therefore, we change window base to the value
240 * before we entered the window underflow exception and prepare the
241 * registers to return as if we were coming from a regular exception
242 * by changing depc (in a0).
243 * Note: We can trash the current window frame (a0...a3) and depc!
244 */
245
246 wsr a2, DEPC # save stack pointer temporarily
247 rsr a0, PS
248 extui a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
249 wsr a0, WINDOWBASE
250 rsync
251
252 /* We are now in the previous window frame. Save registers again. */
253
254 xsr a2, DEPC # save a2 and get stack pointer
255 s32i a0, a2, PT_AREG0
256
257 wsr a3, EXCSAVE_1 # save a3
258 movi a3, exc_table
259
260 rsr a0, EXCCAUSE
261 s32i a0, a2, PT_DEPC # mark it as a regular exception
262 addx4 a0, a0, a3
263 l32i a0, a0, EXC_TABLE_FAST_USER
264 jx a0
265
266.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
267
268 /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
269
270 movi a3, exc_table
271 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable
272
273 /* Enter critical section. */
274
275 l32i a2, a3, EXC_TABLE_FIXUP
276 s32i a3, a3, EXC_TABLE_FIXUP
277 beq a2, a3, .Lunrecoverable_fixup # critical!
278 beqz a2, .Ldflt # no handler was registered
279
280 /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
281
282 jx a2
283
284.Ldflt: /* Get stack pointer. */
285
286 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
287 addi a2, a3, -PT_USER_SIZE
288
289.Lovfl: /* Jump to default handlers. */
290
291 /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
292
293 xsr a3, DEPC
294 s32i a0, a2, PT_DEPC
295 s32i a3, a2, PT_AREG0
296
297 /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
298
299 movi a3, exc_table
300 rsr a0, EXCCAUSE
301 addx4 a0, a0, a3
302 l32i a0, a0, EXC_TABLE_FAST_USER
303 jx a0
304
305 /*
306 * We only allow the ITLB miss exception if we are in kernel space.
307 * All other exceptions are unexpected and thus unrecoverable!
308 */
309
310 .extern fast_second_level_miss_double_kernel
311
312.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
313
314 rsr a3, EXCCAUSE
315 beqi a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f
316 addi a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS
317 bnez a3, .Lunrecoverable
3181: movi a3, fast_second_level_miss_double_kernel
319 jx a3
320
321 /* Critical! We can't handle this situation. PANIC! */
322
323 .extern unrecoverable_exception
324
325.Lunrecoverable_fixup:
326 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
327 xsr a0, DEPC
328
329.Lunrecoverable:
330 rsr a3, EXCSAVE_1
331 wsr a0, EXCSAVE_1
332 movi a0, unrecoverable_exception
333 callx0 a0
334
335 .end literal_prefix
336
337
338/*
339 * Debug interrupt vector
340 *
341 * There is not much space here, so simply jump to another handler.
342 * EXCSAVE[DEBUGLEVEL] has been set to that handler.
343 */
344
345 .section .DebugInterruptVector.text, "ax"
346
347ENTRY(_DebugInterruptVector)
348 xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
349 jx a0
350
351
352
353/* Window overflow and underflow handlers.
354 * The handlers must be 64 bytes apart, first starting with the underflow
355 * handlers underflow-4 to underflow-12, then the overflow handlers
356 * overflow-4 to overflow-12.
357 *
358 * Note: We rerun the underflow handlers if we hit an exception, so
359 * we try to access any page that would cause a page fault early.
360 */
361
362 .section .WindowVectors.text, "ax"
363
364
365/* 4-Register Window Overflow Vector (Handler) */
366
367 .align 64
368.global _WindowOverflow4
369_WindowOverflow4:
370 s32e a0, a5, -16
371 s32e a1, a5, -12
372 s32e a2, a5, -8
373 s32e a3, a5, -4
374 rfwo
375
376
377/* 4-Register Window Underflow Vector (Handler) */
378
379 .align 64
380.global _WindowUnderflow4
381_WindowUnderflow4:
382 l32e a0, a5, -16
383 l32e a1, a5, -12
384 l32e a2, a5, -8
385 l32e a3, a5, -4
386 rfwu
387
388
389/* 8-Register Window Overflow Vector (Handler) */
390
391 .align 64
392.global _WindowOverflow8
393_WindowOverflow8:
394 s32e a0, a9, -16
395 l32e a0, a1, -12
396 s32e a2, a9, -8
397 s32e a1, a9, -12
398 s32e a3, a9, -4
399 s32e a4, a0, -32
400 s32e a5, a0, -28
401 s32e a6, a0, -24
402 s32e a7, a0, -20
403 rfwo
404
405/* 8-Register Window Underflow Vector (Handler) */
406
407 .align 64
408.global _WindowUnderflow8
409_WindowUnderflow8:
410 l32e a1, a9, -12
411 l32e a0, a9, -16
412 l32e a7, a1, -12
413 l32e a2, a9, -8
414 l32e a4, a7, -32
415 l32e a3, a9, -4
416 l32e a5, a7, -28
417 l32e a6, a7, -24
418 l32e a7, a7, -20
419 rfwu
420
421
422/* 12-Register Window Overflow Vector (Handler) */
423
424 .align 64
425.global _WindowOverflow12
426_WindowOverflow12:
427 s32e a0, a13, -16
428 l32e a0, a1, -12
429 s32e a1, a13, -12
430 s32e a2, a13, -8
431 s32e a3, a13, -4
432 s32e a4, a0, -48
433 s32e a5, a0, -44
434 s32e a6, a0, -40
435 s32e a7, a0, -36
436 s32e a8, a0, -32
437 s32e a9, a0, -28
438 s32e a10, a0, -24
439 s32e a11, a0, -20
440 rfwo
441
442/* 12-Register Window Underflow Vector (Handler) */
443
444 .align 64
445.global _WindowUnderflow12
446_WindowUnderflow12:
447 l32e a1, a13, -12
448 l32e a0, a13, -16
449 l32e a11, a1, -12
450 l32e a2, a13, -8
451 l32e a4, a11, -48
452 l32e a8, a11, -32
453 l32e a3, a13, -4
454 l32e a5, a11, -44
455 l32e a6, a11, -40
456 l32e a7, a11, -36
457 l32e a9, a11, -28
458 l32e a10, a11, -24
459 l32e a11, a11, -20
460 rfwu
461
462 .text
463
464
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..476b2b53cd01
--- /dev/null
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -0,0 +1,341 @@
1/*
2 * arch/xtensa/kernel/vmlinux.lds.S
3 *
4 * Xtensa linker script
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
15 */
16
17#include <asm-generic/vmlinux.lds.h>
18
19#include <linux/config.h>
20#define _NOCLANGUAGE
21#include <xtensa/config/core.h>
22#include <xtensa/config/system.h>
23OUTPUT_ARCH(xtensa)
24ENTRY(_start)
25
26#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN
27jiffies = jiffies_64 + 4;
28#else
29jiffies = jiffies_64;
30#endif
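The alias picks out the low-order 32 bits of jiffies_64; a standalone
illustration of the byte-offset reasoning (plain C, not part of the link
script):

	#include <stdint.h>
	#include <stdio.h>

	/* On big-endian the low 32 bits of a 64-bit counter sit at byte
	 * offset 4, on little-endian at offset 0 -- hence the "+ 4". */
	int main(void)
	{
		union { uint64_t j64; uint32_t half[2]; } j = { .j64 = 1 };
		printf("low word at byte offset %d\n",
		       j.half[0] == 1 ? 0 : 4);
		return 0;
	}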
31
32#define KERNELOFFSET 0x1000
33
34/* Note: In the following macros, it would be nice to specify only the
35 vector name and section kind and construct "sym" and "section" using
36   CPP concatenation, but that does not work reliably. Concatenating an
37   identifier with "." produces an invalid preprocessing token. CPP will
38   not print a warning because it thinks this is an assembly file, but it
39   leaves the pieces as separate tokens, with or without whitespace
40   between them. */
41
42/* Macro for a relocation entry */
43
44#define RELOCATE_ENTRY(sym, section) \
45 LONG(sym ## _start); \
46 LONG(sym ## _end); \
47 LONG(LOADADDR(section))
48
49/* Macro to define a section for a vector.
50 *
51 * Use of the MIN function catches the types of errors illustrated in
52 * the following example:
53 *
54 * Assume the section .DoubleExceptionVector.literal is completely
55 * full. Then a programmer adds code to .DoubleExceptionVector.text
56 * that produces another literal. The final literal position will
57 * overlay onto the first word of the adjacent code section
58 * .DoubleExceptionVector.text. (In practice, the literals will
59 * overwrite the code, and the first few instructions will be
60 * garbage.)
61 */
62
63#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \
64 section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \
65 LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
66 { \
67 . = ALIGN(4); \
68 sym ## _start = ABSOLUTE(.); \
69 *(section) \
70 sym ## _end = ABSOLUTE(.); \
71 }
72
73/*
74 * Mapping of input sections to output sections when linking.
75 */
76
77SECTIONS
78{
79 . = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET;
80 /* .text section */
81
82 _text = .;
83 _stext = .;
84 _ftext = .;
85
86 .text :
87 {
88 /* The .head.text section must be the first section! */
89 *(.head.text)
90 *(.literal .text)
91 *(.srom.text)
92 VMLINUX_SYMBOL(__sched_text_start) = .;
93 *(.sched.text.literal .sched.text)
94 VMLINUX_SYMBOL(__sched_text_end) = .;
95 VMLINUX_SYMBOL(__lock_text_start) = .;
96 *(.spinlock.text.literal .spinlock.text)
97 VMLINUX_SYMBOL(__lock_text_end) = .;
98
99 }
100 _etext = .;
101
102 . = ALIGN(16);
103
104 RODATA
105
106 /* Relocation table */
107
108 . = ALIGN(16);
109 __boot_reloc_table_start = ABSOLUTE(.);
110
111 __relocate : {
112
113 RELOCATE_ENTRY(_WindowVectors_text,
114 .WindowVectors.text);
115#if 0
116 RELOCATE_ENTRY(_KernelExceptionVector_literal,
117 .KernelExceptionVector.literal);
118#endif
119 RELOCATE_ENTRY(_KernelExceptionVector_text,
120 .KernelExceptionVector.text);
121#if 0
122 RELOCATE_ENTRY(_UserExceptionVector_literal,
123 .UserExceptionVector.literal);
124#endif
125 RELOCATE_ENTRY(_UserExceptionVector_text,
126 .UserExceptionVector.text);
127 RELOCATE_ENTRY(_DoubleExceptionVector_literal,
128 .DoubleExceptionVector.literal);
129 RELOCATE_ENTRY(_DoubleExceptionVector_text,
130 .DoubleExceptionVector.text);
131 }
132 __boot_reloc_table_end = ABSOLUTE(.);
133
134 .fixup : { *(.fixup) }
135
136 . = ALIGN(16);
137
138 __ex_table : {
139 __start___ex_table = .;
140 *(__ex_table)
141 __stop___ex_table = .;
142 }
143
144 /* Data section */
145
146 . = ALIGN(XCHAL_ICACHE_LINESIZE);
147 _fdata = .;
148 .data :
149 {
150 *(.data) CONSTRUCTORS
151 . = ALIGN(XCHAL_ICACHE_LINESIZE);
152 *(.data.cacheline_aligned)
153 }
154
155 _edata = .;
156
157 /* The initial task */
158 . = ALIGN(8192);
159 .data.init_task : { *(.data.init_task) }
160
161 /* Initialization code and data: */
162
163 . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
164 __init_begin = .;
165 .init.text : {
166 _sinittext = .;
167 *(.init.text.literal) *(.init.text)
168 _einittext = .;
169 }
170
171 .init.data :
172 {
173 *(.init.data)
174 . = ALIGN(0x4);
175 __tagtable_begin = .;
176 *(.taglist)
177 __tagtable_end = .;
178 }
179
180 . = ALIGN(XCHAL_ICACHE_LINESIZE);
181
182 __setup_start = .;
183 .init.setup : { *(.init.setup) }
184 __setup_end = .;
185
186 __initcall_start = .;
187 .initcall.init : {
188 *(.initcall1.init)
189 *(.initcall2.init)
190 *(.initcall3.init)
191 *(.initcall4.init)
192 *(.initcall5.init)
193 *(.initcall6.init)
194 *(.initcall7.init)
195 }
196 __initcall_end = .;
197
198 __con_initcall_start = .;
199 .con_initcall.init : { *(.con_initcall.init) }
200 __con_initcall_end = .;
201
202 SECURITY_INIT
203
204 . = ALIGN(4);
205
206 __start___ftr_fixup = .;
207 __ftr_fixup : { *(__ftr_fixup) }
208 __stop___ftr_fixup = .;
209
210 . = ALIGN(32);
211 __per_cpu_start = .;
212 .data.percpu : { *(.data.percpu) }
213 __per_cpu_end = .;
214
215 . = ALIGN(4096);
216 __initramfs_start = .;
217 .init.ramfs : { *(.init.ramfs) }
218 __initramfs_end = .;
219
220 /* Dummy section: gives the first SECTION_VECTOR below a previous section whose load address it can chain from. */
221
222 . = ALIGN(4);
223 .dummy : { LONG(0) }
224
225 /* The vectors are relocated to their run-time positions at startup. */
226
227 SECTION_VECTOR (_WindowVectors_text,
228 .WindowVectors.text,
229 XCHAL_WINDOW_VECTORS_VADDR, 4,
230 .dummy)
231 SECTION_VECTOR (_DebugInterruptVector_literal,
232 .DebugInterruptVector.literal,
233 XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4,
234 SIZEOF(.WindowVectors.text),
235 .WindowVectors.text)
236 SECTION_VECTOR (_DebugInterruptVector_text,
237 .DebugInterruptVector.text,
238 XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL),
239 4,
240 .DebugInterruptVector.literal)
241 SECTION_VECTOR (_KernelExceptionVector_literal,
242 .KernelExceptionVector.literal,
243 XCHAL_KERNELEXC_VECTOR_VADDR - 4,
244 SIZEOF(.DebugInterruptVector.text),
245 .DebugInterruptVector.text)
246 SECTION_VECTOR (_KernelExceptionVector_text,
247 .KernelExceptionVector.text,
248 XCHAL_KERNELEXC_VECTOR_VADDR,
249 4,
250 .KernelExceptionVector.literal)
251 SECTION_VECTOR (_UserExceptionVector_literal,
252 .UserExceptionVector.literal,
253 XCHAL_USEREXC_VECTOR_VADDR - 4,
254 SIZEOF(.KernelExceptionVector.text),
255 .KernelExceptionVector.text)
256 SECTION_VECTOR (_UserExceptionVector_text,
257 .UserExceptionVector.text,
258 XCHAL_USEREXC_VECTOR_VADDR,
259 4,
260 .UserExceptionVector.literal)
261 SECTION_VECTOR (_DoubleExceptionVector_literal,
262 .DoubleExceptionVector.literal,
263 XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
264 SIZEOF(.UserExceptionVector.text),
265 .UserExceptionVector.text)
266 SECTION_VECTOR (_DoubleExceptionVector_text,
267 .DoubleExceptionVector.text,
268 XCHAL_DOUBLEEXC_VECTOR_VADDR,
269 32,
270 .DoubleExceptionVector.literal)
271
272 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
273 . = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
274
275 __init_end = .;
276
277 . = ALIGN(8192);
278
279 /* BSS section */
280 _bss_start = .;
281 .sbss : { *(.sbss) *(.scommon) }
282 .bss : { *(COMMON) *(.bss) }
283 _bss_end = .;
284 _end = .;
285
286 /* only used by the boot loader */
287
288 . = ALIGN(0x10);
289 .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
290
291 . = ALIGN(0x1000);
292 __initrd_start = .;
293 .initrd : { *(.initrd) }
294 __initrd_end = .;
295
296 .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
297 {
298 *(.ResetVector.text)
299 }
300
301
302 /* Sections to be discarded */
303 /DISCARD/ :
304 {
305 *(.text.exit)
306 *(.text.exit.literal)
307 *(.data.exit)
308 *(.exitcall.exit)
309 }
310
311
312 .debug 0 : { *(.debug) }
313 .line 0 : { *(.line) }
314 .debug_srcinfo 0 : { *(.debug_srcinfo) }
315 .debug_sfnames 0 : { *(.debug_sfnames) }
316 .debug_aranges 0 : { *(.debug_aranges) }
317 .debug_pubnames 0 : { *(.debug_pubnames) }
318 .debug_info 0 : { *(.debug_info) }
319 .debug_abbrev 0 : { *(.debug_abbrev) }
320 .debug_line 0 : { *(.debug_line) }
321 .debug_frame 0 : { *(.debug_frame) }
322 .debug_str 0 : { *(.debug_str) }
323 .debug_loc 0 : { *(.debug_loc) }
324 .debug_macinfo 0 : { *(.debug_macinfo) }
325 .debug_weaknames 0 : { *(.debug_weaknames) }
326 .debug_funcnames 0 : { *(.debug_funcnames) }
327 .debug_typenames 0 : { *(.debug_typenames) }
328 .debug_varnames 0 : { *(.debug_varnames) }
329
330 .xt.insn 0 :
331 {
332 *(.xt.insn)
333 *(.gnu.linkonce.x*)
334 }
335
336 .xt.lit 0 :
337 {
338 *(.xt.lit)
339 *(.gnu.linkonce.p*)
340 }
341}
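Two mechanisms in the script above are worth spelling out. SECTION_VECTOR
links each vector section at its fixed run-time address (VMA) while
chaining its load address (LMA) off the previous section, so the vectors
are packed contiguously into the kernel image; the MIN() turns an overfull
predecessor into a detectable overlap rather than silently pushing the
following load addresses around. RELOCATE_ENTRY then emits, per vector
section, three words: run-time start, run-time end, and LOADADDR. Below is
a hedged C sketch of the boot-time copy that such a table enables; the
function name is hypothetical, and in the patch the actual copy is done by
early startup code.

	#include <string.h>

	/* One RELOCATE_ENTRY as laid out in the __relocate output
	 * section: three LONGs per vector section.
	 */
	struct reloc_entry {
		unsigned long start;	/* sym##_start: run-time address */
		unsigned long end;	/* sym##_end */
		unsigned long load;	/* LOADADDR(section): address in image */
	};

	extern struct reloc_entry __boot_reloc_table_start[];
	extern struct reloc_entry __boot_reloc_table_end[];

	/* Copy every vector from its packed position in the image to
	 * the address it was linked at (hypothetical helper).
	 */
	static void relocate_vectors(void)
	{
		struct reloc_entry *r;

		for (r = __boot_reloc_table_start;
		     r < __boot_reloc_table_end; r++)
			memcpy((void *)r->start, (void *)r->load,
			       r->end - r->start);
	}

The copy has to run before the corresponding exceptions can be taken,
since until then the hardware vector addresses contain whatever the boot
loader left there.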
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
new file mode 100644
index 000000000000..efae56a51475
--- /dev/null
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -0,0 +1,123 @@
1/*
2 * arch/xtensa/kernel/xtensa_ksyms.c
3 *
4 * Export Xtensa-specific functions for loadable modules.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Joe Taylor <joe@tensilica.com>
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/interrupt.h>
20#include <asm/irq.h>
21#include <linux/in6.h>
22#include <linux/pci.h>
23#include <linux/ide.h>
24
25#include <asm/uaccess.h>
26#include <asm/checksum.h>
27#include <asm/dma.h>
28#include <asm/io.h>
29#include <asm/page.h>
30#include <asm/pgalloc.h>
31#include <asm/semaphore.h>
32#ifdef CONFIG_BLK_DEV_FD
33#include <asm/floppy.h>
34#endif
35#ifdef CONFIG_NET
36#include <net/checksum.h>
37#endif /* CONFIG_NET */
38
39
40/*
41 * String functions
42 */
43EXPORT_SYMBOL(memcmp);
44EXPORT_SYMBOL(memset);
45EXPORT_SYMBOL(memcpy);
46EXPORT_SYMBOL(memmove);
47EXPORT_SYMBOL(memchr);
48EXPORT_SYMBOL(strcat);
49EXPORT_SYMBOL(strchr);
50EXPORT_SYMBOL(strlen);
51EXPORT_SYMBOL(strpbrk);
52EXPORT_SYMBOL(strncat);
53EXPORT_SYMBOL(strnlen);
54EXPORT_SYMBOL(strrchr);
55EXPORT_SYMBOL(strstr);
56
57EXPORT_SYMBOL(enable_irq);
58EXPORT_SYMBOL(disable_irq);
59EXPORT_SYMBOL(kernel_thread);
60
61/*
62 * gcc internal math functions (libgcc integer helpers)
63 */
64extern long long __ashrdi3(long long, int);
65extern long long __ashldi3(long long, int);
66extern long long __lshrdi3(long long, int);
67extern int __divsi3(int, int);
68extern int __modsi3(int, int);
69extern long long __muldi3(long long, long long);
70extern int __mulsi3(int, int);
71extern unsigned int __udivsi3(unsigned int, unsigned int);
72extern unsigned int __umodsi3(unsigned int, unsigned int);
73extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
74extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
75
76EXPORT_SYMBOL(__ashldi3);
77EXPORT_SYMBOL(__ashrdi3);
78EXPORT_SYMBOL(__lshrdi3);
79EXPORT_SYMBOL(__divsi3);
80EXPORT_SYMBOL(__modsi3);
81EXPORT_SYMBOL(__muldi3);
82EXPORT_SYMBOL(__mulsi3);
83EXPORT_SYMBOL(__udivsi3);
84EXPORT_SYMBOL(__umodsi3);
85EXPORT_SYMBOL(__udivdi3);
86EXPORT_SYMBOL(__umoddi3);
87
88/*
89 * Semaphore operations
90 */
91EXPORT_SYMBOL(__down);
92EXPORT_SYMBOL(__down_interruptible);
93EXPORT_SYMBOL(__down_trylock);
94EXPORT_SYMBOL(__up);
95
96#ifdef CONFIG_NET
97/*
98 * Networking support
99 */
100EXPORT_SYMBOL(csum_partial_copy_generic);
101#endif /* CONFIG_NET */
102
103/*
104 * Architecture-specific symbols
105 */
106EXPORT_SYMBOL(__xtensa_copy_user);
107
108/*
109 * Kernel hacking ...
110 */
111
112#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
113// FIXME EXPORT_SYMBOL(screen_info);
114#endif
115
116EXPORT_SYMBOL(get_wchan);
117
118EXPORT_SYMBOL(outsb);
119EXPORT_SYMBOL(outsw);
120EXPORT_SYMBOL(outsl);
121EXPORT_SYMBOL(insb);
122EXPORT_SYMBOL(insw);
123EXPORT_SYMBOL(insl);
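One remark on the libgcc exports above: Xtensa cores are configurable and
may lack integer divide (and even multiply) hardware, in which case gcc
lowers the plain C operators into calls to helpers such as __udivsi3 and
__umodsi3. Any module doing that arithmetic then needs the symbols at
module load time. A hypothetical module fragment, only to show where such
calls come from (it is not part of the patch):

	#include <linux/module.h>

	/* On a core without a hardware divider, gcc compiles the "/"
	 * and "%" below into calls to __udivsi3 and __umodsi3; without
	 * the EXPORT_SYMBOLs above, loading this module would fail with
	 * unresolved symbols.  (A power-of-two divisor would become a
	 * shift/mask and need no helper, so 10 is used deliberately.)
	 */
	static int __init div_demo_init(void)
	{
		unsigned int bytes = 1000003;
		unsigned int q = bytes / 10;	/* -> __udivsi3 */
		unsigned int r = bytes % 10;	/* -> __umodsi3 */

		printk("q=%u r=%u\n", q, r);
		return 0;
	}

	module_init(div_demo_init);
	MODULE_LICENSE("GPL");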