author    GuanXuetao <gxt@mprc.pku.edu.cn>  2011-01-15 05:15:45 -0500
committer GuanXuetao <gxt@mprc.pku.edu.cn>  2011-03-16 21:19:06 -0400
commit    141c943fd4b323bae2b47f67743dba96134afb1f (patch)
tree      0482b81478e8b40ce06eeebe7f4ed88aafc593c0 /arch/unicore32
parent    79725df5786d2fa48f582b116ea1d74193cc96ca (diff)
unicore32 core architecture: low level entry and setup codes
This patch implements the low-level entry and setup code.

Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/unicore32')
-rw-r--r--  arch/unicore32/include/asm/traps.h |  21
-rw-r--r--  arch/unicore32/kernel/entry.S      | 824
-rw-r--r--  arch/unicore32/kernel/head.S       | 252
-rw-r--r--  arch/unicore32/kernel/setup.c      | 360
-rw-r--r--  arch/unicore32/kernel/setup.h      |  30
-rw-r--r--  arch/unicore32/kernel/traps.c      | 333
6 files changed, 1820 insertions, 0 deletions
diff --git a/arch/unicore32/include/asm/traps.h b/arch/unicore32/include/asm/traps.h
new file mode 100644
index 000000000000..66e17a724bfe
--- /dev/null
+++ b/arch/unicore32/include/asm/traps.h
@@ -0,0 +1,21 @@
1/*
2 * linux/arch/unicore32/include/asm/traps.h
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __UNICORE_TRAP_H__
13#define __UNICORE_TRAP_H__
14
15extern void __init early_trap_init(void);
16extern void dump_backtrace_entry(unsigned long where,
17 unsigned long from, unsigned long frame);
18
19extern void do_DataAbort(unsigned long addr, unsigned int fsr,
20 struct pt_regs *regs);
21#endif
diff --git a/arch/unicore32/kernel/entry.S b/arch/unicore32/kernel/entry.S
new file mode 100644
index 000000000000..83698b7c8f5b
--- /dev/null
+++ b/arch/unicore32/kernel/entry.S
@@ -0,0 +1,824 @@
1/*
2 * linux/arch/unicore32/kernel/entry.S
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Low-level vector interface routines
13 */
14#include <linux/init.h>
15#include <linux/linkage.h>
16#include <asm/assembler.h>
17#include <asm/errno.h>
18#include <asm/thread_info.h>
19#include <asm/memory.h>
20#include <asm/unistd.h>
21#include <generated/asm-offsets.h>
22#include "debug-macro.S"
23
24@
25@ Most of the stack format comes from struct pt_regs, but with
26@ the addition of 8 bytes for storing syscall args 5 and 6.
27@
28#define S_OFF 8
29
30/*
31 * The SWI code relies on the fact that R0 is at the bottom of the stack
32 * (due to slow/fast restore user regs).
33 */
34#if S_R0 != 0
35#error "Please fix"
36#endif
37
38 .macro zero_fp
39#ifdef CONFIG_FRAME_POINTER
40 mov fp, #0
41#endif
42 .endm
43
44 .macro alignment_trap, rtemp
45#ifdef CONFIG_ALIGNMENT_TRAP
46 ldw \rtemp, .LCcralign
47 ldw \rtemp, [\rtemp]
48 movc p0.c1, \rtemp, #0
49#endif
50 .endm
51
52 .macro load_user_sp_lr, rd, rtemp, offset = 0
53 mov \rtemp, asr
54 xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
55 mov.a asr, \rtemp @ switch to the SUSR mode
56
57 ldw sp, [\rd+], #\offset @ load sp_user
58 ldw lr, [\rd+], #\offset + 4 @ load lr_user
59
60 xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
61 mov.a asr, \rtemp @ switch back to the PRIV mode
62 .endm
63
64 .macro priv_exit, rpsr
65 mov.a bsr, \rpsr
66 ldm.w (r0 - r15), [sp]+
67 ldm.b (r16 - pc), [sp]+ @ load r0 - pc, asr
68 .endm
69
70 .macro restore_user_regs, fast = 0, offset = 0
71 ldw r1, [sp+], #\offset + S_PSR @ get calling asr
72 ldw lr, [sp+], #\offset + S_PC @ get pc
73 mov.a bsr, r1 @ save in bsr_priv
74 .if \fast
75 add sp, sp, #\offset + S_R1 @ r0 is syscall return value
76 ldm.w (r1 - r15), [sp]+ @ get calling r1 - r15
77 ldur (r16 - lr), [sp]+ @ get calling r16 - lr
78 .else
79 ldm.w (r0 - r15), [sp]+ @ get calling r0 - r15
80 ldur (r16 - lr), [sp]+ @ get calling r16 - lr
81 .endif
82 nop
83 add sp, sp, #S_FRAME_SIZE - S_R16
84 mov.a pc, lr @ return
85 @ and move bsr_priv into asr
86 .endm
87
88 .macro get_thread_info, rd
89 mov \rd, sp >> #13
90 mov \rd, \rd << #13
91 .endm
92
93 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
94 ldw \base, =(io_p2v(PKUNITY_INTC_BASE))
95 ldw \irqstat, [\base+], #0xC @ INTC_ICIP
96 ldw \tmp, [\base+], #0x4 @ INTC_ICMR
97 and.a \irqstat, \irqstat, \tmp
98 beq 1001f
99 cntlz \irqnr, \irqstat
100 rsub \irqnr, \irqnr, #31
1011001: /* EQ will be set if no irqs pending */
102 .endm
103
104#ifdef CONFIG_DEBUG_LL
105 .macro printreg, reg, temp
106 adr \temp, 901f
107 stm (r0-r3), [\temp]+
108 stw lr, [\temp+], #0x10
109 mov r0, \reg
110 b.l printhex8
111 mov r0, #':'
112 b.l printch
113 mov r0, pc
114 b.l printhex8
115 adr r0, 902f
116 b.l printascii
117 adr \temp, 901f
118 ldm (r0-r3), [\temp]+
119 ldw lr, [\temp+], #0x10
120 b 903f
121901: .word 0, 0, 0, 0, 0 @ r0-r3, lr
122902: .asciz ": epip4d\n"
123 .align
124903:
125 .endm
126#endif
127
128/*
129 * These are the registers used in the syscall handler, and allow us to
130 * have in theory up to 7 arguments to a function - r0 to r6.
131 *
132 * Note that tbl == why is intentional.
133 *
134 * We must set at least "tsk" and "why" when branching to ret_to_user.
135 */
136scno .req r21 @ syscall number
137tbl .req r22 @ syscall table pointer
138why .req r22 @ Linux syscall (!= 0)
139tsk .req r23 @ current thread_info
140
141/*
142 * Interrupt handling. Preserves r17, r18, r19
143 */
144 .macro intr_handler
1451: get_irqnr_and_base r0, r6, r5, lr
146 beq 2f
147 mov r1, sp
148 @
149 @ routine called with r0 = irq number, r1 = struct pt_regs *
150 @
151 adr lr, 1b
152 b asm_do_IRQ
1532:
154 .endm
155
156/*
157 * PRIV mode handlers
158 */
159 .macro priv_entry
160 sub sp, sp, #(S_FRAME_SIZE - 4)
161 stm (r1 - r15), [sp]+
162 add r5, sp, #S_R15
163 stm (r16 - r28), [r5]+
164
165 ldm (r1 - r3), [r0]+
166 add r5, sp, #S_SP - 4 @ here for interlock avoidance
167 mov r4, #-1 @ "" "" "" ""
168 add r0, sp, #(S_FRAME_SIZE - 4)
169 stw.w r1, [sp+], #-4 @ save the "real" r0 copied
170 @ from the exception stack
171
172 mov r1, lr
173
174 @
175 @ We are now ready to fill in the remaining blanks on the stack:
176 @
177 @ r0 - sp_priv
178 @ r1 - lr_priv
179 @ r2 - lr_<exception>, already fixed up for correct return/restart
180 @ r3 - bsr_<exception>
181 @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
182 @
183 stm (r0 - r4), [r5]+
184 .endm
185
186/*
187 * User mode handlers
188 *
189 */
190 .macro user_entry
191 sub sp, sp, #S_FRAME_SIZE
192 stm (r1 - r15), [sp+]
193 add r4, sp, #S_R16
194 stm (r16 - r28), [r4]+
195
196 ldm (r1 - r3), [r0]+
197 add r0, sp, #S_PC @ here for interlock avoidance
198 mov r4, #-1 @ "" "" "" ""
199
200 stw r1, [sp] @ save the "real" r0 copied
201 @ from the exception stack
202
203 @
204 @ We are now ready to fill in the remaining blanks on the stack:
205 @
206 @ r2 - lr_<exception>, already fixed up for correct return/restart
207 @ r3 - bsr_<exception>
208 @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
209 @
210 @ Also, separately save sp_user and lr_user
211 @
212 stm (r2 - r4), [r0]+
213 stur (sp, lr), [r0-]
214
215 @
216 @ Enable the alignment trap while in kernel mode
217 @
218 alignment_trap r0
219
220 @
221 @ Clear FP to mark the first stack frame
222 @
223 zero_fp
224 .endm
225
226 .text
227
228@
229@ __invalid - generic code for failed exception
230@ (re-entrant version of handlers)
231@
232__invalid:
233 sub sp, sp, #S_FRAME_SIZE
234 stm (r1 - r15), [sp+]
235 add r1, sp, #S_R16
236 stm (r16 - r28, sp, lr), [r1]+
237
238 zero_fp
239
240 ldm (r4 - r6), [r0]+
241 add r0, sp, #S_PC @ here for interlock avoidance
242 mov r7, #-1 @ "" "" "" ""
243 stw r4, [sp] @ save preserved r0
244 stm (r5 - r7), [r0]+ @ lr_<exception>,
245 @ asr_<exception>, "old_r0"
246
247 mov r0, sp
248 mov r1, asr
249 b bad_mode
250ENDPROC(__invalid)
251
252 .align 5
253__dabt_priv:
254 priv_entry
255
256 @
257 @ get ready to re-enable interrupts if appropriate
258 @
259 mov r17, asr
260 cand.a r3, #PSR_I_BIT
261 bne 1f
262 andn r17, r17, #PSR_I_BIT
2631:
264
265 @
266 @ Call the processor-specific abort handler:
267 @
268 @ r2 - aborted context pc
269 @ r3 - aborted context asr
270 @
271 @ The abort handler must return the aborted address in r0, and
272 @ the fault status register in r1.
273 @
274 movc r1, p0.c3, #0 @ get FSR
275 movc r0, p0.c4, #0 @ get FAR
276
277 @
278 @ set desired INTR state, then call main handler
279 @
280 mov.a asr, r17
281 mov r2, sp
282 b.l do_DataAbort
283
284 @
285 @ INTRs off again before pulling preserved data off the stack
286 @
287 disable_irq r0
288
289 @
290 @ restore BSR and restart the instruction
291 @
292 ldw r2, [sp+], #S_PSR
293 priv_exit r2 @ return from exception
294ENDPROC(__dabt_priv)
295
296 .align 5
297__intr_priv:
298 priv_entry
299
300 intr_handler
301
302 mov r0, #0 @ epip4d
303 movc p0.c5, r0, #14
304 nop; nop; nop; nop; nop; nop; nop; nop
305
306 ldw r4, [sp+], #S_PSR @ irqs are already disabled
307
308 priv_exit r4 @ return from exception
309ENDPROC(__intr_priv)
310
311 .ltorg
312
313 .align 5
314__extn_priv:
315 priv_entry
316
317 mov r0, sp @ struct pt_regs *regs
318 mov r1, asr
319 b bad_mode @ not supported
320ENDPROC(__extn_priv)
321
322 .align 5
323__pabt_priv:
324 priv_entry
325
326 @
327 @ re-enable interrupts if appropriate
328 @
329 mov r17, asr
330 cand.a r3, #PSR_I_BIT
331 bne 1f
332 andn r17, r17, #PSR_I_BIT
3331:
334
335 @
336 @ set args, then call main handler
337 @
338 @ r0 - address of faulting instruction
339 @ r1 - pointer to registers on stack
340 @
341 mov r0, r2 @ pass address of aborted instruction
342 mov r1, #5
343 mov.a asr, r17
344 mov r2, sp @ regs
345 b.l do_PrefetchAbort @ call abort handler
346
347 @
348 @ INTRs off again before pulling preserved data off the stack
349 @
350 disable_irq r0
351
352 @
353 @ restore BSR and restart the instruction
354 @
355 ldw r2, [sp+], #S_PSR
356 priv_exit r2 @ return from exception
357ENDPROC(__pabt_priv)
358
359 .align 5
360.LCcralign:
361 .word cr_alignment
362
363 .align 5
364__dabt_user:
365 user_entry
366
367#ifdef CONFIG_UNICORE_FPU_F64
368 cff ip, s31
369 cand.a ip, #0x08000000 @ FPU exception traps?
370 beq 209f
371
372 ldw ip, [sp+], #S_PC
373 add ip, ip, #4
374 stw ip, [sp+], #S_PC
375 @
376 @ fall through to the emulation code, which returns using r19 if
377 @ it has emulated the instruction, or the more conventional lr
378 @ if we are to treat this as a real extended instruction
379 @
380 @ r0 - instruction
381 @
3821: ldw.u r0, [r2]
383 adr r19, ret_from_exception
384 adr lr, 209f
385 @
386 @ fallthrough to call do_uc_f64
387 @
388/*
389 * Check whether the instruction is a co-processor instruction.
390 * If yes, we need to call the relevant co-processor handler.
391 *
392 * Note that we don't do a full check here for the co-processor
393 * instructions; all instructions with bit 27 set are well
394 * defined. The only instructions that should fault are the
395 * co-processor instructions.
396 *
397 * Emulators may wish to make use of the following registers:
398 * r0 = instruction opcode.
399 * r2 = PC
400 * r19 = normal "successful" return address
401 * r20 = this thread's thread_info structure.
402 * lr = unrecognised instruction return address
403 */
404 get_thread_info r20 @ get current thread
405 and r8, r0, #0x00003c00 @ mask out CP number
406 mov r7, #1
407 stb r7, [r20+], #TI_USED_CP + 2 @ set appropriate used_cp[]
408
409 @ F64 hardware support entry point.
410 @ r0 = faulted instruction
411 @ r19 = return address
412 @ r20 = fp_state
413 enable_irq r4
414 add r20, r20, #TI_FPSTATE @ r20 = workspace
415 cff r1, s31 @ get fpu FPSCR
416 andn r2, r1, #0x08000000
417 ctf r2, s31 @ clear 27 bit
418 mov r2, sp @ nothing stacked - regdump is at TOS
419 mov lr, r19 @ setup for a return to the user code
420
421 @ Now call the C code to package up the bounce to the support code
422 @ r0 holds the trigger instruction
423 @ r1 holds the FPSCR value
424 @ r2 pointer to register dump
425 b ucf64_exchandler
426209:
427#endif
428 @
429 @ Call the processor-specific abort handler:
430 @
431 @ r2 - aborted context pc
432 @ r3 - aborted context asr
433 @
434 @ The abort handler must return the aborted address in r0, and
435 @ the fault status register in r1.
436 @
437 movc r1, p0.c3, #0 @ get FSR
438 movc r0, p0.c4, #0 @ get FAR
439
440 @
441 @ INTRs on, then call the main handler
442 @
443 enable_irq r2
444 mov r2, sp
445 adr lr, ret_from_exception
446 b do_DataAbort
447ENDPROC(__dabt_user)
448
449 .align 5
450__intr_user:
451 user_entry
452
453 get_thread_info tsk
454
455 intr_handler
456
457 mov why, #0
458 b ret_to_user
459ENDPROC(__intr_user)
460
461 .ltorg
462
463 .align 5
464__extn_user:
465 user_entry
466
467 mov r0, sp
468 mov r1, asr
469 b bad_mode
470ENDPROC(__extn_user)
471
472 .align 5
473__pabt_user:
474 user_entry
475
476 mov r0, r2 @ pass address of aborted instruction.
477 mov r1, #5
478 enable_irq r1 @ Enable interrupts
479 mov r2, sp @ regs
480 b.l do_PrefetchAbort @ call abort handler
481 /* fall through */
482/*
483 * This is the return code to user mode for abort handlers
484 */
485ENTRY(ret_from_exception)
486 get_thread_info tsk
487 mov why, #0
488 b ret_to_user
489ENDPROC(__pabt_user)
490ENDPROC(ret_from_exception)
491
492/*
493 * Register switch for UniCore V2 processors
494 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
495 * previous and next are guaranteed not to be the same.
496 */
497ENTRY(__switch_to)
498 add ip, r1, #TI_CPU_SAVE
499 stm.w (r4 - r15), [ip]+
500 stm.w (r16 - r27, sp, lr), [ip]+
501
502#ifdef CONFIG_UNICORE_FPU_F64
503 add ip, r1, #TI_FPSTATE
504 sfm.w (f0 - f7 ), [ip]+
505 sfm.w (f8 - f15), [ip]+
506 sfm.w (f16 - f23), [ip]+
507 sfm.w (f24 - f31), [ip]+
508 cff r4, s31
509 stw r4, [ip]
510
511 add ip, r2, #TI_FPSTATE
512 lfm.w (f0 - f7 ), [ip]+
513 lfm.w (f8 - f15), [ip]+
514 lfm.w (f16 - f23), [ip]+
515 lfm.w (f24 - f31), [ip]+
516 ldw r4, [ip]
517 ctf r4, s31
518#endif
519 add ip, r2, #TI_CPU_SAVE
520 ldm.w (r4 - r15), [ip]+
521 ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously
522ENDPROC(__switch_to)
523
524 .align 5
525/*
526 * This is the fast syscall return path. We do as little as
527 * possible here, and this includes saving r0 back into the PRIV
528 * stack.
529 */
530ret_fast_syscall:
531 disable_irq r1 @ disable interrupts
532 ldw r1, [tsk+], #TI_FLAGS
533 cand.a r1, #_TIF_WORK_MASK
534 bne fast_work_pending
535
536 @ fast_restore_user_regs
537 restore_user_regs fast = 1, offset = S_OFF
538
539/*
540 * Ok, we need to do extra processing, enter the slow path.
541 */
542fast_work_pending:
543 stw.w r0, [sp+], #S_R0+S_OFF @ returned r0
544work_pending:
545 cand.a r1, #_TIF_NEED_RESCHED
546 bne work_resched
547 cand.a r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
548 beq no_work_pending
549 mov r0, sp @ 'regs'
550 mov r2, why @ 'syscall'
551 cand.a r1, #_TIF_SIGPENDING @ delivering a signal?
552 cmovne why, #0 @ prevent further restarts
553 b.l do_notify_resume
554 b ret_slow_syscall @ Check work again
555
556work_resched:
557 b.l schedule
558/*
559 * "slow" syscall return path. "why" tells us if this was a real syscall.
560 */
561ENTRY(ret_to_user)
562ret_slow_syscall:
563 disable_irq r1 @ disable interrupts
564 get_thread_info tsk @ epip4d, one path error?!
565 ldw r1, [tsk+], #TI_FLAGS
566 cand.a r1, #_TIF_WORK_MASK
567 bne work_pending
568no_work_pending:
569 @ slow_restore_user_regs
570 restore_user_regs fast = 0, offset = 0
571ENDPROC(ret_to_user)
572
573/*
574 * This is how we return from a fork.
575 */
576ENTRY(ret_from_fork)
577 b.l schedule_tail
578 get_thread_info tsk
579 ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing
580 mov why, #1
581 cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
582 beq ret_slow_syscall
583 mov r1, sp
584 mov r0, #1 @ trace exit [IP = 1]
585 b.l syscall_trace
586 b ret_slow_syscall
587ENDPROC(ret_from_fork)
588
589/*=============================================================================
590 * SWI handler
591 *-----------------------------------------------------------------------------
592 */
593 .align 5
594ENTRY(vector_swi)
595 sub sp, sp, #S_FRAME_SIZE
596 stm (r0 - r15), [sp]+ @ Calling r0 - r15
597 add r8, sp, #S_R16
598 stm (r16 - r28), [r8]+ @ Calling r16 - r28
599 add r8, sp, #S_PC
600 stur (sp, lr), [r8-] @ Calling sp, lr
601 mov r8, bsr @ called from non-REAL mode
602 stw lr, [sp+], #S_PC @ Save calling PC
603 stw r8, [sp+], #S_PSR @ Save ASR
604 stw r0, [sp+], #S_OLD_R0 @ Save OLD_R0
605 zero_fp
606
607 /*
608 * Get the system call number.
609 */
610 sub ip, lr, #4
611 ldw.u scno, [ip] @ get SWI instruction
612
613#ifdef CONFIG_ALIGNMENT_TRAP
614 ldw ip, __cr_alignment
615 ldw ip, [ip]
616 movc p0.c1, ip, #0 @ update control register
617#endif
618 enable_irq ip
619
620 get_thread_info tsk
621 ldw tbl, =sys_call_table @ load syscall table pointer
622
623 andn scno, scno, #0xff000000 @ mask off SWI op-code
624 andn scno, scno, #0x00ff0000 @ mask off SWI op-code
625
626 stm.w (r4, r5), [sp-] @ push fifth and sixth args
627 ldw ip, [tsk+], #TI_FLAGS @ check for syscall tracing
628 cand.a ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
629 bne __sys_trace
630
631 csub.a scno, #__NR_syscalls @ check upper syscall limit
632 adr lr, ret_fast_syscall @ return address
633 bea 1f
634 ldw pc, [tbl+], scno << #2 @ call sys_* routine
6351:
636 add r1, sp, #S_OFF
6372: mov why, #0 @ no longer a real syscall
638 b sys_ni_syscall @ not private func
639
640 /*
641 * This is the really slow path. We're going to be doing
642 * context switches, and waiting for our parent to respond.
643 */
644__sys_trace:
645 mov r2, scno
646 add r1, sp, #S_OFF
647 mov r0, #0 @ trace entry [IP = 0]
648 b.l syscall_trace
649
650 adr lr, __sys_trace_return @ return address
651 mov scno, r0 @ syscall number (possibly new)
652 add r1, sp, #S_R0 + S_OFF @ pointer to regs
653 csub.a scno, #__NR_syscalls @ check upper syscall limit
654 bea 2b
655 ldm (r0 - r3), [r1]+ @ have to reload r0 - r3
656 ldw pc, [tbl+], scno << #2 @ call sys_* routine
657
658__sys_trace_return:
659 stw.w r0, [sp+], #S_R0 + S_OFF @ save returned r0
660 mov r2, scno
661 mov r1, sp
662 mov r0, #1 @ trace exit [IP = 1]
663 b.l syscall_trace
664 b ret_slow_syscall
665
666 .align 5
667#ifdef CONFIG_ALIGNMENT_TRAP
668 .type __cr_alignment, #object
669__cr_alignment:
670 .word cr_alignment
671#endif
672 .ltorg
673
674ENTRY(sys_execve)
675 add r3, sp, #S_OFF
676 b __sys_execve
677ENDPROC(sys_execve)
678
679ENTRY(sys_clone)
680 add ip, sp, #S_OFF
681 stw ip, [sp+], #4
682 b __sys_clone
683ENDPROC(sys_clone)
684
685ENTRY(sys_rt_sigreturn)
686 add r0, sp, #S_OFF
687 mov why, #0 @ prevent syscall restart handling
688 b __sys_rt_sigreturn
689ENDPROC(sys_rt_sigreturn)
690
691ENTRY(sys_sigaltstack)
692 ldw r2, [sp+], #S_OFF + S_SP
693 b do_sigaltstack
694ENDPROC(sys_sigaltstack)
695
696 __INIT
697
698/*
699 * Vector stubs.
700 *
701 * This code is copied to 0xffff0200 so we can use branches in the
702 * vectors, rather than ldw's. Note that this code must not
703 * exceed 0x300 bytes.
704 *
705 * Common stub entry macro:
706 * Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
707 *
708 * SP points to a minimal amount of processor-private memory, the address
709 * of which is copied into r0 for the mode specific abort handler.
710 */
711 .macro vector_stub, name, mode
712 .align 5
713
714vector_\name:
715 @
716 @ Save r0, lr_<exception> (parent PC) and bsr_<exception>
717 @ (parent ASR)
718 @
719 stw r0, [sp]
720 stw lr, [sp+], #4 @ save r0, lr
721 mov lr, bsr
722 stw lr, [sp+], #8 @ save bsr
723
724 @
725 @ Prepare for PRIV mode. INTRs remain disabled.
726 @
727 mov r0, asr
728 xor r0, r0, #(\mode ^ PRIV_MODE)
729 mov.a bsr, r0
730
731 @
732 @ the branch table must immediately follow this code
733 @
734 and lr, lr, #0x03
735 add lr, lr, #1
736 mov r0, sp
737 ldw lr, [pc+], lr << #2
738 mov.a pc, lr @ branch to handler in PRIV mode
739ENDPROC(vector_\name)
740 .align 2
741 @ handler addresses follow this label
742 .endm
743
744 .globl __stubs_start
745__stubs_start:
746/*
747 * Interrupt dispatcher
748 */
749 vector_stub intr, INTR_MODE
750
751 .long __intr_user @ 0 (USER)
752 .long __invalid @ 1
753 .long __invalid @ 2
754 .long __intr_priv @ 3 (PRIV)
755
756/*
757 * Data abort dispatcher
758 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
759 */
760 vector_stub dabt, ABRT_MODE
761
762 .long __dabt_user @ 0 (USER)
763 .long __invalid @ 1
764 .long __invalid @ 2 (INTR)
765 .long __dabt_priv @ 3 (PRIV)
766
767/*
768 * Prefetch abort dispatcher
769 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
770 */
771 vector_stub pabt, ABRT_MODE
772
773 .long __pabt_user @ 0 (USER)
774 .long __invalid @ 1
775 .long __invalid @ 2 (INTR)
776 .long __pabt_priv @ 3 (PRIV)
777
778/*
779 * Undef instr entry dispatcher
780 * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
781 */
782 vector_stub extn, EXTN_MODE
783
784 .long __extn_user @ 0 (USER)
785 .long __invalid @ 1
786 .long __invalid @ 2 (INTR)
787 .long __extn_priv @ 3 (PRIV)
788
789/*
790 * We group all the following data together to optimise
791 * for CPUs with separate I & D caches.
792 */
793 .align 5
794
795.LCvswi:
796 .word vector_swi
797
798 .globl __stubs_end
799__stubs_end:
800
801 .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
802
803 .globl __vectors_start
804__vectors_start:
805 jepriv SYS_ERROR0
806 b vector_extn + stubs_offset
807 ldw pc, .LCvswi + stubs_offset
808 b vector_pabt + stubs_offset
809 b vector_dabt + stubs_offset
810 jepriv SYS_ERROR0
811 b vector_intr + stubs_offset
812 jepriv SYS_ERROR0
813
814 .globl __vectors_end
815__vectors_end:
816
817 .data
818
819 .globl cr_alignment
820 .globl cr_no_alignment
821cr_alignment:
822 .space 4
823cr_no_alignment:
824 .space 4
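For reference, the get_thread_info macro above recovers the current thread_info from the stack pointer alone: kernel stacks are 2^13 bytes (8KiB) with thread_info at the base, so shifting sp right and then left by 13 clears the offset within the stack. A minimal C sketch of the same computation, assuming an 8KiB stack size (the sketch_* names are illustrative, not kernel API):

    #include <stdint.h>

    #define SK_THREAD_SHIFT 13                        /* matches "sp >> #13" */
    #define SK_THREAD_SIZE  (1UL << SK_THREAD_SHIFT)  /* assumed 8KiB stacks */

    /* Round sp down to the stack base, where thread_info is assumed to live. */
    static inline uintptr_t sketch_thread_info(uintptr_t sp)
    {
            return sp & ~(SK_THREAD_SIZE - 1);
    }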
diff --git a/arch/unicore32/kernel/head.S b/arch/unicore32/kernel/head.S
new file mode 100644
index 000000000000..92255f3ab6a7
--- /dev/null
+++ b/arch/unicore32/kernel/head.S
@@ -0,0 +1,252 @@
1/*
2 * linux/arch/unicore32/kernel/head.S
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/linkage.h>
13#include <linux/init.h>
14
15#include <asm/assembler.h>
16#include <asm/ptrace.h>
17#include <generated/asm-offsets.h>
18#include <asm/memory.h>
19#include <asm/thread_info.h>
20#include <asm/system.h>
21#include <asm/pgtable-hwdef.h>
22
23#if (PHYS_OFFSET & 0x003fffff)
24#error "PHYS_OFFSET must be at an even 4MiB boundary!"
25#endif
26
27#define KERNEL_RAM_VADDR (PAGE_OFFSET + KERNEL_IMAGE_START)
28#define KERNEL_RAM_PADDR (PHYS_OFFSET + KERNEL_IMAGE_START)
29
30#define KERNEL_PGD_PADDR (KERNEL_RAM_PADDR - 0x1000)
31#define KERNEL_PGD_VADDR (KERNEL_RAM_VADDR - 0x1000)
32
33#define KERNEL_START KERNEL_RAM_VADDR
34#define KERNEL_END _end
35
36/*
37 * swapper_pg_dir is the virtual address of the initial page table.
38 * We place the page tables 4K below KERNEL_RAM_VADDR. Therefore, we must
39 * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
40 * the least significant 16 bits to be 0x8000, but we could probably
41 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x1000.
42 */
43#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
44#error KERNEL_RAM_VADDR must start at 0xXXXX8000
45#endif
46
47 .globl swapper_pg_dir
48 .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x1000
49
50/*
51 * Kernel startup entry point.
52 * ---------------------------
53 *
54 * This is normally called from the decompressor code. The requirements
55 * are: MMU = off, D-cache = off, I-cache = don't care
56 *
57 * This code is mostly position independent, so if you link the kernel at
58 * 0xc0008000, you call this at __pa(0xc0008000).
59 */
60 __HEAD
61ENTRY(stext)
62 @ set asr
63 mov r0, #PRIV_MODE @ ensure priv mode
64 or r0, #PSR_R_BIT | PSR_I_BIT @ disable irqs
65 mov.a asr, r0
66
67 @ process identify
68 movc r0, p0.c0, #0 @ cpuid
69 movl r1, 0xff00ffff @ mask
70 movl r2, 0x4d000863 @ value
71 and r0, r1, r0
72 cxor.a r0, r2
73 bne __error_p @ invalid processor id
74
75 /*
76 * Clear the 4K level 1 swapper page table
77 */
78 movl r0, #KERNEL_PGD_PADDR @ page table address
79 mov r1, #0
80 add r2, r0, #0x1000
81101: stw.w r1, [r0]+, #4
82 stw.w r1, [r0]+, #4
83 stw.w r1, [r0]+, #4
84 stw.w r1, [r0]+, #4
85 cxor.a r0, r2
86 bne 101b
87
88 movl r4, #KERNEL_PGD_PADDR @ page table address
89 mov r7, #PMD_TYPE_SECT | PMD_PRESENT @ page size: section
90 or r7, r7, #PMD_SECT_CACHEABLE @ cacheable
91 or r7, r7, #PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC
92
93 /*
94 * Create identity mapping for first 4MB of kernel to
95 * cater for the MMU enable. This identity mapping
96 * will be removed by paging_init(). We use our current program
97 * counter to determine corresponding section base address.
98 */
99 mov r6, pc
100 mov r6, r6 >> #22 @ start of kernel section
101 or r1, r7, r6 << #22 @ flags + kernel base
102 stw r1, [r4+], r6 << #2 @ identity mapping
103
104 /*
105 * Now setup the pagetables for our kernel direct
106 * mapped region.
107 */
108 add r0, r4, #(KERNEL_START & 0xff000000) >> 20
109 stw.w r1, [r0+], #(KERNEL_START & 0x00c00000) >> 20
110 movl r6, #(KERNEL_END - 1)
111 add r0, r0, #4
112 add r6, r4, r6 >> #20
113102: csub.a r0, r6
114 add r1, r1, #1 << 22
115 bua 103f
116 stw.w r1, [r0]+, #4
117 b 102b
118103:
119 /*
120 * Then map first 4MB of ram in case it contains our boot params.
121 */
122 add r0, r4, #PAGE_OFFSET >> 20
123 or r6, r7, #(PHYS_OFFSET & 0xffc00000)
124 stw r6, [r0]
125
126 ldw r15, __switch_data @ address to jump to after
127
128 /*
129 * Initialise TLB, Caches, and MMU state ready to switch the MMU
130 * on.
131 */
132 mov r0, #0
133 movc p0.c5, r0, #28 @ cache invalidate all
134 nop8
135 movc p0.c6, r0, #6 @ TLB invalidate all
136 nop8
137
138 /*
139 * ..V. .... ..TB IDAM
140 * ..1. .... ..01 1111
141 */
142 movl r0, #0x201f @ control register setting
143
144 /*
145 * Setup common bits before finally enabling the MMU. Essentially
146 * this is just loading the page table pointer and domain access
147 * registers.
148 */
149 #ifndef CONFIG_ALIGNMENT_TRAP
150 andn r0, r0, #CR_A
151 #endif
152 #ifdef CONFIG_CPU_DCACHE_DISABLE
153 andn r0, r0, #CR_D
154 #endif
155 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
156 andn r0, r0, #CR_B
157 #endif
158 #ifdef CONFIG_CPU_ICACHE_DISABLE
159 andn r0, r0, #CR_I
160 #endif
161
162 movc p0.c2, r4, #0 @ set pgd
163 b __turn_mmu_on
164ENDPROC(stext)
165
166/*
167 * Enable the MMU. This completely changes the structure of the visible
168 * memory space. You will not be able to trace execution through this.
169 *
170 * r0 = cp#0 control register
171 * r15 = *virtual* address to jump to upon completion
172 */
173 .align 5
174__turn_mmu_on:
175 mov r0, r0
176 movc p0.c1, r0, #0 @ write control reg
177 nop @ fetch inst by phys addr
178 mov pc, r15
179 nop8 @ fetch inst by phys addr
180ENDPROC(__turn_mmu_on)
181
182/*
183 * Set up the initial page tables. We set up only the bare
184 * minimum required to get the kernel running, which
185 * generally means mapping in the kernel code.
186 *
187 * r9 = cpuid
188 * r10 = procinfo
189 *
190 * Returns:
191 * r0, r3, r6, r7 corrupted
192 * r4 = physical page table address
193 */
194 .ltorg
195
196 .align 2
197 .type __switch_data, %object
198__switch_data:
199 .long __mmap_switched
200 .long __bss_start @ r6
201 .long _end @ r7
202 .long cr_alignment @ r8
203 .long init_thread_union + THREAD_START_SP @ sp
204
205/*
206 * The following fragment of code is executed with the MMU on,
207 * and uses absolute addresses; this is not position independent.
208 *
209 * r0 = cp#0 control register
210 */
211__mmap_switched:
212 adr r3, __switch_data + 4
213
214 ldm.w (r6, r7, r8), [r3]+
215 ldw sp, [r3]
216
217 mov fp, #0 @ Clear BSS (and zero fp)
218203: csub.a r6, r7
219 bea 204f
220 stw.w fp, [r6]+,#4
221 b 203b
222204:
223 andn r1, r0, #CR_A @ Clear 'A' bit
224 stm (r0, r1), [r8]+ @ Save control register values
225 b start_kernel
226ENDPROC(__mmap_switched)
227
228/*
229 * Exception handling. Something went wrong and we can't proceed. We
230 * ought to tell the user, but since we don't have any guarantee that
231 * we're even running on the right architecture, we do virtually nothing.
232 *
233 * If CONFIG_DEBUG_LL is set we try to print out something about the error
234 * and hope for the best (useful if the bootloader fails to pass a proper
235 * machine ID for example).
236 */
237__error_p:
238#ifdef CONFIG_DEBUG_LL
239 adr r0, str_p1
240 b.l printascii
241 mov r0, r9
242 b.l printhex8
243 adr r0, str_p2
244 b.l printascii
245901: nop8
246 b 901b
247str_p1: .asciz "\nError: unrecognized processor variant (0x"
248str_p2: .asciz ").\n"
249 .align
250#endif
251ENDPROC(__error_p)
252
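For reference, head.S above fills the 4KiB level-1 page table by hand: 1024 word-sized entries, each mapping one 4MiB section, so an entry's index is the virtual address shifted right by 22 and its value is the physical section base ORed with the PMD flags built up in r7. A sketch of that arithmetic in C, under the same 4MiB-section assumption (sketch_set_section is illustrative, not kernel API):

    #include <stdint.h>

    #define SK_SECTION_SHIFT 22                                /* 4MiB sections */
    #define SK_SECTION_MASK  (~((1UL << SK_SECTION_SHIFT) - 1))

    /* Write one level-1 entry: physical section base | flags. */
    static void sketch_set_section(uint32_t *pgd, uintptr_t vaddr,
                                   uintptr_t paddr, uint32_t flags)
    {
            pgd[vaddr >> SK_SECTION_SHIFT] = (paddr & SK_SECTION_MASK) | flags;
    }

The identity mapping in stext is this operation with vaddr == paddr == the section the boot code is executing from, which is what "stw r1, [r4+], r6 << #2" stores.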
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
new file mode 100644
index 000000000000..1e175a82844d
--- /dev/null
+++ b/arch/unicore32/kernel/setup.c
@@ -0,0 +1,360 @@
1/*
2 * linux/arch/unicore32/kernel/setup.c
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/stddef.h>
15#include <linux/ioport.h>
16#include <linux/delay.h>
17#include <linux/utsname.h>
18#include <linux/initrd.h>
19#include <linux/console.h>
20#include <linux/bootmem.h>
21#include <linux/seq_file.h>
22#include <linux/screen_info.h>
23#include <linux/init.h>
24#include <linux/root_dev.h>
25#include <linux/cpu.h>
26#include <linux/interrupt.h>
27#include <linux/smp.h>
28#include <linux/fs.h>
29#include <linux/proc_fs.h>
30#include <linux/memblock.h>
31#include <linux/elf.h>
32#include <linux/io.h>
33
34#include <asm/cputype.h>
35#include <asm/sections.h>
36#include <asm/setup.h>
37#include <asm/cacheflush.h>
38#include <asm/tlbflush.h>
39#include <asm/traps.h>
40
41#include "setup.h"
42
43#ifndef MEM_SIZE
44#define MEM_SIZE (16*1024*1024)
45#endif
46
47struct stack {
48 u32 irq[3];
49 u32 abt[3];
50 u32 und[3];
51} ____cacheline_aligned;
52
53static struct stack stacks[NR_CPUS];
54
55char elf_platform[ELF_PLATFORM_SIZE];
56EXPORT_SYMBOL(elf_platform);
57
58static char __initdata cmd_line[COMMAND_LINE_SIZE];
59
60static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
61
62/*
63 * Standard memory resources
64 */
65static struct resource mem_res[] = {
66 {
67 .name = "Video RAM",
68 .start = 0,
69 .end = 0,
70 .flags = IORESOURCE_MEM
71 },
72 {
73 .name = "Kernel text",
74 .start = 0,
75 .end = 0,
76 .flags = IORESOURCE_MEM
77 },
78 {
79 .name = "Kernel data",
80 .start = 0,
81 .end = 0,
82 .flags = IORESOURCE_MEM
83 }
84};
85
86#define video_ram mem_res[0]
87#define kernel_code mem_res[1]
88#define kernel_data mem_res[2]
89
90/*
91 * These functions re-use the assembly code in head.S, which
92 * already provides the required functionality.
93 */
94static void __init setup_processor(void)
95{
96 printk(KERN_DEFAULT "CPU: UniCore-II [%08x] revision %d, cr=%08lx\n",
97 uc32_cpuid, (int)(uc32_cpuid >> 16) & 15, cr_alignment);
98
99 sprintf(init_utsname()->machine, "puv3");
100 sprintf(elf_platform, "ucv2");
101}
102
103/*
104 * cpu_init - initialise one CPU.
105 *
106 * cpu_init sets up the per-CPU stacks.
107 */
108void cpu_init(void)
109{
110 unsigned int cpu = smp_processor_id();
111 struct stack *stk = &stacks[cpu];
112
113 /*
114 * setup stacks for re-entrant exception handlers
115 */
116 __asm__ (
117 "mov.a asr, %1\n\t"
118 "add sp, %0, %2\n\t"
119 "mov.a asr, %3\n\t"
120 "add sp, %0, %4\n\t"
121 "mov.a asr, %5\n\t"
122 "add sp, %0, %6\n\t"
123 "mov.a asr, %7"
124 :
125 : "r" (stk),
126 "r" (PSR_R_BIT | PSR_I_BIT | INTR_MODE),
127 "I" (offsetof(struct stack, irq[0])),
128 "r" (PSR_R_BIT | PSR_I_BIT | ABRT_MODE),
129 "I" (offsetof(struct stack, abt[0])),
130 "r" (PSR_R_BIT | PSR_I_BIT | EXTN_MODE),
131 "I" (offsetof(struct stack, und[0])),
132 "r" (PSR_R_BIT | PSR_I_BIT | PRIV_MODE)
133 : "r30", "cc");
134}
135
136static int __init uc32_add_memory(unsigned long start, unsigned long size)
137{
138 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
139
140 if (meminfo.nr_banks >= NR_BANKS) {
141 printk(KERN_CRIT "NR_BANKS too low, "
142 "ignoring memory at %#lx\n", start);
143 return -EINVAL;
144 }
145
146 /*
147 * Ensure that start/size are aligned to a page boundary.
148 * Size is appropriately rounded down, start is rounded up.
149 */
150 size -= start & ~PAGE_MASK;
151
152 bank->start = PAGE_ALIGN(start);
153 bank->size = size & PAGE_MASK;
154
155 /*
156 * Check whether this memory region still has a non-zero size
157 * after the page rounding above.
158 */
159 if (bank->size == 0)
160 return -EINVAL;
161
162 meminfo.nr_banks++;
163 return 0;
164}
165
166/*
167 * Pick out the memory size. We look for mem=size@start,
168 * where start and size are "size[KkMm]"
169 */
170static int __init early_mem(char *p)
171{
172 static int usermem __initdata = 1;
173 unsigned long size, start;
174 char *endp;
175
176 /*
177 * If the user specifies memory size, we
178 * blow away any automatically generated
179 * size.
180 */
181 if (usermem) {
182 usermem = 0;
183 meminfo.nr_banks = 0;
184 }
185
186 start = PHYS_OFFSET;
187 size = memparse(p, &endp);
188 if (*endp == '@')
189 start = memparse(endp + 1, NULL);
190
191 uc32_add_memory(start, size);
192
193 return 0;
194}
195early_param("mem", early_mem);
196
197static void __init
198request_standard_resources(struct meminfo *mi)
199{
200 struct resource *res;
201 int i;
202
203 kernel_code.start = virt_to_phys(_stext);
204 kernel_code.end = virt_to_phys(_etext - 1);
205 kernel_data.start = virt_to_phys(_sdata);
206 kernel_data.end = virt_to_phys(_end - 1);
207
208 for (i = 0; i < mi->nr_banks; i++) {
209 if (mi->bank[i].size == 0)
210 continue;
211
212 res = alloc_bootmem_low(sizeof(*res));
213 res->name = "System RAM";
214 res->start = mi->bank[i].start;
215 res->end = mi->bank[i].start + mi->bank[i].size - 1;
216 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
217
218 request_resource(&iomem_resource, res);
219
220 if (kernel_code.start >= res->start &&
221 kernel_code.end <= res->end)
222 request_resource(res, &kernel_code);
223 if (kernel_data.start >= res->start &&
224 kernel_data.end <= res->end)
225 request_resource(res, &kernel_data);
226 }
227
228 video_ram.start = PKUNITY_UNIGFX_MMAP_BASE;
229 video_ram.end = PKUNITY_UNIGFX_MMAP_BASE + PKUNITY_UNIGFX_MMAP_SIZE;
230 request_resource(&iomem_resource, &video_ram);
231}
232
233static void (*init_machine)(void) __initdata;
234
235static int __init customize_machine(void)
236{
237 /* customizes platform devices, or adds new ones */
238 if (init_machine)
239 init_machine();
240 return 0;
241}
242arch_initcall(customize_machine);
243
244void __init setup_arch(char **cmdline_p)
245{
246 char *from = default_command_line;
247
248 setup_processor();
249
250 init_mm.start_code = (unsigned long) _stext;
251 init_mm.end_code = (unsigned long) _etext;
252 init_mm.end_data = (unsigned long) _edata;
253 init_mm.brk = (unsigned long) _end;
254
255 /* parse_early_param needs a boot_command_line */
256 strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
257
258 /* populate cmd_line too for later use, preserving boot_command_line */
259 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
260 *cmdline_p = cmd_line;
261
262 parse_early_param();
263
264 uc32_memblock_init(&meminfo);
265
266 paging_init();
267 request_standard_resources(&meminfo);
268
269 cpu_init();
270
271 /*
272 * Set up various architecture-specific pointers
273 */
274 init_machine = puv3_core_init;
275
276#ifdef CONFIG_VT
277#if defined(CONFIG_VGA_CONSOLE)
278 conswitchp = &vga_con;
279#elif defined(CONFIG_DUMMY_CONSOLE)
280 conswitchp = &dummy_con;
281#endif
282#endif
283 early_trap_init();
284}
285
286static struct cpu cpuinfo_unicore;
287
288static int __init topology_init(void)
289{
290 int i;
291
292 for_each_possible_cpu(i)
293 register_cpu(&cpuinfo_unicore, i);
294
295 return 0;
296}
297subsys_initcall(topology_init);
298
299#ifdef CONFIG_HAVE_PROC_CPU
300static int __init proc_cpu_init(void)
301{
302 struct proc_dir_entry *res;
303
304 res = proc_mkdir("cpu", NULL);
305 if (!res)
306 return -ENOMEM;
307 return 0;
308}
309fs_initcall(proc_cpu_init);
310#endif
311
312static int c_show(struct seq_file *m, void *v)
313{
314 seq_printf(m, "Processor\t: UniCore-II rev %d (%s)\n",
315 (int)(uc32_cpuid >> 16) & 15, elf_platform);
316
317 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
318 loops_per_jiffy / (500000/HZ),
319 (loops_per_jiffy / (5000/HZ)) % 100);
320
321 /* dump out the processor features */
322 seq_puts(m, "Features\t: CMOV UC-F64");
323
324 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", uc32_cpuid >> 24);
325 seq_printf(m, "CPU architecture: 2\n");
326 seq_printf(m, "CPU revision\t: %d\n", (uc32_cpuid >> 16) & 15);
327
328 seq_printf(m, "Cache type\t: write-back\n"
329 "Cache clean\t: cp0 c5 ops\n"
330 "Cache lockdown\t: not support\n"
331 "Cache format\t: Harvard\n");
332
333 seq_puts(m, "\n");
334
335 seq_printf(m, "Hardware\t: PKUnity v3\n");
336
337 return 0;
338}
339
340static void *c_start(struct seq_file *m, loff_t *pos)
341{
342 return *pos < 1 ? (void *)1 : NULL;
343}
344
345static void *c_next(struct seq_file *m, void *v, loff_t *pos)
346{
347 ++*pos;
348 return NULL;
349}
350
351static void c_stop(struct seq_file *m, void *v)
352{
353}
354
355const struct seq_operations cpuinfo_op = {
356 .start = c_start,
357 .next = c_next,
358 .stop = c_stop,
359 .show = c_show
360};
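For reference, uc32_add_memory above keeps only the fully page-backed part of a region: the partial leading page is subtracted from the size, the start is rounded up, and the size is rounded down. A worked sketch assuming 4KiB pages (values and names are illustrative): start=0x40000800, size=0x100000 yields a bank at 0x40001000 of size 0xff000.

    #include <stdio.h>

    #define SK_PAGE_SIZE     4096UL
    #define SK_PAGE_MASK     (~(SK_PAGE_SIZE - 1))
    #define SK_PAGE_ALIGN(x) (((x) + SK_PAGE_SIZE - 1) & SK_PAGE_MASK)

    int main(void)
    {
            unsigned long start = 0x40000800UL, size = 0x100000UL;

            size -= start & ~SK_PAGE_MASK;       /* drop the partial leading page */
            printf("bank start %#lx, size %#lx\n",
                   SK_PAGE_ALIGN(start),         /* 0x40001000 */
                   size & SK_PAGE_MASK);         /* 0xff000 */
            return 0;
    }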
diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h
new file mode 100644
index 000000000000..dcd1306eb5c6
--- /dev/null
+++ b/arch/unicore32/kernel/setup.h
@@ -0,0 +1,30 @@
1/*
2 * linux/arch/unicore32/kernel/setup.h
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __UNICORE_KERNEL_SETUP_H__
13#define __UNICORE_KERNEL_SETUP_H__
14
15extern void paging_init(void);
16extern void puv3_core_init(void);
17
18extern void puv3_ps2_init(void);
19extern void pci_puv3_preinit(void);
20extern void __init puv3_init_gpio(void);
21
22extern void setup_mm_for_reboot(char mode);
23
24extern char __stubs_start[], __stubs_end[];
25extern char __vectors_start[], __vectors_end[];
26
27extern void kernel_thread_helper(void);
28
29extern void __init early_signal_init(void);
30#endif
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
new file mode 100644
index 000000000000..25abbb101729
--- /dev/null
+++ b/arch/unicore32/kernel/traps.c
@@ -0,0 +1,333 @@
1/*
2 * linux/arch/unicore32/kernel/traps.c
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * 'traps.c' handles hardware exceptions after we have saved some state.
13 * Mostly a debugging aid, but will probably kill the offending process.
14 */
15#include <linux/module.h>
16#include <linux/signal.h>
17#include <linux/spinlock.h>
18#include <linux/personality.h>
19#include <linux/kallsyms.h>
20#include <linux/kdebug.h>
21#include <linux/uaccess.h>
22#include <linux/delay.h>
23#include <linux/hardirq.h>
24#include <linux/init.h>
25#include <linux/uaccess.h>
26#include <linux/atomic.h>
27#include <linux/unistd.h>
28
29#include <asm/cacheflush.h>
30#include <asm/system.h>
31#include <asm/traps.h>
32
33#include "setup.h"
34
35static void dump_mem(const char *, const char *, unsigned long, unsigned long);
36
37void dump_backtrace_entry(unsigned long where,
38 unsigned long from, unsigned long frame)
39{
40#ifdef CONFIG_KALLSYMS
41 printk(KERN_DEFAULT "[<%08lx>] (%pS) from [<%08lx>] (%pS)\n",
42 where, (void *)where, from, (void *)from);
43#else
44 printk(KERN_DEFAULT "Function entered at [<%08lx>] from [<%08lx>]\n",
45 where, from);
46#endif
47}
48
49/*
50 * Stack pointers should always be within the kernel's view of
51 * physical memory. If it is not there, then we can't dump
52 * out any information relating to the stack.
53 */
54static int verify_stack(unsigned long sp)
55{
56 if (sp < PAGE_OFFSET ||
57 (sp > (unsigned long)high_memory && high_memory != NULL))
58 return -EFAULT;
59
60 return 0;
61}
62
63/*
64 * Dump out the contents of some memory nicely...
65 */
66static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
67 unsigned long top)
68{
69 unsigned long first;
70 mm_segment_t fs;
71 int i;
72
73 /*
74 * We need to switch to kernel mode so that we can use __get_user
75 * to safely read from kernel space. Note that we now dump the
76 * code first, just in case the backtrace kills us.
77 */
78 fs = get_fs();
79 set_fs(KERNEL_DS);
80
81 printk(KERN_DEFAULT "%s%s(0x%08lx to 0x%08lx)\n",
82 lvl, str, bottom, top);
83
84 for (first = bottom & ~31; first < top; first += 32) {
85 unsigned long p;
86 char str[sizeof(" 12345678") * 8 + 1];
87
88 memset(str, ' ', sizeof(str));
89 str[sizeof(str) - 1] = '\0';
90
91 for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
92 if (p >= bottom && p < top) {
93 unsigned long val;
94 if (__get_user(val, (unsigned long *)p) == 0)
95 sprintf(str + i * 9, " %08lx", val);
96 else
97 sprintf(str + i * 9, " ????????");
98 }
99 }
100 printk(KERN_DEFAULT "%s%04lx:%s\n", lvl, first & 0xffff, str);
101 }
102
103 set_fs(fs);
104}
105
106static void dump_instr(const char *lvl, struct pt_regs *regs)
107{
108 unsigned long addr = instruction_pointer(regs);
109 const int width = 8;
110 mm_segment_t fs;
111 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
112 int i;
113
114 /*
115 * We need to switch to kernel mode so that we can use __get_user
116 * to safely read from kernel space. Note that we now dump the
117 * code first, just in case the backtrace kills us.
118 */
119 fs = get_fs();
120 set_fs(KERNEL_DS);
121
122 for (i = -4; i < 1; i++) {
123 unsigned int val, bad;
124
125 bad = __get_user(val, &((u32 *)addr)[i]);
126
127 if (!bad)
128 p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
129 width, val);
130 else {
131 p += sprintf(p, "bad PC value");
132 break;
133 }
134 }
135 printk(KERN_DEFAULT "%sCode: %s\n", lvl, str);
136
137 set_fs(fs);
138}
139
140static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
141{
142 unsigned int fp, mode;
143 int ok = 1;
144
145 printk(KERN_DEFAULT "Backtrace: ");
146
147 if (!tsk)
148 tsk = current;
149
150 if (regs) {
151 fp = regs->UCreg_fp;
152 mode = processor_mode(regs);
153 } else if (tsk != current) {
154 fp = thread_saved_fp(tsk);
155 mode = 0x10;
156 } else {
157 asm("mov %0, fp" : "=r" (fp) : : "cc");
158 mode = 0x10;
159 }
160
161 if (!fp) {
162 printk("no frame pointer");
163 ok = 0;
164 } else if (verify_stack(fp)) {
165 printk("invalid frame pointer 0x%08x", fp);
166 ok = 0;
167 } else if (fp < (unsigned long)end_of_stack(tsk))
168 printk("frame pointer underflow");
169 printk("\n");
170
171 if (ok)
172 c_backtrace(fp, mode);
173}
174
175void dump_stack(void)
176{
177 dump_backtrace(NULL, NULL);
178}
179EXPORT_SYMBOL(dump_stack);
180
181void show_stack(struct task_struct *tsk, unsigned long *sp)
182{
183 dump_backtrace(NULL, tsk);
184 barrier();
185}
186
187static int __die(const char *str, int err, struct thread_info *thread,
188 struct pt_regs *regs)
189{
190 struct task_struct *tsk = thread->task;
191 static int die_counter;
192 int ret;
193
194 printk(KERN_EMERG "Internal error: %s: %x [#%d]\n",
195 str, err, ++die_counter);
196 sysfs_printk_last_file();
197
198 /* trap and error numbers are mostly meaningless on UniCore */
199 ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \
200 SIGSEGV);
201 if (ret == NOTIFY_STOP)
202 return ret;
203
204 print_modules();
205 __show_regs(regs);
206 printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
207 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
208
209 if (!user_mode(regs) || in_interrupt()) {
210 dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp,
211 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
212 dump_backtrace(regs, tsk);
213 dump_instr(KERN_EMERG, regs);
214 }
215
216 return ret;
217}
218
219DEFINE_SPINLOCK(die_lock);
220
221/*
222 * This function is protected against re-entrancy.
223 */
224void die(const char *str, struct pt_regs *regs, int err)
225{
226 struct thread_info *thread = current_thread_info();
227 int ret;
228
229 oops_enter();
230
231 spin_lock_irq(&die_lock);
232 console_verbose();
233 bust_spinlocks(1);
234 ret = __die(str, err, thread, regs);
235
236 bust_spinlocks(0);
237 add_taint(TAINT_DIE);
238 spin_unlock_irq(&die_lock);
239 oops_exit();
240
241 if (in_interrupt())
242 panic("Fatal exception in interrupt");
243 if (panic_on_oops)
244 panic("Fatal exception");
245 if (ret != NOTIFY_STOP)
246 do_exit(SIGSEGV);
247}
248
249void uc32_notify_die(const char *str, struct pt_regs *regs,
250 struct siginfo *info, unsigned long err, unsigned long trap)
251{
252 if (user_mode(regs)) {
253 current->thread.error_code = err;
254 current->thread.trap_no = trap;
255
256 force_sig_info(info->si_signo, info, current);
257 } else
258 die(str, regs, err);
259}
260
261/*
262 * bad_mode handles the impossible case in the vectors. If you see one of
263 * these, then it's extremely serious, and could mean you have buggy hardware.
264 * It never returns, and never tries to sync. We hope that we can at least
265 * dump out some state information...
266 */
267asmlinkage void bad_mode(struct pt_regs *regs, unsigned int reason)
268{
269 console_verbose();
270
271 printk(KERN_CRIT "Bad mode detected with reason 0x%x\n", reason);
272
273 die("Oops - bad mode", regs, 0);
274 local_irq_disable();
275 panic("bad mode");
276}
277
278void __pte_error(const char *file, int line, unsigned long val)
279{
280 printk(KERN_DEFAULT "%s:%d: bad pte %08lx.\n", file, line, val);
281}
282
283void __pmd_error(const char *file, int line, unsigned long val)
284{
285 printk(KERN_DEFAULT "%s:%d: bad pmd %08lx.\n", file, line, val);
286}
287
288void __pgd_error(const char *file, int line, unsigned long val)
289{
290 printk(KERN_DEFAULT "%s:%d: bad pgd %08lx.\n", file, line, val);
291}
292
293asmlinkage void __div0(void)
294{
295 printk(KERN_DEFAULT "Division by zero in kernel.\n");
296 dump_stack();
297}
298EXPORT_SYMBOL(__div0);
299
300void abort(void)
301{
302 BUG();
303
304 /* if that doesn't kill us, halt */
305 panic("Oops failed to kill thread");
306}
307EXPORT_SYMBOL(abort);
308
309void __init trap_init(void)
310{
311 return;
312}
313
314void __init early_trap_init(void)
315{
316 unsigned long vectors = VECTORS_BASE;
317
318 /*
319 * Copy the vectors and stubs (in entry.S)
320 * into the vector page, mapped at 0xffff0000, and ensure these
321 * are visible to the instruction stream.
322 */
323 memcpy((void *)vectors,
324 __vectors_start,
325 __vectors_end - __vectors_start);
326 memcpy((void *)vectors + 0x200,
327 __stubs_start,
328 __stubs_end - __stubs_start);
329
330 early_signal_init();
331
332 flush_icache_range(vectors, vectors + PAGE_SIZE);
333}
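For reference, early_trap_init above copies the vector table to VECTORS_BASE and the stubs to VECTORS_BASE + 0x200, which is why entry.S defines stubs_offset = __vectors_start + 0x200 - __stubs_start: a branch assembled as "b vector_intr + stubs_offset" inside the vector page lands on the stub's post-copy address. A sketch of that relocation arithmetic, assuming a 0xffff0000 vector base (names are illustrative, not kernel API):

    #include <stdint.h>

    #define SK_VECTORS_BASE 0xffff0000UL    /* assumed value of VECTORS_BASE */

    /* Post-copy address of a stub: the stubs region is memcpy'd to
     * SK_VECTORS_BASE + 0x200, preserving each stub's offset within it. */
    static uintptr_t sketch_stub_runtime(uintptr_t stub_link_addr,
                                         uintptr_t stubs_start_link_addr)
    {
            return SK_VECTORS_BASE + 0x200
                   + (stub_link_addr - stubs_start_link_addr);
    }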