Diffstat (limited to 'arch/tile/kernel/entry.S')
 -rw-r--r--  arch/tile/kernel/entry.S | 141
 1 file changed, 141 insertions(+), 0 deletions(-)
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
new file mode 100644
index 000000000000..136261f7d7f9
--- /dev/null
+++ b/arch/tile/kernel/entry.S
@@ -0,0 +1,141 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/linkage.h>
#include <arch/abi.h>
#include <asm/unistd.h>
#include <asm/irqflags.h>

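/*
 * tilegx spells the branch-if-nonzero-taken instruction bnezt rather
 * than bnzt; alias it so __delay below assembles on both ISAs.
 */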
#ifdef __tilegx__
#define bnzt bnezt
#endif

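/*
 * Return an address in our caller's text: lr already holds the return
 * address, so just copy it to r0 and return.
 */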
STD_ENTRY(current_text_addr)
        { move r0, lr; jrp lr }
        STD_ENDPROC(current_text_addr)

STD_ENTRY(_sim_syscall)
        /*
         * Wait for r0-r9 to be ready (and lr on the off chance we
         * want the syscall to locate its caller), then make a magic
         * simulator syscall.
         *
         * We carefully stall until the registers are readable in case they
         * are the target of a slow load, etc., so that tile-sim will
         * definitely be able to read all of them inside the magic syscall.
         *
         * Technically this is wrong for r3-r9 and lr, since an interrupt
         * could come in and restore the registers with a slow load right
         * before executing the mtspr. We may need to modify tile-sim to
         * explicitly stall for this case, but we do not yet have
         * a way to implement such a stall.
         */
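        /* "and zero, x, y" reads x and y but discards the result. */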
        { and zero, lr, r9 ; and zero, r8, r7 }
        { and zero, r6, r5 ; and zero, r4, r3 }
        { and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
        { jrp lr }
        STD_ENDPROC(_sim_syscall)

/*
 * Implement execve(). The i386 code has a note that forking from kernel
 * space results in no copy on write until the execve, so we should be
 * careful not to write to the stack here.
 */
STD_ENTRY(kernel_execve)
        moveli TREG_SYSCALL_NR_NAME, __NR_execve
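        /* swint1 is the software interrupt used for system call entry. */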
        swint1
        jrp lr
        STD_ENDPROC(kernel_execve)

/* Delay a fixed number of cycles. */
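/* Each pass through the single-bundle loop decrements r0 and branches back to itself (".") until r0 reaches zero. */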
STD_ENTRY(__delay)
        { addi r0, r0, -1; bnzt r0, . }
        jrp lr
        STD_ENDPROC(__delay)

/*
 * We don't run this function directly, but instead copy it to a page
 * we map into every user process. See vdso_setup().
 *
 * Note that libc has a copy of this function that it uses to compare
 * against the PC when a stack backtrace ends, so if this code is
 * changed, the libc implementation(s) should also be updated.
 */
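/*
 * __rt_sigreturn_end below marks the extent of the code that gets
 * copied out, so keep the two labels bracketing exactly this sequence.
 */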
        .pushsection .data
ENTRY(__rt_sigreturn)
        moveli TREG_SYSCALL_NR_NAME, __NR_rt_sigreturn
        swint1
        ENDPROC(__rt_sigreturn)
        ENTRY(__rt_sigreturn_end)
        .popsection

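/*
 * Capture our own pc (via lnk, rebased by "dump_stack - ." onto the
 * function's start address), along with lr, sp, and r52, then jump to
 * the C helper that does the real work.
 */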
STD_ENTRY(dump_stack)
        { move r2, lr; lnk r1 }
        { move r4, r52; addli r1, r1, dump_stack - . }
        { move r3, sp; j _dump_stack }
        jrp lr /* keep backtracer happy */
        STD_ENDPROC(dump_stack)

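/* Same pc/lr/sp/r52 hand-off as dump_stack above, for the C iterator. */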
STD_ENTRY(KBacktraceIterator_init_current)
        { move r2, lr; lnk r1 }
        { move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
        { move r3, sp; j _KBacktraceIterator_init_current }
        jrp lr /* keep backtracer happy */
        STD_ENDPROC(KBacktraceIterator_init_current)

/*
 * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
 * free the old stack (passed in r0) and re-invoke cpu_idle().
 * We update sp and ksp0 simultaneously to avoid backtracer warnings.
 */
STD_ENTRY(cpu_idle_on_new_stack)
        {
         move sp, r1
         mtspr SYSTEM_SAVE_1_0, r2
        }
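        /* Both writes issue in one bundle, so sp and ksp0 never disagree. */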
        jal free_thread_info
        j cpu_idle
        STD_ENDPROC(cpu_idle_on_new_stack)

/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
        nap
        j smp_nap /* we are not architecturally guaranteed not to exit nap */
        jrp lr /* clue in the backtracer */
        STD_ENDPROC(smp_nap)

/*
 * Enable interrupts racelessly and then nap until interrupted.
 * This function's _cpu_idle_nap address is special; see intvec.S.
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 */
STD_ENTRY(_cpu_idle)
        {
         lnk r0
         movei r1, 1
        }
        {
         addli r0, r0, _cpu_idle_nap - .
         mtspr INTERRUPT_CRITICAL_SECTION, r1
        }
        IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
        mtspr EX_CONTEXT_1_1, r1 /* PL1, ICS clear */
        mtspr EX_CONTEXT_1_0, r0
        iret
        .global _cpu_idle_nap
_cpu_idle_nap:
        nap
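        /* A bundle is 8 bytes, so pc+8 lands here and returns to our caller. */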
        jrp lr
        STD_ENDPROC(_cpu_idle)