Diffstat (limited to 'arch/hexagon/kernel/vm_entry.S')
-rw-r--r--  arch/hexagon/kernel/vm_entry.S | 269
1 file changed, 269 insertions, 0 deletions
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
new file mode 100644
index 000000000000..5b99066cbc8d
--- /dev/null
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -0,0 +1,269 @@
/*
 * Event entry/exit for Hexagon
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <asm/asm-offsets.h>  /* assembly-safer versions of C defines */
#include <asm/mem-layout.h>   /* sigh, except for page_offset */
#include <asm/hexagon_vm.h>
#include <asm/thread_info.h>

/*
 * Entry into guest-mode Linux under the Hexagon Virtual Machine.
 * The stack pointer points to the event record - build pt_regs on
 * top of it, set up a plausible C stack frame, and dispatch to the
 * C handler.  On return, execute the vmrte virtual instruction with
 * SP back where we started.
 *
 * VM Spec 0.5 now uses a trap to fetch the HVM event record.
 */

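/*
 * For orientation, a hedged sketch of the HVM event record that the
 * _PT_ER_* offsets below refer to (field names follow the
 * VMEL/VMEST/VMPSP/VMBADVA offsets used in this file; the exact
 * layout is defined in the C headers and generated asm-offsets):
 *
 *	struct hvm_event_record_sketch {
 *		unsigned long vmel;	// event linkage (return PC)
 *		unsigned long vmest;	// event status (saved guest state)
 *		unsigned long vmpsp;	// previous stack pointer
 *		unsigned long vmbadva;	// bad virtual address, if any
 *	};
 *
 * pt_regs in HVM mode embeds this record, which is why the entry code
 * below can fetch it with trap1(#HVM_TRAP1_VMGETREGS) and store the
 * four words straight into the frame.
 */
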
/*
 * Save full register state, while setting up thread_info struct
 * pointer derived from kernel stack pointer in THREADINFO_REG
 * register, putting prior thread_info.regs pointer in a callee-save
 * register (R24, which had better not ever be assigned to THREADINFO_REG),
 * and updating thread_info.regs to point to current stack frame,
 * so as to support nested events in kernel mode.
 *
 * As this is common code, we set the pt_regs system call number
 * to -1 for all events.  It will be replaced with the system call
 * number in the case where we decode a system call (trap0(#1)).
 */

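/*
 * The thread_info derivation relies on THREAD_SIZE being a power of
 * two: negating it gives the mask ~(THREAD_SIZE - 1), and ANDing the
 * kernel stack pointer with that mask yields the stack base, where
 * thread_info lives.  A hedged C equivalent of the R2 arithmetic in
 * the macro (THREAD_SIZE value is config-dependent):
 *
 *	static inline struct thread_info *ti_from_sp(unsigned long sp)
 *	{
 *		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 *	}
 */
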
#define save_pt_regs()\
        memd(R0 + #_PT_R3130) = R31:30; \
        { memw(R0 + #_PT_R2928) = R28; \
          R31 = memw(R0 + #_PT_ER_VMPSP); }\
        { memw(R0 + #(_PT_R2928 + 4)) = R31; \
          R31 = ugp; } \
        { memd(R0 + #_PT_R2726) = R27:26; \
          R30 = gp ; } \
        memd(R0 + #_PT_R2524) = R25:24; \
        memd(R0 + #_PT_R2322) = R23:22; \
        memd(R0 + #_PT_R2120) = R21:20; \
        memd(R0 + #_PT_R1918) = R19:18; \
        memd(R0 + #_PT_R1716) = R17:16; \
        memd(R0 + #_PT_R1514) = R15:14; \
        memd(R0 + #_PT_R1312) = R13:12; \
        { memd(R0 + #_PT_R1110) = R11:10; \
          R15 = lc0; } \
        { memd(R0 + #_PT_R0908) = R9:8; \
          R14 = sa0; } \
        { memd(R0 + #_PT_R0706) = R7:6; \
          R13 = lc1; } \
        { memd(R0 + #_PT_R0504) = R5:4; \
          R12 = sa1; } \
        { memd(R0 + #_PT_UGPGP) = R31:30; \
          R11 = m1; \
          R2.H = #HI(_THREAD_SIZE); } \
        { memd(R0 + #_PT_LC0SA0) = R15:14; \
          R10 = m0; \
          R2.L = #LO(_THREAD_SIZE); } \
        { memd(R0 + #_PT_LC1SA1) = R13:12; \
          R15 = p3:0; \
          R2 = neg(R2); } \
        { memd(R0 + #_PT_M1M0) = R11:10; \
          R14 = usr; \
          R2 = and(R0,R2); } \
        { memd(R0 + #_PT_PREDSUSR) = R15:14; \
          THREADINFO_REG = R2; } \
        { r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
          memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
          R2 = #-1; } \
        { memw(R0 + #_PT_SYSCALL_NR) = R2; \
          R30 = #0; }

/*
 * Restore registers and thread_info.regs state.  THREADINFO_REG
 * is assumed to still be sane, and R24 to have been correctly
 * preserved.  Don't restore R29 (SP) until later.
 */

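/*
 * Together, the first packet here and the r24 packet in
 * save_pt_regs() implement the nesting protocol described above.
 * A hedged C sketch (assuming thread_info has a pt_regs pointer
 * named regs, per the _THREAD_INFO_PT_REGS offset):
 *
 *	// entry: park the outer frame pointer, publish the new one
 *	struct pt_regs *old = ti->regs;		// kept in R24
 *	ti->regs = this_frame;
 *
 *	// exit: put the outer frame pointer back
 *	ti->regs = old;				// restored from R24
 *
 * This is what lets an event taken in kernel mode nest cleanly.
 */
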
#define restore_pt_regs() \
        { memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
          R15:14 = memd(R0 + #_PT_PREDSUSR); } \
        { R11:10 = memd(R0 + #_PT_M1M0); \
          p3:0 = R15; } \
        { R13:12 = memd(R0 + #_PT_LC1SA1); \
          usr = R14; } \
        { R15:14 = memd(R0 + #_PT_LC0SA0); \
          m1 = R11; } \
        { R3:2 = memd(R0 + #_PT_R0302); \
          m0 = R10; } \
        { R5:4 = memd(R0 + #_PT_R0504); \
          lc1 = R13; } \
        { R7:6 = memd(R0 + #_PT_R0706); \
          sa1 = R12; } \
        { R9:8 = memd(R0 + #_PT_R0908); \
          lc0 = R15; } \
        { R11:10 = memd(R0 + #_PT_R1110); \
          sa0 = R14; } \
        { R13:12 = memd(R0 + #_PT_R1312); \
          R15:14 = memd(R0 + #_PT_R1514); } \
        { R17:16 = memd(R0 + #_PT_R1716); \
          R19:18 = memd(R0 + #_PT_R1918); } \
        { R21:20 = memd(R0 + #_PT_R2120); \
          R23:22 = memd(R0 + #_PT_R2322); } \
        { R25:24 = memd(R0 + #_PT_R2524); \
          R27:26 = memd(R0 + #_PT_R2726); } \
        R31:30 = memd(R0 + #_PT_UGPGP); \
        { R28 = memw(R0 + #_PT_R2928); \
          ugp = R31; } \
        { R31:30 = memd(R0 + #_PT_R3130); \
          gp = R30; }

/*
 * Carves out enough space for the rest of pt_regs; evrec is part
 * of pt_regs in HVM mode.  Save R0/R1, and set the handler's address
 * in R1.  R0 holds the address of pt_regs and is the parameter to
 * save_pt_regs.
 */

/*
 * Since the HVM isn't automagically pushing the EVREC onto the stack anymore,
 * we'll subtract the entire size out and then fill it in ourselves.
 * Need to save off R0, R1, R2, R3 immediately.
 */

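/*
 * Note on the first packet of vm_event_entry(): all operations in a
 * Hexagon packet read the pre-packet register values, so the memd
 * store sees the old R29 and compensates with a -_PT_REGS_SIZE
 * offset.  In hedged C terms:
 *
 *	unsigned long old_sp = sp;
 *	sp = old_sp - _PT_REGS_SIZE;
 *	// same address as (sp + _PT_R0100) after the decrement:
 *	*(u64 *)(old_sp + _PT_R0100 - _PT_REGS_SIZE) = r1_r0;
 */
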
#define vm_event_entry(CHandler) \
        { \
                R29 = add(R29, #-(_PT_REGS_SIZE)); \
                memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
        } \
        { \
                memd(R29 + #_PT_R0302) = R3:2; \
        } \
        trap1(#HVM_TRAP1_VMGETREGS); \
        { \
                memd(R29 + #_PT_ER_VMEL) = R1:0; \
                R0 = R29; \
                R1.L = #LO(CHandler); \
        } \
        { \
                memd(R29 + #_PT_ER_VMPSP) = R3:2; \
                R1.H = #HI(CHandler); \
                jump event_dispatch; \
        }

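/*
 * Each CHandler is reached through event_dispatch with R0 holding
 * the pt_regs pointer, i.e. (a sketch; the real prototypes live on
 * the C side of the kernel):
 *
 *	void do_trap0(struct pt_regs *regs);
 *	void do_genex(struct pt_regs *regs);
 *	void do_machcheck(struct pt_regs *regs);
 *	void arch_do_IRQ(struct pt_regs *regs);
 */
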
        .text
/*
 * Do bulk save/restore in one place.
 * Adds a jump to dispatch latency, but
 * saves hundreds of bytes.
 */

event_dispatch:
        save_pt_regs()
        callr   r1

/*
 * If we were in kernel mode, we don't need to check the scheduler
 * or signals unless CONFIG_PREEMPT is set; if it is set, we have to
 * jump to a need_resched kind of block.  Note that CONFIG_PREEMPT
 * is not supported yet.
 */

#ifdef CONFIG_PREEMPT
        R0 = #VM_INT_DISABLE
        trap1(#HVM_TRAP1_VMSETIE)
#endif

        /* "Nested control path" -- if the previous mode was kernel */
        R0 = memw(R29 + #_PT_ER_VMEST);
        P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
        if !P0 jump restore_all;
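        /*
         * In rough C (hedged; field names assume pt_regs embeds the
         * event record): if the saved VMEST user-mode bit is clear,
         * the event interrupted the kernel, so skip the work checks:
         *
         *	if (!(regs->hvmer.vmest & (1 << HVM_VMEST_UM_SFT)))
         *		goto restore_all;
         */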
        /*
         * Returning from system call, normally coming back from user mode
         */
return_from_syscall:
        /* Disable interrupts while checking TIF */
        R0 = #VM_INT_DISABLE
        trap1(#HVM_TRAP1_VMSETIE)

        /*
         * Coming back from the C-world, our thread info pointer
         * should be in the designated register (usually R19)
         */
        R1.L = #LO(_TIF_ALLWORK_MASK)
        {
                R1.H = #HI(_TIF_ALLWORK_MASK);
                R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
        }

        /*
         * Compare against the "return to userspace" work mask
         * (_TIF_ALLWORK_MASK)
         */
        R1 = and(R1,R0);
        { P0 = cmp.eq(R1,#0); if (!P0.new) jump:t work_pending;}
        jump restore_all;               /* we're outta here! */

work_pending:
        {
                P0 = tstbit(R1, #TIF_NEED_RESCHED);
                if (!P0.new) jump:nt work_notifysig;
        }
        call schedule
        jump return_from_syscall;       /* check for more work */

work_notifysig:
        /* Deliver pending signals and handle notify-resume requests */
        R1 = and(R0, #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME));
        P0 = cmp.eq(R1, #0);
        if P0 jump restore_all
        R1 = R0;        /* unsigned long thread_info_flags */
        R0 = R29;       /* regs should still be at top of stack */
        call do_notify_resume

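/*
 * The exit path from return_from_syscall down to here, as a hedged C
 * sketch (generic kernel names, not an exact transliteration):
 *
 *	for (;;) {
 *		unsigned long flags = current_thread_info()->flags;
 *
 *		if (!(flags & _TIF_ALLWORK_MASK))
 *			break;				// restore_all
 *		if (flags & _TIF_NEED_RESCHED) {
 *			schedule();
 *			continue;			// re-check TIF
 *		}
 *		if (flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME))
 *			do_notify_resume(regs, flags);
 *		break;					// restore_all
 *	}
 */
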
restore_all:
        /* Disable interrupts, if they weren't already, before reg restore. */
        R0 = #VM_INT_DISABLE
        trap1(#HVM_TRAP1_VMSETIE)

        /* do the setregs here for VM 0.5 */
        /* R29 here should already be pointing at pt_regs */
        R1:0 = memd(R29 + #_PT_ER_VMEL);
        R3:2 = memd(R29 + #_PT_ER_VMPSP);
        trap1(#HVM_TRAP1_VMSETREGS);

        R0 = R29
        restore_pt_regs()
        R1:0 = memd(R29 + #_PT_R0100);
        R29 = add(R29, #_PT_REGS_SIZE);
        trap1(#HVM_TRAP1_VMRTE)
        /* Notreached */

        .globl _K_enter_genex
_K_enter_genex:
        vm_event_entry(do_genex)

        .globl _K_enter_interrupt
_K_enter_interrupt:
        vm_event_entry(arch_do_IRQ)

        .globl _K_enter_trap0
_K_enter_trap0:
        vm_event_entry(do_trap0)

        .globl _K_enter_machcheck
_K_enter_machcheck:
        vm_event_entry(do_machcheck)


        .globl ret_from_fork
ret_from_fork:
        call schedule_tail
        jump return_from_syscall