Diffstat (limited to 'arch/hexagon/kernel/vm_entry.S')
 arch/hexagon/kernel/vm_entry.S | 282 +++++++++++++++++++++++++++------------
 1 file changed, 201 insertions(+), 81 deletions(-)
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
index 425e50c694f7..e3086185fc9f 100644
--- a/arch/hexagon/kernel/vm_entry.S
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -1,7 +1,7 @@
 /*
  * Event entry/exit for Hexagon
  *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,48 +45,88 @@
  * number in the case where we decode a system call (trap0(#1)).
  */
 
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
 #define save_pt_regs()\
 	memd(R0 + #_PT_R3130) = R31:30; \
+	{ memw(R0 + #_PT_R2928) = R28; \
+		R31 = memw(R0 + #_PT_ER_VMPSP); }\
+	{ memw(R0 + #(_PT_R2928 + 4)) = R31; \
+		R31 = ugp; } \
+	{ memd(R0 + #_PT_R2726) = R27:26; \
+		R30 = gp ; } \
+	memd(R0 + #_PT_R2524) = R25:24; \
+	memd(R0 + #_PT_R2322) = R23:22; \
+	memd(R0 + #_PT_R2120) = R21:20; \
+	memd(R0 + #_PT_R1918) = R19:18; \
+	memd(R0 + #_PT_R1716) = R17:16; \
+	memd(R0 + #_PT_R1514) = R15:14; \
+	memd(R0 + #_PT_R1312) = R13:12; \
+	{ memd(R0 + #_PT_R1110) = R11:10; \
+		R15 = lc0; } \
+	{ memd(R0 + #_PT_R0908) = R9:8; \
+		R14 = sa0; } \
+	{ memd(R0 + #_PT_R0706) = R7:6; \
+		R13 = lc1; } \
+	{ memd(R0 + #_PT_R0504) = R5:4; \
+		R12 = sa1; } \
+	{ memd(R0 + #_PT_GPUGP) = R31:30; \
+		R11 = m1; \
+		R2.H = #HI(_THREAD_SIZE); } \
+	{ memd(R0 + #_PT_LC0SA0) = R15:14; \
+		R10 = m0; \
+		R2.L = #LO(_THREAD_SIZE); } \
+	{ memd(R0 + #_PT_LC1SA1) = R13:12; \
+		R15 = p3:0; \
+		R2 = neg(R2); } \
+	{ memd(R0 + #_PT_M1M0) = R11:10; \
+		R14 = usr; \
+		R2 = and(R0,R2); } \
+	{ memd(R0 + #_PT_PREDSUSR) = R15:14; \
+		THREADINFO_REG = R2; } \
+	{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
+		memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
+		R2 = #-1; } \
+	{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
+		R30 = #0; }
+#else
+/* V4+ */
+/* the # ## # syntax inserts a literal ## */
+#define save_pt_regs()\
+	{ memd(R0 + #_PT_R3130) = R31:30; \
+		R30 = memw(R0 + #_PT_ER_VMPSP); }\
 	{ memw(R0 + #_PT_R2928) = R28; \
-		R31 = memw(R0 + #_PT_ER_VMPSP); }\
-	{ memw(R0 + #(_PT_R2928 + 4)) = R31; \
-		R31 = ugp; } \
-	{ memd(R0 + #_PT_R2726) = R27:26; \
-		R30 = gp ; } \
-	memd(R0 + #_PT_R2524) = R25:24; \
-	memd(R0 + #_PT_R2322) = R23:22; \
-	memd(R0 + #_PT_R2120) = R21:20; \
-	memd(R0 + #_PT_R1918) = R19:18; \
-	memd(R0 + #_PT_R1716) = R17:16; \
-	memd(R0 + #_PT_R1514) = R15:14; \
-	memd(R0 + #_PT_R1312) = R13:12; \
+		memw(R0 + #(_PT_R2928 + 4)) = R30; }\
+	{ R31:30 = C11:10; \
+		memd(R0 + #_PT_R2726) = R27:26; \
+		memd(R0 + #_PT_R2524) = R25:24; }\
+	{ memd(R0 + #_PT_R2322) = R23:22; \
+		memd(R0 + #_PT_R2120) = R21:20; }\
+	{ memd(R0 + #_PT_R1918) = R19:18; \
+		memd(R0 + #_PT_R1716) = R17:16; }\
+	{ memd(R0 + #_PT_R1514) = R15:14; \
+		memd(R0 + #_PT_R1312) = R13:12; \
+		R17:16 = C13:12; }\
 	{ memd(R0 + #_PT_R1110) = R11:10; \
-		R15 = lc0; } \
-	{ memd(R0 + #_PT_R0908) = R9:8; \
-		R14 = sa0; } \
+		memd(R0 + #_PT_R0908) = R9:8; \
+		R15:14 = C1:0; } \
 	{ memd(R0 + #_PT_R0706) = R7:6; \
-		R13 = lc1; } \
-	{ memd(R0 + #_PT_R0504) = R5:4; \
-		R12 = sa1; } \
-	{ memd(R0 + #_PT_UGPGP) = R31:30; \
-		R11 = m1; \
-		R2.H = #HI(_THREAD_SIZE); } \
-	{ memd(R0 + #_PT_LC0SA0) = R15:14; \
-		R10 = m0; \
-		R2.L = #LO(_THREAD_SIZE); } \
-	{ memd(R0 + #_PT_LC1SA1) = R13:12; \
-		R15 = p3:0; \
-		R2 = neg(R2); } \
+		memd(R0 + #_PT_R0504) = R5:4; \
+		R13:12 = C3:2; } \
+	{ memd(R0 + #_PT_GPUGP) = R31:30; \
+		memd(R0 + #_PT_LC0SA0) = R15:14; \
+		R11:10 = C7:6; }\
+	{ THREADINFO_REG = and(R0, # ## #-_THREAD_SIZE); \
+		memd(R0 + #_PT_LC1SA1) = R13:12; \
+		R15 = p3:0; }\
 	{ memd(R0 + #_PT_M1M0) = R11:10; \
-		R14 = usr; \
-		R2 = and(R0,R2); } \
-	{ memd(R0 + #_PT_PREDSUSR) = R15:14; \
-		THREADINFO_REG = R2; } \
+		memw(R0 + #_PT_PREDSUSR + 4) = R15; }\
 	{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
 		memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
 		R2 = #-1; } \
 	{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
+		memd(R0 + #_PT_CS1CS0) = R17:16; \
 		R30 = #0; }
+#endif
 
 /*
  * Restore registers and thread_info.regs state. THREADINFO_REG
@@ -94,6 +134,7 @@
  * preserved. Don't restore R29 (SP) until later.
  */
 
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
 #define restore_pt_regs() \
 	{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
 		R15:14 = memd(R0 + #_PT_PREDSUSR); } \
@@ -121,11 +162,44 @@
 		R23:22 = memd(R0 + #_PT_R2322); } \
 	{ R25:24 = memd(R0 + #_PT_R2524); \
 		R27:26 = memd(R0 + #_PT_R2726); } \
-	R31:30 = memd(R0 + #_PT_UGPGP); \
+	R31:30 = memd(R0 + #_PT_GPUGP); \
 	{ R28 = memw(R0 + #_PT_R2928); \
 		ugp = R31; } \
 	{ R31:30 = memd(R0 + #_PT_R3130); \
 		gp = R30; }
+#else
+/* V4+ */
+#define restore_pt_regs() \
+	{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
+		R15:14 = memd(R0 + #_PT_PREDSUSR); } \
+	{ R11:10 = memd(R0 + #_PT_M1M0); \
+		R13:12 = memd(R0 + #_PT_LC1SA1); \
+		p3:0 = R15; } \
+	{ R15:14 = memd(R0 + #_PT_LC0SA0); \
+		R3:2 = memd(R0 + #_PT_R0302); \
+		usr = R14; } \
+	{ R5:4 = memd(R0 + #_PT_R0504); \
+		R7:6 = memd(R0 + #_PT_R0706); \
+		C7:6 = R11:10; }\
+	{ R9:8 = memd(R0 + #_PT_R0908); \
+		R11:10 = memd(R0 + #_PT_R1110); \
+		C3:2 = R13:12; }\
+	{ R13:12 = memd(R0 + #_PT_R1312); \
+		R15:14 = memd(R0 + #_PT_R1514); \
+		C1:0 = R15:14; }\
+	{ R17:16 = memd(R0 + #_PT_R1716); \
+		R19:18 = memd(R0 + #_PT_R1918); } \
+	{ R21:20 = memd(R0 + #_PT_R2120); \
+		R23:22 = memd(R0 + #_PT_R2322); } \
+	{ R25:24 = memd(R0 + #_PT_R2524); \
+		R27:26 = memd(R0 + #_PT_R2726); } \
+	R31:30 = memd(R0 + #_PT_CS1CS0); \
+	{ C13:12 = R31:30; \
+		R31:30 = memd(R0 + #_PT_GPUGP) ; \
+		R28 = memw(R0 + #_PT_R2928); }\
+	{ C11:10 = R31:30; \
+		R31:30 = memd(R0 + #_PT_R3130); }
+#endif
 
 	/*
 	 * Clears off enough space for the rest of pt_regs; evrec is a part
@@ -139,6 +213,7 @@
  * Need to save off R0, R1, R2, R3 immediately.
  */
 
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
 #define vm_event_entry(CHandler) \
 	{ \
 		R29 = add(R29, #-(_PT_REGS_SIZE)); \
@@ -158,6 +233,34 @@
 		R1.H = #HI(CHandler); \
 		jump event_dispatch; \
 	}
+#else
+/* V4+ */
+/* turn on I$ prefetch early */
+/* the # ## # syntax inserts a literal ## */
+#define vm_event_entry(CHandler) \
+	{ \
+		R29 = add(R29, #-(_PT_REGS_SIZE)); \
+		memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
+		memd(R29 + #(_PT_R0302 + -_PT_REGS_SIZE)) = R3:2; \
+		R0 = usr; \
+	} \
+	{ \
+		memw(R29 + #_PT_PREDSUSR) = R0; \
+		R0 = setbit(R0, #16); \
+	} \
+	usr = R0; \
+	R1:0 = G1:0; \
+	{ \
+		memd(R29 + #_PT_ER_VMEL) = R1:0; \
+		R1 = # ## #(CHandler); \
+		R3:2 = G3:2; \
+	} \
+	{ \
+		R0 = R29; \
+		memd(R29 + #_PT_ER_VMPSP) = R3:2; \
+		jump event_dispatch; \
+	}
+#endif
 
 .text
 	/*
@@ -171,6 +274,9 @@ event_dispatch:
 	callr r1
 
 	/*
+	 * Coming back from the C-world, our thread info pointer
+	 * should be in the designated register (usually R19)
+	 *
 	 * If we were in kernel mode, we don't need to check scheduler
 	 * or signals if CONFIG_PREEMPT is not set. If set, then it has
 	 * to jump to a need_resched kind of block.
@@ -183,69 +289,68 @@ event_dispatch:
 #endif
 
 	/* "Nested control path" -- if the previous mode was kernel */
-	R0 = memw(R29 + #_PT_ER_VMEST);
-	P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
-	if !P0 jump restore_all;
-	/*
-	 * Returning from system call, normally coming back from user mode
-	 */
-return_from_syscall:
-	/* Disable interrupts while checking TIF */
-	R0 = #VM_INT_DISABLE
-	trap1(#HVM_TRAP1_VMSETIE)
-
-	/*
-	 * Coming back from the C-world, our thread info pointer
-	 * should be in the designated register (usually R19)
-	 */
-	R1.L = #LO(_TIF_ALLWORK_MASK)
 	{
-		R1.H = #HI(_TIF_ALLWORK_MASK);
-		R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
+		R0 = memw(R29 + #_PT_ER_VMEST);
+		R16.L = #LO(do_work_pending);
+	}
+	{
+		P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
+		if (!P0.new) jump:nt restore_all;
+		R16.H = #HI(do_work_pending);
+		R0 = #VM_INT_DISABLE;
 	}
 
 	/*
-	 * Compare against the "return to userspace" _TIF_WORK_MASK
+	 * Check also the return from fork/system call, normally coming back from
+	 * user mode
+	 *
+	 * R16 needs to have do_work_pending, and R0 should have VM_INT_DISABLE
 	 */
-	R1 = and(R1,R0);
-	{ P0 = cmp.eq(R1,#0); if (!P0.new) jump:t work_pending;}
-	jump restore_all;	/* we're outta here! */
 
-work_pending:
+check_work_pending:
+	/* Disable interrupts while checking TIF */
+	trap1(#HVM_TRAP1_VMSETIE)
 	{
-		P0 = tstbit(R1, #TIF_NEED_RESCHED);
-		if (!P0.new) jump:nt work_notifysig;
+		R0 = R29;	/* regs should still be at top of stack */
+		R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
+		callr R16;
 	}
-	call schedule
-	jump return_from_syscall;	/* check for more work */
 
-work_notifysig:
-	/* this is the part that's kind of fuzzy. */
-	R1 = and(R0, #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME));
-	P0 = cmp.eq(R1, #0);
-	if P0 jump restore_all
-	R1 = R0;	/* unsigned long thread_info_flags */
-	R0 = R29;	/* regs should still be at top of stack */
-	call do_notify_resume
+	{
+		P0 = cmp.eq(R0, #0); if (!P0.new) jump:nt check_work_pending;
+		R0 = #VM_INT_DISABLE;
+	}
 
 restore_all:
-	/* Disable interrupts, if they weren't already, before reg restore. */
-	R0 = #VM_INT_DISABLE
+	/*
+	 * Disable interrupts, if they weren't already, before reg restore.
+	 * R0 gets preloaded with #VM_INT_DISABLE before we get here.
+	 */
 	trap1(#HVM_TRAP1_VMSETIE)
 
 	/* do the setregs here for VM 0.5 */
 	/* R29 here should already be pointing at pt_regs */
-	R1:0 = memd(R29 + #_PT_ER_VMEL);
-	R3:2 = memd(R29 + #_PT_ER_VMPSP);
+	{
+		R1:0 = memd(R29 + #_PT_ER_VMEL);
+		R3:2 = memd(R29 + #_PT_ER_VMPSP);
+	}
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
 	trap1(#HVM_TRAP1_VMSETREGS);
+#else
+	G1:0 = R1:0;
+	G3:2 = R3:2;
+#endif
 
 	R0 = R29
 	restore_pt_regs()
-	R1:0 = memd(R29 + #_PT_R0100);
-	R29 = add(R29, #_PT_REGS_SIZE);
+	{
+		R1:0 = memd(R29 + #_PT_R0100);
+		R29 = add(R29, #_PT_REGS_SIZE);
+	}
 	trap1(#HVM_TRAP1_VMRTE)
 	/* Notreached */
 
+
 	.globl _K_enter_genex
 _K_enter_genex:
 	vm_event_entry(do_genex)
@@ -262,12 +367,27 @@ _K_enter_trap0:
 _K_enter_machcheck:
 	vm_event_entry(do_machcheck)
 
+	.globl _K_enter_debug
+_K_enter_debug:
+	vm_event_entry(do_debug_exception)
 
 	.globl ret_from_fork
 ret_from_fork:
-	call schedule_tail
-	P0 = cmp.eq(R24, #0);
-	if P0 jump return_from_syscall
-	R0 = R25;
-	callr R24
-	jump return_from_syscall
+	{
+		call schedule_tail
+		R16.H = #HI(do_work_pending);
+	}
+	{
+		P0 = cmp.eq(R24, #0);
+		R16.L = #LO(do_work_pending);
+		R0 = #VM_INT_DISABLE;
+	}
+	if P0 jump check_work_pending
+	{
+		R0 = R25;
+		callr R24
+	}
+	{
+		jump check_work_pending
+		R0 = #VM_INT_DISABLE;
+	}