Diffstat (limited to 'arch/tile/kernel/intvec_64.S')
 -rw-r--r--  arch/tile/kernel/intvec_64.S  1231
 1 file changed, 1231 insertions, 0 deletions
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
new file mode 100644
index 000000000000..79c93e10ba27
--- /dev/null
+++ b/arch/tile/kernel/intvec_64.S
@@ -0,0 +1,1231 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Linux interrupt vectors.
 */

#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/asm-offsets.h>
#include <asm/types.h>
#include <hv/hypervisor.h>
#include <arch/abi.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>

#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif

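/*
 * Generate the address of a pt_regs field: sp sits C_ABI_SAVE_AREA_SIZE
 * below the saved pt_regs (see the entry comments below), so each field
 * lives at sp + C_ABI_SAVE_AREA_SIZE + its pt_regs offset.
 */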
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)


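/*
 * Store or load one register and post-adjust the pointer in a single
 * bundle; the default delta of -8/+8 walks one 64-bit word at a time.
 * pop_reg_zero also zeroes a second register in the same bundle, which
 * the return path uses to clear caller-saves while loads are in flight.
 */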
        .macro  push_reg reg, ptr=sp, delta=-8
        {
         st     \ptr, \reg
         addli  \ptr, \ptr, \delta
        }
        .endm

        .macro  pop_reg reg, ptr=sp, delta=8
        {
         ld     \reg, \ptr
         addli  \ptr, \ptr, \delta
        }
        .endm

        .macro  pop_reg_zero reg, zreg, ptr=sp, delta=8
        {
         move   \zreg, zero
         ld     \reg, \ptr
         addi   \ptr, \ptr, \delta
        }
        .endm

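/*
 * Spill the "extra" callee-saved registers (r34..r51), which normal
 * interrupt entry skips, into their pt_regs slots; used when signal
 * delivery, ptrace, or clone needs the full register state on the stack.
 */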
        .macro  push_extra_callee_saves reg
        PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
        push_reg r51, \reg
        push_reg r50, \reg
        push_reg r49, \reg
        push_reg r48, \reg
        push_reg r47, \reg
        push_reg r46, \reg
        push_reg r45, \reg
        push_reg r44, \reg
        push_reg r43, \reg
        push_reg r42, \reg
        push_reg r41, \reg
        push_reg r40, \reg
        push_reg r39, \reg
        push_reg r38, \reg
        push_reg r37, \reg
        push_reg r36, \reg
        push_reg r35, \reg
        push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
        .endm

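/*
 * Build the 64-bit address of the message string 16 bits at a time
 * with the moveli/shl16insli hw2_last/hw1/hw0 sequence, then call the
 * C panic() routine with it as the format argument.
 */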
        .macro  panic str
        .pushsection .rodata, "a"
1:
        .asciz  "\str"
        .popsection
        {
         moveli r0, hw2_last(1b)
        }
        {
         shl16insli r0, r0, hw1(1b)
        }
        {
         shl16insli r0, r0, hw0(1b)
         jal    panic
        }
        .endm


#ifdef __COLLECT_LINKER_FEEDBACK__
        .pushsection .text.intvec_feedback,"ax"
intvec_feedback:
        .popsection
#endif

/*
 * Default interrupt handler.
 *
 * vecnum is where we'll put this code.
 * c_routine is the C routine we'll call.
 *
 * The C routine is passed two arguments:
 * - A pointer to the pt_regs state.
 * - The interrupt vector number.
 *
 * The "processing" argument specifies the code for processing
 * the interrupt.  Defaults to "handle_interrupt".
 */
        .macro  int_hand vecnum, vecname, c_routine, processing=handle_interrupt
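        /* Each vector gets a 256-byte slot in .intrpt1 (vecnum << 8). */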
        .org    (\vecnum << 8)
intvec_\vecname:
        /* Temporarily save a register so we have somewhere to work. */

        mtspr   SPR_SYSTEM_SAVE_K_1, r0
        mfspr   r0, SPR_EX_CONTEXT_K_1

        andi    r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */

        .ifc    \vecnum, INT_DOUBLE_FAULT
        /*
         * For double-faults from user-space, fall through to the normal
         * register save and stack setup path.  Otherwise, it's the
         * hypervisor giving us one last chance to dump diagnostics, and we
         * branch to the kernel_double_fault routine to do so.
         */
        beqz    r0, 1f
        j       _kernel_double_fault
1:
        .else
        /*
         * If we're coming from user-space, then set sp to the top of
         * the kernel stack.  Otherwise, assume sp is already valid.
         */
        {
         bnez   r0, 0f
         move   r0, sp
        }
        .endif

        .ifc    \c_routine, do_page_fault
        /*
         * The page_fault handler may be downcalled directly by the
         * hypervisor even when Linux is running and has ICS set.
         *
         * In this case the contents of EX_CONTEXT_K_1 reflect the
         * previous fault and can't be relied on to choose whether or
         * not to reinitialize the stack pointer.  So we add a test
         * to see whether SYSTEM_SAVE_K_2 has the high bit set,
         * and if so we don't reinitialize sp, since we must be coming
         * from Linux.  (In fact the precise case is !(val & ~1),
         * but any Linux PC has to have the high bit set.)
         *
         * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
         * any path that turns into a downcall to one of our TLB handlers.
         *
         * FIXME: if we end up never using this path, perhaps we should
         * prevent the hypervisor from generating downcalls in this case.
         * The advantage of getting a downcall is we can panic in Linux.
         */
        mfspr   r0, SPR_SYSTEM_SAVE_K_2
        {
         bltz   r0, 0f  /* high bit in S_S_1_2 is for a PC to use */
         move   r0, sp
        }
        .endif


        /*
         * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
         * the current stack top in the higher bits.  So we recover
         * our stack top by just masking off the low bits, then
         * point sp at the top aligned address on the actual stack page.
         */
        mfspr   r0, SPR_SYSTEM_SAVE_K_0
        mm      r0, zero, LOG2_THREAD_SIZE, 63

0:
        /*
         * Align the stack mod 64 so we can properly predict what
         * cache lines we need to write-hint to reduce memory fetch
         * latency as we enter the kernel.  The layout of memory is
         * as follows, with cache line 0 at the lowest VA, and cache
         * line 8 just below the r0 value this "andi" computes.
         * Note that we never write to cache line 8, and we skip
         * cache lines 1-3 for syscalls.
         *
         *    cache line 8: ptregs padding (two words)
         *    cache line 7: sp, lr, pc, ex1, faultnum, orig_r0, flags, cmpexch
         *    cache line 6: r46...r53 (tp)
         *    cache line 5: r38...r45
         *    cache line 4: r30...r37
         *    cache line 3: r22...r29
         *    cache line 2: r14...r21
         *    cache line 1: r6...r13
         *    cache line 0: 2 x frame, r0..r5
         */
        andi    r0, r0, -64

        /*
         * Push the first four registers on the stack, so that we can set
         * them to vector-unique values before we jump to the common code.
         *
         * Registers are pushed on the stack as a struct pt_regs,
         * with the sp initially just above the struct, and when we're
         * done, sp points to the base of the struct, minus
         * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
         *
         * This routine saves just the first four registers, plus the
         * stack context so we can do proper backtracing right away,
         * and defers to handle_interrupt to save the rest.
         * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
         */
        addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
        wh64    r0              /* cache line 7 */
        {
         st     r0, lr
         addli  r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
        }
        {
         st     r0, sp
         addli  sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
        }
        wh64    sp              /* cache line 6 */
        {
         st     sp, r52
         addli  sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
        }
        wh64    sp              /* cache line 0 */
        {
         st     sp, r1
         addli  sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
        }
        {
         st     sp, r2
         addli  sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
        }
        {
         st     sp, r3
         addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
        }
        mfspr   r0, SPR_EX_CONTEXT_K_0
        .ifc    \processing,handle_syscall
        /*
         * Bump the saved PC by one bundle so that when we return, we won't
         * execute the same swint instruction again.  We need to do this while
         * we're in the critical section.
         */
        addi    r0, r0, 8
        .endif
        {
         st     sp, r0
         addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
        }
        mfspr   r0, SPR_EX_CONTEXT_K_1
        {
         st     sp, r0
         addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
        /*
         * Use r0 for syscalls so it's a temporary; use r1 for interrupts
         * so that it gets passed through unchanged to the handler routine.
         * Note that the .if conditional confusingly spans bundles.
         */
        .ifc    \processing,handle_syscall
         movei  r0, \vecnum
        }
        {
         st     sp, r0
        .else
         movei  r1, \vecnum
        }
        {
         st     sp, r1
        .endif
         addli  sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
        }
        mfspr   r0, SPR_SYSTEM_SAVE_K_1        /* Original r0 */
        {
         st     sp, r0
         addi   sp, sp, -PTREGS_OFFSET_REG(0) - 8
        }
        {
         st     sp, zero        /* write zero into "Next SP" frame pointer */
         addi   sp, sp, -8      /* leave SP pointing at bottom of frame */
        }
        .ifc    \processing,handle_syscall
        j       handle_syscall
        .else
        /* Capture per-interrupt SPR context to registers. */
        .ifc    \c_routine, do_page_fault
        mfspr   r2, SPR_SYSTEM_SAVE_K_3        /* address of page fault */
        mfspr   r3, SPR_SYSTEM_SAVE_K_2        /* info about page fault */
        .else
        .ifc    \vecnum, INT_ILL_TRANS
        mfspr   r2, ILL_TRANS_REASON
        .else
        .ifc    \vecnum, INT_DOUBLE_FAULT
        mfspr   r2, SPR_SYSTEM_SAVE_K_2        /* double fault info from HV */
        .else
        .ifc    \c_routine, do_trap
        mfspr   r2, GPV_REASON
        .else
        .ifc    \c_routine, op_handle_perf_interrupt
        mfspr   r2, PERF_COUNT_STS
#if CHIP_HAS_AUX_PERF_COUNTERS()
        .else
        .ifc    \c_routine, op_handle_aux_perf_interrupt
        mfspr   r2, AUX_PERF_COUNT_STS
        .endif
#endif
        .endif
        .endif
        .endif
        .endif
        .endif
        /* Put function pointer in r0 */
        moveli  r0, hw2_last(\c_routine)
        shl16insli r0, r0, hw1(\c_routine)
        {
         shl16insli r0, r0, hw0(\c_routine)
         j      \processing
        }
        .endif
        ENDPROC(intvec_\vecname)

#ifdef __COLLECT_LINKER_FEEDBACK__
        .pushsection .text.intvec_feedback,"ax"
        .org    (\vecnum << 5)
        FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
        jrp     lr
        .popsection
#endif

        .endm


/*
 * Save the rest of the registers that we didn't save in the actual
 * vector itself.  We can't use r0-r10 inclusive here.
 */
        .macro  finish_interrupt_save, function

        /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
        PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
        {
        .ifc    \function,handle_syscall
         st     r52, r0
        .else
         st     r52, zero
        .endif
         PTREGS_PTR(r52, PTREGS_OFFSET_TP)
        }
        st      r52, tp
        {
         mfspr  tp, CMPEXCH_VALUE
         PTREGS_PTR(r52, PTREGS_OFFSET_CMPEXCH)
        }

        /*
         * For ordinary syscalls, we save neither caller- nor callee-
         * save registers, since the syscall invoker doesn't expect the
         * caller-saves to be saved, and the called kernel functions will
         * take care of saving the callee-saves for us.
         *
         * For interrupts we save just the caller-save registers.  Saving
         * them is required (since the "caller" can't save them).  Again,
         * the called kernel functions will restore the callee-save
         * registers for us appropriately.
         *
         * On return, we normally restore nothing special for syscalls,
         * and just the caller-save registers for interrupts.
         *
         * However, there are some important caveats to all this:
         *
         * - We always save a few callee-save registers to give us
         *   some scratchpad registers to carry across function calls.
         *
         * - fork/vfork/etc require us to save all the callee-save
         *   registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
         *
         * - We always save r0..r5 and r10 for syscalls, since we need
         *   to reload them a bit later for the actual kernel call, and
         *   since we might need them for -ERESTARTNOINTR, etc.
         *
         * - Before invoking a signal handler, we save the unsaved
         *   callee-save registers so they are visible to the
         *   signal handler or any ptracer.
         *
         * - If the unsaved callee-save registers are modified, we set
         *   a bit in pt_regs so we know to reload them from pt_regs
         *   and not just rely on the kernel function unwinding.
         *   (Done for ptrace register writes and SA_SIGINFO handler.)
         */
        {
         st     r52, tp
         PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
        }
        wh64    r52             /* cache line 4 */
        push_reg r33, r52
        push_reg r32, r52
        push_reg r31, r52
        .ifc    \function,handle_syscall
        push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
        push_reg TREG_SYSCALL_NR_NAME, r52, \
                 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
        .else

        push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
        wh64    r52             /* cache line 3 */
        push_reg r29, r52
        push_reg r28, r52
        push_reg r27, r52
        push_reg r26, r52
        push_reg r25, r52
        push_reg r24, r52
        push_reg r23, r52
        push_reg r22, r52
        wh64    r52             /* cache line 2 */
        push_reg r21, r52
        push_reg r20, r52
        push_reg r19, r52
        push_reg r18, r52
        push_reg r17, r52
        push_reg r16, r52
        push_reg r15, r52
        push_reg r14, r52
        wh64    r52             /* cache line 1 */
        push_reg r13, r52
        push_reg r12, r52
        push_reg r11, r52
        push_reg r10, r52
        push_reg r9, r52
        push_reg r8, r52
        push_reg r7, r52
        push_reg r6, r52

        .endif

        push_reg r5, r52
        st      r52, r4

        /* Load tp with our per-cpu offset. */
#ifdef CONFIG_SMP
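        /*
         * The cpu number lives in the low bits of SYSTEM_SAVE_K_0
         * (see the stack-recovery comment above); extract it, then use
         * shl3add to index the 64-bit __per_cpu_offset[] array.
         */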
        {
         mfspr  r20, SPR_SYSTEM_SAVE_K_0
         moveli r21, hw2_last(__per_cpu_offset)
        }
        {
         shl16insli r21, r21, hw1(__per_cpu_offset)
         bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
        }
        shl16insli r21, r21, hw0(__per_cpu_offset)
        shl3add r20, r20, r21
        ld      tp, r20
#else
        move    tp, zero
#endif

        /*
         * If we will be returning to the kernel, we will need to
         * reset the interrupt masks to the state they had before.
         * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
         */
        mfspr   r32, SPR_EX_CONTEXT_K_1
        {
         andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
         PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
        }
        beqzt   r32, 1f         /* zero if from user space */
        IRQS_DISABLED(r32)      /* zero if irqs enabled */
#if PT_FLAGS_DISABLE_IRQ != 1
# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
#endif
1:
        .ifnc   \function,handle_syscall
        /* Record the fact that we saved the caller-save registers above. */
        ori     r32, r32, PT_FLAGS_CALLER_SAVES
        .endif
        st      r21, r32

#ifdef __COLLECT_LINKER_FEEDBACK__
        /*
         * Notify the feedback routines that we were in the
         * appropriate fixed interrupt vector area.  Note that we
         * still have ICS set at this point, so we can't invoke any
         * atomic operations or we will panic.  The feedback
         * routines internally preserve r0..r10 and r30 up.
         */
        .ifnc   \function,handle_syscall
        shli    r20, r1, 5
        .else
        moveli  r20, INT_SWINT_1 << 5
        .endif
        moveli  r21, hw2_last(intvec_feedback)
        shl16insli r21, r21, hw1(intvec_feedback)
        shl16insli r21, r21, hw0(intvec_feedback)
        add     r20, r20, r21
        jalr    r20

        /* And now notify the feedback routines that we are here. */
        FEEDBACK_ENTER(\function)
#endif

        /*
         * We've captured enough state to the stack (including in
         * particular our EX_CONTEXT state) that we can now release
         * the interrupt critical section and replace it with our
         * standard "interrupts disabled" mask value.  This allows
         * synchronous interrupts (and profile interrupts) to punch
         * through from this point onwards.
         */
        .ifc    \function,handle_nmi
        IRQ_DISABLE_ALL(r20)
        .else
        IRQ_DISABLE(r20, r21)
        .endif
        mtspr   INTERRUPT_CRITICAL_SECTION, zero

        /*
         * Prepare the first 256 stack bytes to be rapidly accessible
         * without having to fetch the background data.
         */
        addi    r52, sp, -64
        {
         wh64   r52
         addi   r52, r52, -64
        }
        {
         wh64   r52
         addi   r52, r52, -64
        }
        {
         wh64   r52
         addi   r52, r52, -64
        }
        wh64    r52

#ifdef CONFIG_TRACE_IRQFLAGS
        .ifnc   \function,handle_nmi
        /*
         * We finally have enough state set up to notify the irq
         * tracing code that irqs were disabled on entry to the handler.
         * The TRACE_IRQS_OFF call clobbers registers r0-r29.
         * For syscalls, we already have the register state saved away
         * on the stack, so we don't bother to do any register saves here,
         * and later we pop the registers back off the kernel stack.
         * For interrupt handlers, save r0-r3 in callee-saved registers.
         */
        .ifnc   \function,handle_syscall
        { move r30, r0; move r31, r1 }
        { move r32, r2; move r33, r3 }
        .endif
        TRACE_IRQS_OFF
        .ifnc   \function,handle_syscall
        { move r0, r30; move r1, r31 }
        { move r2, r32; move r3, r33 }
        .endif
        .endif
#endif

        .endm

/*
 * Redispatch a downcall.
 */
        .macro  dc_dispatch vecnum, vecname
        .org    (\vecnum << 8)
intvec_\vecname:
        j       hv_downcall_dispatch
        ENDPROC(intvec_\vecname)
        .endm

/*
 * Common code for most interrupts.  The C function we're eventually
 * going to is in r0, and the faultnum is in r1; the original
 * values for those registers are on the stack.
 */
        .pushsection .text.handle_interrupt,"ax"
handle_interrupt:
        finish_interrupt_save handle_interrupt

        /* Jump to the C routine; it should enable irqs as soon as possible. */
        {
         jalr   r0
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
        }
        FEEDBACK_REENTER(handle_interrupt)
        {
         movei  r30, 0          /* not an NMI */
         j      interrupt_return
        }
        STD_ENDPROC(handle_interrupt)

/*
 * This routine takes a boolean in r30 indicating if this is an NMI.
 * If so, we also expect a boolean in r31 indicating whether to
 * re-enable the oprofile interrupts.
 */
STD_ENTRY(interrupt_return)
        /* If we're resuming to kernel space, don't check thread flags. */
        {
         bnez   r30, .Lrestore_all  /* NMIs don't special-case user-space */
         PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
        }
        ld      r29, r29
        andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
        {
         beqzt  r29, .Lresume_userspace
         PTREGS_PTR(r29, PTREGS_OFFSET_PC)
        }

        /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
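        /*
         * (Otherwise the iret would land back on the nap bundle and
         * immediately go back to sleep; one bundle is 8 bytes.)
         */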
        moveli  r27, hw2_last(_cpu_idle_nap)
        {
         ld     r28, r29
         shl16insli r27, r27, hw1(_cpu_idle_nap)
        }
        {
         shl16insli r27, r27, hw0(_cpu_idle_nap)
        }
        {
         cmpeq  r27, r27, r28
        }
        {
         blbc   r27, .Lrestore_all
         addi   r28, r28, 8
        }
        st      r29, r28
        j       .Lrestore_all

.Lresume_userspace:
        FEEDBACK_REENTER(interrupt_return)

        /*
         * Disable interrupts so as to make sure we don't
         * miss an interrupt that sets any of the thread flags (like
         * need_resched or sigpending) between sampling and the iret.
         * Routines like schedule() or do_signal() may re-enable
         * interrupts before returning.
         */
        IRQ_DISABLE(r20, r21)
        TRACE_IRQS_OFF          /* Note: clobbers registers r0-r29 */

        /* Get base of stack in r32; note r30/31 are used as arguments here. */
        GET_THREAD_INFO(r32)


        /* Check to see if there is any work to do before returning to user. */
        {
         addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
         moveli r1, hw1_last(_TIF_ALLWORK_MASK)
        }
        {
         ld     r29, r29
         shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
        }
        and     r1, r29, r1
        beqzt   r1, .Lrestore_all

        /*
         * Make sure we have all the registers saved for signal
         * handling or single-step.  Call out to C code to figure out
         * exactly what we need to do for each flag bit, then if
         * necessary, reload the flags and recheck.
         */
        push_extra_callee_saves r0
        {
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
         jal    do_work_pending
        }
        bnez    r0, .Lresume_userspace

        /*
         * In the NMI case we omit the call to single_process_check_nohz,
         * which normally checks to see if we should start or stop the
         * scheduler tick, because we can't call arbitrary Linux code from
         * an NMI context.  We always call the homecache TLB deferral code
         * to re-trigger the deferral mechanism.
         *
         * The other chunk of responsibility this code has is to reset the
         * interrupt masks appropriately to reset irqs and NMIs.  We have
         * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
         * lockdep-type stuff, but we can't set ICS until afterwards, since
         * ICS can only be used in very tight chunks of code to avoid
         * tripping over various assertions that it is off.
         */
.Lrestore_all:
        PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
        {
         ld     r0, r0
         PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
        }
        {
         andi   r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
         ld     r32, r32
        }
        bnez    r0, 1f
        j       2f
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use blbct below
#endif
1:      blbct   r32, 2f
        IRQ_DISABLE(r20, r21)
        TRACE_IRQS_OFF
        movei   r0, 1
        mtspr   INTERRUPT_CRITICAL_SECTION, r0
        beqzt   r30, .Lrestore_regs
        j       3f
2:      TRACE_IRQS_ON
        movei   r0, 1
        mtspr   INTERRUPT_CRITICAL_SECTION, r0
        IRQ_ENABLE(r20, r21)
        beqzt   r30, .Lrestore_regs
3:


        /*
         * We now commit to returning from this interrupt, since we will be
         * doing things like setting EX_CONTEXT SPRs and unwinding the stack
         * frame.  No calls should be made to any other code after this point.
         * This code should only be entered with ICS set.
         * r32 must still be set to ptregs.flags.
         * We launch loads to each cache line separately first, so we can
         * get some parallelism out of the memory subsystem.
         * We start zeroing caller-saved registers throughout, since
         * that will save some cycles if this turns out to be a syscall.
         */
.Lrestore_regs:
        FEEDBACK_REENTER(interrupt_return)  /* called from elsewhere */

        /*
         * Rotate so we have one high bit and one low bit to test.
         * - low bit says whether to restore all the callee-saved registers,
         *   or just r30-r33, and r52 up.
         * - high bit (i.e. sign bit) says whether to restore all the
         *   caller-saved registers, or just r0.
         */
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
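        /*
         * rotli by 62 rotates right by 2: bit 2 (PT_FLAGS_RESTORE_REGS)
         * ends up in the low bit, and bit 1 (PT_FLAGS_CALLER_SAVES) in
         * the sign bit, which is what the blbs/bltzt tests below check.
         */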
        {
         rotli  r20, r32, 62
         PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
        }

        /*
         * Load cache lines 0, 4, 6 and 7, in that order, then use
         * the last loaded value, which makes it likely that the other
         * cache lines have also loaded, at which point we should be
         * able to safely read all the remaining words on those cache
         * lines without waiting for the memory subsystem.
         */
        pop_reg r0, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
        pop_reg r30, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_REG(30)
        pop_reg_zero r52, r3, sp, PTREGS_OFFSET_CMPEXCH - PTREGS_OFFSET_REG(52)
        pop_reg_zero r21, r27, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_CMPEXCH
        pop_reg_zero lr, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_EX1
        {
         mtspr  CMPEXCH_VALUE, r21
         move   r4, zero
        }
        pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
        {
         mtspr  SPR_EX_CONTEXT_K_1, lr
         andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
        }
        {
         mtspr  SPR_EX_CONTEXT_K_0, r21
         move   r5, zero
        }

        /* Restore callee-saveds that we actually use. */
        pop_reg_zero r31, r6
        pop_reg_zero r32, r7
        pop_reg_zero r33, r8, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)

        /*
         * If we modified other callee-saveds, restore them now.
         * This is rare, but could be via ptrace or signal handler.
         */
        {
         move   r9, zero
         blbs   r20, .Lrestore_callees
        }
.Lcontinue_restore_regs:

        /* Check if we're returning from a syscall. */
        {
         move   r10, zero
         bltzt  r20, 1f  /* no, so go restore caller-save registers */
        }

        /*
         * Check if we're returning to userspace.
         * Note that if we're not, we don't worry about zeroing everything.
         */
        {
         addli  sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
         bnez   lr, .Lkernel_return
        }

        /*
         * On return from syscall, we've restored r0 from pt_regs, but we
         * clear the remainder of the caller-saved registers.  We could
         * restore the syscall arguments, but there's not much point,
         * and it ensures user programs aren't trying to use the
         * caller-saves if we clear them, as well as avoiding leaking
         * kernel pointers into userspace.
         */
        pop_reg_zero lr, r11, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
        pop_reg_zero tp, r12, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
        {
         ld     sp, sp
         move   r13, zero
         move   r14, zero
        }
        { move r15, zero; move r16, zero }
        { move r17, zero; move r18, zero }
        { move r19, zero; move r20, zero }
        { move r21, zero; move r22, zero }
        { move r23, zero; move r24, zero }
        { move r25, zero; move r26, zero }

        /* Set r1 to errno if we are returning an error, otherwise zero. */
        {
         moveli r29, 4096
         sub    r1, zero, r0
        }
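        /*
         * A return value in (-4096, 0) is an error return: r1 = -r0 is
         * then the positive errno; otherwise the mnz below zeroes r1.
         */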
        {
         move   r28, zero
         cmpltu r29, r1, r29
        }
        {
         mnz    r1, r29, r1
         move   r29, zero
        }
        iret

        /*
         * Not a syscall, so restore caller-saved registers.
         * First kick off loads for cache lines 1-3, which we're touching
         * for the first time here.
         */
        .align 64
1:      pop_reg r29, sp, PTREGS_OFFSET_REG(21) - PTREGS_OFFSET_REG(29)
        pop_reg r21, sp, PTREGS_OFFSET_REG(13) - PTREGS_OFFSET_REG(21)
        pop_reg r13, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(13)
        pop_reg r1
        pop_reg r2
        pop_reg r3
        pop_reg r4
        pop_reg r5
        pop_reg r6
        pop_reg r7
        pop_reg r8
        pop_reg r9
        pop_reg r10
        pop_reg r11
        pop_reg r12, sp, 16
        /* r13 already restored above */
        pop_reg r14
        pop_reg r15
        pop_reg r16
        pop_reg r17
        pop_reg r18
        pop_reg r19
        pop_reg r20, sp, 16
        /* r21 already restored above */
        pop_reg r22
        pop_reg r23
        pop_reg r24
        pop_reg r25
        pop_reg r26
        pop_reg r27
        pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
        /* r29 already restored above */
        bnez    lr, .Lkernel_return
        pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
        pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
        ld      sp, sp
        iret

        /*
         * We can't restore tp when in kernel mode, since a thread might
         * have migrated from another cpu and brought a stale tp value.
         */
.Lkernel_return:
        pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
        ld      sp, sp
        iret

        /* Restore callee-saved registers from r34 to r51. */
.Lrestore_callees:
        addli   sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
        pop_reg r34
        pop_reg r35
        pop_reg r36
        pop_reg r37
        pop_reg r38
        pop_reg r39
        pop_reg r40
        pop_reg r41
        pop_reg r42
        pop_reg r43
        pop_reg r44
        pop_reg r45
        pop_reg r46
        pop_reg r47
        pop_reg r48
        pop_reg r49
        pop_reg r50
        pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
        j       .Lcontinue_restore_regs
STD_ENDPROC(interrupt_return)

/*
 * "NMI" interrupts mask ALL interrupts before calling the
 * handler, and don't check thread flags, etc., on the way
 * back out.  In general, the only things we do here for NMIs
 * are register save/restore and dataplane kernel-TLB management.
 * We don't (for example) deal with start/stop of the sched tick.
 */
        .pushsection .text.handle_nmi,"ax"
handle_nmi:
        finish_interrupt_save handle_nmi
        {
         jalr   r0
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
        }
        FEEDBACK_REENTER(handle_nmi)
        {
         movei  r30, 1
         move   r31, r0
        }
        j       interrupt_return
        STD_ENDPROC(handle_nmi)

/*
 * Parallel code for syscalls to handle_interrupt.
 */
        .pushsection .text.handle_syscall,"ax"
handle_syscall:
        finish_interrupt_save handle_syscall

        /* Enable irqs. */
        TRACE_IRQS_ON
        IRQ_ENABLE(r20, r21)

        /* Bump the counter for syscalls made on this tile. */
        moveli  r20, hw2_last(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
        shl16insli r20, r20, hw1(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
        shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
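        /* irq_stat is a per-cpu variable; tp still holds our per-cpu offset. */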
        add     r20, r20, tp
        ld4s    r21, r20
        addi    r21, r21, 1
        st4     r20, r21

        /* Trace syscalls, if requested. */
        GET_THREAD_INFO(r31)
        addi    r31, r31, THREAD_INFO_FLAGS_OFFSET
        ld      r30, r31
        andi    r30, r30, _TIF_SYSCALL_TRACE
        {
         addi   r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
         beqzt  r30, .Lrestore_syscall_regs
        }
        jal     do_syscall_trace
        FEEDBACK_REENTER(handle_syscall)

        /*
         * We always reload our registers from the stack at this
         * point.  They might be valid, if we didn't build with
         * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
         * doing syscall tracing, but there are enough cases now that it
         * seems simplest just to do the reload unconditionally.
         */
.Lrestore_syscall_regs:
        {
         ld     r30, r30
         PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
        }
        pop_reg r0,  r11
        pop_reg r1,  r11
        pop_reg r2,  r11
        pop_reg r3,  r11
        pop_reg r4,  r11
        pop_reg r5,  r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
        {
         ld     TREG_SYSCALL_NR_NAME, r11
         moveli r21, __NR_syscalls
        }

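        /*
         * r30 was loaded from thread_info->status above; its low bit
         * flags a compat (32-bit) task, tested by the blbs below.
         */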
        /* Ensure that the syscall number is within the legal range. */
        {
         moveli r20, hw2(sys_call_table)
         blbs   r30, .Lcompat_syscall
        }
        {
         cmpltu r21, TREG_SYSCALL_NR_NAME, r21
         shl16insli r20, r20, hw1(sys_call_table)
        }
        {
         blbc   r21, .Linvalid_syscall
         shl16insli r20, r20, hw0(sys_call_table)
        }
.Lload_syscall_pointer:
        shl3add r20, TREG_SYSCALL_NR_NAME, r20
        ld      r20, r20

        /* Jump to syscall handler. */
        jalr    r20
.Lhandle_syscall_link:  /* value of "lr" after "jalr r20" above */

        /*
         * Write our r0 onto the stack so it gets restored instead
         * of whatever the user had there before.
         * In compat mode, sign-extend r0 before storing it.
         */
        {
         PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
         blbct  r30, 1f
        }
        addxi   r0, r0, 0
1:      st      r29, r0

.Lsyscall_sigreturn_skip:
        FEEDBACK_REENTER(handle_syscall)

        /* Do syscall trace again, if requested. */
        ld      r30, r31
        andi    r30, r30, _TIF_SYSCALL_TRACE
        beqzt   r30, 1f
        jal     do_syscall_trace
        FEEDBACK_REENTER(handle_syscall)
1:      j       .Lresume_userspace  /* jump into middle of interrupt_return */

.Lcompat_syscall:
        /*
         * Load the base of the compat syscall table in r20, and
         * range-check the syscall number (duplicated from 64-bit path).
         * Sign-extend all the user's passed arguments to make them consistent.
         * Also save the original "r(n)" values away in "r(11+n)" in
         * case the syscall table entry wants to validate them.
         */
        moveli  r20, hw2(compat_sys_call_table)
        {
         cmpltu r21, TREG_SYSCALL_NR_NAME, r21
         shl16insli r20, r20, hw1(compat_sys_call_table)
        }
        {
         blbc   r21, .Linvalid_syscall
         shl16insli r20, r20, hw0(compat_sys_call_table)
        }
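        /*
         * addxi does a 32-bit add and sign-extends the result to 64
         * bits, so adding zero just sign-extends each argument while
         * the original values are preserved in r11..r16.
         */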
        { move r11, r0; addxi r0, r0, 0 }
        { move r12, r1; addxi r1, r1, 0 }
        { move r13, r2; addxi r2, r2, 0 }
        { move r14, r3; addxi r3, r3, 0 }
        { move r15, r4; addxi r4, r4, 0 }
        { move r16, r5; addxi r5, r5, 0 }
        j       .Lload_syscall_pointer

.Linvalid_syscall:
        /* Report an invalid syscall back to the user program */
        {
         PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
         movei  r28, -ENOSYS
        }
        st      r29, r28
        j       .Lresume_userspace  /* jump into middle of interrupt_return */
        STD_ENDPROC(handle_syscall)

        /* Return the address for oprofile to suppress in backtraces. */
STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
        lnk     r0
        {
         addli  r0, r0, .Lhandle_syscall_link - .
         jrp    lr
        }
        STD_ENDPROC(handle_syscall_link_address)

STD_ENTRY(ret_from_fork)
        jal     sim_notify_fork
        jal     schedule_tail
        FEEDBACK_REENTER(ret_from_fork)
        j       .Lresume_userspace
        STD_ENDPROC(ret_from_fork)

/* Various stub interrupt handlers and syscall handlers */

STD_ENTRY_LOCAL(_kernel_double_fault)
        mfspr   r1, SPR_EX_CONTEXT_K_0
        move    r2, lr
        move    r3, sp
        move    r4, r52
        addi    sp, sp, -C_ABI_SAVE_AREA_SIZE
        j       kernel_double_fault
        STD_ENDPROC(_kernel_double_fault)

STD_ENTRY_LOCAL(bad_intr)
        mfspr   r2, SPR_EX_CONTEXT_K_0
        panic   "Unhandled interrupt %#x: PC %#lx"
        STD_ENDPROC(bad_intr)

/* Put address of pt_regs in reg and jump. */
#define PTREGS_SYSCALL(x, reg)                          \
        STD_ENTRY(_##x);                                \
        {                                               \
         PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
         j      x                                       \
        };                                              \
        STD_ENDPROC(_##x)

/*
 * Special-case sigreturn to not write r0 to the stack on return.
 * This is technically more efficient, but it also avoids difficulties
 * in the 64-bit OS when handling 32-bit compat code, since we must not
 * sign-extend r0 for the sigreturn return-value case.
 */
#define PTREGS_SYSCALL_SIGRETURN(x, reg)                \
        STD_ENTRY(_##x);                                \
        addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
        {                                               \
         PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
         j      x                                       \
        };                                              \
        STD_ENDPROC(_##x)

PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
#ifdef CONFIG_COMPAT
PTREGS_SYSCALL(compat_sys_execve, r3)
PTREGS_SYSCALL(compat_sys_sigaltstack, r2)
PTREGS_SYSCALL_SIGRETURN(compat_sys_rt_sigreturn, r0)
#endif

/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
STD_ENTRY(_sys_clone)
        push_extra_callee_saves r4
        j       sys_clone
        STD_ENDPROC(_sys_clone)

/* The single-step support may need to read all the registers. */
int_unalign:
        push_extra_callee_saves r0
        j       do_trap

/* Include .intrpt1 array of interrupt vectors */
        .section ".intrpt1", "ax"

#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

#ifndef CONFIG_HARDWALL
#define do_hardwall_trap bad_intr
#endif

        int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
        int_hand     INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
#if CONFIG_KERNEL_PL == 2
        int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
        int_hand     INT_SINGLE_STEP_1, SINGLE_STEP_1, bad_intr
#else
        int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, bad_intr
        int_hand     INT_SINGLE_STEP_1, SINGLE_STEP_1, gx_singlestep_handle
#endif
        int_hand     INT_SINGLE_STEP_0, SINGLE_STEP_0, bad_intr
        int_hand     INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
        int_hand     INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
        int_hand     INT_ITLB_MISS, ITLB_MISS, do_page_fault
        int_hand     INT_ILL, ILL, do_trap
        int_hand     INT_GPV, GPV, do_trap
        int_hand     INT_IDN_ACCESS, IDN_ACCESS, do_trap
        int_hand     INT_UDN_ACCESS, UDN_ACCESS, do_trap
        int_hand     INT_SWINT_3, SWINT_3, do_trap
        int_hand     INT_SWINT_2, SWINT_2, do_trap
        int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
        int_hand     INT_SWINT_0, SWINT_0, do_trap
        int_hand     INT_ILL_TRANS, ILL_TRANS, do_trap
        int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
        int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
        int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
        int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
        int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
        int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
        int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
        int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
        int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
        int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr
        int_hand     INT_IPI_3, IPI_3, bad_intr
#if CONFIG_KERNEL_PL == 2
        int_hand     INT_IPI_2, IPI_2, tile_dev_intr
        int_hand     INT_IPI_1, IPI_1, bad_intr
#else
        int_hand     INT_IPI_2, IPI_2, bad_intr
        int_hand     INT_IPI_1, IPI_1, tile_dev_intr
#endif
        int_hand     INT_IPI_0, IPI_0, bad_intr
        int_hand     INT_PERF_COUNT, PERF_COUNT, \
                     op_handle_perf_interrupt, handle_nmi
        int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
                     op_handle_perf_interrupt, handle_nmi
        int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
#if CONFIG_KERNEL_PL == 2
        dc_dispatch  INT_INTCTRL_2, INTCTRL_2
        int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
#else
        int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
        dc_dispatch  INT_INTCTRL_1, INTCTRL_1
#endif
        int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
        int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
                     hv_message_intr
        int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
        int_hand     INT_I_ASID, I_ASID, bad_intr
        int_hand     INT_D_ASID, D_ASID, bad_intr
        int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap

        /* Synthetic interrupt delivered only by the simulator */
        int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint