path: root/arch/tile/kernel/intvec_32.S
author	Chris Metcalf <cmetcalf@tilera.com>	2010-05-28 23:09:12 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2010-06-04 17:11:18 -0400
commit	867e359b97c970a60626d5d76bbe2a8fadbf38fb (patch)
tree	c5ccbb7f5172e8555977119608ecb1eee3cc37e3 /arch/tile/kernel/intvec_32.S
parent	5360bd776f73d0a7da571d72a09a03f237e99900 (diff)
arch/tile: core support for Tilera 32-bit chips.
This change is the core kernel support for TILEPro and TILE64 chips. No driver support (except the console driver) is included yet. This includes the relevant Linux headers in asm/; the low-level "Tile architecture" headers in arch/, which are shared with the hypervisor, etc., and are build-system agnostic; and the relevant hypervisor headers in hv/.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/tile/kernel/intvec_32.S')
-rw-r--r--	arch/tile/kernel/intvec_32.S	2006
1 files changed, 2006 insertions, 0 deletions
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
new file mode 100644
index 000000000000..207271f0cce1
--- /dev/null
+++ b/arch/tile/kernel/intvec_32.S
@@ -0,0 +1,2006 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Linux interrupt vectors.
15 */
16
17#include <linux/linkage.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <asm/ptrace.h>
21#include <asm/thread_info.h>
22#include <asm/unistd.h>
23#include <asm/irqflags.h>
24#include <asm/atomic.h>
25#include <asm/asm-offsets.h>
26#include <hv/hypervisor.h>
27#include <arch/abi.h>
28#include <arch/interrupts.h>
29#include <arch/spr_def.h>
30
31#ifdef CONFIG_PREEMPT
32# error "No support for kernel preemption currently"
33#endif
34
35#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
36# error INT_INTCTRL_1 coded to set high interrupt mask
37#endif
38
39#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
40
41#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
42
43#if !CHIP_HAS_WH64()
44 /* By making this an empty macro, we can use wh64 in the code. */
45 .macro wh64 reg
46 .endm
47#endif
48
49 .macro push_reg reg, ptr=sp, delta=-4
50 {
51 sw \ptr, \reg
52 addli \ptr, \ptr, \delta
53 }
54 .endm
55
56 .macro pop_reg reg, ptr=sp, delta=4
57 {
58 lw \reg, \ptr
59 addli \ptr, \ptr, \delta
60 }
61 .endm
62
63 .macro pop_reg_zero reg, zreg, ptr=sp, delta=4
64 {
65 move \zreg, zero
66 lw \reg, \ptr
67 addi \ptr, \ptr, \delta
68 }
69 .endm
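	/*
	 * For instance (illustrative only), pushing and popping r5 via a
	 * scratch pointer in r52 expands from the macros above to a store
	 * or load bundled with a pointer adjustment:
	 *
	 *	push_reg r5, r52	->  { sw r52, r5 ; addli r52, r52, -4 }
	 *	pop_reg  r5, r52	->  { lw r5, r52 ; addli r52, r52, 4 }
	 */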
70
71 .macro push_extra_callee_saves reg
72 PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
73 push_reg r51, \reg
74 push_reg r50, \reg
75 push_reg r49, \reg
76 push_reg r48, \reg
77 push_reg r47, \reg
78 push_reg r46, \reg
79 push_reg r45, \reg
80 push_reg r44, \reg
81 push_reg r43, \reg
82 push_reg r42, \reg
83 push_reg r41, \reg
84 push_reg r40, \reg
85 push_reg r39, \reg
86 push_reg r38, \reg
87 push_reg r37, \reg
88 push_reg r36, \reg
89 push_reg r35, \reg
90 push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
91 .endm
92
93 .macro panic str
94 .pushsection .rodata, "a"
951:
96 .asciz "\str"
97 .popsection
98 {
99 moveli r0, lo16(1b)
100 }
101 {
102 auli r0, r0, ha16(1b)
103 jal panic
104 }
105 .endm
106
107#ifdef __COLLECT_LINKER_FEEDBACK__
108 .pushsection .text.intvec_feedback,"ax"
109intvec_feedback:
110 .popsection
111#endif
112
113 /*
114 * Default interrupt handler.
115 *
116 * vecnum is where we'll put this code.
117 * c_routine is the C routine we'll call.
118 *
119 * The C routine is passed two arguments:
120 * - A pointer to the pt_regs state.
121 * - The interrupt vector number.
122 *
123 * The "processing" argument specifies the code for processing
124 * the interrupt. Defaults to "handle_interrupt".
125 */
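	/*
	 * A typical invocation, taken from the vector table at the bottom
	 * of this file:
	 *
	 *	int_hand INT_ITLB_MISS, ITLB_MISS, \
	 *		 do_page_fault, handle_interrupt_no_single_step
	 *
	 * which places the stub at vector offset (INT_ITLB_MISS << 8) and
	 * eventually hands the saved pt_regs and vector number to
	 * do_page_fault().
	 */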
126 .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
127 .org (\vecnum << 8)
128intvec_\vecname:
129 .ifc \vecnum, INT_SWINT_1
130 blz TREG_SYSCALL_NR_NAME, sys_cmpxchg
131 .endif
132
133 /* Temporarily save a register so we have somewhere to work. */
134
135 mtspr SYSTEM_SAVE_1_1, r0
136 mfspr r0, EX_CONTEXT_1_1
137
138 /* The cmpxchg code clears sp to force us to reset it here on fault. */
139 {
140 bz sp, 2f
141 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
142 }
143
144 .ifc \vecnum, INT_DOUBLE_FAULT
145 /*
146 * For double-faults from user-space, fall through to the normal
147 * register save and stack setup path. Otherwise, it's the
148 * hypervisor giving us one last chance to dump diagnostics, and we
149 * branch to the kernel_double_fault routine to do so.
150 */
151 bz r0, 1f
152 j _kernel_double_fault
1531:
154 .else
155 /*
156 * If we're coming from user-space, then set sp to the top of
157 * the kernel stack. Otherwise, assume sp is already valid.
158 */
159 {
160 bnz r0, 0f
161 move r0, sp
162 }
163 .endif
164
165 .ifc \c_routine, do_page_fault
166 /*
167 * The page_fault handler may be downcalled directly by the
168 * hypervisor even when Linux is running and has ICS set.
169 *
170 * In this case the contents of EX_CONTEXT_1_1 reflect the
171 * previous fault and can't be relied on to choose whether or
172 * not to reinitialize the stack pointer. So we add a test
173 * to see whether SYSTEM_SAVE_1_2 has the high bit set,
174 * and if so we don't reinitialize sp, since we must be coming
175 * from Linux. (In fact the precise case is !(val & ~1),
176 * but any Linux PC has to have the high bit set.)
177 *
178 * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
179 * any path that turns into a downcall to one of our TLB handlers.
180 */
181 mfspr r0, SYSTEM_SAVE_1_2
182 {
183 blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
184 move r0, sp
185 }
186 .endif
187
1882:
189 /*
190 * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
191 * the current stack top in the higher bits. So we recover
192 * our stack top by just masking off the low bits, then
193 * point sp at the top aligned address on the actual stack page.
194 */
195 mfspr r0, SYSTEM_SAVE_1_0
196 mm r0, r0, zero, LOG2_THREAD_SIZE, 31
197
1980:
199 /*
200 * Align the stack mod 64 so we can properly predict what
201 * cache lines we need to write-hint to reduce memory fetch
202 * latency as we enter the kernel. The layout of memory is
203 * as follows, with cache line 0 at the lowest VA, and cache
204 * line 4 just below the r0 value this "andi" computes.
205 * Note that we never write to cache line 4, and we skip
206 * cache line 1 for syscalls.
207 *
208 * cache line 4: ptregs padding (two words)
209 * cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
210 * cache line 2: r30...r45
211 * cache line 1: r14...r29
212 * cache line 0: 2 x frame, r0..r13
213 */
214 andi r0, r0, -64
215
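	/*
	 * (With 32-bit registers, each cache line above holds exactly
	 * sixteen 4-byte words: e.g. cache line 0 is the two-word frame
	 * plus r0..r13, i.e. (2 + 14) * 4 = 64 bytes, and cache line 1
	 * is r14..r29, i.e. 16 * 4 = 64 bytes.)
	 */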
216 /*
217 * Push the first four registers on the stack, so that we can set
218 * them to vector-unique values before we jump to the common code.
219 *
220 * Registers are pushed on the stack as a struct pt_regs,
221 * with the sp initially just above the struct, and when we're
222 * done, sp points to the base of the struct, minus
223 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
224 *
225 * This routine saves just the first four registers, plus the
226 * stack context so we can do proper backtracing right away,
227 * and defers to handle_interrupt to save the rest.
228 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
229 */
230 addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
231 wh64 r0 /* cache line 3 */
232 {
233 sw r0, lr
234 addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
235 }
236 {
237 sw r0, sp
238 addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
239 }
240 {
241 sw sp, r52
242 addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
243 }
244 wh64 sp /* cache line 0 */
245 {
246 sw sp, r1
247 addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
248 }
249 {
250 sw sp, r2
251 addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
252 }
253 {
254 sw sp, r3
255 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
256 }
257 mfspr r0, EX_CONTEXT_1_0
258 .ifc \processing,handle_syscall
259 /*
260 * Bump the saved PC by one bundle so that when we return, we won't
261 * execute the same swint instruction again. We need to do this while
262 * we're in the critical section.
263 */
264 addi r0, r0, 8
265 .endif
266 {
267 sw sp, r0
268 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
269 }
270 mfspr r0, EX_CONTEXT_1_1
271 {
272 sw sp, r0
273 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
274 /*
275 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
276 * so that it gets passed through unchanged to the handler routine.
277 * Note that the .if conditional confusingly spans bundles.
278 */
279 .ifc \processing,handle_syscall
280 movei r0, \vecnum
281 }
282 {
283 sw sp, r0
284 .else
285 movei r1, \vecnum
286 }
287 {
288 sw sp, r1
289 .endif
290 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
291 }
292 mfspr r0, SYSTEM_SAVE_1_1 /* Original r0 */
293 {
294 sw sp, r0
295 addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
296 }
297 {
298 sw sp, zero /* write zero into "Next SP" frame pointer */
299 addi sp, sp, -4 /* leave SP pointing at bottom of frame */
300 }
301 .ifc \processing,handle_syscall
302 j handle_syscall
303 .else
304 /*
305 * Capture per-interrupt SPR context to registers.
306 * We overload the meaning of r3 on this path such that if its bit 31
307 * is set, we have to mask all interrupts including NMIs before
308 * clearing the interrupt critical section bit.
309 * See discussion below at "finish_interrupt_save".
310 */
311 .ifc \c_routine, do_page_fault
312 mfspr r2, SYSTEM_SAVE_1_3 /* address of page fault */
313 mfspr r3, SYSTEM_SAVE_1_2 /* info about page fault */
314 .else
315 .ifc \vecnum, INT_DOUBLE_FAULT
316 {
317 mfspr r2, SYSTEM_SAVE_1_2 /* double fault info from HV */
318 movei r3, 0
319 }
320 .else
321 .ifc \c_routine, do_trap
322 {
323 mfspr r2, GPV_REASON
324 movei r3, 0
325 }
326 .else
327 .ifc \c_routine, op_handle_perf_interrupt
328 {
329 mfspr r2, PERF_COUNT_STS
330 movei r3, -1 /* not used, but set for consistency */
331 }
332 .else
333#if CHIP_HAS_AUX_PERF_COUNTERS()
334 .ifc \c_routine, op_handle_aux_perf_interrupt
335 {
336 mfspr r2, AUX_PERF_COUNT_STS
337 movei r3, -1 /* not used, but set for consistency */
338 }
339 .else
340#endif
341 movei r3, 0
342#if CHIP_HAS_AUX_PERF_COUNTERS()
343 .endif
344#endif
345 .endif
346 .endif
347 .endif
348 .endif
349 /* Put function pointer in r0 */
350 moveli r0, lo16(\c_routine)
351 {
352 auli r0, r0, ha16(\c_routine)
353 j \processing
354 }
355 .endif
356 ENDPROC(intvec_\vecname)
357
358#ifdef __COLLECT_LINKER_FEEDBACK__
359 .pushsection .text.intvec_feedback,"ax"
360 .org (\vecnum << 5)
361 FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
362 jrp lr
363 .popsection
364#endif
365
366 .endm
367
368
369 /*
370 * Save the rest of the registers that we didn't save in the actual
371 * vector itself. We can't use r0-r10 inclusive here.
372 */
373 .macro finish_interrupt_save, function
374
375 /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
376 PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
377 {
378 .ifc \function,handle_syscall
379 sw r52, r0
380 .else
381 sw r52, zero
382 .endif
383 PTREGS_PTR(r52, PTREGS_OFFSET_TP)
384 }
385
386 /*
387 * For ordinary syscalls, we save neither caller- nor callee-
388 * save registers, since the syscall invoker doesn't expect the
389 * caller-saves to be saved, and the called kernel functions will
390 * take care of saving the callee-saves for us.
391 *
392 * For interrupts we save just the caller-save registers. Saving
393 * them is required (since the "caller" can't save them). Again,
394 * the called kernel functions will restore the callee-save
395 * registers for us appropriately.
396 *
397 * On return, we normally restore nothing special for syscalls,
398 * and just the caller-save registers for interrupts.
399 *
400 * However, there are some important caveats to all this:
401 *
402 * - We always save a few callee-save registers to give us
403 * some scratchpad registers to carry across function calls.
404 *
405 * - fork/vfork/etc require us to save all the callee-save
406 * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
407 *
408 * - We always save r0..r5 and r10 for syscalls, since we need
409 * to reload them a bit later for the actual kernel call, and
410 * since we might need them for -ERESTARTNOINTR, etc.
411 *
412 * - Before invoking a signal handler, we save the unsaved
413 * callee-save registers so they are visible to the
414 * signal handler or any ptracer.
415 *
416 * - If the unsaved callee-save registers are modified, we set
417 * a bit in pt_regs so we know to reload them from pt_regs
418 * and not just rely on the kernel function unwinding.
419 * (Done for ptrace register writes and SA_SIGINFO handler.)
420 */
421 {
422 sw r52, tp
423 PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
424 }
425 wh64 r52 /* cache line 2 */
426 push_reg r33, r52
427 push_reg r32, r52
428 push_reg r31, r52
429 .ifc \function,handle_syscall
430 push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
431 push_reg TREG_SYSCALL_NR_NAME, r52, \
432 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
433 .else
434
435 push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
436 wh64 r52 /* cache line 1 */
437 push_reg r29, r52
438 push_reg r28, r52
439 push_reg r27, r52
440 push_reg r26, r52
441 push_reg r25, r52
442 push_reg r24, r52
443 push_reg r23, r52
444 push_reg r22, r52
445 push_reg r21, r52
446 push_reg r20, r52
447 push_reg r19, r52
448 push_reg r18, r52
449 push_reg r17, r52
450 push_reg r16, r52
451 push_reg r15, r52
452 push_reg r14, r52
453 push_reg r13, r52
454 push_reg r12, r52
455 push_reg r11, r52
456 push_reg r10, r52
457 push_reg r9, r52
458 push_reg r8, r52
459 push_reg r7, r52
460 push_reg r6, r52
461
462 .endif
463
464 push_reg r5, r52
465 sw r52, r4
466
467 /* Load tp with our per-cpu offset. */
468#ifdef CONFIG_SMP
469 {
470 mfspr r20, SYSTEM_SAVE_1_0
471 moveli r21, lo16(__per_cpu_offset)
472 }
473 {
474 auli r21, r21, ha16(__per_cpu_offset)
475 mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1
476 }
477 s2a r20, r20, r21
478 lw tp, r20
479#else
480 move tp, zero
481#endif
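	/*
	 * Roughly, in C terms, the SMP path above computes (a sketch):
	 *
	 *	cpu = SYSTEM_SAVE_1_0 & (THREAD_SIZE - 1);
	 *	tp  = __per_cpu_offset[cpu];
	 *
	 * using "mm" to mask off the stack-top bits and "s2a" to scale
	 * the cpu number into a word index.
	 */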
482
483 /*
484 * If we will be returning to the kernel, we will need to
485 * reset the interrupt masks to the state they had before.
486 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
487 * We load flags in r32 here so we can jump to .Lrestore_regs
488 * directly after do_page_fault_ics() if necessary.
489 */
490 mfspr r32, EX_CONTEXT_1_1
491 {
492 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
493 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
494 }
495 bzt r32, 1f /* zero if from user space */
496 IRQS_DISABLED(r32) /* zero if irqs enabled */
497#if PT_FLAGS_DISABLE_IRQ != 1
498# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
499#endif
5001:
501 .ifnc \function,handle_syscall
502 /* Record the fact that we saved the caller-save registers above. */
503 ori r32, r32, PT_FLAGS_CALLER_SAVES
504 .endif
505 sw r21, r32
506
507#ifdef __COLLECT_LINKER_FEEDBACK__
508 /*
509 * Notify the feedback routines that we were in the
510 * appropriate fixed interrupt vector area. Note that we
511 * still have ICS set at this point, so we can't invoke any
512 * atomic operations or we will panic. The feedback
513 * routines internally preserve r0..r10 and r30 up.
514 */
515 .ifnc \function,handle_syscall
516 shli r20, r1, 5
517 .else
518 moveli r20, INT_SWINT_1 << 5
519 .endif
520 addli r20, r20, lo16(intvec_feedback)
521 auli r20, r20, ha16(intvec_feedback)
522 jalr r20
523
524 /* And now notify the feedback routines that we are here. */
525 FEEDBACK_ENTER(\function)
526#endif
527
528 /*
529 * we've captured enough state to the stack (including in
530 * particular our EX_CONTEXT state) that we can now release
531 * the interrupt critical section and replace it with our
532 * standard "interrupts disabled" mask value. This allows
533 * synchronous interrupts (and profile interrupts) to punch
534 * through from this point onwards.
535 *
536 * If bit 31 of r3 is set during a non-NMI interrupt, we know we
537 * are on the path where the hypervisor has punched through our
538 * ICS with a page fault, so we call out to do_page_fault_ics()
539 * to figure out what to do with it. If the fault was in
540 * an atomic op, we unlock the atomic lock, adjust the
541 * saved register state a little, and return "zero" in r4,
542 * falling through into the normal page-fault interrupt code.
543 * If the fault was in a kernel-space atomic operation, then
544 * do_page_fault_ics() resolves it itself, returns "one" in r4,
545 * and as a result goes directly to restoring registers and iret,
546 * without trying to adjust the interrupt masks at all.
547 * The do_page_fault_ics() API involves passing and returning
548 * a five-word struct (in registers) to avoid writing the
549 * save and restore code here.
550 */
551 .ifc \function,handle_nmi
552 IRQ_DISABLE_ALL(r20)
553 .else
554 .ifnc \function,handle_syscall
555 bgezt r3, 1f
556 {
557 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
558 jal do_page_fault_ics
559 }
560 FEEDBACK_REENTER(\function)
561 bzt r4, 1f
562 j .Lrestore_regs
5631:
564 .endif
565 IRQ_DISABLE(r20, r21)
566 .endif
567 mtspr INTERRUPT_CRITICAL_SECTION, zero
568
569#if CHIP_HAS_WH64()
570 /*
571 * Prepare the first 256 stack bytes to be rapidly accessible
572 * without having to fetch the background data. We don't really
573 * know how far to write-hint, but kernel stacks generally
574 * aren't that big, and write-hinting here does take some time.
575 */
576 addi r52, sp, -64
577 {
578 wh64 r52
579 addi r52, r52, -64
580 }
581 {
582 wh64 r52
583 addi r52, r52, -64
584 }
585 {
586 wh64 r52
587 addi r52, r52, -64
588 }
589 wh64 r52
590#endif
591
592#ifdef CONFIG_TRACE_IRQFLAGS
593 .ifnc \function,handle_nmi
594 /*
595 * We finally have enough state set up to notify the irq
596 * tracing code that irqs were disabled on entry to the handler.
597 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
598 * For syscalls, we already have the register state saved away
599 * on the stack, so we don't bother to do any register saves here,
600 * and later we pop the registers back off the kernel stack.
601 * For interrupt handlers, save r0-r3 in callee-saved registers.
602 */
603 .ifnc \function,handle_syscall
604 { move r30, r0; move r31, r1 }
605 { move r32, r2; move r33, r3 }
606 .endif
607 TRACE_IRQS_OFF
608 .ifnc \function,handle_syscall
609 { move r0, r30; move r1, r31 }
610 { move r2, r32; move r3, r33 }
611 .endif
612 .endif
613#endif
614
615 .endm
616
617 .macro check_single_stepping, kind, not_single_stepping
618 /*
619 * Check for single stepping in user-level priv
620 * kind can be "normal", "ill", or "syscall"
621 * At end, if fall-thru
622 * r29: thread_info->step_state
623 * r28: &pt_regs->pc
624 * r27: pt_regs->pc
625 * r26: thread_info->step_state->buffer
626 */
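	/*
	 * Typical use, as in handle_interrupt below:
	 *
	 *	check_single_stepping normal, .Ldispatch_interrupt
	 *
	 * which falls through (with the registers above loaded) if we are
	 * single-stepping, or branches to the given label if not.
	 */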
627
628 /* Check for single stepping */
629 GET_THREAD_INFO(r29)
630 {
631 /* Get pointer to field holding step state */
632 addi r29, r29, THREAD_INFO_STEP_STATE_OFFSET
633
634 /* Get pointer to EX1 in register state */
635 PTREGS_PTR(r27, PTREGS_OFFSET_EX1)
636 }
637 {
638 /* Get pointer to field holding PC */
639 PTREGS_PTR(r28, PTREGS_OFFSET_PC)
640
641 /* Load the pointer to the step state */
642 lw r29, r29
643 }
644 /* Load EX1 */
645 lw r27, r27
646 {
647 /* Points to flags */
648 addi r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET
649
650 /* No single stepping if there is no step state structure */
651 bzt r29, \not_single_stepping
652 }
653 {
654 /* mask off ICS and any other high bits */
655 andi r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK
656
657 /* Load pointer to single step instruction buffer */
658 lw r26, r29
659 }
660 /* Check priv state */
661 bnz r27, \not_single_stepping
662
663 /* Get flags */
664 lw r22, r23
665 {
666 /* Branch if single-step mode not enabled */
667 bbnst r22, \not_single_stepping
668
669 /* Clear enabled flag */
670 andi r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED
671 }
672 .ifc \kind,normal
673 {
674 /* Load PC */
675 lw r27, r28
676
677 /* Point to the entry containing the original PC */
678 addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
679 }
680 {
681 /* Disable single stepping flag */
682 sw r23, r22
683 }
684 {
685 /* Get the original pc */
686 lw r24, r24
687
688 /* See if the PC is at the start of the single step buffer */
689 seq r25, r26, r27
690 }
691 /*
692 * NOTE: it is really expected that the PC be in the single step buffer
693 * at this point
694 */
695 bzt r25, \not_single_stepping
696
697 /* Restore the original PC */
698 sw r28, r24
699 .else
700 .ifc \kind,syscall
701 {
702 /* Load PC */
703 lw r27, r28
704
705 /* Point to the entry containing the next PC */
706 addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
707 }
708 {
709 /* Increment the stopped PC by the bundle size */
710 addi r26, r26, 8
711
712 /* Disable single stepping flag */
713 sw r23, r22
714 }
715 {
716 /* Get the next pc */
717 lw r24, r24
718
719 /*
720 * See if the PC is one bundle past the start of the
721 * single step buffer
722 */
723 seq r25, r26, r27
724 }
725 {
726 /*
727 * NOTE: it is really expected that the PC be in the
728 * single step buffer at this point
729 */
730 bzt r25, \not_single_stepping
731 }
732 /* Set to the next PC */
733 sw r28, r24
734 .else
735 {
736 /* Point to 3rd bundle in buffer */
737 addi r25, r26, 16
738
739 /* Load PC */
740 lw r27, r28
741 }
742 {
743 /* Disable single stepping flag */
744 sw r23, r22
745
746 /* See if the PC is in the single step buffer */
747 slte_u r24, r26, r27
748 }
749 {
750 slte_u r25, r27, r25
751
752 /*
753 * NOTE: it is really expected that the PC be in the
754 * single step buffer at this point
755 */
756 bzt r24, \not_single_stepping
757 }
758 bzt r25, \not_single_stepping
759 .endif
760 .endif
761 .endm
762
763 /*
764 * Redispatch a downcall.
765 */
766 .macro dc_dispatch vecnum, vecname
767 .org (\vecnum << 8)
768intvec_\vecname:
769 j hv_downcall_dispatch
770 ENDPROC(intvec_\vecname)
771 .endm
772
773 /*
774 * Common code for most interrupts. The C function we're eventually
775 * going to is in r0, and the faultnum is in r1; the original
776 * values for those registers are on the stack.
777 */
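	/*
	 * In C terms, the indirect call below is roughly (a sketch):
	 *
	 *	c_routine(struct pt_regs *regs, int faultnum);
	 *
	 * with the pt_regs pointer placed in r0 and the vector number in
	 * r1, as described for the int_hand macro above.
	 */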
778 .pushsection .text.handle_interrupt,"ax"
779handle_interrupt:
780 finish_interrupt_save handle_interrupt
781
782 /*
783 * Check if we are single-stepping at user level. If so, then
784 * we need to restore the PC.
785 */
786
787 check_single_stepping normal, .Ldispatch_interrupt
788.Ldispatch_interrupt:
789
790 /* Jump to the C routine; it should enable irqs as soon as possible. */
791 {
792 jalr r0
793 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
794 }
795 FEEDBACK_REENTER(handle_interrupt)
796 {
797 movei r30, 0 /* not an NMI */
798 j interrupt_return
799 }
800 STD_ENDPROC(handle_interrupt)
801
802/*
803 * This routine takes a boolean in r30 indicating if this is an NMI.
804 * If so, we also expect a boolean in r31 indicating whether to
805 * re-enable the oprofile interrupts.
806 */
807STD_ENTRY(interrupt_return)
808 /* If we're resuming to kernel space, don't check thread flags. */
809 {
810 bnz r30, .Lrestore_all /* NMIs don't special-case user-space */
811 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
812 }
813 lw r29, r29
814 andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
815 {
816 bzt r29, .Lresume_userspace
817 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
818 }
819
820 /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
821 {
822 lw r28, r29
823 moveli r27, lo16(_cpu_idle_nap)
824 }
825 {
826 auli r27, r27, ha16(_cpu_idle_nap)
827 }
828 {
829 seq r27, r27, r28
830 }
831 {
832 bbns r27, .Lrestore_all
833 addi r28, r28, 8
834 }
835 sw r29, r28
836 j .Lrestore_all
837
838.Lresume_userspace:
839 FEEDBACK_REENTER(interrupt_return)
840
841 /*
842 * Disable interrupts so as to make sure we don't
843 * miss an interrupt that sets any of the thread flags (like
844 * need_resched or sigpending) between sampling and the iret.
845 * Routines like schedule() or do_signal() may re-enable
846 * interrupts before returning.
847 */
848 IRQ_DISABLE(r20, r21)
849 TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
850
851 /* Get base of stack in r32; note r30/31 are used as arguments here. */
852 GET_THREAD_INFO(r32)
853
854
855 /* Check to see if there is any work to do before returning to user. */
856 {
857 addi r29, r32, THREAD_INFO_FLAGS_OFFSET
858 moveli r28, lo16(_TIF_ALLWORK_MASK)
859 }
860 {
861 lw r29, r29
862 auli r28, r28, ha16(_TIF_ALLWORK_MASK)
863 }
864 and r28, r29, r28
865 bnz r28, .Lwork_pending
866
867 /*
868 * In the NMI case we
869 * omit the call to single_process_check_nohz, which normally checks
870 * to see if we should start or stop the scheduler tick, because
871 * we can't call arbitrary Linux code from an NMI context.
872 * We always call the homecache TLB deferral code to re-trigger
873 * the deferral mechanism.
874 *
875 * The other chunk of responsibility this code has is to reset the
876 * interrupt masks appropriately to reset irqs and NMIs. We have
877 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
878 * lockdep-type stuff, but we can't set ICS until afterwards, since
879 * ICS can only be used in very tight chunks of code to avoid
880 * tripping over various assertions that it is off.
881 *
882 * (There is what looks like a window of vulnerability here since
883 * we might take a profile interrupt between the two SPR writes
884 * that set the mask, but since we write the low SPR word first,
885 * and our interrupt entry code checks the low SPR word, any
886 * profile interrupt will actually disable interrupts in both SPRs
887 * before returning, which is OK.)
888 */
889.Lrestore_all:
890 PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
891 {
892 lw r0, r0
893 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
894 }
895 {
896 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
897 lw r32, r32
898 }
899 bnz r0, 1f
900 j 2f
901#if PT_FLAGS_DISABLE_IRQ != 1
902# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
903#endif
9041: bbnst r32, 2f
905 IRQ_DISABLE(r20,r21)
906 TRACE_IRQS_OFF
907 movei r0, 1
908 mtspr INTERRUPT_CRITICAL_SECTION, r0
909 bzt r30, .Lrestore_regs
910 j 3f
9112: TRACE_IRQS_ON
912 movei r0, 1
913 mtspr INTERRUPT_CRITICAL_SECTION, r0
914 IRQ_ENABLE(r20, r21)
915 bzt r30, .Lrestore_regs
9163:
917
918
919 /*
920 * We now commit to returning from this interrupt, since we will be
921 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
922 * frame. No calls should be made to any other code after this point.
923 * This code should only be entered with ICS set.
924 * r32 must still be set to ptregs.flags.
925 * We launch loads to each cache line separately first, so we can
926 * get some parallelism out of the memory subsystem.
927 * We start zeroing caller-saved registers throughout, since
928 * that will save some cycles if this turns out to be a syscall.
929 */
930.Lrestore_regs:
931 FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
932
933 /*
934 * Rotate so we have one high bit and one low bit to test.
935 * - low bit says whether to restore all the callee-saved registers,
936 * or just r30-r33, and r52 up.
937 * - high bit (i.e. sign bit) says whether to restore all the
938 * caller-saved registers, or just r0.
939 */
940#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
941# error Rotate trick does not work :-)
942#endif
943 {
944 rli r20, r32, 30
945 PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
946 }
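	/*
	 * Concretely: PT_FLAGS_CALLER_SAVES is bit 1 (value 2) and
	 * PT_FLAGS_RESTORE_REGS is bit 2 (value 4), so rotating left
	 * by 30 moves bit 2 to bit 0 (2 + 30 wraps to 0) and bit 1 to
	 * bit 31, giving the low-bit and sign-bit tests described above.
	 */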
947
948 /*
949 * Load cache lines 0, 2, and 3 in that order, then use
950 * the last loaded value, which makes it likely that the other
951 * cache lines have also loaded, at which point we should be
952 * able to safely read all the remaining words on those cache
953 * lines without waiting for the memory subsystem.
954 */
955 pop_reg_zero r0, r1, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
956 pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
957 pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
958 pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
959 {
960 mtspr EX_CONTEXT_1_0, r21
961 move r5, zero
962 }
963 {
964 mtspr EX_CONTEXT_1_1, lr
965 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
966 }
967
968 /* Restore callee-saveds that we actually use. */
969 pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
970 pop_reg_zero r31, r7
971 pop_reg_zero r32, r8
972 pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
973
974 /*
975 * If we modified other callee-saveds, restore them now.
976 * This is rare, but could be via ptrace or signal handler.
977 */
978 {
979 move r10, zero
980 bbs r20, .Lrestore_callees
981 }
982.Lcontinue_restore_regs:
983
984 /* Check if we're returning from a syscall. */
985 {
986 move r11, zero
987 blzt r20, 1f /* no, so go restore callee-save registers */
988 }
989
990 /*
991 * Check if we're returning to userspace.
992 * Note that if we're not, we don't worry about zeroing everything.
993 */
994 {
995 addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
996 bnz lr, .Lkernel_return
997 }
998
999 /*
1000 * On return from syscall, we've restored r0 from pt_regs, but we
1001 * clear the remainder of the caller-saved registers. We could
1002 * restore the syscall arguments, but there's not much point,
1003 * and it ensures user programs aren't trying to use the
1004 * caller-saves if we clear them, as well as avoiding leaking
1005 * kernel pointers into userspace.
1006 */
1007 pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
1008 pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
1009 {
1010 lw sp, sp
1011 move r14, zero
1012 move r15, zero
1013 }
1014 { move r16, zero; move r17, zero }
1015 { move r18, zero; move r19, zero }
1016 { move r20, zero; move r21, zero }
1017 { move r22, zero; move r23, zero }
1018 { move r24, zero; move r25, zero }
1019 { move r26, zero; move r27, zero }
1020 { move r28, zero; move r29, zero }
1021 iret
1022
1023 /*
1024 * Not a syscall, so restore caller-saved registers.
1025 * First kick off a load for cache line 1, which we're touching
1026 * for the first time here.
1027 */
1028 .align 64
10291: pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
1030 pop_reg r1
1031 pop_reg r2
1032 pop_reg r3
1033 pop_reg r4
1034 pop_reg r5
1035 pop_reg r6
1036 pop_reg r7
1037 pop_reg r8
1038 pop_reg r9
1039 pop_reg r10
1040 pop_reg r11
1041 pop_reg r12
1042 pop_reg r13
1043 pop_reg r14
1044 pop_reg r15
1045 pop_reg r16
1046 pop_reg r17
1047 pop_reg r18
1048 pop_reg r19
1049 pop_reg r20
1050 pop_reg r21
1051 pop_reg r22
1052 pop_reg r23
1053 pop_reg r24
1054 pop_reg r25
1055 pop_reg r26
1056 pop_reg r27
1057 pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
1058 /* r29 already restored above */
1059 bnz lr, .Lkernel_return
1060 pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
1061 pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
1062 lw sp, sp
1063 iret
1064
1065 /*
1066 * We can't restore tp when in kernel mode, since a thread might
1067 * have migrated from another cpu and brought a stale tp value.
1068 */
1069.Lkernel_return:
1070 pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
1071 lw sp, sp
1072 iret
1073
1074 /* Restore callee-saved registers from r34 to r51. */
1075.Lrestore_callees:
1076 addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
1077 pop_reg r34
1078 pop_reg r35
1079 pop_reg r36
1080 pop_reg r37
1081 pop_reg r38
1082 pop_reg r39
1083 pop_reg r40
1084 pop_reg r41
1085 pop_reg r42
1086 pop_reg r43
1087 pop_reg r44
1088 pop_reg r45
1089 pop_reg r46
1090 pop_reg r47
1091 pop_reg r48
1092 pop_reg r49
1093 pop_reg r50
1094 pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
1095 j .Lcontinue_restore_regs
1096
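	/*
	 * The work-pending path below is, roughly, in C terms (a sketch):
	 *
	 *	if (flags & _TIF_NEED_RESCHED)
	 *		schedule();
	 *	else if (flags & _TIF_ASYNC_TLB)
	 *		do_async_page_fault(regs);
	 *	else if (flags & _TIF_SIGPENDING)
	 *		do_signal(regs);
	 *	else if (flags & _TIF_SINGLESTEP)
	 *		single_step_once(regs);
	 *
	 * rechecking the flags via .Lresume_userspace after each handler,
	 * except single-step, which returns via .Lrestore_all.
	 */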
1097.Lwork_pending:
1098 /* Mask the reschedule flag */
1099 andi r28, r29, _TIF_NEED_RESCHED
1100
1101 {
1102 /*
1103 * If the NEED_RESCHED flag is set, we call schedule(), which
1104 * may drop this context right here and go do something else.
1105 * On return, jump back to .Lresume_userspace and recheck.
1106 */
1107 bz r28, .Lasync_tlb
1108
1109 /* Mask the async-tlb flag */
1110 andi r28, r29, _TIF_ASYNC_TLB
1111 }
1112
1113 jal schedule
1114 FEEDBACK_REENTER(interrupt_return)
1115
1116 /* Reload the flags and check again */
1117 j .Lresume_userspace
1118
1119.Lasync_tlb:
1120 {
1121 bz r28, .Lneed_sigpending
1122
1123 /* Mask the sigpending flag */
1124 andi r28, r29, _TIF_SIGPENDING
1125 }
1126
1127 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1128 jal do_async_page_fault
1129 FEEDBACK_REENTER(interrupt_return)
1130
1131 /*
1132 * Go restart the "resume userspace" process. We may have
1133 * fired a signal, and we need to disable interrupts again.
1134 */
1135 j .Lresume_userspace
1136
1137.Lneed_sigpending:
1138 /*
1139 * At this point we are either doing signal handling or single-step,
1140 * so either way make sure we have all the registers saved.
1141 */
1142 push_extra_callee_saves r0
1143
1144 {
1145 /* If no signal pending, skip to singlestep check */
1146 bz r28, .Lneed_singlestep
1147
1148 /* Mask the singlestep flag */
1149 andi r28, r29, _TIF_SINGLESTEP
1150 }
1151
1152 jal do_signal
1153 FEEDBACK_REENTER(interrupt_return)
1154
1155 /* Reload the flags and check again */
1156 j .Lresume_userspace
1157
1158.Lneed_singlestep:
1159 {
1160 /* Get a pointer to the EX1 field */
1161 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
1162
1163 /* If we get here, our bit must be set. */
1164 bz r28, .Lwork_confusion
1165 }
1166 /* If we are in priv mode, don't single step */
1167 lw r28, r29
1168 andi r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
1169 bnz r28, .Lrestore_all
1170
1171 /* Allow interrupts within the single step code */
1172 TRACE_IRQS_ON /* Note: clobbers registers r0-r29 */
1173 IRQ_ENABLE(r20, r21)
1174
1175 /* try to single-step the current instruction */
1176 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1177 jal single_step_once
1178 FEEDBACK_REENTER(interrupt_return)
1179
1180 /* Re-disable interrupts. TRACE_IRQS_OFF in .Lrestore_all. */
1181 IRQ_DISABLE(r20,r21)
1182
1183 j .Lrestore_all
1184
1185.Lwork_confusion:
1186 move r0, r28
1187 panic "thread_info allwork flags unhandled on userspace resume: %#x"
1188
1189 STD_ENDPROC(interrupt_return)
1190
1191 /*
1192 * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
1193 * before returning, so we can properly get more downcalls.
1194 */
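	/*
	 * Roughly, in C terms, the body below does (a sketch; names here
	 * are illustrative):
	 *
	 *	interrupts_enabled_mask &= ~(1ULL << INT_INTCTRL_1);
	 *	c_routine(regs, faultnum);
	 *	interrupts_enabled_mask |= (1ULL << INT_INTCTRL_1);
	 *
	 * operating on the high word of the mask, per the INT_INTCTRL_1
	 * range check at the top of this file.
	 */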
1195 .pushsection .text.handle_interrupt_downcall,"ax"
1196handle_interrupt_downcall:
1197 finish_interrupt_save handle_interrupt_downcall
1198 check_single_stepping normal, .Ldispatch_downcall
1199.Ldispatch_downcall:
1200
1201 /* Clear INTCTRL_1 from the set of interrupts we ever enable. */
1202 GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
1203 {
1204 addi r30, r30, 4
1205 movei r31, INT_MASK(INT_INTCTRL_1)
1206 }
1207 {
1208 lw r20, r30
1209 nor r21, r31, zero
1210 }
1211 and r20, r20, r21
1212 sw r30, r20
1213
1214 {
1215 jalr r0
1216 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1217 }
1218 FEEDBACK_REENTER(handle_interrupt_downcall)
1219
1220 /* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
1221 lw r20, r30
1222 or r20, r20, r31
1223 sw r30, r20
1224
1225 {
1226 movei r30, 0 /* not an NMI */
1227 j interrupt_return
1228 }
1229 STD_ENDPROC(handle_interrupt_downcall)
1230
1231 /*
1232 * Some interrupts don't check for single stepping
1233 */
1234 .pushsection .text.handle_interrupt_no_single_step,"ax"
1235handle_interrupt_no_single_step:
1236 finish_interrupt_save handle_interrupt_no_single_step
1237 {
1238 jalr r0
1239 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1240 }
1241 FEEDBACK_REENTER(handle_interrupt_no_single_step)
1242 {
1243 movei r30, 0 /* not an NMI */
1244 j interrupt_return
1245 }
1246 STD_ENDPROC(handle_interrupt_no_single_step)
1247
1248 /*
1249 * "NMI" interrupts mask ALL interrupts before calling the
1250 * handler, and don't check thread flags, etc., on the way
1251 * back out. In general, the only things we do here for NMIs
1252 * are the register save/restore, fixing the PC if we were
1253 * doing single step, and the dataplane kernel-TLB management.
1254 * We don't (for example) deal with start/stop of the sched tick.
1255 */
1256 .pushsection .text.handle_nmi,"ax"
1257handle_nmi:
1258 finish_interrupt_save handle_nmi
1259 check_single_stepping normal, .Ldispatch_nmi
1260.Ldispatch_nmi:
1261 {
1262 jalr r0
1263 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1264 }
1265 FEEDBACK_REENTER(handle_nmi)
1266 j interrupt_return
1267 STD_ENDPROC(handle_nmi)
1268
1269 /*
1270 * Parallel code for syscalls to handle_interrupt.
1271 */
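	/*
	 * The syscall dispatch below is, roughly, in C terms (a sketch):
	 *
	 *	if (syscall_nr < __NR_syscalls)
	 *		<saved r0 in pt_regs> = sys_call_table[syscall_nr](r0..r5);
	 *	else
	 *		<saved r0 in pt_regs> = -ENOSYS;
	 *
	 * with the arguments reloaded from pt_regs first, and
	 * do_syscall_trace called on either side when _TIF_SYSCALL_TRACE
	 * is set.
	 */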
1272 .pushsection .text.handle_syscall,"ax"
1273handle_syscall:
1274 finish_interrupt_save handle_syscall
1275
1276 /*
1277 * Check if we are single-stepping at user level. If so, then
1278 * we need to restore the PC.
1279 */
1280 check_single_stepping syscall, .Ldispatch_syscall
1281.Ldispatch_syscall:
1282
1283 /* Enable irqs. */
1284 TRACE_IRQS_ON
1285 IRQ_ENABLE(r20, r21)
1286
1287 /* Bump the counter for syscalls made on this tile. */
1288 moveli r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
1289 auli r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
1290 add r20, r20, tp
1291 lw r21, r20
1292 addi r21, r21, 1
1293 sw r20, r21
1294
1295 /* Trace syscalls, if requested. */
1296 GET_THREAD_INFO(r31)
1297 addi r31, r31, THREAD_INFO_FLAGS_OFFSET
1298 lw r30, r31
1299 andi r30, r30, _TIF_SYSCALL_TRACE
1300 bzt r30, .Lrestore_syscall_regs
1301 jal do_syscall_trace
1302 FEEDBACK_REENTER(handle_syscall)
1303
1304 /*
1305 * We always reload our registers from the stack at this
1306 * point. They might be valid, if we didn't build with
1307 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
1308 * doing syscall tracing, but there are enough cases now that it
1309 * seems simplest just to do the reload unconditionally.
1310 */
1311.Lrestore_syscall_regs:
1312 PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
1313 pop_reg r0, r11
1314 pop_reg r1, r11
1315 pop_reg r2, r11
1316 pop_reg r3, r11
1317 pop_reg r4, r11
1318 pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
1319 pop_reg TREG_SYSCALL_NR_NAME, r11
1320
1321 /* Ensure that the syscall number is within the legal range. */
1322 moveli r21, __NR_syscalls
1323 {
1324 slt_u r21, TREG_SYSCALL_NR_NAME, r21
1325 moveli r20, lo16(sys_call_table)
1326 }
1327 {
1328 bbns r21, .Linvalid_syscall
1329 auli r20, r20, ha16(sys_call_table)
1330 }
1331 s2a r20, TREG_SYSCALL_NR_NAME, r20
1332 lw r20, r20
1333
1334 /* Jump to syscall handler. */
1335 jalr r20; .Lhandle_syscall_link:
1336 FEEDBACK_REENTER(handle_syscall)
1337
1338 /*
1339 * Write our r0 onto the stack so it gets restored instead
1340 * of whatever the user had there before.
1341 */
1342 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1343 sw r29, r0
1344
1345 /* Do syscall trace again, if requested. */
1346 lw r30, r31
1347 andi r30, r30, _TIF_SYSCALL_TRACE
1348 bzt r30, 1f
1349 jal do_syscall_trace
1350 FEEDBACK_REENTER(handle_syscall)
13511: j .Lresume_userspace /* jump into middle of interrupt_return */
1352
1353.Linvalid_syscall:
1354 /* Report an invalid syscall back to the user program */
1355 {
1356 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1357 movei r28, -ENOSYS
1358 }
1359 sw r29, r28
1360 j .Lresume_userspace /* jump into middle of interrupt_return */
1361 STD_ENDPROC(handle_syscall)
1362
1363 /* Return the address for oprofile to suppress in backtraces. */
1364STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
1365 lnk r0
1366 {
1367 addli r0, r0, .Lhandle_syscall_link - .
1368 jrp lr
1369 }
1370 STD_ENDPROC(handle_syscall_link_address)
1371
1372STD_ENTRY(ret_from_fork)
1373 jal sim_notify_fork
1374 jal schedule_tail
1375 FEEDBACK_REENTER(ret_from_fork)
1376 j .Lresume_userspace /* jump into middle of interrupt_return */
1377 STD_ENDPROC(ret_from_fork)
1378
1379 /*
1380 * Code for ill interrupt.
1381 */
1382 .pushsection .text.handle_ill,"ax"
1383handle_ill:
1384 finish_interrupt_save handle_ill
1385
1386 /*
1387 * Check if we are single-stepping at user level. If so, then
1388 * we need to restore the PC.
1389 */
1390 check_single_stepping ill, .Ldispatch_normal_ill
1391
1392 {
1393 /* See if the PC is the 1st bundle in the buffer */
1394 seq r25, r27, r26
1395
1396 /* Point to the 2nd bundle in the buffer */
1397 addi r26, r26, 8
1398 }
1399 {
1400 /* Point to the original pc */
1401 addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
1402
1403 /* Branch if the PC is the 1st bundle in the buffer */
1404 bnz r25, 3f
1405 }
1406 {
1407 /* See if the PC is the 2nd bundle of the buffer */
1408 seq r25, r27, r26
1409
1410 /* Set PC to next instruction */
1411 addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
1412 }
1413 {
1414 /* Point to flags */
1415 addi r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET
1416
1417 /* Branch if PC is in the second bundle */
1418 bz r25, 2f
1419 }
1420 /* Load flags */
1421 lw r25, r25
1422 {
1423 /*
1424 * Get the offset for the register to restore
1425 * Note: the lower bound is 2, so we have implicit scaling by 4.
1426 * No multiplication of the register number by the size of a register
1427 * is needed.
1428 */
1429 mm r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
1430 SINGLESTEP_STATE_TARGET_UB
1431
1432 /* Mask Rewrite_LR */
1433 andi r25, r25, SINGLESTEP_STATE_MASK_UPDATE
1434 }
1435 {
1436 addi r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET
1437
1438 /* Don't rewrite temp register */
1439 bz r25, 3f
1440 }
1441 {
1442 /* Get the temp value */
1443 lw r29, r29
1444
1445 /* Point to where the register is stored */
1446 add r27, r27, sp
1447 }
1448
1449 /* Add in the C ABI save area size to the register offset */
1450 addi r27, r27, C_ABI_SAVE_AREA_SIZE
1451
1452 /* Restore the user's register with the temp value */
1453 sw r27, r29
1454 j 3f
1455
14562:
1457 /* Must be in the third bundle */
1458 addi r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET
1459
14603:
1461 /* set PC and continue */
1462 lw r26, r24
1463 sw r28, r26
1464
1465 /* Clear TIF_SINGLESTEP */
1466 GET_THREAD_INFO(r0)
1467
1468 addi r1, r0, THREAD_INFO_FLAGS_OFFSET
1469 {
1470 lw r2, r1
1471 addi r0, r0, THREAD_INFO_TASK_OFFSET /* currently a no-op */
1472 }
1473 andi r2, r2, ~_TIF_SINGLESTEP
1474 sw r1, r2
1475
1476 /* Issue a sigtrap */
1477 {
1478 lw r0, r0 /* indirect thru thread_info to get the task_struct pointer */
1479 addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */
1480 move r2, zero /* load error code into r2 */
1481 }
1482
1483 jal send_sigtrap /* issue a SIGTRAP */
1484 FEEDBACK_REENTER(handle_ill)
1485 j .Lresume_userspace /* jump into middle of interrupt_return */
1486
1487.Ldispatch_normal_ill:
1488 {
1489 jalr r0
1490 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1491 }
1492 FEEDBACK_REENTER(handle_ill)
1493 {
1494 movei r30, 0 /* not an NMI */
1495 j interrupt_return
1496 }
1497 STD_ENDPROC(handle_ill)
1498
1499 .pushsection .rodata, "a"
1500 .align 8
1501bpt_code:
1502 bpt
1503 ENDPROC(bpt_code)
1504 .popsection
1505
1506/* Various stub interrupt handlers and syscall handlers */
1507
1508STD_ENTRY_LOCAL(_kernel_double_fault)
1509 mfspr r1, EX_CONTEXT_1_0
1510 move r2, lr
1511 move r3, sp
1512 move r4, r52
1513 addi sp, sp, -C_ABI_SAVE_AREA_SIZE
1514 j kernel_double_fault
1515 STD_ENDPROC(_kernel_double_fault)
1516
1517STD_ENTRY_LOCAL(bad_intr)
1518 mfspr r2, EX_CONTEXT_1_0
1519 panic "Unhandled interrupt %#x: PC %#lx"
1520 STD_ENDPROC(bad_intr)
1521
1522/* Put address of pt_regs in reg and jump. */
1523#define PTREGS_SYSCALL(x, reg) \
1524 STD_ENTRY(x); \
1525 { \
1526 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
1527 j _##x \
1528 }; \
1529 STD_ENDPROC(x)
1530
1531PTREGS_SYSCALL(sys_execve, r3)
1532PTREGS_SYSCALL(sys_sigaltstack, r2)
1533PTREGS_SYSCALL(sys_rt_sigreturn, r0)
1534
1535/* Save additional callee-saves to pt_regs, put address in reg and jump. */
1536#define PTREGS_SYSCALL_ALL_REGS(x, reg) \
1537 STD_ENTRY(x); \
1538 push_extra_callee_saves reg; \
1539 j _##x; \
1540 STD_ENDPROC(x)
1541
1542PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
1543PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
1544PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
1545PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
1546
1547/*
1548 * This entrypoint is taken for the cmpxchg and atomic_update fast
1549 * swints. We may wish to generalize it to other fast swints at some
1550 * point, but for now there are just two very similar ones, which
1551 * makes it faster.
1552 *
1553 * The fast swint code is designed to have a small footprint. It does
1554 * not save or restore any GPRs, counting on the caller-save registers
1555 * to be available to it on entry. It does not modify any callee-save
1556 * registers (including "lr"). It does not check what PL it is being
1557 * called at, so you'd better not call it other than at PL0.
1558 *
1559 * It does not use the stack, but since it might be re-interrupted by
1560 * a page fault which would assume the stack was valid, it does
1561 * save/restore the stack pointer and zero it out to make sure it gets reset.
1562 * Since we always keep interrupts disabled, the hypervisor won't
1563 * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
1564 * (other than to advance the PC on return).
1565 *
1566 * We have to manually validate the user vs kernel address range
1567 * (since at PL1 we can read/write both), and for performance reasons
1568 * we don't allow cmpxchg on the fc000000 memory region, since we only
1569 * validate that the user address is below PAGE_OFFSET.
1570 *
1571 * We place it in the __HEAD section to ensure it is relatively
1572 * near to the intvec_SWINT_1 code (reachable by a conditional branch).
1573 *
1574 * Must match register usage in do_page_fault().
1575 */
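	/*
	 * In C terms, the two operations implemented here are roughly
	 * (a sketch; arguments arrive in r0..r2, old value returns in r0):
	 *
	 *	cmpxchg:        old = *p; if (old == oldval) *p = newval; return old;
	 *	atomic_update:  old = *p; *p = (old & mask) + addend;     return old;
	 *
	 * both performed under the per-address hashed lock taken below.
	 */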
1576 __HEAD
1577 .align 64
1578 /* Align much later jump on the start of a cache line. */
1579#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
1580 nop; nop
1581#endif
1582ENTRY(sys_cmpxchg)
1583
1584 /*
1585 * Save "sp" and set it zero for any possible page fault.
1586 *
1587 * HACK: We want to both zero sp and check r0's alignment,
1588 * so we do both at once. If "sp" becomes nonzero we
1589 * know r0 is unaligned and branch to the error handler that
1590 * restores sp, so this is OK.
1591 *
1592 * ICS is disabled right now so having a garbage but nonzero
1593 * sp is OK, since we won't execute any faulting instructions
1594 * when it is nonzero.
1595 */
1596 {
1597 move r27, sp
1598 andi sp, r0, 3
1599 }
1600
1601 /*
1602 * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
1603 * address is less than PAGE_OFFSET, since that won't trap at PL1.
1604 * We only use bits less than PAGE_SHIFT to avoid having to worry
1605 * about aliasing among multiple mappings of the same physical page,
1606 * and we ignore the low 3 bits so we have one lock that covers
1607 * both a cmpxchg64() and a cmpxchg() on either its low or high word.
1608 * NOTE: this code must match __atomic_hashed_lock() in lib/atomic.c.
1609 */
1610
1611#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
1612 {
1613 /* Check for unaligned input. */
1614 bnz sp, .Lcmpxchg_badaddr
1615 mm r25, r0, zero, 3, PAGE_SHIFT-1
1616 }
1617 {
1618 crc32_32 r25, zero, r25
1619 moveli r21, lo16(atomic_lock_ptr)
1620 }
1621 {
1622 auli r21, r21, ha16(atomic_lock_ptr)
1623 auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
1624 }
1625 {
1626 shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
1627 slt_u r23, r0, r23
1628
1629 /*
1630 * Ensure that the TLB is loaded before we take out the lock.
1631 * On TILEPro, this will start fetching the value all the way
1632 * into our L1 as well (and if it gets modified before we
1633 * grab the lock, it will be invalidated from our cache
1634 * before we reload it). On TILE64, we'll start fetching it
1635 * into our L1 if we're the home, and if we're not, we'll
1636 * still at least start fetching it into the home's L2.
1637 */
1638 lw r26, r0
1639 }
1640 {
1641 s2a r21, r20, r21
1642 bbns r23, .Lcmpxchg_badaddr
1643 }
1644 {
1645 lw r21, r21
1646 seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
1647 andi r25, r25, ATOMIC_HASH_L2_SIZE - 1
1648 }
1649 {
1650 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
1651 bbs r23, .Lcmpxchg64
1652 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
1653 }
1654
1655 {
1656 /*
1657 * We very carefully align the code that actually runs with
1658 * the lock held (nine bundles) so that we know it is all in
1659 * the icache when we start. This instruction (the jump) is
1660 * at the start of the first cache line, address zero mod 64;
1661 * we jump to somewhere in the second cache line to issue the
1662 * tns, then jump back to finish up.
1663 */
1664 s2a ATOMIC_LOCK_REG_NAME, r25, r21
1665 j .Lcmpxchg32_tns
1666 }
1667
1668#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
1669 {
1670 /* Check for unaligned input. */
1671 bnz sp, .Lcmpxchg_badaddr
1672 auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
1673 }
1674 {
1675 /*
1676 * Slide bits into position for 'mm'. We want to ignore
1677 * the low 3 bits of r0, and consider only the next
1678 * ATOMIC_HASH_SHIFT bits.
1679 * Because of C pointer arithmetic, we want to compute this:
1680 *
1681 * ((char*)atomic_locks +
1682 * (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
1683 *
1684 * Instead of two shifts we just ">> 1", and use 'mm'
1685 * to ignore the low and high bits we don't want.
1686 */
1687 shri r25, r0, 1
1688
1689 slt_u r23, r0, r23
1690
1691 /*
1692 * Ensure that the TLB is loaded before we take out the lock.
1693 * On TILEPro, this will start fetching the value all the way
1694 * into our L1 as well (and if it gets modified before we
1695 * grab the lock, it will be invalidated from our cache
1696 * before we reload it). On TILE64, we'll start fetching it
1697 * into our L1 if we're the home, and if we're not, we'll
1698 * still at least start fetching it into the home's L2.
1699 */
1700 lw r26, r0
1701 }
1702 {
1703 /* atomic_locks is page aligned so this suffices to get its addr. */
1704 auli r21, zero, hi16(atomic_locks)
1705
1706 bbns r23, .Lcmpxchg_badaddr
1707 }
1708 {
1709 /*
1710 * Insert the hash bits into the page-aligned pointer.
1711 * ATOMIC_HASH_SHIFT is so big that we don't actually hash
1712 * the unmasked address bits, as that may cause unnecessary
1713 * collisions.
1714 */
1715 mm ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1
1716
1717 seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
1718 }
1719 {
1720 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
1721 bbs r23, .Lcmpxchg64
1722 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
1723 }
1724 {
1725 /*
1726 * We very carefully align the code that actually runs with
1727 * the lock held (nine bundles) so that we know it is all in
1728 * the icache when we start. This instruction (the jump) is
1729 * at the start of the first cache line, address zero mod 64;
1730 * we jump to somewhere in the second cache line to issue the
1731 * tns, then jump back to finish up.
1732 */
1733 j .Lcmpxchg32_tns
1734 }
1735
1736#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
1737
1738 ENTRY(__sys_cmpxchg_grab_lock)
1739
1740 /*
1741 * Perform the actual cmpxchg or atomic_update.
1742 * Note that __futex_mark_unlocked() in uClibc relies on
1743 * atomic_update() to always perform an "mf", so don't make
1744 * it optional or conditional without modifying that code.
1745 */
1746.Ldo_cmpxchg32:
1747 {
1748 lw r21, r0
1749 seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
1750 move r24, r2
1751 }
1752 {
1753 seq r22, r21, r1 /* See if cmpxchg matches. */
1754 and r25, r21, r1 /* If atomic_update, compute (*mem & mask) */
1755 }
1756 {
1757 or r22, r22, r23 /* Skip compare branch for atomic_update. */
1758 add r25, r25, r2 /* Compute (*mem & mask) + addend. */
1759 }
1760 {
1761 mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */
1762 bbns r22, .Lcmpxchg32_mismatch
1763 }
1764 sw r0, r24
1765
1766 /* Do slow mtspr here so the following "mf" waits less. */
1767 {
1768 move sp, r27
1769 mtspr EX_CONTEXT_1_0, r28
1770 }
1771 mf
1772
1773 /* The following instruction is the start of the second cache line. */
1774 {
1775 move r0, r21
1776 sw ATOMIC_LOCK_REG_NAME, zero
1777 }
1778 iret
1779
1780 /* Duplicated code here in the case where we don't overlap "mf" */
1781.Lcmpxchg32_mismatch:
1782 {
1783 move r0, r21
1784 sw ATOMIC_LOCK_REG_NAME, zero
1785 }
1786 {
1787 move sp, r27
1788 mtspr EX_CONTEXT_1_0, r28
1789 }
1790 iret
1791
1792 /*
1793 * The locking code is the same for 32-bit cmpxchg/atomic_update,
1794 * and for 64-bit cmpxchg. We provide it as a macro and put
1795 * it into both versions. We can't share the code literally
1796 * since it depends on having the right branch-back address.
1797 * Note that the first few instructions should share the cache
1798 * line with the second half of the actual locked code.
1799 */
1800 .macro cmpxchg_lock, bitwidth
1801
1802 /* Lock; if we succeed, jump back up to the read-modify-write. */
1803#ifdef CONFIG_SMP
1804 tns r21, ATOMIC_LOCK_REG_NAME
1805#else
1806 /*
1807 * Non-SMP preserves all the lock infrastructure, to keep the
1808 * code simpler for the interesting (SMP) case. However, we do
1809 * one small optimization here and in atomic_asm.S, which is
1810 * to fake out acquiring the actual lock in the atomic_lock table.
1811 */
1812 movei r21, 0
1813#endif
1814
1815 /* Issue the slow SPR here while the tns result is in flight. */
1816 mfspr r28, EX_CONTEXT_1_0
1817
1818 {
1819 addi r28, r28, 8 /* return to the instruction after the swint1 */
1820 bzt r21, .Ldo_cmpxchg\bitwidth
1821 }
1822 /*
1823 * The preceding instruction is the last thing that must be
1824 * on the second cache line.
1825 */
1826
1827#ifdef CONFIG_SMP
1828 /*
1829 * We failed to acquire the tns lock on our first try. Now use
1830 * bounded exponential backoff to retry, like __atomic_spinlock().
1831 */
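	/*
	 * Roughly, in C terms (a sketch):
	 *
	 *	backoff = 32;
	 *	do {
	 *		spin for "backoff" cycles;
	 *		backoff = min(backoff * 2, 2048);
	 *	} while (tns(&lock) != 0);
	 */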
1832 {
1833 moveli r23, 2048 /* maximum backoff time in cycles */
1834 moveli r25, 32 /* starting backoff time in cycles */
1835 }
18361: mfspr r26, CYCLE_LOW /* get start point for this backoff */
18372: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */
1838 sub r22, r22, r26
1839 slt r22, r22, r25
1840 bbst r22, 2b
1841 {
1842 shli r25, r25, 1 /* double the backoff; retry the tns */
1843 tns r21, ATOMIC_LOCK_REG_NAME
1844 }
1845 slt r26, r23, r25 /* is the proposed backoff too big? */
1846 {
1847 mvnz r25, r26, r23
1848 bzt r21, .Ldo_cmpxchg\bitwidth
1849 }
1850 j 1b
1851#endif /* CONFIG_SMP */
1852 .endm
1853
1854.Lcmpxchg32_tns:
1855 cmpxchg_lock 32
1856
1857 /*
1858 * This code is invoked from sys_cmpxchg after most of the
1859 * preconditions have been checked. We still need to check
1860 * that r0 is 8-byte aligned, since if it's not we won't
1861 * actually be atomic. However, ATOMIC_LOCK_REG has the atomic
1862 * lock pointer and r27/r28 have the saved SP/PC.
1863 * r23 is holding "r0 & 7" so we can test for alignment.
1864 * The compare value is in r2/r3; the new value is in r4/r5.
1865 * On return, we must put the old value in r0/r1.
1866 */
1867 .align 64
1868.Lcmpxchg64:
1869 {
1870#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
1871 s2a ATOMIC_LOCK_REG_NAME, r25, r21
1872#endif
1873 bzt r23, .Lcmpxchg64_tns
1874 }
1875 j .Lcmpxchg_badaddr
1876
1877.Ldo_cmpxchg64:
1878 {
1879 lw r21, r0
1880 addi r25, r0, 4
1881 }
1882 {
1883 lw r1, r25
1884 }
1885 seq r26, r21, r2
1886 {
1887 bz r26, .Lcmpxchg64_mismatch
1888 seq r26, r1, r3
1889 }
1890 {
1891 bz r26, .Lcmpxchg64_mismatch
1892 }
1893 sw r0, r4
1894 sw r25, r5
1895
1896 /*
1897 * The 32-bit path provides optimized "match" and "mismatch"
1898 * iret paths, but we don't have enough bundles in this cache line
1899 * to do that, so we just make even the "mismatch" path do an "mf".
1900 */
1901.Lcmpxchg64_mismatch:
1902 {
1903 move sp, r27
1904 mtspr EX_CONTEXT_1_0, r28
1905 }
1906 mf
1907 {
1908 move r0, r21
1909 sw ATOMIC_LOCK_REG_NAME, zero
1910 }
1911 iret
1912
1913.Lcmpxchg64_tns:
1914 cmpxchg_lock 64
1915
1916
1917 /*
1918 * Reset sp and revector to sys_cmpxchg_badaddr(), which will
1919 * just raise the appropriate signal and exit. Doing it this
1920 * way means we don't have to duplicate the code in intvec.S's
1921 * int_hand macro that locates the top of the stack.
1922 */
1923.Lcmpxchg_badaddr:
1924 {
1925 moveli TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
1926 move sp, r27
1927 }
1928 j intvec_SWINT_1
1929 ENDPROC(sys_cmpxchg)
1930 ENTRY(__sys_cmpxchg_end)
1931
1932
1933/* The single-step support may need to read all the registers. */
1934int_unalign:
1935 push_extra_callee_saves r0
1936 j do_trap
1937
1938/* Include .intrpt1 array of interrupt vectors */
1939 .section ".intrpt1", "ax"
1940
1941#define op_handle_perf_interrupt bad_intr
1942#define op_handle_aux_perf_interrupt bad_intr
1943
1944#define do_hardwall_trap bad_intr
1945
1946 int_hand INT_ITLB_MISS, ITLB_MISS, \
1947 do_page_fault, handle_interrupt_no_single_step
1948 int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr
1949 int_hand INT_ILL, ILL, do_trap, handle_ill
1950 int_hand INT_GPV, GPV, do_trap
1951 int_hand INT_SN_ACCESS, SN_ACCESS, do_trap
1952 int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
1953 int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
1954 int_hand INT_IDN_REFILL, IDN_REFILL, bad_intr
1955 int_hand INT_UDN_REFILL, UDN_REFILL, bad_intr
1956 int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
1957 int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
1958 int_hand INT_SWINT_3, SWINT_3, do_trap
1959 int_hand INT_SWINT_2, SWINT_2, do_trap
1960 int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
1961 int_hand INT_SWINT_0, SWINT_0, do_trap
1962 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
1963 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
1964 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1965 int_hand INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
1966 int_hand INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
1967 int_hand INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
1968 int_hand INT_SN_NOTIFY, SN_NOTIFY, bad_intr
1969 int_hand INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
1970 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
1971 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
1972 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
1973 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
1974 int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
1975 int_hand INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
1976 int_hand INT_IDN_CA, IDN_CA, bad_intr
1977 int_hand INT_UDN_CA, UDN_CA, bad_intr
1978 int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
1979 int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
1980 int_hand INT_PERF_COUNT, PERF_COUNT, \
1981 op_handle_perf_interrupt, handle_nmi
1982 int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
1983 int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
1984 dc_dispatch INT_INTCTRL_1, INTCTRL_1
1985 int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
1986 int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
1987 hv_message_intr, handle_interrupt_downcall
1988 int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
1989 tile_dev_intr, handle_interrupt_downcall
1990 int_hand INT_I_ASID, I_ASID, bad_intr
1991 int_hand INT_D_ASID, D_ASID, bad_intr
1992 int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
1993 do_page_fault, handle_interrupt_downcall
1994 int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
1995 do_page_fault, handle_interrupt_downcall
1996 int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
1997 do_page_fault, handle_interrupt_downcall
1998 int_hand INT_SN_CPL, SN_CPL, bad_intr
1999 int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
2000#if CHIP_HAS_AUX_PERF_COUNTERS()
2001 int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
2002 op_handle_aux_perf_interrupt, handle_nmi
2003#endif
2004
2005 /* Synthetic interrupt delivered only by the simulator */
2006 int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint