author     David Howells <dhowells@redhat.com>    2006-02-14 16:53:20 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-02-14 19:09:35 -0500
commit     28baebae73c3ea8b75c7cae225a7db817ab825a9
tree       940476b4d03b96480d451b7b5b6f3df3f0ff18dc
parent     68f624fc8b9fa50de9cc0ebd612ef7b7b9fa32d0
[PATCH] FRV: Use virtual interrupt disablement
Make the FRV arch use virtual interrupt disablement because accesses to the processor status
register (PSR) are relatively slow and because we will soon have the need to deal with multiple
interrupt controls at the same time (separate h/w and inter-core interrupts).

The way this is done is to dedicate one of the four integer condition code registers (ICC2) to
maintaining a virtual interrupt disablement state whilst inside the kernel. This uses the ICC2.Z
flag (Zero) to indicate whether the interrupts are virtually disabled and the ICC2.C flag (Carry)
to indicate whether the interrupts are physically disabled.

ICC2.Z is set to indicate interrupts are virtually disabled. ICC2.C is set to indicate interrupts
are physically enabled. Under normal running conditions Z==0 and C==1.

Disabling interrupts with local_irq_disable() doesn't then actually physically disable
interrupts - it merely sets ICC2.Z to 1. Should an interrupt then happen, the exception prologue
will note ICC2.Z is set and branch out of line using one instruction (an unlikely BEQ). Here it
will physically disable interrupts and clear ICC2.C.

When it comes time to enable interrupts (local_irq_enable()), this simply clears the ICC2.Z flag
and invokes a trap #2 if both Z and C flags are clear (the HI integer condition). This can be
done with the TIHI conditional trap instruction.

The trap then physically reenables interrupts and sets ICC2.C again. Upon returning the interrupt
will be taken as interrupts will then be enabled. Note that whilst processing the trap, the whole
exceptions system is disabled, and so an interrupt can't happen till it returns.

If no pending interrupt had happened, ICC2.C would still be set, the HI condition would not be
fulfilled, and no trap would happen.

Saving interrupts (local_irq_save) is simply a matter of pulling the ICC2.Z flag out of the CCR
register, shifting it down and masking it off. This gives a result of 0 if interrupts were
enabled and 1 if they weren't.

Restoring interrupts (local_irq_restore) is then a matter of taking the saved value mentioned
previously and XOR'ing it against 1. If it was one, the result will be zero, and if it was zero
the result will be non-zero. This result is then used to affect the ICC2.Z flag directly (it is a
condition code flag after all). An XOR instruction does not affect the Carry flag, and so that
bit of state is unchanged. The two flags can then be sampled to see if they're both zero using
the trap (TIHI) as for the unconditional reenablement (local_irq_enable).

This patch also:

 (1) Modifies the debugging stub (break.S) to handle single-stepping crossing into the trap #2
     handler and into virtually disabled interrupts.

 (2) Removes superseded fixup pointers from the second instructions in the trap tables (there's
     now a separate fixup table for this).

 (3) Declares the trap #3 vector for use in .org directives in the trap table.

 (4) Moves irq_enter() and irq_exit() in do_IRQ() to avoid problems with virtual interrupt
     handling, and removes the duplicate code that has now been folded into irq_exit() (softirq
     and preemption handling).

 (5) Tells the compiler in the arch Makefile that ICC2 is now reserved.

 (6) Documents the in-kernel ABI, including the virtual interrupts.

 (7) Renames the old, real interrupt management functions with a __ prefix (__local_irq_disable()
     and friends) so that the unprefixed names can be taken over by the virtual versions.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
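
For reference, the save/restore arithmetic described above can be modelled in a few lines of
host-side C. This is only an illustration (the helper names are invented, and the bit position of
ICC2.Z is inferred from the ">>= 26" shift in the new local_save_flags() below), not code from
the patch:

    #include <assert.h>

    /* ICC2.Z is taken to be bit 26 of CCR, as implied by local_save_flags(). */
    static unsigned long model_irq_save(unsigned long ccr)
    {
        return (ccr >> 26) & 1UL;   /* 1 = virtually disabled, 0 = enabled */
    }

    static unsigned long model_irq_restore(unsigned long flags)
    {
        /* XOR against 1: a saved 1 (disabled) gives 0, so ICC2.Z would be set;
         * a saved 0 (enabled) gives non-zero, so ICC2.Z would be cleared.
         * The XOR leaves the carry flag alone, as in the real instruction. */
        return flags ^ 1UL;
    }

    int main(void)
    {
        assert(model_irq_save(1UL << 26) == 1);  /* interrupts were disabled */
        assert(model_irq_save(0) == 0);          /* interrupts were enabled  */
        assert(model_irq_restore(1) == 0);       /* zero result => Z set     */
        assert(model_irq_restore(0) == 1);       /* non-zero    => Z clear   */
        return 0;
    }
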
-rw-r--r--  Documentation/fujitsu/frv/kernel-ABI.txt  | 234
-rw-r--r--  arch/frv/Makefile                         |   2
-rw-r--r--  arch/frv/kernel/break.S                   |  77
-rw-r--r--  arch/frv/kernel/entry-table.S             |  39
-rw-r--r--  arch/frv/kernel/entry.S                   |  65
-rw-r--r--  arch/frv/kernel/head.S                    |   3
-rw-r--r--  arch/frv/kernel/irq.c                     |  41
-rw-r--r--  include/asm-frv/spr-regs.h                |   1
-rw-r--r--  include/asm-frv/system.h                  |  88
9 files changed, 489 insertions(+), 61 deletions(-)
diff --git a/Documentation/fujitsu/frv/kernel-ABI.txt b/Documentation/fujitsu/frv/kernel-ABI.txt
new file mode 100644
index 000000000000..0ed9b0a779bc
--- /dev/null
+++ b/Documentation/fujitsu/frv/kernel-ABI.txt
@@ -0,0 +1,234 @@
1 =================================
2 INTERNAL KERNEL ABI FOR FR-V ARCH
3 =================================
4
5The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers
6are used for special purposes, and the ABI is not consistent between modules vs core, and MMU vs
7no-MMU.
8
9This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and
10most of them do not have any scratch registers, thus requiring at least one general purpose
11register to be clobbered when an exception is taken. Also, within the kernel core, it is possible to simply
12jump or call directly between functions using a relative offset. This cannot be extended to modules
13because the displacement is likely to be too great. Thus in modules the address of a function to call
14must be calculated in a register and then used, requiring two extra instructions.
15
16This document has the following sections:
17
18 (*) System call register ABI
19 (*) CPU operating modes
20 (*) Internal kernel-mode register ABI
21 (*) Internal debug-mode register ABI
22 (*) Virtual interrupt handling
23
24
25========================
26SYSTEM CALL REGISTER ABI
27========================
28
29When a system call is made, the following registers are effective:
30
31 REGISTERS CALL RETURN
32 =============== ======================= =======================
33 GR7 System call number Preserved
34 GR8 Syscall arg #1 Return value
35 GR9-GR13 Syscall arg #2-6 Preserved
36
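As an illustration of this convention, a minimal userspace wrapper might look like the sketch
below. Only the register assignments come from the table above; the trap instruction used to
enter the kernel ("tira gr0,#0") is an assumption borrowed from common FRV C-library practice,
so treat the stub as a sketch rather than a reference implementation:

    /* Hypothetical one-argument syscall stub - FRV GCC only. */
    static inline long frv_syscall1(long nr, long arg1)
    {
        register long scno asm("gr7") = nr;    /* system call number    */
        register long a1   asm("gr8") = arg1;  /* arg #1 / return value */

        asm volatile("tira gr0,#0"             /* assumed kernel entry  */
                     : "+r"(a1)
                     : "r"(scno)
                     : "memory");

        return a1;                             /* GR8 carries the result */
    }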
37
38===================
39CPU OPERATING MODES
40===================
41
42The FR-V CPU has three basic operating modes. In order of increasing capability:
43
44 (1) User mode.
45
46 Basic userspace running mode.
47
48 (2) Kernel mode.
49
50 Normal kernel mode. There are many additional control registers available that may be
51 accessed in this mode, in addition to all the stuff available to user mode. This has two
52 submodes:
53
54 (a) Exceptions enabled (PSR.T == 1).
55
56 Exceptions will invoke the appropriate normal kernel mode handler. On entry to the
57 handler, the PSR.T bit will be cleared.
58
59 (b) Exceptions disabled (PSR.T == 0).
60
61 No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to
62 halt unless the CPU is told to jump into debug mode instead.
63
64 (3) Debug mode.
65
66 No exceptions may happen in this mode. Memory protection and management exceptions will be
67 flagged for later consideration, but the exception handler won't be invoked. Debugging traps
68 such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by
69 debugging events obtained from the other two modes.
70
71 All kernel mode registers may be accessed, plus a few extra debugging-specific registers.
72
73
74=================================
75INTERNAL KERNEL-MODE REGISTER ABI
76=================================
77
78There are a number of permanent register assignments that are set up by entry.S in the exception
79prologue. Note that there is a complete set of exception prologues for each of the user->kernel
80and kernel->kernel transitions. There are also user->debug and kernel->debug mode
81transition prologues.
82
83
84 REGISTER FLAVOUR USE
85 =============== ======= ====================================================
86 GR1 Supervisor stack pointer
87 GR15 Current thread info pointer
88 GR16 GP-Rel base register for small data
89 GR28 Current exception frame pointer (__frame)
90 GR29 Current task pointer (current)
91 GR30 Destroyed by kernel mode entry
92 GR31 NOMMU Destroyed by debug mode entry
93 GR31 MMU Destroyed by TLB miss kernel mode entry
94 CCR.ICC2 Virtual interrupt disablement tracking
95 CCCR.CC3 Cleared by exception prologue (atomic op emulation)
96 SCR0 MMU See mmu-layout.txt.
97 SCR1 MMU See mmu-layout.txt.
98 SCR2 MMU Save for EAR0 (destroyed by icache insns in debug mode)
99 SCR3 MMU Save for GR31 during debug exceptions
100 DAMR/IAMR NOMMU Fixed memory protection layout.
101 DAMR/IAMR MMU See mmu-layout.txt.
102
103
104Certain registers are also used or modified across function calls:
105
106 REGISTER CALL RETURN
107 =============== =============================== ===============================
108 GR0 Fixed Zero -
109 GR2 Function call frame pointer
110 GR3 Special Preserved
111 GR3-GR7 - Clobbered
112 GR8 Function call arg #1 Return value (or clobbered)
113 GR9 Function call arg #2 Return value MSW (or clobbered)
114 GR10-GR13 Function call arg #3-#6 Clobbered
115 GR14 - Clobbered
116 GR15-GR16 Special Preserved
117 GR17-GR27 - Preserved
118 GR28-GR31 Special Only accessed explicitly
119 LR Return address after CALL Clobbered
120 CCR/CCCR - Mostly Clobbered
121
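From C, the fixed assignments above are typically exposed as global register variables. The
declarations below are a sketch of that technique (the FRV headers do something along these
lines, but take the exact spellings here as illustrative rather than authoritative):

    /* Illustrative only - requires FRV GCC. */
    struct task_struct;
    struct thread_info;

    register struct task_struct *current asm("gr29");                /* current task */
    register struct thread_info *__current_thread_info asm("gr15");  /* thread_info  */

    /* GR15 and ICC2 are additionally marked fixed via -ffixed-gr15 and
     * -ffixed-icc2 in the arch Makefile, so the compiler never allocates them. */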
122
123================================
124INTERNAL DEBUG-MODE REGISTER ABI
125================================
126
127This is the same as the kernel-mode register ABI for function calls. The difference is that in
128debug-mode there's a different stack and a different exception frame. Almost all the global
129registers from kernel-mode (including the stack pointer) may be changed.
130
131 REGISTER FLAVOUR USE
132 =============== ======= ====================================================
133 GR1 Debug stack pointer
134 GR16 GP-Rel base register for small data
135 GR31 Current debug exception frame pointer (__debug_frame)
136 SCR3 MMU Saved value of GR31
137
138
139Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be
140exceedingly careful not to perform any operations that would interfere with the main kernel's atomic emulation. Hence
141the debug mode code (gdbstub) is almost completely self-contained. The only external code used is
142the sprintf family of functions.
143
144Furthermore, break.S is as complicated as it is because single-step mode does not switch off on entry to an
145exception. That means unless manually disabled, single-stepping will blithely go on stepping into
146things like interrupts. See gdbstub.txt for more information.
147
148
149==========================
150VIRTUAL INTERRUPT HANDLING
151==========================
152
153Because accesses to the PSR are so slow, and to disable interrupts we have to access it twice (once
154to read and once to write), we don't actually disable interrupts at all if we don't have to. What
155we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we
156then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume
157execution at the point the interrupt happened. Setting condition flags as a side effect of an
158arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the
159kernel - it does not affect userspace.
160
161The flags we use are:
162
163 (*) CCR.ICC2.Z [Zero flag]
164
165 Set to virtually disable interrupts, clear when interrupts are virtually enabled. Can be
166 modified by logical instructions without affecting the Carry flag.
167
168 (*) CCR.ICC2.C [Carry flag]
169
170 Clear to indicate hardware interrupts are really disabled, set otherwise.
171
172
173What happens is this:
174
175 (1) Normal kernel-mode operation.
176
177 ICC2.Z is 0, ICC2.C is 1.
178
179 (2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs
180 doing. This is done simply with an unlikely BEQ instruction.
181
182 (3) The interrupts are disabled (local_irq_disable):
183
184 ICC2.Z is set to 1.
185
186 (4) If interrupts were then re-enabled (local_irq_enable):
187
188 ICC2.Z would be set to 0.
189
190 A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if
191 interrupts were now virtually enabled, but physically disabled - which they're not, so the
192 trap isn't taken. The kernel would then be back to state (1).
193
194 (5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt
195 shouldn't actually have happened. It jumps aside, and there disables interrupts by setting
196 PSR.PIL to 14 and then clears ICC2.C.
197
198 (6) If interrupts were then saved and disabled again (local_irq_save):
199
200 ICC2.Z would be shifted into the save variable and masked off (giving a 1).
201
202 ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0).
203
204 (7) If interrupts were then restored from state (6) (local_irq_restore):
205
206 ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which
207 gives a result of 0 - thus leaving ICC2.Z set.
208
209 ICC2.C would remain unaffected (ie: 0).
210
211 A TIHI #2 instruction would be used to again assay the current state, but this would do
212 nothing as Z==1.
213
214 (8) If interrupts were then enabled (local_irq_enable):
215
216 ICC2.Z would be cleared. ICC2.C would be left unaffected. Both flags would now be 0.
217
218 A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0
219 [interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true.
220
221 (9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to
222 1 and return.
223
224(10) Immediately upon returning, the pending interrupt would be taken.
225
226(11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is
227 clear, BEQ fails as per step (2)).
228
229(12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely
230 enabled - or else the kernel wouldn't be here.
231
232(13) On return from the interrupt handler, things would be back to state (1).
233
234This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL.
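
The thirteen steps above amount to a small state machine over the two flags. Purely as a
host-side model (plain C, no FRV specifics; all of the names below are invented for this
sketch), the transitions can be exercised like this:

    #include <assert.h>
    #include <stdbool.h>

    static bool Z;             /* ICC2.Z: true = virtually disabled     */
    static bool C;             /* ICC2.C: true = really enabled         */
    static bool hw_enabled;    /* real interrupt state (PSR.PIL level)  */
    static bool irq_pending;

    static void trap2(void)    /* TIHI #2: fires only when Z==0 && C==0 */
    {
        if (!Z && !C) {
            hw_enabled = true; /* step (9): reenable hardware, set C    */
            C = true;
        }
    }

    static void virt_irq_disable(void)  { Z = true; }              /* step (3)      */
    static void virt_irq_enable(void)   { Z = false; trap2(); }    /* steps (4)/(8) */

    static unsigned virt_irq_save(void)                            /* step (6)      */
    {
        unsigned flags = Z;
        Z = true;
        return flags;
    }

    static void virt_irq_restore(unsigned flags)                   /* step (7)      */
    {
        Z = !(flags ^ 1U);     /* XOR result of zero means Z stays/becomes set */
        trap2();
    }

    static void interrupt(void)          /* a hardware interrupt arrives */
    {
        if (Z) {               /* step (5): really disable, clear C      */
            hw_enabled = false;
            C = false;
            irq_pending = true;
        } else {               /* steps (11)/(12): handle it, set C      */
            irq_pending = false;
            C = true;
        }
    }

    int main(void)
    {
        Z = false; C = true; hw_enabled = true;   /* state (1)            */

        virt_irq_disable();                       /* (3)                  */
        interrupt();                              /* (5): deferred        */
        unsigned flags = virt_irq_save();         /* (6): flags == 1      */
        assert(flags == 1);
        virt_irq_restore(flags);                  /* (7): still disabled  */
        assert(Z && !C && !hw_enabled);
        virt_irq_enable();                        /* (8)+(9): trap fires  */
        assert(!Z && C && hw_enabled);
        interrupt();                              /* (10)-(12): IRQ taken */
        assert(!irq_pending);
        return 0;
    }

This models only the flag discipline, not the PSR.PIL arithmetic or the actual trap mechanics.
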
diff --git a/arch/frv/Makefile b/arch/frv/Makefile
index 90c0fb8d9dc3..d163747d17c0 100644
--- a/arch/frv/Makefile
+++ b/arch/frv/Makefile
@@ -81,7 +81,7 @@ endif
81# - reserve CC3 for use with atomic ops 81# - reserve CC3 for use with atomic ops
82# - all the extra registers are dealt with only at context switch time 82# - all the extra registers are dealt with only at context switch time
83CFLAGS += -mno-fdpic -mgpr-32 -msoft-float -mno-media 83CFLAGS += -mno-fdpic -mgpr-32 -msoft-float -mno-media
84CFLAGS += -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 84CFLAGS += -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 -ffixed-icc2
85AFLAGS += -mno-fdpic 85AFLAGS += -mno-fdpic
86ASFLAGS += -mno-fdpic 86ASFLAGS += -mno-fdpic
87 87
diff --git a/arch/frv/kernel/break.S b/arch/frv/kernel/break.S
index 33233dc23e29..687c48d62dde 100644
--- a/arch/frv/kernel/break.S
+++ b/arch/frv/kernel/break.S
@@ -200,12 +200,20 @@ __break_step:
200 movsg bpcsr,gr2 200 movsg bpcsr,gr2
201 sethi.p %hi(__entry_kernel_external_interrupt),gr3 201 sethi.p %hi(__entry_kernel_external_interrupt),gr3
202 setlo %lo(__entry_kernel_external_interrupt),gr3 202 setlo %lo(__entry_kernel_external_interrupt),gr3
203 subcc gr2,gr3,gr0,icc0 203 subcc.p gr2,gr3,gr0,icc0
204 sethi %hi(__entry_uspace_external_interrupt),gr3
205 setlo.p %lo(__entry_uspace_external_interrupt),gr3
204 beq icc0,#2,__break_step_kernel_external_interrupt 206 beq icc0,#2,__break_step_kernel_external_interrupt
205 sethi.p %hi(__entry_uspace_external_interrupt),gr3 207 subcc.p gr2,gr3,gr0,icc0
206 setlo %lo(__entry_uspace_external_interrupt),gr3 208 sethi %hi(__entry_kernel_external_interrupt_virtually_disabled),gr3
207 subcc gr2,gr3,gr0,icc0 209 setlo.p %lo(__entry_kernel_external_interrupt_virtually_disabled),gr3
208 beq icc0,#2,__break_step_uspace_external_interrupt 210 beq icc0,#2,__break_step_uspace_external_interrupt
211 subcc.p gr2,gr3,gr0,icc0
212 sethi %hi(__entry_kernel_external_interrupt_virtual_reenable),gr3
213 setlo.p %lo(__entry_kernel_external_interrupt_virtual_reenable),gr3
214 beq icc0,#2,__break_step_kernel_external_interrupt_virtually_disabled
215 subcc gr2,gr3,gr0,icc0
216 beq icc0,#2,__break_step_kernel_external_interrupt_virtual_reenable
209 217
210 LEDS 0x2007,gr2 218 LEDS 0x2007,gr2
211 219
@@ -254,6 +262,9 @@ __break_step_kernel_softprog_interrupt:
254# step through an external interrupt from kernel mode 262# step through an external interrupt from kernel mode
255 .globl __break_step_kernel_external_interrupt 263 .globl __break_step_kernel_external_interrupt
256__break_step_kernel_external_interrupt: 264__break_step_kernel_external_interrupt:
265 # deal with virtual interrupt disablement
266 beq icc2,#0,__break_step_kernel_external_interrupt_virtually_disabled
267
257 sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3 268 sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3
258 setlo %lo(__entry_kernel_external_interrupt_reentry),gr3 269 setlo %lo(__entry_kernel_external_interrupt_reentry),gr3
259 270
@@ -294,6 +305,64 @@ __break_return_as_kernel_prologue:
294#endif 305#endif
295 rett #1 306 rett #1
296 307
308# we single-stepped into an interrupt handler whilst interrupts were merely virtually disabled
309# need to really disable interrupts, set flag, fix up and return
310__break_step_kernel_external_interrupt_virtually_disabled:
311 movsg psr,gr2
312 andi gr2,#~PSR_PIL,gr2
313 ori gr2,#PSR_PIL_14,gr2 /* debugging interrupts only */
314 movgs gr2,psr
315
316 ldi @(gr31,#REG_CCR),gr3
317 movgs gr3,ccr
318 subcc.p gr0,gr0,gr0,icc2 /* leave Z set, clear C */
319
320 # exceptions must've been enabled and we must've been in supervisor mode
321 setlos BPSR_BET|BPSR_BS,gr3
322 movgs gr3,bpsr
323
324 # return to where the interrupt happened
325 movsg pcsr,gr2
326 movgs gr2,bpcsr
327
328 lddi.p @(gr31,#REG_GR(2)),gr2
329
330 xor gr31,gr31,gr31
331 movgs gr0,brr
332#ifdef CONFIG_MMU
333 movsg scr3,gr31
334#endif
335 rett #1
336
337# we stepped through into the virtual interrupt reenablement trap
338#
339# we also want to single step anyway, but after fixing up so that we get an event on the
340# instruction after the broken-into exception returns
341 .globl __break_step_kernel_external_interrupt_virtual_reenable
342__break_step_kernel_external_interrupt_virtual_reenable:
343 movsg psr,gr2
344 andi gr2,#~PSR_PIL,gr2
345 movgs gr2,psr
346
347 ldi @(gr31,#REG_CCR),gr3
348 movgs gr3,ccr
349 subicc gr0,#1,gr0,icc2 /* clear Z, set C */
350
351 # save the adjusted ICC2
352 movsg ccr,gr3
353 sti gr3,@(gr31,#REG_CCR)
354
355 # exceptions must've been enabled and we must've been in supervisor mode
356 setlos BPSR_BET|BPSR_BS,gr3
357 movgs gr3,bpsr
358
359 # return to where the trap happened
360 movsg pcsr,gr2
361 movgs gr2,bpcsr
362
363 # and then process the single step
364 bra __break_continue
365
297# step through an internal exception from uspace mode 366# step through an internal exception from uspace mode
298 .globl __break_step_uspace_softprog_interrupt 367 .globl __break_step_uspace_softprog_interrupt
299__break_step_uspace_softprog_interrupt: 368__break_step_uspace_softprog_interrupt:
diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S
index 9b9243e2103c..81568acea9cd 100644
--- a/arch/frv/kernel/entry-table.S
+++ b/arch/frv/kernel/entry-table.S
@@ -116,6 +116,8 @@ __break_kerneltrap_fixup_table:
116 .long __break_step_uspace_external_interrupt 116 .long __break_step_uspace_external_interrupt
117 .section .trap.kernel 117 .section .trap.kernel
118 .org \tbr_tt 118 .org \tbr_tt
119 # deal with virtual interrupt disablement
120 beq icc2,#0,__entry_kernel_external_interrupt_virtually_disabled
119 bra __entry_kernel_external_interrupt 121 bra __entry_kernel_external_interrupt
120 .section .trap.fixup.kernel 122 .section .trap.fixup.kernel
121 .org \tbr_tt >> 2 123 .org \tbr_tt >> 2
@@ -259,25 +261,52 @@ __trap_fixup_kernel_data_tlb_miss:
259 .org TBR_TT_TRAP0 261 .org TBR_TT_TRAP0
260 .rept 127 262 .rept 127
261 bra __entry_uspace_softprog_interrupt 263 bra __entry_uspace_softprog_interrupt
262 bra __break_step_uspace_softprog_interrupt 264 .long 0,0,0
263 .long 0,0
264 .endr 265 .endr
265 .org TBR_TT_BREAK 266 .org TBR_TT_BREAK
266 bra __entry_break 267 bra __entry_break
267 .long 0,0,0 268 .long 0,0,0
268 269
270 .section .trap.fixup.user
271 .org TBR_TT_TRAP0 >> 2
272 .rept 127
273 .long __break_step_uspace_softprog_interrupt
274 .endr
275 .org TBR_TT_BREAK >> 2
276 .long 0
277
269 # miscellaneous kernel mode entry points 278 # miscellaneous kernel mode entry points
270 .section .trap.kernel 279 .section .trap.kernel
271 .org TBR_TT_TRAP0 280 .org TBR_TT_TRAP0
272 .rept 127
273 bra __entry_kernel_softprog_interrupt 281 bra __entry_kernel_softprog_interrupt
274 bra __break_step_kernel_softprog_interrupt 282 .org TBR_TT_TRAP1
275 .long 0,0 283 bra __entry_kernel_softprog_interrupt
284
285 # trap #2 in kernel - reenable interrupts
286 .org TBR_TT_TRAP2
287 bra __entry_kernel_external_interrupt_virtual_reenable
288
289 # miscellaneous kernel traps
290 .org TBR_TT_TRAP3
291 .rept 124
292 bra __entry_kernel_softprog_interrupt
293 .long 0,0,0
276 .endr 294 .endr
277 .org TBR_TT_BREAK 295 .org TBR_TT_BREAK
278 bra __entry_break 296 bra __entry_break
279 .long 0,0,0 297 .long 0,0,0
280 298
299 .section .trap.fixup.kernel
300 .org TBR_TT_TRAP0 >> 2
301 .long __break_step_kernel_softprog_interrupt
302 .long __break_step_kernel_softprog_interrupt
303 .long __break_step_kernel_external_interrupt_virtual_reenable
304 .rept 124
305 .long __break_step_kernel_softprog_interrupt
306 .endr
307 .org TBR_TT_BREAK >> 2
308 .long 0
309
281 # miscellaneous debug mode entry points 310 # miscellaneous debug mode entry points
282 .section .trap.break 311 .section .trap.break
283 .org TBR_TT_BREAK 312 .org TBR_TT_BREAK
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index c69d499d28cf..1d21c8d34d8a 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -141,7 +141,10 @@ __entry_uspace_external_interrupt_reentry:
141 141
142 movsg gner0,gr4 142 movsg gner0,gr4
143 movsg gner1,gr5 143 movsg gner1,gr5
144 stdi gr4,@(gr28,#REG_GNER0) 144 stdi.p gr4,@(gr28,#REG_GNER0)
145
146 # interrupts start off fully disabled in the interrupt handler
147 subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
145 148
146 # set up kernel global registers 149 # set up kernel global registers
147 sethi.p %hi(__kernel_current_task),gr5 150 sethi.p %hi(__kernel_current_task),gr5
@@ -193,9 +196,8 @@ __entry_uspace_external_interrupt_reentry:
193 .type __entry_kernel_external_interrupt,@function 196 .type __entry_kernel_external_interrupt,@function
194__entry_kernel_external_interrupt: 197__entry_kernel_external_interrupt:
195 LEDS 0x6210 198 LEDS 0x6210
196 199// sub sp,gr15,gr31
197 sub sp,gr15,gr31 200// LEDS32
198 LEDS32
199 201
200 # set up the stack pointer 202 # set up the stack pointer
201 or.p sp,gr0,gr30 203 or.p sp,gr0,gr30
@@ -231,7 +233,10 @@ __entry_kernel_external_interrupt_reentry:
231 stdi gr24,@(gr28,#REG_GR(24)) 233 stdi gr24,@(gr28,#REG_GR(24))
232 stdi gr26,@(gr28,#REG_GR(26)) 234 stdi gr26,@(gr28,#REG_GR(26))
233 sti gr29,@(gr28,#REG_GR(29)) 235 sti gr29,@(gr28,#REG_GR(29))
234 stdi gr30,@(gr28,#REG_GR(30)) 236 stdi.p gr30,@(gr28,#REG_GR(30))
237
238 # note virtual interrupts will be fully enabled upon return
239 subicc gr0,#1,gr0,icc2 /* clear Z, set C */
235 240
236 movsg tbr ,gr20 241 movsg tbr ,gr20
237 movsg psr ,gr22 242 movsg psr ,gr22
@@ -267,7 +272,10 @@ __entry_kernel_external_interrupt_reentry:
267 272
268 movsg gner0,gr4 273 movsg gner0,gr4
269 movsg gner1,gr5 274 movsg gner1,gr5
270 stdi gr4,@(gr28,#REG_GNER0) 275 stdi.p gr4,@(gr28,#REG_GNER0)
276
277 # interrupts start off fully disabled in the interrupt handler
278 subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
271 279
272 # set the return address 280 # set the return address
273 sethi.p %hi(__entry_return_from_kernel_interrupt),gr4 281 sethi.p %hi(__entry_return_from_kernel_interrupt),gr4
@@ -291,6 +299,45 @@ __entry_kernel_external_interrupt_reentry:
291 299
292 .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt 300 .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
293 301
302###############################################################################
303#
304# deal with interrupts that were actually virtually disabled
305# - we need to really disable them, flag the fact and return immediately
306# - if you change this, you must alter break.S also
307#
308###############################################################################
309 .balign L1_CACHE_BYTES
310 .globl __entry_kernel_external_interrupt_virtually_disabled
311 .type __entry_kernel_external_interrupt_virtually_disabled,@function
312__entry_kernel_external_interrupt_virtually_disabled:
313 movsg psr,gr30
314 andi gr30,#~PSR_PIL,gr30
315 ori gr30,#PSR_PIL_14,gr30 ; debugging interrupts only
316 movgs gr30,psr
317 subcc gr0,gr0,gr0,icc2 ; leave Z set, clear C
318 rett #0
319
320 .size __entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
321
322###############################################################################
323#
324# deal with re-enablement of interrupts that were pending when virtually re-enabled
325# - set ICC2.C, re-enable the real interrupts and return
326# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
327# - if you change this, you must alter break.S also
328#
329###############################################################################
330 .balign L1_CACHE_BYTES
331 .globl __entry_kernel_external_interrupt_virtual_reenable
332 .type __entry_kernel_external_interrupt_virtual_reenable,@function
333__entry_kernel_external_interrupt_virtual_reenable:
334 movsg psr,gr30
335 andi gr30,#~PSR_PIL,gr30 ; re-enable interrupts
336 movgs gr30,psr
337 subicc gr0,#1,gr0,icc2 ; clear Z, set C
338 rett #0
339
340 .size __entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
294 341
295############################################################################### 342###############################################################################
296# 343#
@@ -335,6 +382,7 @@ __entry_uspace_softprog_interrupt_reentry:
335 382
336 sethi.p %hi(__entry_return_from_user_exception),gr23 383 sethi.p %hi(__entry_return_from_user_exception),gr23
337 setlo %lo(__entry_return_from_user_exception),gr23 384 setlo %lo(__entry_return_from_user_exception),gr23
385
338 bra __entry_common 386 bra __entry_common
339 387
340 .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt 388 .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
@@ -495,7 +543,10 @@ __entry_common:
495 543
496 movsg gner0,gr4 544 movsg gner0,gr4
497 movsg gner1,gr5 545 movsg gner1,gr5
498 stdi gr4,@(gr28,#REG_GNER0) 546 stdi.p gr4,@(gr28,#REG_GNER0)
547
548 # set up virtual interrupt disablement
549 subicc gr0,#1,gr0,icc2 /* clear Z flag, set C flag */
499 550
500 # set up kernel global registers 551 # set up kernel global registers
501 sethi.p %hi(__kernel_current_task),gr5 552 sethi.p %hi(__kernel_current_task),gr5
diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S
index c73b4fe9f6ca..29a5265489b7 100644
--- a/arch/frv/kernel/head.S
+++ b/arch/frv/kernel/head.S
@@ -513,6 +513,9 @@ __head_mmu_enabled:
513 movgs gr0,ccr 513 movgs gr0,ccr
514 movgs gr0,cccr 514 movgs gr0,cccr
515 515
516 # initialise the virtual interrupt handling
517 subcc gr0,gr0,gr0,icc2 /* set Z, clear C */
518
516#ifdef CONFIG_MMU 519#ifdef CONFIG_MMU
517 movgs gr3,scr2 520 movgs gr3,scr2
518 movgs gr3,scr3 521 movgs gr3,scr3
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 59580c59c62c..27ab4c30aac6 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -287,18 +287,11 @@ asmlinkage void do_IRQ(void)
287 struct irq_source *source; 287 struct irq_source *source;
288 int level, cpu; 288 int level, cpu;
289 289
290 irq_enter();
291
290 level = (__frame->tbr >> 4) & 0xf; 292 level = (__frame->tbr >> 4) & 0xf;
291 cpu = smp_processor_id(); 293 cpu = smp_processor_id();
292 294
293#if 0
294 {
295 static u32 irqcount;
296 *(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level);
297 *(volatile u16 *) 0xffc00100 = (u16) ~0x9999;
298 mb();
299 }
300#endif
301
302 if ((unsigned long) __frame - (unsigned long) (current + 1) < 512) 295 if ((unsigned long) __frame - (unsigned long) (current + 1) < 512)
303 BUG(); 296 BUG();
304 297
@@ -308,40 +301,12 @@ asmlinkage void do_IRQ(void)
308 301
309 kstat_this_cpu.irqs[level]++; 302 kstat_this_cpu.irqs[level]++;
310 303
311 irq_enter();
312
313 for (source = frv_irq_levels[level].sources; source; source = source->next) 304 for (source = frv_irq_levels[level].sources; source; source = source->next)
314 source->doirq(source); 305 source->doirq(source);
315 306
316 irq_exit();
317
318 __clr_MASK(level); 307 __clr_MASK(level);
319 308
320 /* only process softirqs if we didn't interrupt another interrupt handler */ 309 irq_exit();
321 if ((__frame->psr & PSR_PIL) == PSR_PIL_0)
322 if (local_softirq_pending())
323 do_softirq();
324
325#ifdef CONFIG_PREEMPT
326 local_irq_disable();
327 while (--current->preempt_count == 0) {
328 if (!(__frame->psr & PSR_S) ||
329 current->need_resched == 0 ||
330 in_interrupt())
331 break;
332 current->preempt_count++;
333 local_irq_enable();
334 preempt_schedule();
335 local_irq_disable();
336 }
337#endif
338
339#if 0
340 {
341 *(volatile u16 *) 0xffc00100 = (u16) ~0x6666;
342 mb();
343 }
344#endif
345 310
346} /* end do_IRQ() */ 311} /* end do_IRQ() */
347 312
diff --git a/include/asm-frv/spr-regs.h b/include/asm-frv/spr-regs.h
index ef472f058d9c..c2a541ef828d 100644
--- a/include/asm-frv/spr-regs.h
+++ b/include/asm-frv/spr-regs.h
@@ -98,6 +98,7 @@
98#define TBR_TT_TRAP0 (0x80 << 4) 98#define TBR_TT_TRAP0 (0x80 << 4)
99#define TBR_TT_TRAP1 (0x81 << 4) 99#define TBR_TT_TRAP1 (0x81 << 4)
100#define TBR_TT_TRAP2 (0x82 << 4) 100#define TBR_TT_TRAP2 (0x82 << 4)
101#define TBR_TT_TRAP3 (0x83 << 4)
101#define TBR_TT_TRAP126 (0xfe << 4) 102#define TBR_TT_TRAP126 (0xfe << 4)
102#define TBR_TT_BREAK (0xff << 4) 103#define TBR_TT_BREAK (0xff << 4)
103 104
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index d2aea70a5f64..f72ff0c4dc0b 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -40,8 +40,84 @@ do { \
40 40
41/* 41/*
42 * interrupt flag manipulation 42 * interrupt flag manipulation
43 * - use virtual interrupt management since touching the PSR is slow
44 * - ICC2.Z: T if interrupts virtually disabled
45 * - ICC2.C: F if interrupts really disabled
46 * - if Z==1 upon interrupt:
47 * - C is set to 0
48 * - interrupts are really disabled
49 * - entry.S returns immediately
50 * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts
51 * - if taken, the trap:
52 * - sets ICC2.C
53 * - enables interrupts
43 */ 54 */
44#define local_irq_disable() \ 55#define local_irq_disable() \
56do { \
57 /* set Z flag, but don't change the C flag */ \
58 asm volatile(" andcc gr0,gr0,gr0,icc2 \n" \
59 : \
60 : \
61 : "memory", "icc2" \
62 ); \
63} while(0)
64
65#define local_irq_enable() \
66do { \
67 /* clear Z flag and then test the C flag */ \
68 asm volatile(" oricc gr0,#1,gr0,icc2 \n" \
69 " tihi icc2,gr0,#2 \n" \
70 : \
71 : \
72 : "memory", "icc2" \
73 ); \
74} while(0)
75
76#define local_save_flags(flags) \
77do { \
78 typecheck(unsigned long, flags); \
79 asm volatile("movsg ccr,%0" \
80 : "=r"(flags) \
81 : \
82 : "memory"); \
83 \
84 /* shift ICC2.Z to bit 0 */ \
85 flags >>= 26; \
86 \
87 /* make flags 1 if interrupts disabled, 0 otherwise */ \
88 flags &= 1UL; \
89} while(0)
90
91#define irqs_disabled() \
92 ({unsigned long flags; local_save_flags(flags); flags; })
93
94#define local_irq_save(flags) \
95do { \
96 typecheck(unsigned long, flags); \
97 local_save_flags(flags); \
98 local_irq_disable(); \
99} while(0)
100
101#define local_irq_restore(flags) \
102do { \
103 typecheck(unsigned long, flags); \
104 \
105 /* load the Z flag by turning 1 if disabled into 0 if disabled \
106 * and thus setting the Z flag but not the C flag */ \
107 asm volatile(" xoricc %0,#1,gr0,icc2 \n" \
108 /* then test Z=0 and C=0 */ \
109 " tihi icc2,gr0,#2 \n" \
110 : \
111 : "r"(flags) \
112 : "memory", "icc2" \
113 ); \
114 \
115} while(0)
116
117/*
118 * real interrupt flag manipulation
119 */
120#define __local_irq_disable() \
45do { \ 121do { \
46 unsigned long psr; \ 122 unsigned long psr; \
47 asm volatile(" movsg psr,%0 \n" \ 123 asm volatile(" movsg psr,%0 \n" \
@@ -53,7 +129,7 @@ do { \
53 : "memory"); \ 129 : "memory"); \
54} while(0) 130} while(0)
55 131
56#define local_irq_enable() \ 132#define __local_irq_enable() \
57do { \ 133do { \
58 unsigned long psr; \ 134 unsigned long psr; \
59 asm volatile(" movsg psr,%0 \n" \ 135 asm volatile(" movsg psr,%0 \n" \
@@ -64,7 +140,7 @@ do { \
64 : "memory"); \ 140 : "memory"); \
65} while(0) 141} while(0)
66 142
67#define local_save_flags(flags) \ 143#define __local_save_flags(flags) \
68do { \ 144do { \
69 typecheck(unsigned long, flags); \ 145 typecheck(unsigned long, flags); \
70 asm("movsg psr,%0" \ 146 asm("movsg psr,%0" \
@@ -73,7 +149,7 @@ do { \
73 : "memory"); \ 149 : "memory"); \
74} while(0) 150} while(0)
75 151
76#define local_irq_save(flags) \ 152#define __local_irq_save(flags) \
77do { \ 153do { \
78 unsigned long npsr; \ 154 unsigned long npsr; \
79 typecheck(unsigned long, flags); \ 155 typecheck(unsigned long, flags); \
@@ -86,7 +162,7 @@ do { \
86 : "memory"); \ 162 : "memory"); \
87} while(0) 163} while(0)
88 164
89#define local_irq_restore(flags) \ 165#define __local_irq_restore(flags) \
90do { \ 166do { \
91 typecheck(unsigned long, flags); \ 167 typecheck(unsigned long, flags); \
92 asm volatile(" movgs %0,psr \n" \ 168 asm volatile(" movgs %0,psr \n" \
@@ -95,7 +171,7 @@ do { \
95 : "memory"); \ 171 : "memory"); \
96} while(0) 172} while(0)
97 173
98#define irqs_disabled() \ 174#define __irqs_disabled() \
99 ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) 175 ((__get_PSR() & PSR_PIL) >= PSR_PIL_14)
100 176
101/* 177/*