path: root/arch/frv/kernel
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/frv/kernel
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/frv/kernel')
-rw-r--r--  arch/frv/kernel/Makefile          |   22
-rw-r--r--  arch/frv/kernel/break.S           |  720
-rw-r--r--  arch/frv/kernel/cmode.S           |  190
-rw-r--r--  arch/frv/kernel/debug-stub.c      |  259
-rw-r--r--  arch/frv/kernel/dma.c             |  464
-rw-r--r--  arch/frv/kernel/entry-table.S     |  295
-rw-r--r--  arch/frv/kernel/entry.S           | 1428
-rw-r--r--  arch/frv/kernel/frv_ksyms.c       |  124
-rw-r--r--  arch/frv/kernel/gdb-io.c          |  216
-rw-r--r--  arch/frv/kernel/gdb-io.h          |   55
-rw-r--r--  arch/frv/kernel/gdb-stub.c        | 2084
-rw-r--r--  arch/frv/kernel/head-mmu-fr451.S  |  374
-rw-r--r--  arch/frv/kernel/head-uc-fr401.S   |  311
-rw-r--r--  arch/frv/kernel/head-uc-fr451.S   |  174
-rw-r--r--  arch/frv/kernel/head-uc-fr555.S   |  347
-rw-r--r--  arch/frv/kernel/head.S            |  639
-rw-r--r--  arch/frv/kernel/head.inc          |   50
-rw-r--r--  arch/frv/kernel/init_task.c       |   39
-rw-r--r--  arch/frv/kernel/irq-mb93091.c     |  116
-rw-r--r--  arch/frv/kernel/irq-mb93093.c     |   99
-rw-r--r--  arch/frv/kernel/irq-mb93493.c     |  108
-rw-r--r--  arch/frv/kernel/irq-routing.c     |  291
-rw-r--r--  arch/frv/kernel/irq.c             |  764
-rw-r--r--  arch/frv/kernel/kernel_thread.S   |   77
-rw-r--r--  arch/frv/kernel/local.h           |   56
-rw-r--r--  arch/frv/kernel/pm-mb93093.c      |   66
-rw-r--r--  arch/frv/kernel/pm.c              |  432
-rw-r--r--  arch/frv/kernel/process.c         |  388
-rw-r--r--  arch/frv/kernel/ptrace.c          |  764
-rw-r--r--  arch/frv/kernel/semaphore.c       |  156
-rw-r--r--  arch/frv/kernel/setup.c           | 1194
-rw-r--r--  arch/frv/kernel/signal.c          |  588
-rw-r--r--  arch/frv/kernel/sleep.S           |  374
-rw-r--r--  arch/frv/kernel/switch_to.S       |  496
-rw-r--r--  arch/frv/kernel/sys_frv.c         |  214
-rw-r--r--  arch/frv/kernel/sysctl.c          |  206
-rw-r--r--  arch/frv/kernel/time.c            |  234
-rw-r--r--  arch/frv/kernel/traps.c           |  431
-rw-r--r--  arch/frv/kernel/uaccess.c         |   95
-rw-r--r--  arch/frv/kernel/vmlinux.lds.S     |  187
40 files changed, 15127 insertions, 0 deletions
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
new file mode 100644
index 000000000000..981c2c7dec0d
--- /dev/null
+++ b/arch/frv/kernel/Makefile
@@ -0,0 +1,22 @@
1#
2# Makefile for the linux kernel.
3#
4
5heads-y := head-uc-fr401.o head-uc-fr451.o head-uc-fr555.o
6heads-$(CONFIG_MMU) := head-mmu-fr451.o
7
8extra-y:= head.o init_task.o vmlinux.lds
9
10obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
11 process.o traps.o ptrace.o signal.o dma.o \
12 sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
13 debug-stub.o irq.o irq-routing.o sleep.o uaccess.o
14
15obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-io.o
16
17obj-$(CONFIG_MB93091_VDK) += irq-mb93091.o
18obj-$(CONFIG_MB93093_PDK) += irq-mb93093.o
19obj-$(CONFIG_FUJITSU_MB93493) += irq-mb93493.o
20obj-$(CONFIG_PM) += pm.o cmode.o
21obj-$(CONFIG_MB93093_PDK) += pm-mb93093.o
22obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/arch/frv/kernel/break.S b/arch/frv/kernel/break.S
new file mode 100644
index 000000000000..33233dc23e29
--- /dev/null
+++ b/arch/frv/kernel/break.S
@@ -0,0 +1,720 @@
1/* break.S: Break interrupt handling (kept separate from entry.S)
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/sys.h>
13#include <linux/config.h>
14#include <linux/linkage.h>
15#include <asm/setup.h>
16#include <asm/segment.h>
17#include <asm/ptrace.h>
18#include <asm/spr-regs.h>
19
20#include <asm/errno.h>
21
22#
23# the break handler has its own stack
24#
25 .section .bss.stack
26 .globl __break_user_context
27 .balign 8192
28__break_stack:
29 .space (8192 - (USER_CONTEXT_SIZE + REG__DEBUG_XTRA)) & ~7
30__break_stack_tos:
31 .space REG__DEBUG_XTRA
32__break_user_context:
33 .space USER_CONTEXT_SIZE
34
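The .bss.stack block above is a single 8KB area: the break handler's private stack fills the bottom and grows down towards __break_stack_tos, followed by REG__DEBUG_XTRA bytes of extra debug state and then the USER_CONTEXT_SIZE register save area at __break_user_context. A minimal C restatement of that size arithmetic, purely illustrative (the real constants come from the offsets headers used by this file):

        #define BREAK_BLOCK_SIZE        8192
        /* stack portion of the block; the & ~7 keeps its size a multiple of 8 */
        #define BREAK_STACK_SIZE \
                ((BREAK_BLOCK_SIZE - (USER_CONTEXT_SIZE + REG__DEBUG_XTRA)) & ~7)
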
35#
36# miscellaneous variables
37#
38 .section .bss
39#ifdef CONFIG_MMU
40 .globl __break_tlb_miss_real_return_info
41__break_tlb_miss_real_return_info:
42 .balign 8
43 .space 2*4 /* saved PCSR, PSR for TLB-miss handler fixup */
44#endif
45
46__break_trace_through_exceptions:
47 .space 4
48
49#define CS2_ECS1 0xe1200000
50#define CS2_USERLED 0x4
51
52.macro LEDS val,reg
53# sethi.p %hi(CS2_ECS1+CS2_USERLED),gr30
54# setlo %lo(CS2_ECS1+CS2_USERLED),gr30
55# setlos #~\val,\reg
56# st \reg,@(gr30,gr0)
57# setlos #0x5555,\reg
58# sethi.p %hi(0xffc00100),gr30
59# setlo %lo(0xffc00100),gr30
60# sth \reg,@(gr30,gr0)
61# membar
62.endm
63
64###############################################################################
65#
66# entry point for Break Exceptions/Interrupts
67#
68###############################################################################
69 .text
70 .balign 4
71 .globl __entry_break
72__entry_break:
73#ifdef CONFIG_MMU
74 movgs gr31,scr3
75#endif
76 LEDS 0x1001,gr31
77
78 sethi.p %hi(__break_user_context),gr31
79 setlo %lo(__break_user_context),gr31
80
81 stdi gr2,@(gr31,#REG_GR(2))
82 movsg ccr,gr3
83 sti gr3,@(gr31,#REG_CCR)
84
85 # catch the return from a TLB-miss handler that had single-step disabled
86 # traps will be enabled, so we have to do this now
87#ifdef CONFIG_MMU
88 movsg bpcsr,gr3
89 sethi.p %hi(__break_tlb_miss_return_breaks_here),gr2
90 setlo %lo(__break_tlb_miss_return_breaks_here),gr2
91 subcc gr2,gr3,gr0,icc0
92 beq icc0,#2,__break_return_singlestep_tlbmiss
93#endif
94
95 # determine whether we have stepped through into an exception
96 # - we need to take special action to suspend h/w single stepping if we've done
97 # that, so that the gdbstub doesn't get bogged down endlessly stepping through
98 # external interrupt handling
99 movsg bpsr,gr3
100 andicc gr3,#BPSR_BET,gr0,icc0
101 bne icc0,#2,__break_maybe_userspace /* jump if PSR.ET was 1 */
102
103 LEDS 0x1003,gr2
104
105 movsg brr,gr3
106 andicc gr3,#BRR_ST,gr0,icc0
107 andicc.p gr3,#BRR_SB,gr0,icc1
108 bne icc0,#2,__break_step /* jump if single-step caused break */
109 beq icc1,#2,__break_continue /* jump if BREAK didn't cause break */
110
111 LEDS 0x1007,gr2
112
113 # handle special breaks
114 movsg bpcsr,gr3
115
116 sethi.p %hi(__entry_return_singlestep_breaks_here),gr2
117 setlo %lo(__entry_return_singlestep_breaks_here),gr2
118 subcc gr2,gr3,gr0,icc0
119 beq icc0,#2,__break_return_singlestep
120
121 bra __break_continue
122
123
124###############################################################################
125#
126# handle BREAK instruction in kernel-mode exception epilogue
127#
128###############################################################################
129__break_return_singlestep:
130 LEDS 0x100f,gr2
131
132 # special break insn requests single-stepping to be turned back on
133 # HERE RETT
134 # PSR.ET 0 0
135 # PSR.PS old PSR.S ?
136 # PSR.S 1 1
137 # BPSR.ET 0 1 (can't have caused orig excep otherwise)
138 # BPSR.BS 1 old PSR.S
139 movsg dcr,gr2
140 sethi.p %hi(DCR_SE),gr3
141 setlo %lo(DCR_SE),gr3
142 or gr2,gr3,gr2
143 movgs gr2,dcr
144
145 movsg psr,gr2
146 andi gr2,#PSR_PS,gr2
147 slli gr2,#11,gr2 /* PSR.PS -> BPSR.BS */
148 ori gr2,#BPSR_BET,gr2 /* 1 -> BPSR.BET */
149 movgs gr2,bpsr
150
151 # return to the invoker of the original kernel exception
152 movsg pcsr,gr2
153 movgs gr2,bpcsr
154
155 LEDS 0x101f,gr2
156
157 ldi @(gr31,#REG_CCR),gr3
158 movgs gr3,ccr
159 lddi.p @(gr31,#REG_GR(2)),gr2
160 xor gr31,gr31,gr31
161 movgs gr0,brr
162#ifdef CONFIG_MMU
163 movsg scr3,gr31
164#endif
165 rett #1
166
167###############################################################################
168#
169# handle BREAK instruction in TLB-miss handler return path
170#
171###############################################################################
172#ifdef CONFIG_MMU
173__break_return_singlestep_tlbmiss:
174 LEDS 0x1100,gr2
175
176 sethi.p %hi(__break_tlb_miss_real_return_info),gr3
177 setlo %lo(__break_tlb_miss_real_return_info),gr3
178 lddi @(gr3,#0),gr2
179 movgs gr2,pcsr
180 movgs gr3,psr
181
182 bra __break_return_singlestep
183#endif
184
185
186###############################################################################
187#
188# handle single stepping into an exception prologue from kernel mode
189# - we try and catch it whilst it is still in the main vector table
190# - if we catch it there, we have to jump to the fixup handler
191# - there is a fixup table that has a pointer for every 16b slot in the trap
192# table
193#
194###############################################################################
195__break_step:
196 LEDS 0x2003,gr2
197
198 # external interrupts seem to escape from the trap table before single
199 # step catches up with them
200 movsg bpcsr,gr2
201 sethi.p %hi(__entry_kernel_external_interrupt),gr3
202 setlo %lo(__entry_kernel_external_interrupt),gr3
203 subcc gr2,gr3,gr0,icc0
204 beq icc0,#2,__break_step_kernel_external_interrupt
205 sethi.p %hi(__entry_uspace_external_interrupt),gr3
206 setlo %lo(__entry_uspace_external_interrupt),gr3
207 subcc gr2,gr3,gr0,icc0
208 beq icc0,#2,__break_step_uspace_external_interrupt
209
210 LEDS 0x2007,gr2
211
212 # the two main vector tables are adjacent on one 8Kb slab
213 movsg bpcsr,gr2
214 setlos #0xffffe000,gr3
215 and gr2,gr3,gr2
216 sethi.p %hi(__trap_tables),gr3
217 setlo %lo(__trap_tables),gr3
218 subcc gr2,gr3,gr0,icc0
219 bne icc0,#2,__break_continue
220
221 LEDS 0x200f,gr2
222
223 # skip workaround if so requested by GDB
224 sethi.p %hi(__break_trace_through_exceptions),gr3
225 setlo %lo(__break_trace_through_exceptions),gr3
226 ld @(gr3,gr0),gr3
227 subcc gr3,gr0,gr0,icc0
228 bne icc0,#0,__break_continue
229
230 LEDS 0x201f,gr2
231
232 # access the fixup table - there's a 1:1 mapping between the slots in the trap tables and
233 # the slots in the trap fixup tables allowing us to simply divide the offset into the
234 # former by 4 to access the latter
235 sethi.p %hi(__trap_tables),gr3
236 setlo %lo(__trap_tables),gr3
237 movsg bpcsr,gr2
238 sub gr2,gr3,gr2
239 srli.p gr2,#2,gr2
240
241 sethi %hi(__trap_fixup_tables),gr3
242 setlo.p %lo(__trap_fixup_tables),gr3
243 andi gr2,#~3,gr2
244 ld @(gr2,gr3),gr2
245 jmpil @(gr2,#0)
246
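The lookup above is easier to follow in C. This is only an illustrative restatement (the helper name is invented): each slot in the trap tables is 16 bytes, each entry in the fixup tables is a 4-byte pointer, so dividing the byte offset by 4 and rounding it down to a 4-byte boundary converts one into the other.

        typedef void (*break_fixup_t)(void);

        static break_fixup_t fixup_handler(unsigned long bpcsr,
                                           unsigned long trap_tables,
                                           unsigned long trap_fixup_tables)
        {
                /* 16-byte trap slot -> 4-byte fixup entry */
                unsigned long idx = ((bpcsr - trap_tables) >> 2) & ~3UL;

                return *(break_fixup_t *)(trap_fixup_tables + idx);
        }
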
247# step through an internal exception from kernel mode
248 .globl __break_step_kernel_softprog_interrupt
249__break_step_kernel_softprog_interrupt:
250 sethi.p %hi(__entry_kernel_softprog_interrupt_reentry),gr3
251 setlo %lo(__entry_kernel_softprog_interrupt_reentry),gr3
252 bra __break_return_as_kernel_prologue
253
254# step through an external interrupt from kernel mode
255 .globl __break_step_kernel_external_interrupt
256__break_step_kernel_external_interrupt:
257 sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3
258 setlo %lo(__entry_kernel_external_interrupt_reentry),gr3
259
260__break_return_as_kernel_prologue:
261 LEDS 0x203f,gr2
262
263 movgs gr3,bpcsr
264
265 # do the bit we had to skip
266#ifdef CONFIG_MMU
267 movsg ear0,gr2 /* EAR0 can get clobbered by gdb-stub (ICI/ICEI) */
268 movgs gr2,scr2
269#endif
270
271 or.p sp,gr0,gr2 /* set up the stack pointer */
272 subi sp,#REG__END,sp
273 sti.p gr2,@(sp,#REG_SP)
274
275 setlos #REG__STATUS_STEP,gr2
276 sti gr2,@(sp,#REG__STATUS) /* record single step status */
277
278 # cancel single-stepping mode
279 movsg dcr,gr2
280 sethi.p %hi(~DCR_SE),gr3
281 setlo %lo(~DCR_SE),gr3
282 and gr2,gr3,gr2
283 movgs gr2,dcr
284
285 LEDS 0x207f,gr2
286
287 ldi @(gr31,#REG_CCR),gr3
288 movgs gr3,ccr
289 lddi.p @(gr31,#REG_GR(2)),gr2
290 xor gr31,gr31,gr31
291 movgs gr0,brr
292#ifdef CONFIG_MMU
293 movsg scr3,gr31
294#endif
295 rett #1
296
297# step through an internal exception from uspace mode
298 .globl __break_step_uspace_softprog_interrupt
299__break_step_uspace_softprog_interrupt:
300 sethi.p %hi(__entry_uspace_softprog_interrupt_reentry),gr3
301 setlo %lo(__entry_uspace_softprog_interrupt_reentry),gr3
302 bra __break_return_as_uspace_prologue
303
304# step through an external interrupt from uspace mode
305 .globl __break_step_uspace_external_interrupt
306__break_step_uspace_external_interrupt:
307 sethi.p %hi(__entry_uspace_external_interrupt_reentry),gr3
308 setlo %lo(__entry_uspace_external_interrupt_reentry),gr3
309
310__break_return_as_uspace_prologue:
311 LEDS 0x20ff,gr2
312
313 movgs gr3,bpcsr
314
315 # do the bit we had to skip
316 sethi.p %hi(__kernel_frame0_ptr),gr28
317 setlo %lo(__kernel_frame0_ptr),gr28
318 ldi.p @(gr28,#0),gr28
319
320 setlos #REG__STATUS_STEP,gr2
321 sti gr2,@(gr28,#REG__STATUS) /* record single step status */
322
323 # cancel single-stepping mode
324 movsg dcr,gr2
325 sethi.p %hi(~DCR_SE),gr3
326 setlo %lo(~DCR_SE),gr3
327 and gr2,gr3,gr2
328 movgs gr2,dcr
329
330 LEDS 0x20fe,gr2
331
332 ldi @(gr31,#REG_CCR),gr3
333 movgs gr3,ccr
334 lddi.p @(gr31,#REG_GR(2)),gr2
335 xor gr31,gr31,gr31
336 movgs gr0,brr
337#ifdef CONFIG_MMU
338 movsg scr3,gr31
339#endif
340 rett #1
341
342#ifdef CONFIG_MMU
343# step through an ITLB-miss handler from user mode
344 .globl __break_user_insn_tlb_miss
345__break_user_insn_tlb_miss:
346 # we'll want to try the trap stub again
347 sethi.p %hi(__trap_user_insn_tlb_miss),gr2
348 setlo %lo(__trap_user_insn_tlb_miss),gr2
349 movgs gr2,bpcsr
350
351__break_tlb_miss_common:
352 LEDS 0x2101,gr2
353
354 # cancel single-stepping mode
355 movsg dcr,gr2
356 sethi.p %hi(~DCR_SE),gr3
357 setlo %lo(~DCR_SE),gr3
358 and gr2,gr3,gr2
359 movgs gr2,dcr
360
361 # we'll swap the real return address for one with a BREAK insn so that we can re-enable
362 # single stepping on return
363 movsg pcsr,gr2
364 sethi.p %hi(__break_tlb_miss_real_return_info),gr3
365 setlo %lo(__break_tlb_miss_real_return_info),gr3
366 sti gr2,@(gr3,#0)
367
368 sethi.p %hi(__break_tlb_miss_return_break),gr2
369 setlo %lo(__break_tlb_miss_return_break),gr2
370 movgs gr2,pcsr
371
372 # we also have to fudge PSR because the return BREAK is in kernel space and we want
373 # to get a BREAK fault not an access violation should the return be to userspace
374 movsg psr,gr2
375 sti.p gr2,@(gr3,#4)
376 ori gr2,#PSR_PS,gr2
377 movgs gr2,psr
378
379 LEDS 0x2102,gr2
380
381 ldi @(gr31,#REG_CCR),gr3
382 movgs gr3,ccr
383 lddi @(gr31,#REG_GR(2)),gr2
384 movsg scr3,gr31
385 movgs gr0,brr
386 rett #1
387
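In C terms, the common TLB-miss block above does roughly the following. The SPR accessors are hypothetical stand-ins for the movsg/movgs instructions; this is a sketch, not kernel code:

        #include <asm/spr-regs.h>

        extern void __break_tlb_miss_return_break(void);                /* the BREAK below */
        extern unsigned long read_spr_pcsr(void), read_spr_psr(void);   /* hypothetical    */
        extern void write_spr_pcsr(unsigned long), write_spr_psr(unsigned long);

        struct tlb_miss_return_info { unsigned long pcsr, psr; };

        static void swap_tlb_miss_return(struct tlb_miss_return_info *info)
        {
                info->pcsr = read_spr_pcsr();           /* park the real return PC */
                write_spr_pcsr((unsigned long) __break_tlb_miss_return_break);
                info->psr = read_spr_psr();             /* park the real PSR */
                /* force PSR.PS so the rett stays in kernel mode and the
                 * kernel-space BREAK traps instead of faulting */
                write_spr_psr(info->psr | PSR_PS);
        }
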
388# step through a DTLB-miss handler from user mode
389 .globl __break_user_data_tlb_miss
390__break_user_data_tlb_miss:
391 # we'll want to try the trap stub again
392 sethi.p %hi(__trap_user_data_tlb_miss),gr2
393 setlo %lo(__trap_user_data_tlb_miss),gr2
394 movgs gr2,bpcsr
395 bra __break_tlb_miss_common
396
397# step through an ITLB-miss handler from kernel mode
398 .globl __break_kernel_insn_tlb_miss
399__break_kernel_insn_tlb_miss:
400 # we'll want to try the trap stub again
401 sethi.p %hi(__trap_kernel_insn_tlb_miss),gr2
402 setlo %lo(__trap_kernel_insn_tlb_miss),gr2
403 movgs gr2,bpcsr
404 bra __break_tlb_miss_common
405
406# step through a DTLB-miss handler from kernel mode
407 .globl __break_kernel_data_tlb_miss
408__break_kernel_data_tlb_miss:
409 # we'll want to try the trap stub again
410 sethi.p %hi(__trap_kernel_data_tlb_miss),gr2
411 setlo %lo(__trap_kernel_data_tlb_miss),gr2
412 movgs gr2,bpcsr
413 bra __break_tlb_miss_common
414#endif
415
416###############################################################################
417#
418# handle debug events originating with userspace
419#
420###############################################################################
421__break_maybe_userspace:
422 LEDS 0x3003,gr2
423
424 setlos #BPSR_BS,gr2
425 andcc gr3,gr2,gr0,icc0
426 bne icc0,#0,__break_continue /* skip if PSR.S was 1 */
427
428 movsg brr,gr2
429 andicc gr2,#BRR_ST|BRR_SB,gr0,icc0
430 beq icc0,#0,__break_continue /* jump if not BREAK or single-step */
431
432 LEDS 0x3007,gr2
433
434 # do the first part of the exception prologue here
435 sethi.p %hi(__kernel_frame0_ptr),gr28
436 setlo %lo(__kernel_frame0_ptr),gr28
437 ldi @(gr28,#0),gr28
438 andi gr28,#~7,gr28
439
440 # set up the kernel stack pointer
441 sti sp ,@(gr28,#REG_SP)
442 ori gr28,0,sp
443 sti gr0 ,@(gr28,#REG_GR(28))
444
445 stdi gr20,@(gr28,#REG_GR(20))
446 stdi gr22,@(gr28,#REG_GR(22))
447
448 movsg tbr,gr20
449 movsg bpcsr,gr21
450 movsg psr,gr22
451
452 # determine the exception type and cancel single-stepping mode
453 or gr0,gr0,gr23
454
455 movsg dcr,gr2
456 sethi.p %hi(DCR_SE),gr3
457 setlo %lo(DCR_SE),gr3
458 andcc gr2,gr3,gr0,icc0
459 beq icc0,#0,__break_no_user_sstep /* must have been a BREAK insn */
460
461 not gr3,gr3
462 and gr2,gr3,gr2
463 movgs gr2,dcr
464 ori gr23,#REG__STATUS_STEP,gr23
465
466__break_no_user_sstep:
467 LEDS 0x300f,gr2
468
469 movsg brr,gr2
470 andi gr2,#BRR_ST|BRR_SB,gr2
471 slli gr2,#1,gr2
472 or gr23,gr2,gr23
473 sti.p gr23,@(gr28,#REG__STATUS) /* record single step status */
474
475 # adjust the value acquired from TBR - this indicates the exception
476 setlos #~TBR_TT,gr2
477 and.p gr20,gr2,gr20
478 setlos #TBR_TT_BREAK,gr2
479 or.p gr20,gr2,gr20
480
481 # fudge PSR.PS and BPSR.BS to return to kernel mode through the trap
482 # table as trap 126
483 andi gr22,#~PSR_PS,gr22 /* PSR.PS should be 0 */
484 movgs gr22,psr
485
486 setlos #BPSR_BS,gr2 /* BPSR.BS should be 1 and BPSR.BET 0 */
487 movgs gr2,bpsr
488
489 # return through remainder of the exception prologue
490 # - need to load gr23 with return handler address
491 sethi.p %hi(__entry_return_from_user_exception),gr23
492 setlo %lo(__entry_return_from_user_exception),gr23
493 sethi.p %hi(__entry_common),gr3
494 setlo %lo(__entry_common),gr3
495 movgs gr3,bpcsr
496
497 LEDS 0x301f,gr2
498
499 ldi @(gr31,#REG_CCR),gr3
500 movgs gr3,ccr
501 lddi.p @(gr31,#REG_GR(2)),gr2
502 xor gr31,gr31,gr31
503 movgs gr0,brr
504#ifdef CONFIG_MMU
505 movsg scr3,gr31
506#endif
507 rett #1
508
509###############################################################################
510#
511# resume normal debug-mode entry
512#
513###############################################################################
514__break_continue:
515 LEDS 0x4003,gr2
516
517 # set up the kernel stack pointer
518 sti sp,@(gr31,#REG_SP)
519
520 sethi.p %hi(__break_stack_tos),sp
521 setlo %lo(__break_stack_tos),sp
522
523 # finish building the exception frame
524 stdi gr4 ,@(gr31,#REG_GR(4))
525 stdi gr6 ,@(gr31,#REG_GR(6))
526 stdi gr8 ,@(gr31,#REG_GR(8))
527 stdi gr10,@(gr31,#REG_GR(10))
528 stdi gr12,@(gr31,#REG_GR(12))
529 stdi gr14,@(gr31,#REG_GR(14))
530 stdi gr16,@(gr31,#REG_GR(16))
531 stdi gr18,@(gr31,#REG_GR(18))
532 stdi gr20,@(gr31,#REG_GR(20))
533 stdi gr22,@(gr31,#REG_GR(22))
534 stdi gr24,@(gr31,#REG_GR(24))
535 stdi gr26,@(gr31,#REG_GR(26))
536 sti gr0 ,@(gr31,#REG_GR(28)) /* NULL frame pointer */
537 sti gr29,@(gr31,#REG_GR(29))
538 sti gr30,@(gr31,#REG_GR(30))
539 sti gr8 ,@(gr31,#REG_ORIG_GR8)
540
541#ifdef CONFIG_MMU
542 movsg scr3,gr19
543 sti gr19,@(gr31,#REG_GR(31))
544#endif
545
546 movsg bpsr ,gr19
547 movsg tbr ,gr20
548 movsg bpcsr,gr21
549 movsg psr ,gr22
550 movsg isr ,gr23
551 movsg cccr ,gr25
552 movsg lr ,gr26
553 movsg lcr ,gr27
554
555 andi.p gr22,#~(PSR_S|PSR_ET),gr5 /* rebuild PSR */
556 andi gr19,#PSR_ET,gr4
557 or.p gr4,gr5,gr5
558 srli gr19,#10,gr4
559 andi gr4,#PSR_S,gr4
560 or.p gr4,gr5,gr5
561
562 setlos #-1,gr6
563 sti gr20,@(gr31,#REG_TBR)
564 sti gr21,@(gr31,#REG_PC)
565 sti gr5 ,@(gr31,#REG_PSR)
566 sti gr23,@(gr31,#REG_ISR)
567 sti gr25,@(gr31,#REG_CCCR)
568 stdi gr26,@(gr31,#REG_LR)
569 sti gr6 ,@(gr31,#REG_SYSCALLNO)
570
571 # store CPU-specific regs
572 movsg iacc0h,gr4
573 movsg iacc0l,gr5
574 stdi gr4,@(gr31,#REG_IACC0)
575
576 movsg gner0,gr4
577 movsg gner1,gr5
578 stdi gr4,@(gr31,#REG_GNER0)
579
580 # build the debug register frame
581 movsg brr,gr4
582 movgs gr0,brr
583 movsg nmar,gr5
584 movsg dcr,gr6
585
586 stdi gr4 ,@(gr31,#REG_BRR)
587 sti gr19,@(gr31,#REG_BPSR)
588 sti.p gr6 ,@(gr31,#REG_DCR)
589
590 # trap exceptions during break handling and disable h/w breakpoints/watchpoints
591 sethi %hi(DCR_EBE),gr5
592 setlo.p %lo(DCR_EBE),gr5
593 sethi %hi(__entry_breaktrap_table),gr4
594 setlo %lo(__entry_breaktrap_table),gr4
595 movgs gr5,dcr
596 movgs gr4,tbr
597
598 # set up kernel global registers
599 sethi.p %hi(__kernel_current_task),gr5
600 setlo %lo(__kernel_current_task),gr5
601 ld @(gr5,gr0),gr29
602 ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
603
604 sethi %hi(_gp),gr16
605 setlo.p %lo(_gp),gr16
606
607 # make sure we (the kernel) get div-zero and misalignment exceptions
608 setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
609 movgs gr5,isr
610
611 # enter the GDB stub
612 LEDS 0x4007,gr2
613
614 or.p gr0,gr0,fp
615 call debug_stub
616
617 LEDS 0x403f,gr2
618
619 # return from break
620 lddi @(gr31,#REG_IACC0),gr4
621 movgs gr4,iacc0h
622 movgs gr5,iacc0l
623
624 lddi @(gr31,#REG_GNER0),gr4
625 movgs gr4,gner0
626 movgs gr5,gner1
627
628 lddi @(gr31,#REG_LR) ,gr26
629 lddi @(gr31,#REG_CCR) ,gr24
630 lddi @(gr31,#REG_PSR) ,gr22
631 ldi @(gr31,#REG_PC) ,gr21
632 ldi @(gr31,#REG_TBR) ,gr20
633 ldi.p @(gr31,#REG_DCR) ,gr6
634
635 andi gr22,#PSR_S,gr19 /* rebuild BPSR */
636 andi.p gr22,#PSR_ET,gr5
637 slli gr19,#10,gr19
638 or gr5,gr19,gr19
639
640 movgs gr6 ,dcr
641 movgs gr19,bpsr
642 movgs gr20,tbr
643 movgs gr21,bpcsr
644 movgs gr23,isr
645 movgs gr24,ccr
646 movgs gr25,cccr
647 movgs gr26,lr
648 movgs gr27,lcr
649
650 LEDS 0x407f,gr2
651
652#ifdef CONFIG_MMU
653 ldi @(gr31,#REG_GR(31)),gr2
654 movgs gr2,scr3
655#endif
656
657 ldi @(gr31,#REG_GR(30)),gr30
658 ldi @(gr31,#REG_GR(29)),gr29
659 lddi @(gr31,#REG_GR(26)),gr26
660 lddi @(gr31,#REG_GR(24)),gr24
661 lddi @(gr31,#REG_GR(22)),gr22
662 lddi @(gr31,#REG_GR(20)),gr20
663 lddi @(gr31,#REG_GR(18)),gr18
664 lddi @(gr31,#REG_GR(16)),gr16
665 lddi @(gr31,#REG_GR(14)),gr14
666 lddi @(gr31,#REG_GR(12)),gr12
667 lddi @(gr31,#REG_GR(10)),gr10
668 lddi @(gr31,#REG_GR(8)) ,gr8
669 lddi @(gr31,#REG_GR(6)) ,gr6
670 lddi @(gr31,#REG_GR(4)) ,gr4
671 lddi @(gr31,#REG_GR(2)) ,gr2
672 ldi.p @(gr31,#REG_SP) ,sp
673
674 xor gr31,gr31,gr31
675 movgs gr0,brr
676#ifdef CONFIG_MMU
677 movsg scr3,gr31
678#endif
679 rett #1
680
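The PSR juggling in __break_continue above, and its inverse on the return path, can be restated in C; these helpers are illustrative only and simply mirror the andi/srli/slli sequences. While in debug mode the pre-break ET and S bits live in BPSR.BET and BPSR.BS (10 bits above PSR.S), so the saved PSR is rebuilt from BPSR on entry and BPSR is rebuilt from the saved PSR on the way out:

        #include <asm/spr-regs.h>

        static inline unsigned long psr_from_bpsr(unsigned long psr, unsigned long bpsr)
        {
                return (psr & ~(PSR_S | PSR_ET)) |
                       (bpsr & PSR_ET) |                /* BPSR.BET -> PSR.ET */
                       ((bpsr >> 10) & PSR_S);          /* BPSR.BS  -> PSR.S  */
        }

        static inline unsigned long bpsr_from_psr(unsigned long psr)
        {
                return ((psr & PSR_S) << 10) |          /* PSR.S  -> BPSR.BS  */
                       (psr & PSR_ET);                  /* PSR.ET -> BPSR.BET */
        }
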
681###################################################################################################
682#
683# GDB stub "system calls"
684#
685###################################################################################################
686
687#ifdef CONFIG_GDBSTUB
688 # void gdbstub_console_write(struct console *con, const char *p, unsigned n)
689 .globl gdbstub_console_write
690gdbstub_console_write:
691 break
692 bralr
693#endif
694
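gdbstub_console_write() above is just a BREAK, so the actual output is produced inside the debug stub. For illustration only, this is how such a routine could back a console; the real registration lives in gdb-stub.c elsewhere in this patch and may differ:

        #include <linux/console.h>

        extern void gdbstub_console_write(struct console *con, const char *p, unsigned n);

        static struct console gdbstub_con = {
                .name   = "gdbstub",
                .write  = gdbstub_console_write,
                .flags  = CON_PRINTBUFFER,
                .index  = -1,
        };

        /* registered early in boot with: register_console(&gdbstub_con); */
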
695 # GDB stub BUG() trap
696 # GR8 is the proposed signal number
697 .globl __debug_bug_trap
698__debug_bug_trap:
699 break
700 bralr
701
702 # transfer kernel exception to GDB for handling
703 .globl __break_hijack_kernel_event
704__break_hijack_kernel_event:
705 break
706 .globl __break_hijack_kernel_event_breaks_here
707__break_hijack_kernel_event_breaks_here:
708 nop
709
710#ifdef CONFIG_MMU
711 # handle a return from TLB-miss that requires single-step reactivation
712 .globl __break_tlb_miss_return_break
713__break_tlb_miss_return_break:
714 break
715__break_tlb_miss_return_breaks_here:
716 nop
717#endif
718
719 # guard the first .text label in the next file from confusion
720 nop
diff --git a/arch/frv/kernel/cmode.S b/arch/frv/kernel/cmode.S
new file mode 100644
index 000000000000..6591e6a37ae9
--- /dev/null
+++ b/arch/frv/kernel/cmode.S
@@ -0,0 +1,190 @@
1/* cmode.S: clock mode management
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Woodhouse (dwmw2@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/sys.h>
14#include <linux/config.h>
15#include <linux/linkage.h>
16#include <asm/setup.h>
17#include <asm/segment.h>
18#include <asm/ptrace.h>
19#include <asm/errno.h>
20#include <asm/cache.h>
21#include <asm/spr-regs.h>
22
23#define __addr_MASK 0xfeff9820 /* interrupt controller mask */
24
25#define __addr_SDRAMC 0xfe000400 /* SDRAM controller regs */
26#define SDRAMC_DSTS 0x28 /* SDRAM status */
27#define SDRAMC_DSTS_SSI 0x00000001 /* indicates that the SDRAM is in self-refresh mode */
28#define SDRAMC_DRCN 0x30 /* SDRAM refresh control */
29#define SDRAMC_DRCN_SR 0x00000001 /* transition SDRAM into self-refresh mode */
30#define __addr_CLKC 0xfeff9a00
31#define CLKC_SWCMODE 0x00000008
32#define __addr_LEDS 0xe1200004
33
34.macro li v r
35 sethi.p %hi(\v),\r
36 setlo %lo(\v),\r
37.endm
38
39 .text
40 .balign 4
41
42
43###############################################################################
44#
45# Change CMODE
46# - void frv_change_cmode(int cmode)
47#
48###############################################################################
49 .globl frv_change_cmode
50 .type frv_change_cmode,@function
51
52.macro LEDS v
53#ifdef DEBUG_CMODE
54 setlos #~\v,gr10
55 sti gr10,@(gr11,#0)
56 membar
57#endif
58.endm
59
60frv_change_cmode:
61 movsg lr,gr9
62#ifdef DEBUG_CMODE
63 li __addr_LEDS,gr11
64#endif
65 dcef @(gr0,gr0),#1
66
67 # Shift argument left by 24 bits to fit in SWCMODE register later.
68 slli gr8,#24,gr8
69
70 # (1) Set '0' in the PSR.ET bit, and prohibit interrupts.
71 movsg psr,gr14
72 andi gr14,#~PSR_ET,gr3
73 movgs gr3,psr
74
75#if 0 // Fujitsu recommends skipping this and will update the docs.
76 # (2) Set '0' to all bits of the MASK register of the interrupt
77 # controller, and mask interrupts.
78 li __addr_MASK,gr12
79 ldi @(gr12,#0),gr13
80 li 0xffff0000,gr4
81 sti gr4,@(gr12,#0)
82#endif
83
84 # (3) Stop the transfer function of DMAC. Stop all the bus masters
85 # to access SDRAM and the internal resources.
86
87 # (already done by caller)
88
89 # (4) Preload a series of following instructions to the instruction
90 # cache.
91 li #__cmode_icache_lock_start,gr3
92 li #__cmode_icache_lock_end,gr4
93
941: icpl gr3,gr0,#1
95 addi gr3,#L1_CACHE_BYTES,gr3
96 cmp gr4,gr3,icc0
97 bhi icc0,#0,1b
98
99 # Set up addresses in regs for later steps.
100 setlos SDRAMC_DRCN_SR,gr3
101 li __addr_SDRAMC,gr4
102 li __addr_CLKC,gr5
103 ldi @(gr5,#0),gr6
104 li #0x80000000,gr7
105 or gr6,gr7,gr6
106
107 bra __cmode_icache_lock_start
108
109 .balign L1_CACHE_BYTES
110__cmode_icache_lock_start:
111
112 # (5) Flush the content of all caches by the DCEF instruction.
113 dcef @(gr0,gr0),#1
114
115 # (6) Execute loading the dummy for SDRAM.
116 ldi @(gr9,#0),gr0
117
118 # (7) Set '1' to the DRCN.SR bit, and change SDRAM to the
119 # self-refresh mode. Execute the dummy load to all memory
120 # devices set to cacheable on the external bus side in parallel
121 # with this.
122 sti gr3,@(gr4,#SDRAMC_DRCN)
123
124 # (8) Execute memory barrier instruction (MEMBAR).
125 membar
126
127 # (9) Read the DSTS register repeatedly until '1' stands in the
128 # DSTS.SSI field.
1291: ldi @(gr4,#SDRAMC_DSTS),gr3
130 andicc gr3,#SDRAMC_DSTS_SSI,gr3,icc0
131 beq icc0,#0,1b
132
133 # (10) Execute memory barrier instruction (MEMBAR).
134 membar
135
136#if 1
137 # (11) Set the value of CMODE that you want to change to
138 # SWCMODE.SWCM[3:0].
139 sti gr8,@(gr5,#CLKC_SWCMODE)
140
141 # (12) Set '1' to the CLKC.SWEN bit. In that case, do not change
142 # fields other than SWEN of the CLKC register.
143 sti gr6,@(gr5,#0)
144#endif
145 # (13) Execute the instruction just after the memory barrier
146 # instruction that executes the self-loop 256 times. (Meanwhile,
147 # the CMODE switch is done.)
148 membar
149 setlos #256,gr7
1502: subicc gr7,#1,gr7,icc0
151 bne icc0,#2,2b
152
153 LEDS 0x36
154
155 # (14) Release the self-refresh of SDRAM.
156 sti gr0,@(gr4,#SDRAMC_DRCN)
157
158 # Wait for it...
1593: ldi @(gr4,#SDRAMC_DSTS),gr3
160 andicc gr3,#SDRAMC_DSTS_SSI,gr3,icc0
161 bne icc0,#2,3b
162
163#if 0
164 li 0x0100000,gr10
1654: subicc gr10,#1,gr10,icc0
166
167 bne icc0,#0,4b
168#endif
169
170__cmode_icache_lock_end:
171
172 li #__cmode_icache_lock_start,gr3
173 li #__cmode_icache_lock_end,gr4
174
1754: icul gr3
176 addi gr3,#L1_CACHE_BYTES,gr3
177 cmp gr4,gr3,icc0
178 bhi icc0,#0,4b
179
180#if 0 // Fujitsu recommends skipping this and will update the docs.
181 # (15) Release the interrupt mask setting of the MASK register of
182 # the interrupt controller if necessary.
183 sti gr13,@(gr12,#0)
184#endif
185 # (16) Set '1' in the PSR.ET bit, and permit interrupts.
186 movgs gr14,psr
187
188 bralr
189
190 .size frv_change_cmode, .-frv_change_cmode
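Step (3) above is left to the caller, and dma.c (later in this patch) provides frv_dma_pause_all()/frv_dma_resume_all() for exactly that. A hypothetical caller-side sequence, with the prototype and ordering assumed here rather than taken from this patch:

        #include <asm/dma.h>

        extern void frv_change_cmode(int cmode);        /* the asm routine above (assumed prototype) */

        static void switch_cmode(int cmode)             /* call with interrupts disabled */
        {
                frv_dma_pause_all();            /* quiesce bus masters; takes the channel lock */
                frv_change_cmode(cmode);        /* SDRAM self-refresh + CLKC switch, as above  */
                frv_dma_resume_all();           /* restart paused channels; drops the lock     */
        }
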
diff --git a/arch/frv/kernel/debug-stub.c b/arch/frv/kernel/debug-stub.c
new file mode 100644
index 000000000000..4761cc4b4a90
--- /dev/null
+++ b/arch/frv/kernel/debug-stub.c
@@ -0,0 +1,259 @@
1/* debug-stub.c: debug-mode stub
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/signal.h>
15#include <linux/sched.h>
16#include <linux/init.h>
17#include <linux/serial_reg.h>
18
19#include <asm/system.h>
20#include <asm/serial-regs.h>
21#include <asm/timer-regs.h>
22#include <asm/irc-regs.h>
23#include <asm/gdb-stub.h>
24#include "gdb-io.h"
25
26/* CPU board CON5 */
27#define __UART0(X) (*(volatile uint8_t *)(UART0_BASE + (UART_##X)))
28
29#define LSR_WAIT_FOR0(STATE) \
30do { \
31} while (!(__UART0(LSR) & UART_LSR_##STATE))
32
33#define FLOWCTL_QUERY0(LINE) ({ __UART0(MSR) & UART_MSR_##LINE; })
34#define FLOWCTL_CLEAR0(LINE) do { __UART0(MCR) &= ~UART_MCR_##LINE; } while (0)
35#define FLOWCTL_SET0(LINE) do { __UART0(MCR) |= UART_MCR_##LINE; } while (0)
36
37#define FLOWCTL_WAIT_FOR0(LINE) \
38do { \
39 gdbstub_do_rx(); \
40} while (!FLOWCTL_QUERY0(LINE))
41
42static void __init debug_stub_init(void);
43
44extern asmlinkage void __break_hijack_kernel_event(void);
45extern asmlinkage void __break_hijack_kernel_event_breaks_here(void);
46
47/*****************************************************************************/
48/*
49 * debug mode handler stub
50 * - we come here with the CPU in debug mode and with exceptions disabled
51 * - handle debugging services for userspace
52 */
53asmlinkage void debug_stub(void)
54{
55 unsigned long hsr0;
56 int type = 0;
57
58 static u8 inited = 0;
59 if (!inited) {
60 debug_stub_init();
61 type = -1;
62 inited = 1;
63 }
64
65 hsr0 = __get_HSR(0);
66 if (hsr0 & HSR0_ETMD)
67 __set_HSR(0, hsr0 & ~HSR0_ETMD);
68
69 /* disable single stepping */
70 __debug_regs->dcr &= ~DCR_SE;
71
72 /* kernel mode can propose an exception be handled in debug mode by jumping to a special
73 * location */
74 if (__debug_frame->pc == (unsigned long) __break_hijack_kernel_event_breaks_here) {
75 /* replace the debug frame with the kernel frame and discard
76 * the top kernel context */
77 *__debug_frame = *__frame;
78 __frame = __debug_frame->next_frame;
79 __debug_regs->brr = (__debug_frame->tbr & TBR_TT) << 12;
80 __debug_regs->brr |= BRR_EB;
81 }
82
83 if (__debug_frame->pc == (unsigned long) __debug_bug_trap + 4) {
84 __debug_frame->pc = __debug_frame->lr;
85 type = __debug_frame->gr8;
86 }
87
88#ifdef CONFIG_GDBSTUB
89 gdbstub(type);
90#endif
91
92 if (hsr0 & HSR0_ETMD)
93 __set_HSR(0, __get_HSR(0) | HSR0_ETMD);
94
95} /* end debug_stub() */
96
97/*****************************************************************************/
98/*
99 * debug stub initialisation
100 */
101static void __init debug_stub_init(void)
102{
103 __set_IRR(6, 0xff000000); /* map ERRs to NMI */
104 __set_IITMR(1, 0x20000000); /* ERR0/1, UART0/1 IRQ detect levels */
105
106 asm volatile(" movgs gr0,ibar0 \n"
107 " movgs gr0,ibar1 \n"
108 " movgs gr0,ibar2 \n"
109 " movgs gr0,ibar3 \n"
110 " movgs gr0,dbar0 \n"
111 " movgs gr0,dbmr00 \n"
112 " movgs gr0,dbmr01 \n"
113 " movgs gr0,dbdr00 \n"
114 " movgs gr0,dbdr01 \n"
115 " movgs gr0,dbar1 \n"
116 " movgs gr0,dbmr10 \n"
117 " movgs gr0,dbmr11 \n"
118 " movgs gr0,dbdr10 \n"
119 " movgs gr0,dbdr11 \n"
120 );
121
122 /* deal with debugging stub initialisation and initial pause */
123 if (__debug_frame->pc == (unsigned long) __debug_stub_init_break)
124 __debug_frame->pc = (unsigned long) start_kernel;
125
126 /* enable the debug events we want to trap */
127 __debug_regs->dcr = DCR_EBE;
128
129#ifdef CONFIG_GDBSTUB
130 gdbstub_init();
131#endif
132
133 __clr_MASK_all();
134 __clr_MASK(15);
135 __clr_RC(15);
136
137} /* end debug_stub_init() */
138
139/*****************************************************************************/
140/*
141 * kernel "exit" trap for gdb stub
142 */
143void debug_stub_exit(int status)
144{
145
146#ifdef CONFIG_GDBSTUB
147 gdbstub_exit(status);
148#endif
149
150} /* end debug_stub_exit() */
151
152/*****************************************************************************/
153/*
154 * send string to serial port
155 */
156void debug_to_serial(const char *p, int n)
157{
158 char ch;
159
160 for (; n > 0; n--) {
161 ch = *p++;
162 FLOWCTL_SET0(DTR);
163 LSR_WAIT_FOR0(THRE);
164 // FLOWCTL_WAIT_FOR(CTS);
165
166 if (ch == 0x0a) {
167 __UART0(TX) = 0x0d;
168 mb();
169 LSR_WAIT_FOR0(THRE);
170 // FLOWCTL_WAIT_FOR(CTS);
171 }
172 __UART0(TX) = ch;
173 mb();
174
175 FLOWCTL_CLEAR0(DTR);
176 }
177
178} /* end debug_to_serial() */
179
180/*****************************************************************************/
181/*
182 * send string to serial port
183 */
184void debug_to_serial2(const char *fmt, ...)
185{
186 va_list va;
187 char buf[64];
188 int n;
189
190 va_start(va, fmt);
191 n = vsprintf(buf, fmt, va);
192 va_end(va);
193
194 debug_to_serial(buf, n);
195
196} /* end debug_to_serial2() */
197
198/*****************************************************************************/
199/*
200 * set up the ttyS0 serial port baud rate timers
201 */
202void __init console_set_baud(unsigned baud)
203{
204 unsigned value, high, low;
205 u8 lcr;
206
207 /* work out the divisor to give us the nearest higher baud rate */
208 value = __serial_clock_speed_HZ / 16 / baud;
209
210 /* determine the baud rate range */
211 high = __serial_clock_speed_HZ / 16 / value;
212 low = __serial_clock_speed_HZ / 16 / (value + 1);
213
214 /* pick the nearest bound */
215 if (low + (high - low) / 2 > baud)
216 value++;
217
218 lcr = __UART0(LCR);
219 __UART0(LCR) |= UART_LCR_DLAB;
220 mb();
221 __UART0(DLL) = value & 0xff;
222 __UART0(DLM) = (value >> 8) & 0xff;
223 mb();
224 __UART0(LCR) = lcr;
225 mb();
226
227} /* end console_set_baud() */
228
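A worked example of the rounding above may help; the 33,333,000 Hz serial clock used here is an assumed figure for illustration and is not taken from this patch:

        static unsigned int nearest_divisor(unsigned int clk, unsigned int baud)
        {
                unsigned int value = clk / 16 / baud;           /* 115200 baud: 2083312/115200 = 18 */
                unsigned int high  = clk / 16 / value;          /* rate with divisor 18: 115739     */
                unsigned int low   = clk / 16 / (value + 1);    /* rate with divisor 19: 109648     */

                if (low + (high - low) / 2 > baud)              /* midpoint 112693 < 115200, so...  */
                        value++;
                return value;                                   /* ...18 (the nearer, higher rate)  */
        }
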
229/*****************************************************************************/
230/*
231 * read back the ttyS0 serial port baud rate divisor
232 */
233int __init console_get_baud(void)
234{
235 unsigned value;
236 u8 lcr;
237
238 lcr = __UART0(LCR);
239 __UART0(LCR) |= UART_LCR_DLAB;
240 mb();
241 value = __UART0(DLM) << 8;
242 value |= __UART0(DLL);
243 __UART0(LCR) = lcr;
244 mb();
245
246 return value;
247} /* end console_get_baud() */
248
249/*****************************************************************************/
250/*
251 * display BUG() info
252 */
253#ifndef CONFIG_NO_KERNEL_MSG
254void __debug_bug_printk(const char *file, unsigned line)
255{
256 printk("kernel BUG at %s:%d!\n", file, line);
257
258} /* end __debug_bug_printk() */
259#endif
diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c
new file mode 100644
index 000000000000..f5de6cf7df4e
--- /dev/null
+++ b/arch/frv/kernel/dma.c
@@ -0,0 +1,464 @@
1/* dma.c: DMA controller management on FR401 and the like
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/spinlock.h>
15#include <linux/errno.h>
16#include <linux/init.h>
17#include <asm/dma.h>
18#include <asm/gpio-regs.h>
19#include <asm/irc-regs.h>
20#include <asm/cpu-irqs.h>
21
22struct frv_dma_channel {
23 uint8_t flags;
24#define FRV_DMA_FLAGS_RESERVED 0x01
25#define FRV_DMA_FLAGS_INUSE 0x02
26#define FRV_DMA_FLAGS_PAUSED 0x04
27 uint8_t cap; /* capabilities available */
28 int irq; /* completion IRQ */
29 uint32_t dreqbit;
30 uint32_t dackbit;
31 uint32_t donebit;
32 const unsigned long ioaddr; /* DMA controller regs addr */
33 const char *devname;
34 dma_irq_handler_t handler;
35 void *data;
36};
37
38
39#define __get_DMAC(IO,X) ({ *(volatile unsigned long *)((IO) + DMAC_##X##x); })
40
41#define __set_DMAC(IO,X,V) \
42do { \
43 *(volatile unsigned long *)((IO) + DMAC_##X##x) = (V); \
44 mb(); \
45} while(0)
46
47#define ___set_DMAC(IO,X,V) \
48do { \
49 *(volatile unsigned long *)((IO) + DMAC_##X##x) = (V); \
50} while(0)
51
52
53static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
54 [0] = {
55 .cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
56 .irq = IRQ_CPU_DMA0,
57 .dreqbit = SIR_DREQ0_INPUT,
58 .dackbit = SOR_DACK0_OUTPUT,
59 .donebit = SOR_DONE0_OUTPUT,
60 .ioaddr = 0xfe000900,
61 },
62 [1] = {
63 .cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
64 .irq = IRQ_CPU_DMA1,
65 .dreqbit = SIR_DREQ1_INPUT,
66 .dackbit = SOR_DACK1_OUTPUT,
67 .donebit = SOR_DONE1_OUTPUT,
68 .ioaddr = 0xfe000980,
69 },
70 [2] = {
71 .cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
72 .irq = IRQ_CPU_DMA2,
73 .dreqbit = SIR_DREQ2_INPUT,
74 .dackbit = SOR_DACK2_OUTPUT,
75 .ioaddr = 0xfe000a00,
76 },
77 [3] = {
78 .cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
79 .irq = IRQ_CPU_DMA3,
80 .dreqbit = SIR_DREQ3_INPUT,
81 .dackbit = SOR_DACK3_OUTPUT,
82 .ioaddr = 0xfe000a80,
83 },
84 [4] = {
85 .cap = FRV_DMA_CAP_DREQ,
86 .irq = IRQ_CPU_DMA4,
87 .dreqbit = SIR_DREQ4_INPUT,
88 .ioaddr = 0xfe001000,
89 },
90 [5] = {
91 .cap = FRV_DMA_CAP_DREQ,
92 .irq = IRQ_CPU_DMA5,
93 .dreqbit = SIR_DREQ5_INPUT,
94 .ioaddr = 0xfe001080,
95 },
96 [6] = {
97 .cap = FRV_DMA_CAP_DREQ,
98 .irq = IRQ_CPU_DMA6,
99 .dreqbit = SIR_DREQ6_INPUT,
100 .ioaddr = 0xfe001100,
101 },
102 [7] = {
103 .cap = FRV_DMA_CAP_DREQ,
104 .irq = IRQ_CPU_DMA7,
105 .dreqbit = SIR_DREQ7_INPUT,
106 .ioaddr = 0xfe001180,
107 },
108};
109
110static DEFINE_RWLOCK(frv_dma_channels_lock);
111
112unsigned long frv_dma_inprogress;
113
114#define frv_clear_dma_inprogress(channel) \
115 atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
116
117#define frv_set_dma_inprogress(channel) \
118 atomic_set_mask(1 << (channel), &frv_dma_inprogress);
119
120/*****************************************************************************/
121/*
122 * DMA irq handler - determine channel involved, grab status and call real handler
123 */
124static irqreturn_t dma_irq_handler(int irq, void *_channel, struct pt_regs *regs)
125{
126 struct frv_dma_channel *channel = _channel;
127
128 frv_clear_dma_inprogress(channel - frv_dma_channels);
129 return channel->handler(channel - frv_dma_channels,
130 __get_DMAC(channel->ioaddr, CSTR),
131 channel->data,
132 regs);
133
134} /* end dma_irq_handler() */
135
136/*****************************************************************************/
137/*
138 * Determine which DMA controllers are present on this CPU
139 */
140void __init frv_dma_init(void)
141{
142 unsigned long psr = __get_PSR();
143 int num_dma, i;
144
145 /* First, determine how many DMA channels are available */
146 switch (PSR_IMPLE(psr)) {
147 case PSR_IMPLE_FR405:
148 case PSR_IMPLE_FR451:
149 case PSR_IMPLE_FR501:
150 case PSR_IMPLE_FR551:
151 num_dma = FRV_DMA_8CHANS;
152 break;
153
154 case PSR_IMPLE_FR401:
155 default:
156 num_dma = FRV_DMA_4CHANS;
157 break;
158 }
159
160 /* Now mark all of the non-existent channels as reserved */
161 for(i = num_dma; i < FRV_DMA_NCHANS; i++)
162 frv_dma_channels[i].flags = FRV_DMA_FLAGS_RESERVED;
163
164} /* end frv_dma_init() */
165
166/*****************************************************************************/
167/*
168 * allocate a DMA controller channel and the IRQ associated with it
169 */
170int frv_dma_open(const char *devname,
171 unsigned long dmamask,
172 int dmacap,
173 dma_irq_handler_t handler,
174 unsigned long irq_flags,
175 void *data)
176{
177 struct frv_dma_channel *channel;
178 int dma, ret;
179 uint32_t val;
180
181 write_lock(&frv_dma_channels_lock);
182
183 ret = -ENOSPC;
184
185 for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
186 channel = &frv_dma_channels[dma];
187
188 if (!test_bit(dma, &dmamask))
189 continue;
190
191 if ((channel->cap & dmacap) != dmacap)
192 continue;
193
194 if (!frv_dma_channels[dma].flags)
195 goto found;
196 }
197
198 goto out;
199
200 found:
201 ret = request_irq(channel->irq, dma_irq_handler, irq_flags, devname, channel);
202 if (ret < 0)
203 goto out;
204
205 /* okay, we've allocated all the resources */
206 channel = &frv_dma_channels[dma];
207
208 channel->flags |= FRV_DMA_FLAGS_INUSE;
209 channel->devname = devname;
210 channel->handler = handler;
211 channel->data = data;
212
213 /* Now make sure we are set up for DMA and not GPIO */
214 /* SIR bit must be set for DMA to work */
215 __set_SIR(channel->dreqbit | __get_SIR());
216 /* SOR bits depend on what the caller requests */
217 val = __get_SOR();
218 if(dmacap & FRV_DMA_CAP_DACK)
219 val |= channel->dackbit;
220 else
221 val &= ~channel->dackbit;
222 if(dmacap & FRV_DMA_CAP_DONE)
223 val |= channel->donebit;
224 else
225 val &= ~channel->donebit;
226 __set_SOR(val);
227
228 ret = dma;
229 out:
230 write_unlock(&frv_dma_channels_lock);
231 return ret;
232} /* end frv_dma_open() */
233
234EXPORT_SYMBOL(frv_dma_open);
235
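A hypothetical driver-side use of the channel API above; the handler signature is inferred from the dispatch in dma_irq_handler(), and the mask, capability and register values are illustrative, so treat this as a sketch rather than a reference:

        #include <linux/interrupt.h>
        #include <asm/ptrace.h>
        #include <asm/dma.h>

        static irqreturn_t my_xfer_done(int dma, unsigned long cstr, void *data,
                                        struct pt_regs *regs)
        {
                /* cstr is the channel's CSTR status at completion time */
                return IRQ_HANDLED;
        }

        static int my_device_dma_setup(void *dev_data)
        {
                /* accept channel 0 or 1, and require DREQ + DACK handshaking */
                int dma = frv_dma_open("mydev", (1 << 0) | (1 << 1),
                                       FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
                                       my_xfer_done, 0, dev_data);
                if (dma < 0)
                        return dma;     /* -ENOSPC or a request_irq() failure */

                frv_dma_config(dma, 0, 0, 0);   /* real CCFR/CCTR/APR values are device-specific */
                return dma;                     /* later: frv_dma_start(), then frv_dma_close()  */
        }
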
236/*****************************************************************************/
237/*
238 * close a DMA channel and its associated interrupt
239 */
240void frv_dma_close(int dma)
241{
242 struct frv_dma_channel *channel = &frv_dma_channels[dma];
243 unsigned long flags;
244
245 write_lock_irqsave(&frv_dma_channels_lock, flags);
246
247 free_irq(channel->irq, channel);
248 frv_dma_stop(dma);
249
250 channel->flags &= ~FRV_DMA_FLAGS_INUSE;
251
252 write_unlock_irqrestore(&frv_dma_channels_lock, flags);
253} /* end frv_dma_close() */
254
255EXPORT_SYMBOL(frv_dma_close);
256
257/*****************************************************************************/
258/*
259 * set static configuration on a DMA channel
260 */
261void frv_dma_config(int dma, unsigned long ccfr, unsigned long cctr, unsigned long apr)
262{
263 unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
264
265 ___set_DMAC(ioaddr, CCFR, ccfr);
266 ___set_DMAC(ioaddr, CCTR, cctr);
267 ___set_DMAC(ioaddr, APR, apr);
268 mb();
269
270} /* end frv_dma_config() */
271
272EXPORT_SYMBOL(frv_dma_config);
273
274/*****************************************************************************/
275/*
276 * start a DMA channel
277 */
278void frv_dma_start(int dma,
279 unsigned long sba, unsigned long dba,
280 unsigned long pix, unsigned long six, unsigned long bcl)
281{
282 unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
283
284 ___set_DMAC(ioaddr, SBA, sba);
285 ___set_DMAC(ioaddr, DBA, dba);
286 ___set_DMAC(ioaddr, PIX, pix);
287 ___set_DMAC(ioaddr, SIX, six);
288 ___set_DMAC(ioaddr, BCL, bcl);
289 ___set_DMAC(ioaddr, CSTR, 0);
290 mb();
291
292 __set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
293 frv_set_dma_inprogress(dma);
294
295} /* end frv_dma_start() */
296
297EXPORT_SYMBOL(frv_dma_start);
298
299/*****************************************************************************/
300/*
301 * restart a DMA channel that's been stopped in circular addressing mode by comparison-end
302 */
303void frv_dma_restart_circular(int dma, unsigned long six)
304{
305 unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
306
307 ___set_DMAC(ioaddr, SIX, six);
308 ___set_DMAC(ioaddr, CSTR, __get_DMAC(ioaddr, CSTR) & ~DMAC_CSTRx_CE);
309 mb();
310
311 __set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
312 frv_set_dma_inprogress(dma);
313
314} /* end frv_dma_restart_circular() */
315
316EXPORT_SYMBOL(frv_dma_restart_circular);
317
318/*****************************************************************************/
319/*
320 * stop a DMA channel
321 */
322void frv_dma_stop(int dma)
323{
324 unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
325 uint32_t cctr;
326
327 ___set_DMAC(ioaddr, CSTR, 0);
328 cctr = __get_DMAC(ioaddr, CCTR);
329 cctr &= ~(DMAC_CCTRx_IE | DMAC_CCTRx_ACT);
330 cctr |= DMAC_CCTRx_FC; /* fifo clear */
331 __set_DMAC(ioaddr, CCTR, cctr);
332 __set_DMAC(ioaddr, BCL, 0);
333 frv_clear_dma_inprogress(dma);
334} /* end frv_dma_stop() */
335
336EXPORT_SYMBOL(frv_dma_stop);
337
338/*****************************************************************************/
339/*
340 * test interrupt status of DMA channel
341 */
342int is_frv_dma_interrupting(int dma)
343{
344 unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
345
346 return __get_DMAC(ioaddr, CSTR) & (1 << 23);
347
348} /* end is_frv_dma_interrupting() */
349
350EXPORT_SYMBOL(is_frv_dma_interrupting);
351
352/*****************************************************************************/
353/*
354 * dump data about a DMA channel
355 */
356void frv_dma_dump(int dma)
357{
358 unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
359 unsigned long cstr, pix, six, bcl;
360
361 cstr = __get_DMAC(ioaddr, CSTR);
362 pix = __get_DMAC(ioaddr, PIX);
363 six = __get_DMAC(ioaddr, SIX);
364 bcl = __get_DMAC(ioaddr, BCL);
365
366 printk("DMA[%d] cstr=%lx pix=%lx six=%lx bcl=%lx\n", dma, cstr, pix, six, bcl);
367
368} /* end frv_dma_dump() */
369
370EXPORT_SYMBOL(frv_dma_dump);
371
372/*****************************************************************************/
373/*
374 * pause all DMA controllers
375 * - called by clock mangling routines
376 * - caller must be holding interrupts disabled
377 */
378void frv_dma_pause_all(void)
379{
380 struct frv_dma_channel *channel;
381 unsigned long ioaddr;
382 unsigned long cstr, cctr;
383 int dma;
384
385 write_lock(&frv_dma_channels_lock);
386
387 for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
388 channel = &frv_dma_channels[dma];
389
390 if (!(channel->flags & FRV_DMA_FLAGS_INUSE))
391 continue;
392
393 ioaddr = channel->ioaddr;
394 cctr = __get_DMAC(ioaddr, CCTR);
395 if (cctr & DMAC_CCTRx_ACT) {
396 cctr &= ~DMAC_CCTRx_ACT;
397 __set_DMAC(ioaddr, CCTR, cctr);
398
399 do {
400 cstr = __get_DMAC(ioaddr, CSTR);
401 } while (cstr & DMAC_CSTRx_BUSY);
402
403 if (cstr & DMAC_CSTRx_FED)
404 channel->flags |= FRV_DMA_FLAGS_PAUSED;
405 frv_clear_dma_inprogress(dma);
406 }
407 }
408
409} /* end frv_dma_pause_all() */
410
411EXPORT_SYMBOL(frv_dma_pause_all);
412
413/*****************************************************************************/
414/*
415 * resume paused DMA controllers
416 * - called by clock mangling routines
417 * - caller must be holding interrupts disabled
418 */
419void frv_dma_resume_all(void)
420{
421 struct frv_dma_channel *channel;
422 unsigned long ioaddr;
423 unsigned long cstr, cctr;
424 int dma;
425
426 for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
427 channel = &frv_dma_channels[dma];
428
429 if (!(channel->flags & FRV_DMA_FLAGS_PAUSED))
430 continue;
431
432 ioaddr = channel->ioaddr;
433 cstr = __get_DMAC(ioaddr, CSTR);
434 cstr &= ~(DMAC_CSTRx_FED | DMAC_CSTRx_INT);
435 __set_DMAC(ioaddr, CSTR, cstr);
436
437 cctr = __get_DMAC(ioaddr, CCTR);
438 cctr |= DMAC_CCTRx_ACT;
439 __set_DMAC(ioaddr, CCTR, cctr);
440
441 channel->flags &= ~FRV_DMA_FLAGS_PAUSED;
442 frv_set_dma_inprogress(dma);
443 }
444
445 write_unlock(&frv_dma_channels_lock);
446
447} /* end frv_dma_resume_all() */
448
449EXPORT_SYMBOL(frv_dma_resume_all);
450
451/*****************************************************************************/
452/*
453 * dma status clear
454 */
455void frv_dma_status_clear(int dma)
456{
457 unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
458 uint32_t cctr;
459 ___set_DMAC(ioaddr, CSTR, 0);
460
461 cctr = __get_DMAC(ioaddr, CCTR);
462} /* end frv_dma_status_clear() */
463
464EXPORT_SYMBOL(frv_dma_status_clear);
diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S
new file mode 100644
index 000000000000..9b9243e2103c
--- /dev/null
+++ b/arch/frv/kernel/entry-table.S
@@ -0,0 +1,295 @@
1/* entry-table.S: main trap vector tables and exception jump table
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/sys.h>
14#include <linux/config.h>
15#include <linux/linkage.h>
16#include <asm/spr-regs.h>
17
18###############################################################################
19#
20# Declare the main trap and vector tables
21#
22# There are six tables:
23#
24# (1) The trap table for debug mode
25# (2) The trap table for kernel mode
26# (3) The trap table for user mode
27#
28# The CPU jumps to an appropriate slot in the appropriate table to perform
29# exception processing. We have three different tables for the three
30# different CPU modes because there is no hardware differentiation between
31# stack pointers for these three modes, and so we have to invent one when
32# crossing mode boundaries.
33#
34# (4) The exception handler vector table
35#
36# The user and kernel trap tables use the same prologue for normal
37# exception processing. The prologue then jumps to the handler in this
38# table, as indexed by the exception ID from the TBR.
39#
40# (5) The fixup table for kernel-trap single-step
41# (6) The fixup table for user-trap single-step
42#
43# Due to the way single-stepping works on this CPU (single-step is not
44# disabled when crossing exception boundaries, only when in debug mode),
45# we have to catch the single-step event in break.S and jump to the fixup
46# routine pointed to by this table.
47#
48# The linker script places the user mode and kernel mode trap tables on to
49# the same 8Kb page, so that break.S can be more efficient when performing
50# single-step bypass management
51#
52###############################################################################
53
54 # trap table for entry from debug mode
55 .section .trap.break,"ax"
56 .balign 256*16
57 .globl __entry_breaktrap_table
58__entry_breaktrap_table:
59
60 # trap table for entry from user mode
61 .section .trap.user,"ax"
62 .balign 256*16
63 .globl __entry_usertrap_table
64__entry_usertrap_table:
65
66 # trap table for entry from kernel mode
67 .section .trap.kernel,"ax"
68 .balign 256*16
69 .globl __entry_kerneltrap_table
70__entry_kerneltrap_table:
71
72 # exception handler jump table
73 .section .trap.vector,"ax"
74 .balign 256*4
75 .globl __entry_vector_table
76__entry_vector_table:
77
78 # trap fixup table for single-stepping in user mode
79 .section .trap.fixup.user,"a"
80 .balign 256*4
81 .globl __break_usertrap_fixup_table
82__break_usertrap_fixup_table:
83
84 # trap fixup table for single-stepping in user mode
85 .section .trap.fixup.kernel,"a"
86 .balign 256*4
87 .globl __break_kerneltrap_fixup_table
88__break_kerneltrap_fixup_table:
89
90 # handler declaration for a software or program interrupt
91.macro VECTOR_SOFTPROG tbr_tt, vec
92 .section .trap.user
93 .org \tbr_tt
94 bra __entry_uspace_softprog_interrupt
95 .section .trap.fixup.user
96 .org \tbr_tt >> 2
97 .long __break_step_uspace_softprog_interrupt
98 .section .trap.kernel
99 .org \tbr_tt
100 bra __entry_kernel_softprog_interrupt
101 .section .trap.fixup.kernel
102 .org \tbr_tt >> 2
103 .long __break_step_kernel_softprog_interrupt
104 .section .trap.vector
105 .org \tbr_tt >> 2
106 .long \vec
107.endm
108
109 # handler declaration for a maskable external interrupt
110.macro VECTOR_IRQ tbr_tt, vec
111 .section .trap.user
112 .org \tbr_tt
113 bra __entry_uspace_external_interrupt
114 .section .trap.fixup.user
115 .org \tbr_tt >> 2
116 .long __break_step_uspace_external_interrupt
117 .section .trap.kernel
118 .org \tbr_tt
119 bra __entry_kernel_external_interrupt
120 .section .trap.fixup.kernel
121 .org \tbr_tt >> 2
122 .long __break_step_kernel_external_interrupt
123 .section .trap.vector
124 .org \tbr_tt >> 2
125 .long \vec
126.endm
127
128 # handler declaration for an NMI external interrupt
129.macro VECTOR_NMI tbr_tt, vec
130 .section .trap.user
131 .org \tbr_tt
132 break
133 break
134 break
135 break
136 .section .trap.kernel
137 .org \tbr_tt
138 break
139 break
140 break
141 break
142 .section .trap.vector
143 .org \tbr_tt >> 2
144 .long \vec
145.endm
146
147 # handler declaration for an MMU-only software or program interrupt
148.macro VECTOR_SP_MMU tbr_tt, vec
149#ifdef CONFIG_MMU
150 VECTOR_SOFTPROG \tbr_tt, \vec
151#else
152 VECTOR_NMI \tbr_tt, 0
153#endif
154.endm
155
156
157###############################################################################
158#
159# specification of the vectors
160# - note: each macro inserts code into multiple sections
161#
162###############################################################################
163 VECTOR_SP_MMU TBR_TT_INSTR_MMU_MISS, __entry_insn_mmu_miss
164 VECTOR_SOFTPROG TBR_TT_INSTR_ACC_ERROR, __entry_insn_access_error
165 VECTOR_SOFTPROG TBR_TT_INSTR_ACC_EXCEP, __entry_insn_access_exception
166 VECTOR_SOFTPROG TBR_TT_PRIV_INSTR, __entry_privileged_instruction
167 VECTOR_SOFTPROG TBR_TT_ILLEGAL_INSTR, __entry_illegal_instruction
168 VECTOR_SOFTPROG TBR_TT_FP_EXCEPTION, __entry_media_exception
169 VECTOR_SOFTPROG TBR_TT_MP_EXCEPTION, __entry_media_exception
170 VECTOR_SOFTPROG TBR_TT_DATA_ACC_ERROR, __entry_data_access_error
171 VECTOR_SP_MMU TBR_TT_DATA_MMU_MISS, __entry_data_mmu_miss
172 VECTOR_SOFTPROG TBR_TT_DATA_ACC_EXCEP, __entry_data_access_exception
173 VECTOR_SOFTPROG TBR_TT_DATA_STR_ERROR, __entry_data_store_error
174 VECTOR_SOFTPROG TBR_TT_DIVISION_EXCEP, __entry_division_exception
175
176#ifdef CONFIG_MMU
177 .section .trap.user
178 .org TBR_TT_INSTR_TLB_MISS
179 .globl __trap_user_insn_tlb_miss
180__trap_user_insn_tlb_miss:
181 movsg ear0,gr28 /* faulting address */
182 movsg scr0,gr31 /* get mapped PTD coverage start address */
183 xor.p gr28,gr31,gr31 /* compare addresses */
184 bra __entry_user_insn_tlb_miss
185
186 .org TBR_TT_DATA_TLB_MISS
187 .globl __trap_user_data_tlb_miss
188__trap_user_data_tlb_miss:
189 movsg ear0,gr28 /* faulting address */
190 movsg scr1,gr31 /* get mapped PTD coverage start address */
191 xor.p gr28,gr31,gr31 /* compare addresses */
192 bra __entry_user_data_tlb_miss
193
194 .section .trap.kernel
195 .org TBR_TT_INSTR_TLB_MISS
196 .globl __trap_kernel_insn_tlb_miss
197__trap_kernel_insn_tlb_miss:
198 movsg ear0,gr29 /* faulting address */
199 movsg scr0,gr31 /* get mapped PTD coverage start address */
200 xor.p gr29,gr31,gr31 /* compare addresses */
201 bra __entry_kernel_insn_tlb_miss
202
203 .org TBR_TT_DATA_TLB_MISS
204 .globl __trap_kernel_data_tlb_miss
205__trap_kernel_data_tlb_miss:
206 movsg ear0,gr29 /* faulting address */
207 movsg scr1,gr31 /* get mapped PTD coverage start address */
208 xor.p gr29,gr31,gr31 /* compare addresses */
209 bra __entry_kernel_data_tlb_miss
210
211 .section .trap.fixup.user
212 .org TBR_TT_INSTR_TLB_MISS >> 2
213 .globl __trap_fixup_user_insn_tlb_miss
214__trap_fixup_user_insn_tlb_miss:
215 .long __break_user_insn_tlb_miss
216 .org TBR_TT_DATA_TLB_MISS >> 2
217 .globl __trap_fixup_user_data_tlb_miss
218__trap_fixup_user_data_tlb_miss:
219 .long __break_user_data_tlb_miss
220
221 .section .trap.fixup.kernel
222 .org TBR_TT_INSTR_TLB_MISS >> 2
223 .globl __trap_fixup_kernel_insn_tlb_miss
224__trap_fixup_kernel_insn_tlb_miss:
225 .long __break_kernel_insn_tlb_miss
226 .org TBR_TT_DATA_TLB_MISS >> 2
227 .globl __trap_fixup_kernel_data_tlb_miss
228__trap_fixup_kernel_data_tlb_miss:
229 .long __break_kernel_data_tlb_miss
230
231 .section .trap.vector
232 .org TBR_TT_INSTR_TLB_MISS >> 2
233 .long __entry_insn_mmu_fault
234 .org TBR_TT_DATA_TLB_MISS >> 2
235 .long __entry_data_mmu_fault
236#endif
237
238 VECTOR_SP_MMU TBR_TT_DATA_DAT_EXCEP, __entry_data_dat_fault
239 VECTOR_NMI TBR_TT_DECREMENT_TIMER, __entry_do_NMI
240 VECTOR_SOFTPROG TBR_TT_COMPOUND_EXCEP, __entry_compound_exception
241 VECTOR_IRQ TBR_TT_INTERRUPT_1, __entry_do_IRQ
242 VECTOR_IRQ TBR_TT_INTERRUPT_2, __entry_do_IRQ
243 VECTOR_IRQ TBR_TT_INTERRUPT_3, __entry_do_IRQ
244 VECTOR_IRQ TBR_TT_INTERRUPT_4, __entry_do_IRQ
245 VECTOR_IRQ TBR_TT_INTERRUPT_5, __entry_do_IRQ
246 VECTOR_IRQ TBR_TT_INTERRUPT_6, __entry_do_IRQ
247 VECTOR_IRQ TBR_TT_INTERRUPT_7, __entry_do_IRQ
248 VECTOR_IRQ TBR_TT_INTERRUPT_8, __entry_do_IRQ
249 VECTOR_IRQ TBR_TT_INTERRUPT_9, __entry_do_IRQ
250 VECTOR_IRQ TBR_TT_INTERRUPT_10, __entry_do_IRQ
251 VECTOR_IRQ TBR_TT_INTERRUPT_11, __entry_do_IRQ
252 VECTOR_IRQ TBR_TT_INTERRUPT_12, __entry_do_IRQ
253 VECTOR_IRQ TBR_TT_INTERRUPT_13, __entry_do_IRQ
254 VECTOR_IRQ TBR_TT_INTERRUPT_14, __entry_do_IRQ
255 VECTOR_NMI TBR_TT_INTERRUPT_15, __entry_do_NMI
256
257 # miscellaneous user mode entry points
258 .section .trap.user
259 .org TBR_TT_TRAP0
260 .rept 127
261 bra __entry_uspace_softprog_interrupt
262 bra __break_step_uspace_softprog_interrupt
263 .long 0,0
264 .endr
265 .org TBR_TT_BREAK
266 bra __entry_break
267 .long 0,0,0
268
269 # miscellaneous kernel mode entry points
270 .section .trap.kernel
271 .org TBR_TT_TRAP0
272 .rept 127
273 bra __entry_kernel_softprog_interrupt
274 bra __break_step_kernel_softprog_interrupt
275 .long 0,0
276 .endr
277 .org TBR_TT_BREAK
278 bra __entry_break
279 .long 0,0,0
280
281 # miscellaneous debug mode entry points
282 .section .trap.break
283 .org TBR_TT_BREAK
284 movsg bpcsr,gr30
285 jmpl @(gr30,gr0)
286
287 # miscellaneous vectors
288 .section .trap.vector
289 .org TBR_TT_TRAP0 >> 2
290 .long system_call
291 .rept 126
292 .long __entry_unsupported_trap
293 .endr
294 .org TBR_TT_BREAK >> 2
295 .long __entry_debug_exception
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
new file mode 100644
index 000000000000..ad10ea595459
--- /dev/null
+++ b/arch/frv/kernel/entry.S
@@ -0,0 +1,1428 @@
1/* entry.S: FR-V entry
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 *
12 * Entry to the kernel is "interesting":
13 * (1) There are no stack pointers, not even for the kernel
14 * (2) General Registers should not be clobbered
15 * (3) There are no kernel-only data registers
16 * (4) Since all addressing modes are relative to a General Register, no global
17 * variables can be reached
18 *
19 * We deal with this by declaring that we shall kill GR28 on entering the
20 * kernel from userspace
21 *
22 * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
23 * they can't rely on GR28 to be anything useful, and so need to clobber a
24 * separate register (GR31). Break interrupts are managed in break.S
25 *
26 * GR29 _is_ saved, and holds the current task pointer globally
27 *
28 */
29
30#include <linux/sys.h>
31#include <linux/config.h>
32#include <linux/linkage.h>
33#include <asm/thread_info.h>
34#include <asm/setup.h>
35#include <asm/segment.h>
36#include <asm/ptrace.h>
37#include <asm/errno.h>
38#include <asm/cache.h>
39#include <asm/spr-regs.h>
40
41#define nr_syscalls ((syscall_table_size)/4)
42
43 .text
44 .balign 4
45
46.macro LEDS val
47# sethi.p %hi(0xe1200004),gr30
48# setlo %lo(0xe1200004),gr30
49# setlos #~\val,gr31
50# st gr31,@(gr30,gr0)
51# sethi.p %hi(0xffc00100),gr30
52# setlo %lo(0xffc00100),gr30
53# sth gr0,@(gr30,gr0)
54# membar
55.endm
56
57.macro LEDS32
58# not gr31,gr31
59# sethi.p %hi(0xe1200004),gr30
60# setlo %lo(0xe1200004),gr30
61# st.p gr31,@(gr30,gr0)
62# srli gr31,#16,gr31
63# sethi.p %hi(0xffc00100),gr30
64# setlo %lo(0xffc00100),gr30
65# sth gr31,@(gr30,gr0)
66# membar
67.endm
68
69###############################################################################
70#
71# entry point for External interrupts received whilst executing userspace code
72#
73###############################################################################
74 .globl __entry_uspace_external_interrupt
75 .type __entry_uspace_external_interrupt,@function
76__entry_uspace_external_interrupt:
77 LEDS 0x6200
78 sethi.p %hi(__kernel_frame0_ptr),gr28
79 setlo %lo(__kernel_frame0_ptr),gr28
80 ldi @(gr28,#0),gr28
81
82 # handle h/w single-step through exceptions
83 sti gr0,@(gr28,#REG__STATUS)
84
85 .globl __entry_uspace_external_interrupt_reentry
86__entry_uspace_external_interrupt_reentry:
87 LEDS 0x6201
88
89 setlos #REG__END,gr30
90 dcpl gr28,gr30,#0
91
92 # finish building the exception frame
93 sti sp, @(gr28,#REG_SP)
94 stdi gr2, @(gr28,#REG_GR(2))
95 stdi gr4, @(gr28,#REG_GR(4))
96 stdi gr6, @(gr28,#REG_GR(6))
97 stdi gr8, @(gr28,#REG_GR(8))
98 stdi gr10,@(gr28,#REG_GR(10))
99 stdi gr12,@(gr28,#REG_GR(12))
100 stdi gr14,@(gr28,#REG_GR(14))
101 stdi gr16,@(gr28,#REG_GR(16))
102 stdi gr18,@(gr28,#REG_GR(18))
103 stdi gr20,@(gr28,#REG_GR(20))
104 stdi gr22,@(gr28,#REG_GR(22))
105 stdi gr24,@(gr28,#REG_GR(24))
106 stdi gr26,@(gr28,#REG_GR(26))
107 sti gr0, @(gr28,#REG_GR(28))
108 sti gr29,@(gr28,#REG_GR(29))
109 stdi.p gr30,@(gr28,#REG_GR(30))
110
111 # set up the kernel stack pointer
112 ori gr28,0,sp
113
114 movsg tbr ,gr20
115 movsg psr ,gr22
116 movsg pcsr,gr21
117 movsg isr ,gr23
118 movsg ccr ,gr24
119 movsg cccr,gr25
120 movsg lr ,gr26
121 movsg lcr ,gr27
122
123 setlos.p #-1,gr4
124 andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
125 andi.p gr22,#~(PSR_PS|PSR_S),gr6
126 slli gr5,#1,gr5
127 or gr6,gr5,gr5
128 andi gr5,#~PSR_ET,gr5
129
130 sti gr20,@(gr28,#REG_TBR)
131 sti gr21,@(gr28,#REG_PC)
132 sti gr5 ,@(gr28,#REG_PSR)
133 sti gr23,@(gr28,#REG_ISR)
134 stdi gr24,@(gr28,#REG_CCR)
135 stdi gr26,@(gr28,#REG_LR)
136 sti gr4 ,@(gr28,#REG_SYSCALLNO)
137
138 movsg iacc0h,gr4
139 movsg iacc0l,gr5
140 stdi gr4,@(gr28,#REG_IACC0)
141
142 movsg gner0,gr4
143 movsg gner1,gr5
144 stdi gr4,@(gr28,#REG_GNER0)
145
146 # set up kernel global registers
147 sethi.p %hi(__kernel_current_task),gr5
148 setlo %lo(__kernel_current_task),gr5
149 sethi.p %hi(_gp),gr16
150 setlo %lo(_gp),gr16
151 ldi @(gr5,#0),gr29
152 ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
153
154 # make sure we (the kernel) get div-zero and misalignment exceptions
155 setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
156 movgs gr5,isr
157
158 # switch to the kernel trap table
159 sethi.p %hi(__entry_kerneltrap_table),gr6
160 setlo %lo(__entry_kerneltrap_table),gr6
161 movgs gr6,tbr
162
163 # set the return address
164 sethi.p %hi(__entry_return_from_user_interrupt),gr4
165 setlo %lo(__entry_return_from_user_interrupt),gr4
166 movgs gr4,lr
167
168 # raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
169 movsg psr,gr4
170
171 ori gr4,#PSR_PIL_14,gr4
172 movgs gr4,psr
173 ori gr4,#PSR_PIL_14|PSR_ET,gr4
174 movgs gr4,psr
175
176 LEDS 0x6202
177 bra do_IRQ
178
179 .size __entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt
180
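As an aside on the entry paths: the sequence commented "try to rebuild original PSR value" above (it recurs in every entry path in this file) can be read as the following rough C. This is an illustrative sketch only, assuming, as the shift-by-one implies, that the PSR.S bit sits one position above PSR.PS; the PSR_* masks are those pulled in from <asm/spr-regs.h> above.

	/* reconstruct the PSR of the interrupted context from the PSR seen on
	 * entry, where S=1 (we are in the kernel) and PS holds the interrupted
	 * context's S bit; PS and ET are left clear in the saved value
	 */
	static unsigned long rebuild_saved_psr(unsigned long entry_psr)
	{
		unsigned long ps   = entry_psr & PSR_PS;		/* previous-supervisor bit */
		unsigned long rest = entry_psr & ~(PSR_PS | PSR_S);

		return (rest | (ps << 1)) & ~PSR_ET;	/* PS moved back into the S position */
	}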
181###############################################################################
182#
183# entry point for External interrupts received whilst executing kernel code
184# - on arriving here, the following registers should already be set up:
185# GR15 - current thread_info struct pointer
186# GR16 - kernel GP-REL pointer
187# GR29 - current task struct pointer
188# TBR - kernel trap vector table
189# ISR - kernel's preferred integer controls
190#
191###############################################################################
192 .globl __entry_kernel_external_interrupt
193 .type __entry_kernel_external_interrupt,@function
194__entry_kernel_external_interrupt:
195 LEDS 0x6210
196
197 sub sp,gr15,gr31
198 LEDS32
199
200 # set up the stack pointer
201 or.p sp,gr0,gr30
202 subi sp,#REG__END,sp
203 sti gr30,@(sp,#REG_SP)
204
205 # handle h/w single-step through exceptions
206 sti gr0,@(sp,#REG__STATUS)
207
208 .globl __entry_kernel_external_interrupt_reentry
209__entry_kernel_external_interrupt_reentry:
210 LEDS 0x6211
211
212 # set up the exception frame
213 setlos #REG__END,gr30
214 dcpl sp,gr30,#0
215
216 sti.p gr28,@(sp,#REG_GR(28))
217 ori sp,0,gr28
218
219 # finish building the exception frame
220 stdi gr2,@(gr28,#REG_GR(2))
221 stdi gr4,@(gr28,#REG_GR(4))
222 stdi gr6,@(gr28,#REG_GR(6))
223 stdi gr8,@(gr28,#REG_GR(8))
224 stdi gr10,@(gr28,#REG_GR(10))
225 stdi gr12,@(gr28,#REG_GR(12))
226 stdi gr14,@(gr28,#REG_GR(14))
227 stdi gr16,@(gr28,#REG_GR(16))
228 stdi gr18,@(gr28,#REG_GR(18))
229 stdi gr20,@(gr28,#REG_GR(20))
230 stdi gr22,@(gr28,#REG_GR(22))
231 stdi gr24,@(gr28,#REG_GR(24))
232 stdi gr26,@(gr28,#REG_GR(26))
233 sti gr29,@(gr28,#REG_GR(29))
234 stdi gr30,@(gr28,#REG_GR(30))
235
236 movsg tbr ,gr20
237 movsg psr ,gr22
238 movsg pcsr,gr21
239 movsg isr ,gr23
240 movsg ccr ,gr24
241 movsg cccr,gr25
242 movsg lr ,gr26
243 movsg lcr ,gr27
244
245 setlos.p #-1,gr4
246 andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
247 andi.p gr22,#~(PSR_PS|PSR_S),gr6
248 slli gr5,#1,gr5
249 or gr6,gr5,gr5
250 andi.p gr5,#~PSR_ET,gr5
251
252 # set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
253 # - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
254 andi gr25,#~0xc0,gr25
255
256 sti gr20,@(gr28,#REG_TBR)
257 sti gr21,@(gr28,#REG_PC)
258 sti gr5 ,@(gr28,#REG_PSR)
259 sti gr23,@(gr28,#REG_ISR)
260 stdi gr24,@(gr28,#REG_CCR)
261 stdi gr26,@(gr28,#REG_LR)
262 sti gr4 ,@(gr28,#REG_SYSCALLNO)
263
264 movsg iacc0h,gr4
265 movsg iacc0l,gr5
266 stdi gr4,@(gr28,#REG_IACC0)
267
268 movsg gner0,gr4
269 movsg gner1,gr5
270 stdi gr4,@(gr28,#REG_GNER0)
271
272 # set the return address
273 sethi.p %hi(__entry_return_from_kernel_interrupt),gr4
274 setlo %lo(__entry_return_from_kernel_interrupt),gr4
275 movgs gr4,lr
276
277 # clear power-saving mode flags
278 movsg hsr0,gr4
279 andi gr4,#~HSR0_PDM,gr4
280 movgs gr4,hsr0
281
282 # raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
283 movsg psr,gr4
284 ori gr4,#PSR_PIL_14,gr4
285 movgs gr4,psr
286 ori gr4,#PSR_ET,gr4
287 movgs gr4,psr
288
289 LEDS 0x6212
290 bra do_IRQ
291
292 .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
293
294
295###############################################################################
296#
297# entry point for Software and Program interrupts generated whilst executing userspace code
298#
299###############################################################################
300 .globl __entry_uspace_softprog_interrupt
301 .type __entry_uspace_softprog_interrupt,@function
302 .globl __entry_uspace_handle_mmu_fault
303__entry_uspace_softprog_interrupt:
304 LEDS 0x6000
305#ifdef CONFIG_MMU
306 movsg ear0,gr28
307__entry_uspace_handle_mmu_fault:
308 movgs gr28,scr2
309#endif
310 sethi.p %hi(__kernel_frame0_ptr),gr28
311 setlo %lo(__kernel_frame0_ptr),gr28
312 ldi @(gr28,#0),gr28
313
314 # handle h/w single-step through exceptions
315 sti gr0,@(gr28,#REG__STATUS)
316
317 .globl __entry_uspace_softprog_interrupt_reentry
318__entry_uspace_softprog_interrupt_reentry:
319 LEDS 0x6001
320
321 setlos #REG__END,gr30
322 dcpl gr28,gr30,#0
323
324 # set up the kernel stack pointer
325 sti.p sp,@(gr28,#REG_SP)
326 ori gr28,0,sp
327 sti gr0,@(gr28,#REG_GR(28))
328
329 stdi gr20,@(gr28,#REG_GR(20))
330 stdi gr22,@(gr28,#REG_GR(22))
331
332 movsg tbr,gr20
333 movsg pcsr,gr21
334 movsg psr,gr22
335
336 sethi.p %hi(__entry_return_from_user_exception),gr23
337 setlo %lo(__entry_return_from_user_exception),gr23
338 bra __entry_common
339
340 .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
341
342 # single-stepping was disabled on entry to a TLB handler that then faulted
343#ifdef CONFIG_MMU
344 .globl __entry_uspace_handle_mmu_fault_sstep
345__entry_uspace_handle_mmu_fault_sstep:
346 movgs gr28,scr2
347 sethi.p %hi(__kernel_frame0_ptr),gr28
348 setlo %lo(__kernel_frame0_ptr),gr28
349 ldi @(gr28,#0),gr28
350
351 # flag single-step re-enablement
352 sti gr0,@(gr28,#REG__STATUS)
353 bra __entry_uspace_softprog_interrupt_reentry
354#endif
355
356
357###############################################################################
358#
359# entry point for Software and Program interrupts generated whilst executing kernel code
360#
361###############################################################################
362 .globl __entry_kernel_softprog_interrupt
363 .type __entry_kernel_softprog_interrupt,@function
364__entry_kernel_softprog_interrupt:
365 LEDS 0x6004
366
367#ifdef CONFIG_MMU
368 movsg ear0,gr30
369 movgs gr30,scr2
370#endif
371
372 .globl __entry_kernel_handle_mmu_fault
373__entry_kernel_handle_mmu_fault:
374 # set up the stack pointer
375 subi sp,#REG__END,sp
376 sti sp,@(sp,#REG_SP)
377 sti sp,@(sp,#REG_SP-4)
378 andi sp,#~7,sp
379
380 # handle h/w single-step through exceptions
381 sti gr0,@(sp,#REG__STATUS)
382
383 .globl __entry_kernel_softprog_interrupt_reentry
384__entry_kernel_softprog_interrupt_reentry:
385 LEDS 0x6005
386
387 setlos #REG__END,gr30
388 dcpl sp,gr30,#0
389
390 # set up the exception frame
391 sti.p gr28,@(sp,#REG_GR(28))
392 ori sp,0,gr28
393
394 stdi gr20,@(gr28,#REG_GR(20))
395 stdi gr22,@(gr28,#REG_GR(22))
396
397 ldi @(sp,#REG_SP),gr22 /* reconstruct the old SP */
398 addi gr22,#REG__END,gr22
399 sti gr22,@(sp,#REG_SP)
400
401 # set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
402 # - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
403 movsg cccr,gr20
404 andi gr20,#~0xc0,gr20
405 movgs gr20,cccr
406
407 movsg tbr,gr20
408 movsg pcsr,gr21
409 movsg psr,gr22
410
411 sethi.p %hi(__entry_return_from_kernel_exception),gr23
412 setlo %lo(__entry_return_from_kernel_exception),gr23
413 bra __entry_common
414
415 .size __entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt
416
417 # single-stepping was disabled on entry to a TLB handler that then faulted
418#ifdef CONFIG_MMU
419 .globl __entry_kernel_handle_mmu_fault_sstep
420__entry_kernel_handle_mmu_fault_sstep:
421 # set up the stack pointer
422 subi sp,#REG__END,sp
423 sti sp,@(sp,#REG_SP)
424 sti sp,@(sp,#REG_SP-4)
425 andi sp,#~7,sp
426
427 # flag single-step re-enablement
428 sethi #REG__STATUS_STEP,gr30
429 sti gr30,@(sp,#REG__STATUS)
430 bra __entry_kernel_softprog_interrupt_reentry
431#endif
432
433
434###############################################################################
435#
436# the rest of the kernel entry point code
437# - on arriving here, the following registers should be set up:
438# GR1 - kernel stack pointer
439# GR7 - syscall number (trap 0 only)
440# GR8-13 - syscall args (trap 0 only)
441# GR20 - saved TBR
442# GR21 - saved PC
443# GR22 - saved PSR
444# GR23 - return handler address
445# GR28 - exception frame on stack
446# SCR2 - saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
447# PSR - PSR.S 1, PSR.ET 0
448#
449###############################################################################
450 .globl __entry_common
451 .type __entry_common,@function
452__entry_common:
453 LEDS 0x6008
454
455 # finish building the exception frame
456 stdi gr2,@(gr28,#REG_GR(2))
457 stdi gr4,@(gr28,#REG_GR(4))
458 stdi gr6,@(gr28,#REG_GR(6))
459 stdi gr8,@(gr28,#REG_GR(8))
460 stdi gr10,@(gr28,#REG_GR(10))
461 stdi gr12,@(gr28,#REG_GR(12))
462 stdi gr14,@(gr28,#REG_GR(14))
463 stdi gr16,@(gr28,#REG_GR(16))
464 stdi gr18,@(gr28,#REG_GR(18))
465 stdi gr24,@(gr28,#REG_GR(24))
466 stdi gr26,@(gr28,#REG_GR(26))
467 sti gr29,@(gr28,#REG_GR(29))
468 stdi gr30,@(gr28,#REG_GR(30))
469
470 movsg lcr ,gr27
471 movsg lr ,gr26
472 movgs gr23,lr
473 movsg cccr,gr25
474 movsg ccr ,gr24
475 movsg isr ,gr23
476
477 setlos.p #-1,gr4
478 andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
479 andi.p gr22,#~(PSR_PS|PSR_S),gr6
480 slli gr5,#1,gr5
481 or gr6,gr5,gr5
482 andi gr5,#~PSR_ET,gr5
483
484 sti gr20,@(gr28,#REG_TBR)
485 sti gr21,@(gr28,#REG_PC)
486 sti gr5 ,@(gr28,#REG_PSR)
487 sti gr23,@(gr28,#REG_ISR)
488 stdi gr24,@(gr28,#REG_CCR)
489 stdi gr26,@(gr28,#REG_LR)
490 sti gr4 ,@(gr28,#REG_SYSCALLNO)
491
492 movsg iacc0h,gr4
493 movsg iacc0l,gr5
494 stdi gr4,@(gr28,#REG_IACC0)
495
496 movsg gner0,gr4
497 movsg gner1,gr5
498 stdi gr4,@(gr28,#REG_GNER0)
499
500 # set up kernel global registers
501 sethi.p %hi(__kernel_current_task),gr5
502 setlo %lo(__kernel_current_task),gr5
503 sethi.p %hi(_gp),gr16
504 setlo %lo(_gp),gr16
505 ldi @(gr5,#0),gr29
506 ldi @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
507
508 # switch to the kernel trap table
509 sethi.p %hi(__entry_kerneltrap_table),gr6
510 setlo %lo(__entry_kerneltrap_table),gr6
511 movgs gr6,tbr
512
513 # make sure we (the kernel) get div-zero and misalignment exceptions
514 setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
515 movgs gr5,isr
516
517 # clear power-saving mode flags
518 movsg hsr0,gr4
519 andi gr4,#~HSR0_PDM,gr4
520 movgs gr4,hsr0
521
522 # multiplex again using old TBR as a guide
523 setlos.p #TBR_TT,gr3
524 sethi %hi(__entry_vector_table),gr6
525 and.p gr20,gr3,gr5
526 setlo %lo(__entry_vector_table),gr6
527 srli gr5,#2,gr5
528 ld @(gr5,gr6),gr5
529
530 LEDS 0x6009
531 jmpl @(gr5,gr0)
532
533
534 .size __entry_common,.-__entry_common
535
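The closing "multiplex again using old TBR as a guide" step above is a table-driven dispatch: the trap-type field of the saved TBR, shifted right by two, selects a 4-byte slot in __entry_vector_table (the .trap.vector entries earlier in this patch are placed at .org tbr_tt >> 2, one handler pointer per vector). A rough C rendering, purely for illustration; the table declaration is an assumed shape, not the real one:

	extern unsigned long __entry_vector_table[];	/* one handler address per vector */

	static void dispatch_trap(unsigned long saved_tbr)
	{
		unsigned long off = (saved_tbr & TBR_TT) >> 2;	/* byte offset into the table */
		void (*handler)(void);

		handler = (void (*)(void))
			*(unsigned long *)((char *) __entry_vector_table + off);
		handler();	/* e.g. system_call, __entry_do_IRQ, __entry_insn_mmu_fault, ... */
	}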
536###############################################################################
537#
538# handle instruction MMU fault
539#
540###############################################################################
541#ifdef CONFIG_MMU
542 .globl __entry_insn_mmu_fault
543__entry_insn_mmu_fault:
544 LEDS 0x6010
545 setlos #0,gr8
546 movsg esr0,gr9
547 movsg scr2,gr10
548
549 # now that we've accessed the exception regs, we can enable exceptions
550 movsg psr,gr4
551 ori gr4,#PSR_ET,gr4
552 movgs gr4,psr
553
554 sethi.p %hi(do_page_fault),gr5
555 setlo %lo(do_page_fault),gr5
556 jmpl @(gr5,gr0) ; call do_page_fault(0,esr0,ear0)
557#endif
558
559
560###############################################################################
561#
562# handle instruction access error
563#
564###############################################################################
565 .globl __entry_insn_access_error
566__entry_insn_access_error:
567 LEDS 0x6011
568 sethi.p %hi(insn_access_error),gr5
569 setlo %lo(insn_access_error),gr5
570 movsg esfr1,gr8
571 movsg epcr0,gr9
572 movsg esr0,gr10
573
574 # now that we've accessed the exception regs, we can enable exceptions
575 movsg psr,gr4
576 ori gr4,#PSR_ET,gr4
577 movgs gr4,psr
578 jmpl @(gr5,gr0) ; call insn_access_error(esfr1,epcr0,esr0)
579
580###############################################################################
581#
582# handle various instructions of dubious legality
583#
584###############################################################################
585 .globl __entry_unsupported_trap
586 .globl __entry_illegal_instruction
587 .globl __entry_privileged_instruction
588 .globl __entry_debug_exception
589__entry_unsupported_trap:
590 subi gr21,#4,gr21
591 sti gr21,@(gr28,#REG_PC)
592__entry_illegal_instruction:
593__entry_privileged_instruction:
594__entry_debug_exception:
595 LEDS 0x6012
596 sethi.p %hi(illegal_instruction),gr5
597 setlo %lo(illegal_instruction),gr5
598 movsg esfr1,gr8
599 movsg epcr0,gr9
600 movsg esr0,gr10
601
602 # now that we've accessed the exception regs, we can enable exceptions
603 movsg psr,gr4
604 ori gr4,#PSR_ET,gr4
605 movgs gr4,psr
606 jmpl @(gr5,gr0) ; call ill_insn(esfr1,epcr0,esr0)
607
608###############################################################################
609#
610# handle media exception
611#
612###############################################################################
613 .globl __entry_media_exception
614__entry_media_exception:
615 LEDS 0x6013
616 sethi.p %hi(media_exception),gr5
617 setlo %lo(media_exception),gr5
618 movsg msr0,gr8
619 movsg msr1,gr9
620
621 # now that we've accessed the exception regs, we can enable exceptions
622 movsg psr,gr4
623 ori gr4,#PSR_ET,gr4
624 movgs gr4,psr
625 jmpl @(gr5,gr0) ; call media_excep(msr0,msr1)
626
627###############################################################################
628#
629# handle data MMU fault
630# handle data DAT fault (write-protect exception)
631#
632###############################################################################
633#ifdef CONFIG_MMU
634 .globl __entry_data_mmu_fault
635__entry_data_mmu_fault:
636 .globl __entry_data_dat_fault
637__entry_data_dat_fault:
638 LEDS 0x6014
639 setlos #1,gr8
640 movsg esr0,gr9
641 movsg scr2,gr10 ; saved EAR0
642
643 # now that we've accessed the exception regs, we can enable exceptions
644 movsg psr,gr4
645 ori gr4,#PSR_ET,gr4
646 movgs gr4,psr
647
648 sethi.p %hi(do_page_fault),gr5
649 setlo %lo(do_page_fault),gr5
650 jmpl @(gr5,gr0) ; call do_page_fault(1,esr0,ear0)
651#endif
652
653###############################################################################
654#
655# handle data and instruction access exceptions
656#
657###############################################################################
658 .globl __entry_insn_access_exception
659 .globl __entry_data_access_exception
660__entry_insn_access_exception:
661__entry_data_access_exception:
662 LEDS 0x6016
663 sethi.p %hi(memory_access_exception),gr5
664 setlo %lo(memory_access_exception),gr5
665 movsg esr0,gr8
666 movsg scr2,gr9 ; saved EAR0
667 movsg epcr0,gr10
668
669 # now that we've accessed the exception regs, we can enable exceptions
670 movsg psr,gr4
671 ori gr4,#PSR_ET,gr4
672 movgs gr4,psr
673 jmpl @(gr5,gr0) ; call memory_access_exception(esr0,ear0,epcr0)
674
675###############################################################################
676#
677# handle data access error
678#
679###############################################################################
680 .globl __entry_data_access_error
681__entry_data_access_error:
682 LEDS 0x6016
683 sethi.p %hi(data_access_error),gr5
684 setlo %lo(data_access_error),gr5
685 movsg esfr1,gr8
686 movsg esr15,gr9
687 movsg ear15,gr10
688
689 # now that we've accessed the exception regs, we can enable exceptions
690 movsg psr,gr4
691 ori gr4,#PSR_ET,gr4
692 movgs gr4,psr
693 jmpl @(gr5,gr0) ; call data_access_error(esfr1,esr15,ear15)
694
695###############################################################################
696#
697# handle data store error
698#
699###############################################################################
700 .globl __entry_data_store_error
701__entry_data_store_error:
702 LEDS 0x6017
703 sethi.p %hi(data_store_error),gr5
704 setlo %lo(data_store_error),gr5
705 movsg esfr1,gr8
706 movsg esr14,gr9
707
708 # now that we've accessed the exception regs, we can enable exceptions
709 movsg psr,gr4
710 ori gr4,#PSR_ET,gr4
711 movgs gr4,psr
712 jmpl @(gr5,gr0) ; call data_store_error(esfr1,esr14)
713
714###############################################################################
715#
716# handle division exception
717#
718###############################################################################
719 .globl __entry_division_exception
720__entry_division_exception:
721 LEDS 0x6018
722 sethi.p %hi(division_exception),gr5
723 setlo %lo(division_exception),gr5
724 movsg esfr1,gr8
725 movsg esr0,gr9
726 movsg isr,gr10
727
728 # now that we've accessed the exception regs, we can enable exceptions
729 movsg psr,gr4
730 ori gr4,#PSR_ET,gr4
731 movgs gr4,psr
732 jmpl @(gr5,gr0) ; call div_excep(esfr1,esr0,isr)
733
734###############################################################################
735#
736# handle compound exception
737#
738###############################################################################
739 .globl __entry_compound_exception
740__entry_compound_exception:
741 LEDS 0x6019
742 sethi.p %hi(compound_exception),gr5
743 setlo %lo(compound_exception),gr5
744 movsg esfr1,gr8
745 movsg esr0,gr9
746 movsg esr14,gr10
747 movsg esr15,gr11
748 movsg msr0,gr12
749 movsg msr1,gr13
750
751 # now that we've accessed the exception regs, we can enable exceptions
752 movsg psr,gr4
753 ori gr4,#PSR_ET,gr4
754 movgs gr4,psr
755 jmpl @(gr5,gr0) ; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)
756
757###############################################################################
758#
759# handle interrupts and NMIs
760#
761###############################################################################
762 .globl __entry_do_IRQ
763__entry_do_IRQ:
764 LEDS 0x6020
765
766 # we can enable exceptions
767 movsg psr,gr4
768 ori gr4,#PSR_ET,gr4
769 movgs gr4,psr
770 bra do_IRQ
771
772 .globl __entry_do_NMI
773__entry_do_NMI:
774 LEDS 0x6021
775
776 # we can enable exceptions
777 movsg psr,gr4
778 ori gr4,#PSR_ET,gr4
779 movgs gr4,psr
780 bra do_NMI
781
782###############################################################################
783#
784# the return path for a newly forked child process
785# - __switch_to() saved the old current pointer in GR8 for us
786#
787###############################################################################
788 .globl ret_from_fork
789ret_from_fork:
790 LEDS 0x6100
791 call schedule_tail
792
793 # fork & co. return 0 to child
794 setlos.p #0,gr8
795 bra __syscall_exit
796
797###################################################################################################
798#
799# Return to user mode is not as complex as all this looks,
800# but we want the default path for a system call return to
801# go as quickly as possible which is why some of this is
802# less clear than it otherwise should be.
803#
804###################################################################################################
805 .balign L1_CACHE_BYTES
806 .globl system_call
807system_call:
808 LEDS 0x6101
809 movsg psr,gr4 ; enable exceptions
810 ori gr4,#PSR_ET,gr4
811 movgs gr4,psr
812
813 sti gr7,@(gr28,#REG_SYSCALLNO)
814 sti.p gr8,@(gr28,#REG_ORIG_GR8)
815
816 subicc gr7,#nr_syscalls,gr0,icc0
817 bnc icc0,#0,__syscall_badsys
818
819 ldi @(gr15,#TI_FLAGS),gr4
820 ori gr4,#_TIF_SYSCALL_TRACE,gr4
821 andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
822 bne icc0,#0,__syscall_trace_entry
823
824__syscall_call:
825 slli.p gr7,#2,gr7
826 sethi %hi(sys_call_table),gr5
827 setlo %lo(sys_call_table),gr5
828 ld @(gr5,gr7),gr4
829 calll @(gr4,gr0)
830
831
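In outline, the system_call path above does what this hedged C sketch describes: range-check the syscall number, detour through the tracer when _TIF_SYSCALL_TRACE is set, then call through sys_call_table (defined at the bottom of this file); the return value is stored into REG_GR(8) by __syscall_exit below. The table's element type and the single-argument do_syscall_trace() call are assumptions made for the sketch:

	extern long (*sys_call_table[])(unsigned long, unsigned long, unsigned long,
					unsigned long, unsigned long, unsigned long);

	static long dispatch_syscall(unsigned long nr, unsigned long ti_flags,
				     unsigned long a, unsigned long b, unsigned long c,
				     unsigned long d, unsigned long e, unsigned long f)
	{
		if (nr >= nr_syscalls)
			return -ENOSYS;			/* __syscall_badsys */

		if (ti_flags & _TIF_SYSCALL_TRACE)
			do_syscall_trace(0);		/* entry tracing; args are then
							 * reloaded from the exception frame */

		return sys_call_table[nr](a, b, c, d, e, f);	/* GR8-GR13 carry the arguments */
	}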
832###############################################################################
833#
834# return to interrupted process
835#
836###############################################################################
837__syscall_exit:
838 LEDS 0x6300
839
840 sti gr8,@(gr28,#REG_GR(8)) ; save return value
841
842 # rebuild saved psr - execve will change it for init/main.c
843 ldi @(gr28,#REG_PSR),gr22
844 srli gr22,#1,gr5
845 andi.p gr22,#~PSR_PS,gr22
846 andi gr5,#PSR_PS,gr5
847 or gr5,gr22,gr22
848 ori gr22,#PSR_S,gr22
849
850 # keep current PSR in GR23
851 movsg psr,gr23
852
853 # make sure we don't miss an interrupt setting need_resched or sigpending between
854 # sampling and the RETT
855 ori gr23,#PSR_PIL_14,gr23
856 movgs gr23,psr
857
858 ldi @(gr15,#TI_FLAGS),gr4
859 sethi.p %hi(_TIF_ALLWORK_MASK),gr5
860 setlo %lo(_TIF_ALLWORK_MASK),gr5
861 andcc gr4,gr5,gr0,icc0
862 bne icc0,#0,__syscall_exit_work
863
864 # restore all registers and return
865__entry_return_direct:
866 LEDS 0x6301
867
868 andi gr22,#~PSR_ET,gr22
869 movgs gr22,psr
870
871 ldi @(gr28,#REG_ISR),gr23
872 lddi @(gr28,#REG_CCR),gr24
873 lddi @(gr28,#REG_LR) ,gr26
874 ldi @(gr28,#REG_PC) ,gr21
875 ldi @(gr28,#REG_TBR),gr20
876
877 movgs gr20,tbr
878 movgs gr21,pcsr
879 movgs gr23,isr
880 movgs gr24,ccr
881 movgs gr25,cccr
882 movgs gr26,lr
883 movgs gr27,lcr
884
885 lddi @(gr28,#REG_GNER0),gr4
886 movgs gr4,gner0
887 movgs gr5,gner1
888
889 lddi @(gr28,#REG_IACC0),gr4
890 movgs gr4,iacc0h
891 movgs gr5,iacc0l
892
893 lddi @(gr28,#REG_GR(4)) ,gr4
894 lddi @(gr28,#REG_GR(6)) ,gr6
895 lddi @(gr28,#REG_GR(8)) ,gr8
896 lddi @(gr28,#REG_GR(10)),gr10
897 lddi @(gr28,#REG_GR(12)),gr12
898 lddi @(gr28,#REG_GR(14)),gr14
899 lddi @(gr28,#REG_GR(16)),gr16
900 lddi @(gr28,#REG_GR(18)),gr18
901 lddi @(gr28,#REG_GR(20)),gr20
902 lddi @(gr28,#REG_GR(22)),gr22
903 lddi @(gr28,#REG_GR(24)),gr24
904 lddi @(gr28,#REG_GR(26)),gr26
905 ldi @(gr28,#REG_GR(29)),gr29
906 lddi @(gr28,#REG_GR(30)),gr30
907
908 # check to see if a debugging return is required
909 LEDS 0x67f0
910 movsg ccr,gr2
911 ldi @(gr28,#REG__STATUS),gr3
912 andicc gr3,#REG__STATUS_STEP,gr0,icc0
913 bne icc0,#0,__entry_return_singlestep
914 movgs gr2,ccr
915
916 ldi @(gr28,#REG_SP) ,sp
917 lddi @(gr28,#REG_GR(2)) ,gr2
918 ldi @(gr28,#REG_GR(28)),gr28
919
920 LEDS 0x67fe
921// movsg pcsr,gr31
922// LEDS32
923
924#if 0
925 # store the current frame in the workram on the FR451
926 movgs gr28,scr2
927 sethi.p %hi(0xfe800000),gr28
928 setlo %lo(0xfe800000),gr28
929
930 stdi gr2,@(gr28,#REG_GR(2))
931 stdi gr4,@(gr28,#REG_GR(4))
932 stdi gr6,@(gr28,#REG_GR(6))
933 stdi gr8,@(gr28,#REG_GR(8))
934 stdi gr10,@(gr28,#REG_GR(10))
935 stdi gr12,@(gr28,#REG_GR(12))
936 stdi gr14,@(gr28,#REG_GR(14))
937 stdi gr16,@(gr28,#REG_GR(16))
938 stdi gr18,@(gr28,#REG_GR(18))
939 stdi gr24,@(gr28,#REG_GR(24))
940 stdi gr26,@(gr28,#REG_GR(26))
941 sti gr29,@(gr28,#REG_GR(29))
942 stdi gr30,@(gr28,#REG_GR(30))
943
944 movsg tbr ,gr30
945 sti gr30,@(gr28,#REG_TBR)
946 movsg pcsr,gr30
947 sti gr30,@(gr28,#REG_PC)
948 movsg psr ,gr30
949 sti gr30,@(gr28,#REG_PSR)
950 movsg isr ,gr30
951 sti gr30,@(gr28,#REG_ISR)
952 movsg ccr ,gr30
953 movsg cccr,gr31
954 stdi gr30,@(gr28,#REG_CCR)
955 movsg lr ,gr30
956 movsg lcr ,gr31
957 stdi gr30,@(gr28,#REG_LR)
958 sti gr0 ,@(gr28,#REG_SYSCALLNO)
959 movsg scr2,gr28
960#endif
961
962 rett #0
963
964 # return via break.S
965__entry_return_singlestep:
966 movgs gr2,ccr
967 lddi @(gr28,#REG_GR(2)) ,gr2
968 ldi @(gr28,#REG_SP) ,sp
969 ldi @(gr28,#REG_GR(28)),gr28
970 LEDS 0x67ff
971 break
972 .globl __entry_return_singlestep_breaks_here
973__entry_return_singlestep_breaks_here:
974 nop
975
976
977###############################################################################
978#
979# return to a process interrupted in kernel space
980# - we need to consider preemption if that is enabled
981#
982###############################################################################
983 .balign L1_CACHE_BYTES
984__entry_return_from_kernel_exception:
985 LEDS 0x6302
986 movsg psr,gr23
987 ori gr23,#PSR_PIL_14,gr23
988 movgs gr23,psr
989 bra __entry_return_direct
990
991 .balign L1_CACHE_BYTES
992__entry_return_from_kernel_interrupt:
993 LEDS 0x6303
994 movsg psr,gr23
995 ori gr23,#PSR_PIL_14,gr23
996 movgs gr23,psr
997
998#ifdef CONFIG_PREEMPT
999 ldi @(gr15,#TI_PRE_COUNT),gr5
1000 subicc gr5,#0,gr0,icc0
1001 beq icc0,#0,__entry_return_direct
1002
1003__entry_preempt_need_resched:
1004 ldi @(gr15,#TI_FLAGS),gr4
1005 andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
1006 beq icc0,#1,__entry_return_direct
1007
1008 setlos #PREEMPT_ACTIVE,gr5
1009 sti gr5,@(gr15,#TI_FLAGS)
1010
1011 andi gr23,#~PSR_PIL,gr23
1012 movgs gr23,psr
1013
1014 call schedule
1015 sti gr0,@(gr15,#TI_PRE_COUNT)
1016
1017 movsg psr,gr23
1018 ori gr23,#PSR_PIL_14,gr23
1019 movgs gr23,psr
1020 bra __entry_preempt_need_resched
1021#else
1022 bra __entry_return_direct
1023#endif
1024
1025
1026###############################################################################
1027#
1028# perform work that needs to be done immediately before resumption
1029#
1030###############################################################################
1031 .globl __entry_return_from_user_exception
1032 .balign L1_CACHE_BYTES
1033__entry_return_from_user_exception:
1034 LEDS 0x6501
1035
1036__entry_resume_userspace:
1037 # make sure we don't miss an interrupt setting need_resched or sigpending between
1038 # sampling and the RETT
1039 movsg psr,gr23
1040 ori gr23,#PSR_PIL_14,gr23
1041 movgs gr23,psr
1042
1043__entry_return_from_user_interrupt:
1044 LEDS 0x6402
1045 ldi @(gr15,#TI_FLAGS),gr4
1046 sethi.p %hi(_TIF_WORK_MASK),gr5
1047 setlo %lo(_TIF_WORK_MASK),gr5
1048 andcc gr4,gr5,gr0,icc0
1049 beq icc0,#1,__entry_return_direct
1050
1051__entry_work_pending:
1052 LEDS 0x6404
1053 andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
1054 beq icc0,#1,__entry_work_notifysig
1055
1056__entry_work_resched:
1057 LEDS 0x6408
1058 movsg psr,gr23
1059 andi gr23,#~PSR_PIL,gr23
1060 movgs gr23,psr
1061 call schedule
1062 movsg psr,gr23
1063 ori gr23,#PSR_PIL_14,gr23
1064 movgs gr23,psr
1065
1066 LEDS 0x6401
1067 ldi @(gr15,#TI_FLAGS),gr4
1068 sethi.p %hi(_TIF_WORK_MASK),gr5
1069 setlo %lo(_TIF_WORK_MASK),gr5
1070 andcc gr4,gr5,gr0,icc0
1071 beq icc0,#1,__entry_return_direct
1072 andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
1073 bne icc0,#1,__entry_work_resched
1074
1075__entry_work_notifysig:
1076 LEDS 0x6410
1077 ori.p gr4,#0,gr8
1078 call do_notify_resume
1079 bra __entry_return_direct
1080
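Taken together, __entry_return_from_user_interrupt, __entry_work_pending, __entry_work_resched and __entry_work_notifysig above amount to the usual return-to-user work loop. A hedged C sketch follows; the PIL raising/lowering around schedule() is shown only as a comment, and the one-argument do_notify_resume() call mirrors the single gr8 argument passed above:

	for (;;) {
		unsigned long flags = current_thread_info()->flags;

		if (!(flags & _TIF_WORK_MASK))
			break;				/* __entry_return_direct */

		if (flags & _TIF_NEED_RESCHED) {
			/* drop PSR.PIL, reschedule, raise PIL again, then re-test */
			schedule();
			continue;
		}

		do_notify_resume(flags);		/* pending signals etc. */
		break;					/* then __entry_return_direct */
	}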
1081 # perform syscall entry tracing
1082__syscall_trace_entry:
1083 LEDS 0x6320
1084 setlos.p #0,gr8
1085 call do_syscall_trace
1086
1087 ldi @(gr28,#REG_SYSCALLNO),gr7
1088 lddi @(gr28,#REG_GR(8)) ,gr8
1089 lddi @(gr28,#REG_GR(10)),gr10
1090 lddi.p @(gr28,#REG_GR(12)),gr12
1091
1092 subicc gr7,#nr_syscalls,gr0,icc0
1093 bnc icc0,#0,__syscall_badsys
1094 bra __syscall_call
1095
1096 # perform syscall exit tracing
1097__syscall_exit_work:
1098 LEDS 0x6340
1099 andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
1100 beq icc0,#1,__entry_work_pending
1101
1102 movsg psr,gr23
1103 andi gr23,#~PSR_PIL,gr23 ; could let do_syscall_trace() call schedule()
1104 movgs gr23,psr
1105
1106 setlos.p #1,gr8
1107 call do_syscall_trace
1108 bra __entry_resume_userspace
1109
1110__syscall_badsys:
1111 LEDS 0x6380
1112 setlos #-ENOSYS,gr8
1113 sti gr8,@(gr28,#REG_GR(8)) ; save return value
1114 bra __entry_resume_userspace
1115
1116
1117###############################################################################
1118#
1119# syscall vector table
1120#
1121###############################################################################
1122#ifdef CONFIG_MMU
1123#define __MMU(X) X
1124#else
1125#define __MMU(X) sys_ni_syscall
1126#endif
1127
1128 .section .rodata
1129ALIGN
1130 .globl sys_call_table
1131sys_call_table:
1132 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
1133 .long sys_exit
1134 .long sys_fork
1135 .long sys_read
1136 .long sys_write
1137 .long sys_open /* 5 */
1138 .long sys_close
1139 .long sys_waitpid
1140 .long sys_creat
1141 .long sys_link
1142 .long sys_unlink /* 10 */
1143 .long sys_execve
1144 .long sys_chdir
1145 .long sys_time
1146 .long sys_mknod
1147 .long sys_chmod /* 15 */
1148 .long sys_lchown16
1149 .long sys_ni_syscall /* old break syscall holder */
1150 .long sys_stat
1151 .long sys_lseek
1152 .long sys_getpid /* 20 */
1153 .long sys_mount
1154 .long sys_oldumount
1155 .long sys_setuid16
1156 .long sys_getuid16
1157 .long sys_ni_syscall // sys_stime /* 25 */
1158 .long sys_ptrace
1159 .long sys_alarm
1160 .long sys_fstat
1161 .long sys_pause
1162 .long sys_utime /* 30 */
1163 .long sys_ni_syscall /* old stty syscall holder */
1164 .long sys_ni_syscall /* old gtty syscall holder */
1165 .long sys_access
1166 .long sys_nice
1167 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
1168 .long sys_sync
1169 .long sys_kill
1170 .long sys_rename
1171 .long sys_mkdir
1172 .long sys_rmdir /* 40 */
1173 .long sys_dup
1174 .long sys_pipe
1175 .long sys_times
1176 .long sys_ni_syscall /* old prof syscall holder */
1177 .long sys_brk /* 45 */
1178 .long sys_setgid16
1179 .long sys_getgid16
1180 .long sys_ni_syscall // sys_signal
1181 .long sys_geteuid16
1182 .long sys_getegid16 /* 50 */
1183 .long sys_acct
1184 .long sys_umount /* recycled never used phys() */
1185 .long sys_ni_syscall /* old lock syscall holder */
1186 .long sys_ioctl
1187 .long sys_fcntl /* 55 */
1188 .long sys_ni_syscall /* old mpx syscall holder */
1189 .long sys_setpgid
1190 .long sys_ni_syscall /* old ulimit syscall holder */
1191 .long sys_ni_syscall /* old old uname syscall */
1192 .long sys_umask /* 60 */
1193 .long sys_chroot
1194 .long sys_ustat
1195 .long sys_dup2
1196 .long sys_getppid
1197 .long sys_getpgrp /* 65 */
1198 .long sys_setsid
1199 .long sys_sigaction
1200 .long sys_ni_syscall // sys_sgetmask
1201 .long sys_ni_syscall // sys_ssetmask
1202 .long sys_setreuid16 /* 70 */
1203 .long sys_setregid16
1204 .long sys_sigsuspend
1205 .long sys_ni_syscall // sys_sigpending
1206 .long sys_sethostname
1207 .long sys_setrlimit /* 75 */
1208 .long sys_ni_syscall // sys_old_getrlimit
1209 .long sys_getrusage
1210 .long sys_gettimeofday
1211 .long sys_settimeofday
1212 .long sys_getgroups16 /* 80 */
1213 .long sys_setgroups16
1214 .long sys_ni_syscall /* old_select slot */
1215 .long sys_symlink
1216 .long sys_lstat
1217 .long sys_readlink /* 85 */
1218 .long sys_uselib
1219 .long sys_swapon
1220 .long sys_reboot
1221 .long sys_ni_syscall // old_readdir
1222 .long sys_ni_syscall /* 90 */ /* old_mmap slot */
1223 .long sys_munmap
1224 .long sys_truncate
1225 .long sys_ftruncate
1226 .long sys_fchmod
1227 .long sys_fchown16 /* 95 */
1228 .long sys_getpriority
1229 .long sys_setpriority
1230 .long sys_ni_syscall /* old profil syscall holder */
1231 .long sys_statfs
1232 .long sys_fstatfs /* 100 */
1233 .long sys_ni_syscall /* ioperm for i386 */
1234 .long sys_socketcall
1235 .long sys_syslog
1236 .long sys_setitimer
1237 .long sys_getitimer /* 105 */
1238 .long sys_newstat
1239 .long sys_newlstat
1240 .long sys_newfstat
1241 .long sys_ni_syscall /* obsolete olduname() syscall */
1242 .long sys_ni_syscall /* iopl for i386 */ /* 110 */
1243 .long sys_vhangup
1244 .long sys_ni_syscall /* obsolete idle() syscall */
1245 .long sys_ni_syscall /* vm86old for i386 */
1246 .long sys_wait4
1247 .long sys_swapoff /* 115 */
1248 .long sys_sysinfo
1249 .long sys_ipc
1250 .long sys_fsync
1251 .long sys_sigreturn
1252 .long sys_clone /* 120 */
1253 .long sys_setdomainname
1254 .long sys_newuname
1255 .long sys_ni_syscall /* old "cacheflush" */
1256 .long sys_adjtimex
1257 .long __MMU(sys_mprotect) /* 125 */
1258 .long sys_sigprocmask
1259 .long sys_ni_syscall /* old "create_module" */
1260 .long sys_init_module
1261 .long sys_delete_module
1262 .long sys_ni_syscall /* old "get_kernel_syms" */
1263 .long sys_quotactl
1264 .long sys_getpgid
1265 .long sys_fchdir
1266 .long sys_bdflush
1267 .long sys_sysfs /* 135 */
1268 .long sys_personality
1269 .long sys_ni_syscall /* for afs_syscall */
1270 .long sys_setfsuid16
1271 .long sys_setfsgid16
1272 .long sys_llseek /* 140 */
1273 .long sys_getdents
1274 .long sys_select
1275 .long sys_flock
1276 .long __MMU(sys_msync)
1277 .long sys_readv /* 145 */
1278 .long sys_writev
1279 .long sys_getsid
1280 .long sys_fdatasync
1281 .long sys_sysctl
1282 .long __MMU(sys_mlock) /* 150 */
1283 .long __MMU(sys_munlock)
1284 .long __MMU(sys_mlockall)
1285 .long __MMU(sys_munlockall)
1286 .long sys_sched_setparam
1287 .long sys_sched_getparam /* 155 */
1288 .long sys_sched_setscheduler
1289 .long sys_sched_getscheduler
1290 .long sys_sched_yield
1291 .long sys_sched_get_priority_max
1292 .long sys_sched_get_priority_min /* 160 */
1293 .long sys_sched_rr_get_interval
1294 .long sys_nanosleep
1295 .long __MMU(sys_mremap)
1296 .long sys_setresuid16
1297 .long sys_getresuid16 /* 165 */
1298 .long sys_ni_syscall /* for vm86 */
1299 .long sys_ni_syscall /* Old sys_query_module */
1300 .long sys_poll
1301 .long sys_nfsservctl
1302 .long sys_setresgid16 /* 170 */
1303 .long sys_getresgid16
1304 .long sys_prctl
1305 .long sys_rt_sigreturn
1306 .long sys_rt_sigaction
1307 .long sys_rt_sigprocmask /* 175 */
1308 .long sys_rt_sigpending
1309 .long sys_rt_sigtimedwait
1310 .long sys_rt_sigqueueinfo
1311 .long sys_rt_sigsuspend
1312 .long sys_pread64 /* 180 */
1313 .long sys_pwrite64
1314 .long sys_chown16
1315 .long sys_getcwd
1316 .long sys_capget
1317 .long sys_capset /* 185 */
1318 .long sys_sigaltstack
1319 .long sys_sendfile
1320 .long sys_ni_syscall /* streams1 */
1321 .long sys_ni_syscall /* streams2 */
1322 .long sys_vfork /* 190 */
1323 .long sys_getrlimit
1324 .long sys_mmap2
1325 .long sys_truncate64
1326 .long sys_ftruncate64
1327 .long sys_stat64 /* 195 */
1328 .long sys_lstat64
1329 .long sys_fstat64
1330 .long sys_lchown
1331 .long sys_getuid
1332 .long sys_getgid /* 200 */
1333 .long sys_geteuid
1334 .long sys_getegid
1335 .long sys_setreuid
1336 .long sys_setregid
1337 .long sys_getgroups /* 205 */
1338 .long sys_setgroups
1339 .long sys_fchown
1340 .long sys_setresuid
1341 .long sys_getresuid
1342 .long sys_setresgid /* 210 */
1343 .long sys_getresgid
1344 .long sys_chown
1345 .long sys_setuid
1346 .long sys_setgid
1347 .long sys_setfsuid /* 215 */
1348 .long sys_setfsgid
1349 .long sys_pivot_root
1350 .long __MMU(sys_mincore)
1351 .long __MMU(sys_madvise)
1352 .long sys_getdents64 /* 220 */
1353 .long sys_fcntl64
1354 .long sys_ni_syscall /* reserved for TUX */
1355 .long sys_ni_syscall /* Reserved for Security */
1356 .long sys_gettid
1357 .long sys_readahead /* 225 */
1358 .long sys_setxattr
1359 .long sys_lsetxattr
1360 .long sys_fsetxattr
1361 .long sys_getxattr
1362 .long sys_lgetxattr /* 230 */
1363 .long sys_fgetxattr
1364 .long sys_listxattr
1365 .long sys_llistxattr
1366 .long sys_flistxattr
1367 .long sys_removexattr /* 235 */
1368 .long sys_lremovexattr
1369 .long sys_fremovexattr
1370 .long sys_tkill
1371 .long sys_sendfile64
1372 .long sys_futex /* 240 */
1373 .long sys_sched_setaffinity
1374 .long sys_sched_getaffinity
1375 .long sys_ni_syscall //sys_set_thread_area
1376 .long sys_ni_syscall //sys_get_thread_area
1377 .long sys_io_setup /* 245 */
1378 .long sys_io_destroy
1379 .long sys_io_getevents
1380 .long sys_io_submit
1381 .long sys_io_cancel
1382 .long sys_fadvise64 /* 250 */
1383 .long sys_ni_syscall
1384 .long sys_exit_group
1385 .long sys_lookup_dcookie
1386 .long sys_epoll_create
1387 .long sys_epoll_ctl /* 255 */
1388 .long sys_epoll_wait
1389 .long __MMU(sys_remap_file_pages)
1390 .long sys_set_tid_address
1391 .long sys_timer_create
1392 .long sys_timer_settime /* 260 */
1393 .long sys_timer_gettime
1394 .long sys_timer_getoverrun
1395 .long sys_timer_delete
1396 .long sys_clock_settime
1397 .long sys_clock_gettime /* 265 */
1398 .long sys_clock_getres
1399 .long sys_clock_nanosleep
1400 .long sys_statfs64
1401 .long sys_fstatfs64
1402 .long sys_tgkill /* 270 */
1403 .long sys_utimes
1404 .long sys_fadvise64_64
1405 .long sys_ni_syscall /* sys_vserver */
1406 .long sys_mbind
1407 .long sys_get_mempolicy
1408 .long sys_set_mempolicy
1409 .long sys_mq_open
1410 .long sys_mq_unlink
1411 .long sys_mq_timedsend
1412 .long sys_mq_timedreceive /* 280 */
1413 .long sys_mq_notify
1414 .long sys_mq_getsetattr
1415 .long sys_ni_syscall /* reserved for kexec */
1416 .long sys_waitid
1417 .long sys_ni_syscall /* 285 */ /* available */
1418 .long sys_add_key
1419 .long sys_request_key
1420 .long sys_keyctl
1421 .long sys_ni_syscall // sys_vperfctr_open
1422 .long sys_ni_syscall // sys_vperfctr_control /* 290 */
1423 .long sys_ni_syscall // sys_vperfctr_unlink
1424 .long sys_ni_syscall // sys_vperfctr_iresume
1425 .long sys_ni_syscall // sys_vperfctr_read
1426
1427
1428syscall_table_size = (. - sys_call_table)
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
new file mode 100644
index 000000000000..62cfbd9b4f98
--- /dev/null
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -0,0 +1,124 @@
1#include <linux/module.h>
2#include <linux/linkage.h>
3#include <linux/sched.h>
4#include <linux/string.h>
5#include <linux/mm.h>
6#include <linux/user.h>
7#include <linux/elfcore.h>
8#include <linux/in6.h>
9#include <linux/interrupt.h>
10#include <linux/config.h>
11
12#include <asm/setup.h>
13#include <asm/pgalloc.h>
14#include <asm/irq.h>
15#include <asm/io.h>
16#include <asm/semaphore.h>
17#include <asm/checksum.h>
18#include <asm/hardirq.h>
19#include <asm/current.h>
20
21extern void dump_thread(struct pt_regs *, struct user *);
22extern long __memcpy_user(void *dst, const void *src, size_t count);
23
24/* platform dependent support */
25
26EXPORT_SYMBOL(__ioremap);
27EXPORT_SYMBOL(iounmap);
28
29EXPORT_SYMBOL(dump_thread);
30EXPORT_SYMBOL(strnlen);
31EXPORT_SYMBOL(strrchr);
32EXPORT_SYMBOL(strstr);
33EXPORT_SYMBOL(strchr);
34EXPORT_SYMBOL(strcat);
35EXPORT_SYMBOL(strlen);
36EXPORT_SYMBOL(strcmp);
37EXPORT_SYMBOL(strncmp);
38EXPORT_SYMBOL(strncpy);
39
40EXPORT_SYMBOL(ip_fast_csum);
41
42#if 0
43EXPORT_SYMBOL(local_irq_count);
44EXPORT_SYMBOL(local_bh_count);
45#endif
46EXPORT_SYMBOL(kernel_thread);
47
48EXPORT_SYMBOL(enable_irq);
49EXPORT_SYMBOL(disable_irq);
50EXPORT_SYMBOL(__res_bus_clock_speed_HZ);
51EXPORT_SYMBOL(__page_offset);
52EXPORT_SYMBOL(__memcpy_user);
53EXPORT_SYMBOL(flush_dcache_page);
54
55#ifndef CONFIG_MMU
56EXPORT_SYMBOL(memory_start);
57EXPORT_SYMBOL(memory_end);
58#endif
59
60EXPORT_SYMBOL(__debug_bug_trap);
61
62/* Networking helper routines. */
63EXPORT_SYMBOL(csum_partial_copy);
64
65/* The following are special because they're not called
66 explicitly (the C compiler generates them). Fortunately,
67 their interface isn't gonna change any time soon now, so
68 it's OK to leave it out of version control. */
69EXPORT_SYMBOL(memcpy);
70EXPORT_SYMBOL(memset);
71EXPORT_SYMBOL(memcmp);
72EXPORT_SYMBOL(memscan);
73EXPORT_SYMBOL(memmove);
74EXPORT_SYMBOL(strtok);
75
76EXPORT_SYMBOL(get_wchan);
77
78#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
79EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
80EXPORT_SYMBOL(atomic_test_and_OR_mask);
81EXPORT_SYMBOL(atomic_test_and_XOR_mask);
82EXPORT_SYMBOL(atomic_add_return);
83EXPORT_SYMBOL(atomic_sub_return);
84EXPORT_SYMBOL(__xchg_8);
85EXPORT_SYMBOL(__xchg_16);
86EXPORT_SYMBOL(__xchg_32);
87EXPORT_SYMBOL(__cmpxchg_8);
88EXPORT_SYMBOL(__cmpxchg_16);
89EXPORT_SYMBOL(__cmpxchg_32);
90#endif
91
92/*
93 * libgcc functions - functions that are used internally by the
94 * compiler... (prototypes are not correct though, but that
95 * doesn't really matter since they're not versioned).
96 */
97extern void __gcc_bcmp(void);
98extern void __ashldi3(void);
99extern void __ashrdi3(void);
100extern void __cmpdi2(void);
101extern void __divdi3(void);
102extern void __lshrdi3(void);
103extern void __moddi3(void);
104extern void __muldi3(void);
105extern void __negdi2(void);
106extern void __ucmpdi2(void);
107extern void __udivdi3(void);
108extern void __udivmoddi4(void);
109extern void __umoddi3(void);
110
111 /* gcc lib functions */
112//EXPORT_SYMBOL(__gcc_bcmp);
113EXPORT_SYMBOL(__ashldi3);
114EXPORT_SYMBOL(__ashrdi3);
115//EXPORT_SYMBOL(__cmpdi2);
116//EXPORT_SYMBOL(__divdi3);
117EXPORT_SYMBOL(__lshrdi3);
118//EXPORT_SYMBOL(__moddi3);
119EXPORT_SYMBOL(__muldi3);
120EXPORT_SYMBOL(__negdi2);
121//EXPORT_SYMBOL(__ucmpdi2);
122//EXPORT_SYMBOL(__udivdi3);
123//EXPORT_SYMBOL(__udivmoddi4);
124//EXPORT_SYMBOL(__umoddi3);
diff --git a/arch/frv/kernel/gdb-io.c b/arch/frv/kernel/gdb-io.c
new file mode 100644
index 000000000000..c997bccb9221
--- /dev/null
+++ b/arch/frv/kernel/gdb-io.c
@@ -0,0 +1,216 @@
1/* gdb-io.c: FR403 GDB stub I/O
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/signal.h>
15#include <linux/sched.h>
16#include <linux/mm.h>
17#include <linux/console.h>
18#include <linux/init.h>
19#include <linux/serial_reg.h>
20
21#include <asm/pgtable.h>
22#include <asm/system.h>
23#include <asm/irc-regs.h>
24#include <asm/timer-regs.h>
25#include <asm/gdb-stub.h>
26#include "gdb-io.h"
27
28#ifdef CONFIG_GDBSTUB_UART0
29#define __UART(X) (*(volatile uint8_t *)(UART0_BASE + (UART_##X)))
30#define __UART_IRR_NMI 0xff0f0000
31#else /* CONFIG_GDBSTUB_UART1 */
32#define __UART(X) (*(volatile uint8_t *)(UART1_BASE + (UART_##X)))
33#define __UART_IRR_NMI 0xfff00000
34#endif
35
36#define LSR_WAIT_FOR(STATE) \
37do { \
38 gdbstub_do_rx(); \
39} while (!(__UART(LSR) & UART_LSR_##STATE))
40
41#define FLOWCTL_QUERY(LINE) ({ __UART(MSR) & UART_MSR_##LINE; })
42#define FLOWCTL_CLEAR(LINE) do { __UART(MCR) &= ~UART_MCR_##LINE; mb(); } while (0)
43#define FLOWCTL_SET(LINE) do { __UART(MCR) |= UART_MCR_##LINE; mb(); } while (0)
44
45#define FLOWCTL_WAIT_FOR(LINE) \
46do { \
47 gdbstub_do_rx(); \
48} while(!FLOWCTL_QUERY(LINE))
49
50/*****************************************************************************/
51/*
52 * initialise the GDB stub
53 * - called with PSR.ET==0, so can't incur external interrupts
54 */
55void gdbstub_io_init(void)
56{
57 /* set up the serial port */
58 __UART(LCR) = UART_LCR_WLEN8; /* 8N1 */
59 __UART(FCR) =
60 UART_FCR_ENABLE_FIFO |
61 UART_FCR_CLEAR_RCVR |
62 UART_FCR_CLEAR_XMIT |
63 UART_FCR_TRIGGER_1;
64
65 FLOWCTL_CLEAR(DTR);
66 FLOWCTL_SET(RTS);
67
68// gdbstub_set_baud(115200);
69
70 /* we want to get serial receive interrupts */
71 __UART(IER) = UART_IER_RDI | UART_IER_RLSI;
72 mb();
73
74 __set_IRR(6, __UART_IRR_NMI); /* map ERRs and UARTx to NMI */
75
76} /* end gdbstub_io_init() */
77
78/*****************************************************************************/
79/*
80 * set up the GDB stub serial port baud rate timers
81 */
82void gdbstub_set_baud(unsigned baud)
83{
84 unsigned value, high, low;
85 u8 lcr;
86
87 /* work out the divisor to give us the nearest higher baud rate */
88 value = __serial_clock_speed_HZ / 16 / baud;
89
90 /* determine the baud rate range */
91 high = __serial_clock_speed_HZ / 16 / value;
92 low = __serial_clock_speed_HZ / 16 / (value + 1);
93
94 /* pick the nearest bound */
95 if (low + (high - low) / 2 > baud)
96 value++;
97
98 lcr = __UART(LCR);
99 __UART(LCR) |= UART_LCR_DLAB;
100 mb();
101 __UART(DLL) = value & 0xff;
102 __UART(DLM) = (value >> 8) & 0xff;
103 mb();
104 __UART(LCR) = lcr;
105 mb();
106
107} /* end gdbstub_set_baud() */
108
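To make the rounding above concrete with a purely hypothetical clock: if __serial_clock_speed_HZ were 33000000 and baud were 115200, then value = 2062500 / 115200 = 17, giving high = 121323 and low = 114583; since low + (high - low) / 2 = 117953 exceeds 115200, value is bumped to 18 and the UART ends up at roughly 114583 baud, the closer of the two achievable rates.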
109/*****************************************************************************/
110/*
111 * receive characters into the receive FIFO
112 */
113void gdbstub_do_rx(void)
114{
115 unsigned ix, nix;
116
117 ix = gdbstub_rx_inp;
118
119 while (__UART(LSR) & UART_LSR_DR) {
120 nix = (ix + 2) & 0xfff;
121 if (nix == gdbstub_rx_outp)
122 break;
123
124 gdbstub_rx_buffer[ix++] = __UART(LSR);
125 gdbstub_rx_buffer[ix++] = __UART(RX);
126 ix = nix;
127 }
128
129 gdbstub_rx_inp = ix;
130
131 __clr_RC(15);
132 __clr_IRL();
133
134} /* end gdbstub_do_rx() */
135
136/*****************************************************************************/
137/*
138 * wait for a character to come from the debugger
139 */
140int gdbstub_rx_char(unsigned char *_ch, int nonblock)
141{
142 unsigned ix;
143 u8 ch, st;
144
145 *_ch = 0xff;
146
147 if (gdbstub_rx_unget) {
148 *_ch = gdbstub_rx_unget;
149 gdbstub_rx_unget = 0;
150 return 0;
151 }
152
153 try_again:
154 gdbstub_do_rx();
155
156 /* pull chars out of the buffer */
157 ix = gdbstub_rx_outp;
158 if (ix == gdbstub_rx_inp) {
159 if (nonblock)
160 return -EAGAIN;
161 //watchdog_alert_counter = 0;
162 goto try_again;
163 }
164
165 st = gdbstub_rx_buffer[ix++];
166 ch = gdbstub_rx_buffer[ix++];
167 gdbstub_rx_outp = ix & 0x00000fff;
168
169 if (st & UART_LSR_BI) {
170 gdbstub_proto("### GDB Rx Break Detected ###\n");
171 return -EINTR;
172 }
173 else if (st & (UART_LSR_FE|UART_LSR_OE|UART_LSR_PE)) {
174 gdbstub_proto("### GDB Rx Error (st=%02x) ###\n",st);
175 return -EIO;
176 }
177 else {
178 gdbstub_proto("### GDB Rx %02x (st=%02x) ###\n",ch,st);
179 *_ch = ch & 0x7f;
180 return 0;
181 }
182
183} /* end gdbstub_rx_char() */
184
185/*****************************************************************************/
186/*
187 * send a character to the debugger
188 */
189void gdbstub_tx_char(unsigned char ch)
190{
191 FLOWCTL_SET(DTR);
192 LSR_WAIT_FOR(THRE);
193// FLOWCTL_WAIT_FOR(CTS);
194
195 if (ch == 0x0a) {
196 __UART(TX) = 0x0d;
197 mb();
198 LSR_WAIT_FOR(THRE);
199// FLOWCTL_WAIT_FOR(CTS);
200 }
201 __UART(TX) = ch;
202 mb();
203
204 FLOWCTL_CLEAR(DTR);
205} /* end gdbstub_tx_char() */
206
207/*****************************************************************************/
208/*
209 * flush the transmit buffer through to the debugger
210 */
211void gdbstub_tx_flush(void)
212{
213 LSR_WAIT_FOR(TEMT);
214 LSR_WAIT_FOR(THRE);
215 FLOWCTL_CLEAR(DTR);
216} /* end gdbstub_tx_flush() */
diff --git a/arch/frv/kernel/gdb-io.h b/arch/frv/kernel/gdb-io.h
new file mode 100644
index 000000000000..138714bacc40
--- /dev/null
+++ b/arch/frv/kernel/gdb-io.h
@@ -0,0 +1,55 @@
1/* gdb-io.h: FR403 GDB I/O port defs
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _GDB_IO_H
13#define _GDB_IO_H
14
15#include <asm/serial-regs.h>
16
17#undef UART_RX
18#undef UART_TX
19#undef UART_DLL
20#undef UART_DLM
21#undef UART_IER
22#undef UART_IIR
23#undef UART_FCR
24#undef UART_LCR
25#undef UART_MCR
26#undef UART_LSR
27#undef UART_MSR
28#undef UART_SCR
29
30#define UART_RX 0*8 /* In: Receive buffer (DLAB=0) */
31#define UART_TX 0*8 /* Out: Transmit buffer (DLAB=0) */
32#define UART_DLL 0*8 /* Out: Divisor Latch Low (DLAB=1) */
33#define UART_DLM 1*8 /* Out: Divisor Latch High (DLAB=1) */
34#define UART_IER 1*8 /* Out: Interrupt Enable Register */
35#define UART_IIR 2*8 /* In: Interrupt ID Register */
36#define UART_FCR 2*8 /* Out: FIFO Control Register */
37#define UART_LCR 3*8 /* Out: Line Control Register */
38#define UART_MCR 4*8 /* Out: Modem Control Register */
39#define UART_LSR 5*8 /* In: Line Status Register */
40#define UART_MSR 6*8 /* In: Modem Status Register */
41#define UART_SCR 7*8 /* I/O: Scratch Register */
42
43#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
44#define UART_LCR_SBC 0x40 /* Set break control */
45#define UART_LCR_SPAR 0x20 /* Stick parity (?) */
46#define UART_LCR_EPAR 0x10 /* Even parity select */
47#define UART_LCR_PARITY 0x08 /* Parity Enable */
48#define UART_LCR_STOP 0x04 /* Stop bits: 0=1 stop bit, 1= 2 stop bits */
49#define UART_LCR_WLEN5 0x00 /* Wordlength: 5 bits */
50#define UART_LCR_WLEN6 0x01 /* Wordlength: 6 bits */
51#define UART_LCR_WLEN7 0x02 /* Wordlength: 7 bits */
52#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
53
54
55#endif /* _GDB_IO_H */
diff --git a/arch/frv/kernel/gdb-stub.c b/arch/frv/kernel/gdb-stub.c
new file mode 100644
index 000000000000..8f860d9c4947
--- /dev/null
+++ b/arch/frv/kernel/gdb-stub.c
@@ -0,0 +1,2084 @@
1/* gdb-stub.c: FRV GDB stub
2 *
3 * Copyright (C) 2003,4 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from Linux/MIPS version, Copyright (C) 1995 Andreas Busse
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13/*
14 * To enable debugger support, two things need to happen. One, a
15 * call to set_debug_traps() is necessary in order to allow any breakpoints
16 * or error conditions to be properly intercepted and reported to gdb.
17 * Two, a breakpoint needs to be generated to begin communication. This
18 * is most easily accomplished by a call to breakpoint(). Breakpoint()
19 * simulates a breakpoint by executing a BREAK instruction.
20 *
21 *
22 * The following gdb commands are supported:
23 *
24 * command function Return value
25 *
26 * g return the value of the CPU registers hex data or ENN
27 * G set the value of the CPU registers OK or ENN
28 *
29 * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
30 * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
31 *
32 * c Resume at current address SNN ( signal NN)
33 * cAA..AA Continue at address AA..AA SNN
34 *
35 * s Step one instruction SNN
36 * sAA..AA Step one instruction from AA..AA SNN
37 *
38 * k kill
39 *
40 * ? What was the last sigval ? SNN (signal NN)
41 *
42 * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
43 * baud rate
44 *
45 * All commands and responses are sent with a packet which includes a
46 * checksum. A packet consists of
47 *
48 * $<packet info>#<checksum>.
49 *
50 * where
51 * <packet info> :: <characters representing the command or response>
52 * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
53 *
54 * When a packet is received, it is first acknowledged with either '+' or '-'.
55 * '+' indicates a successful transfer. '-' indicates a failed transfer.
56 *
57 * Example:
58 *
59 * Host: Reply:
60 * $m0,10#2a +$00010203040506070809101112131415#42
61 *
62 *
63 * ==============
64 * MORE EXAMPLES:
65 * ==============
66 *
67 * For reference -- the following are the steps that one
68 * company took (RidgeRun Inc) to get remote gdb debugging
69 * going. In this scenario the host machine was a PC and the
70 * target platform was a Galileo EVB64120A MIPS evaluation
71 * board.
72 *
73 * Step 1:
74 * First download gdb-5.0.tar.gz from the internet,
75 * and then build/install the package.
76 *
77 * Example:
78 * $ tar zxf gdb-5.0.tar.gz
79 * $ cd gdb-5.0
80 * $ ./configure --target=frv-elf
81 * $ make
82 * $ frv-elf-gdb
83 *
84 * Step 2:
85 * Configure linux for remote debugging and build it.
86 *
87 * Example:
88 * $ cd ~/linux
89 * $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging>
90 * $ make dep; make vmlinux
91 *
92 * Step 3:
93 * Download the kernel to the remote target and start
94 * the kernel running. It will promptly halt and wait
95 * for the host gdb session to connect. It does this
96 * since the "Kernel Hacking" option has defined
97 * CONFIG_GDBSTUB which in turn enables your calls
98 * to:
99 * set_debug_traps();
100 * breakpoint();
101 *
102 * Step 4:
103 * Start the gdb session on the host.
104 *
105 * Example:
106 * $ frv-elf-gdb vmlinux
107 * (gdb) set remotebaud 115200
108 * (gdb) target remote /dev/ttyS1
109 * ...at this point you are connected to
110 * the remote target and can use gdb
111 * in the normal fashion, setting
112 * breakpoints, single stepping,
113 * printing variables, etc.
114 *
115 */
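As a quick check of the checksum rule described above, here is a minimal, self-contained C sketch (not part of the stub; gdb_checksum() is only an illustrative helper) that reproduces the "#2a" trailer of the example packet by summing the packet body modulo 256:

	#include <stdio.h>

	/* modulo-256 sum of the characters between '$' and '#' */
	static unsigned char gdb_checksum(const char *body)
	{
		unsigned char sum = 0;

		while (*body)
			sum += (unsigned char) *body++;
		return sum;
	}

	int main(void)
	{
		/* prints "$m0,10#2a", matching the example exchange above */
		printf("$%s#%02x\n", "m0,10", gdb_checksum("m0,10"));
		return 0;
	}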
116
117#include <linux/string.h>
118#include <linux/kernel.h>
119#include <linux/signal.h>
120#include <linux/sched.h>
121#include <linux/mm.h>
122#include <linux/console.h>
123#include <linux/init.h>
124#include <linux/slab.h>
125#include <linux/nmi.h>
126
127#include <asm/pgtable.h>
128#include <asm/system.h>
129#include <asm/gdb-stub.h>
130
131#define LEDS(x) do { /* *(u32*)0xe1200004 = ~(x); mb(); */ } while(0)
132
133#undef GDBSTUB_DEBUG_PROTOCOL
134
135extern void debug_to_serial(const char *p, int n);
136extern void gdbstub_console_write(struct console *co, const char *p, unsigned n);
137
138extern volatile uint32_t __break_error_detect[3]; /* ESFR1, ESR15, EAR15 */
139extern struct user_context __break_user_context;
140
141struct __debug_amr {
142 unsigned long L, P;
143} __attribute__((aligned(8)));
144
145struct __debug_mmu {
146 struct {
147 unsigned long hsr0, pcsr, esr0, ear0, epcr0;
148#ifdef CONFIG_MMU
149 unsigned long tplr, tppr, tpxr, cxnr;
150#endif
151 } regs;
152
153 struct __debug_amr iamr[16];
154 struct __debug_amr damr[16];
155
156#ifdef CONFIG_MMU
157 struct __debug_amr tlb[64*2];
158#endif
159};
160
161static struct __debug_mmu __debug_mmu;
162
163/*
164 * BUFMAX defines the maximum number of characters in inbound/outbound buffers
165 * at least NUMREGBYTES*2 are needed for register packets
166 */
167#define BUFMAX 2048
168
169#define BREAK_INSN 0x801000c0 /* use "break" as bkpt */
170
171static const char gdbstub_banner[] = "Linux/FR-V GDB Stub (c) RedHat 2003\n";
172
173volatile u8 gdbstub_rx_buffer[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
174volatile u32 gdbstub_rx_inp = 0;
175volatile u32 gdbstub_rx_outp = 0;
176volatile u8 gdbstub_rx_overflow = 0;
177u8 gdbstub_rx_unget = 0;
178
179/* set with GDB whilst running to permit step through exceptions */
180extern volatile u32 __attribute__((section(".bss"))) gdbstub_trace_through_exceptions;
181
182static char input_buffer[BUFMAX];
183static char output_buffer[BUFMAX];
184
185static const char hexchars[] = "0123456789abcdef";
186
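/* labels for the words of the debug exception frame, in the order in which
 * they appear in __debug_frame (indexed by gdbstub_show_regs() below)
 */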
187static const char *regnames[] = {
188 "PSR ", "ISR ", "CCR ", "CCCR",
189 "LR ", "LCR ", "PC ", "_stt",
190 "sys ", "GR8*", "GNE0", "GNE1",
191 "IACH", "IACL",
192 "TBR ", "SP ", "FP ", "GR3 ",
193 "GR4 ", "GR5 ", "GR6 ", "GR7 ",
194 "GR8 ", "GR9 ", "GR10", "GR11",
195 "GR12", "GR13", "GR14", "GR15",
196 "GR16", "GR17", "GR18", "GR19",
197 "GR20", "GR21", "GR22", "GR23",
198 "GR24", "GR25", "GR26", "GR27",
199 "EFRM", "CURR", "GR30", "BFRM"
200};
201
202struct gdbstub_bkpt {
203 unsigned long addr; /* address of breakpoint */
204 unsigned len; /* size of breakpoint */
205 uint32_t originsns[7]; /* original instructions */
206};
207
208static struct gdbstub_bkpt gdbstub_bkpts[256];
209
210/*
211 * local prototypes
212 */
213
214static void gdbstub_recv_packet(char *buffer);
215static int gdbstub_send_packet(char *buffer);
216static int gdbstub_compute_signal(unsigned long tbr);
217static int hex(unsigned char ch);
218static int hexToInt(char **ptr, unsigned long *intValue);
219static unsigned char *mem2hex(const void *mem, char *buf, int count, int may_fault);
220static char *hex2mem(const char *buf, void *_mem, int count);
221
222/*
223 * Convert ch from a hex digit to an int
224 */
225static int hex(unsigned char ch)
226{
227 if (ch >= 'a' && ch <= 'f')
228 return ch-'a'+10;
229 if (ch >= '0' && ch <= '9')
230 return ch-'0';
231 if (ch >= 'A' && ch <= 'F')
232 return ch-'A'+10;
233 return -1;
234}
235
236void gdbstub_printk(const char *fmt, ...)
237{
238 static char buf[1024];
239 va_list args;
240 int len;
241
242 /* Emit the output into the temporary buffer */
243 va_start(args, fmt);
244 len = vsnprintf(buf, sizeof(buf), fmt, args);
245 va_end(args);
246 debug_to_serial(buf, len);
247}
248
249static inline char *gdbstub_strcpy(char *dst, const char *src)
250{
251 int loop = 0;
252 while ((dst[loop] = src[loop]))
253 loop++;
254 return dst;
255}
256
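/* flush the data cache and invalidate the instruction cache so that any
 * BREAK instructions (or other code patches) just written to memory are
 * visible to instruction fetch
 */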
257static void gdbstub_purge_cache(void)
258{
259 asm volatile(" dcef @(gr0,gr0),#1 \n"
260 " icei @(gr0,gr0),#1 \n"
261 " membar \n"
262 " bar \n"
263 );
264}
265
266/*****************************************************************************/
267/*
268 * scan for the sequence $<data>#<checksum>
269 */
270static void gdbstub_recv_packet(char *buffer)
271{
272 unsigned char checksum;
273 unsigned char xmitcsum;
274 unsigned char ch;
275 int count, i, ret, error;
276
277 for (;;) {
278 /* wait around for the start character, ignore all other characters */
279 do {
280 gdbstub_rx_char(&ch, 0);
281 } while (ch != '$');
282
283 checksum = 0;
284 xmitcsum = -1;
285 count = 0;
286 error = 0;
287
288 /* now, read until a # or end of buffer is found */
289 while (count < BUFMAX) {
290 ret = gdbstub_rx_char(&ch, 0);
291 if (ret < 0)
292 error = ret;
293
294 if (ch == '#')
295 break;
296 checksum += ch;
297 buffer[count] = ch;
298 count++;
299 }
300
301 if (error == -EIO) {
302 gdbstub_proto("### GDB Rx Error - Skipping packet ###\n");
303 gdbstub_proto("### GDB Tx NAK\n");
304 gdbstub_tx_char('-');
305 continue;
306 }
307
308 if (count >= BUFMAX || error)
309 continue;
310
311 buffer[count] = 0;
312
313 /* read the checksum */
314 ret = gdbstub_rx_char(&ch, 0);
315 if (ret < 0)
316 error = ret;
317 xmitcsum = hex(ch) << 4;
318
319 ret = gdbstub_rx_char(&ch, 0);
320 if (ret < 0)
321 error = ret;
322 xmitcsum |= hex(ch);
323
324 if (error) {
325 if (error == -EIO)
326 gdbstub_proto("### GDB Rx Error - Skipping packet\n");
327 gdbstub_proto("### GDB Tx NAK\n");
328 gdbstub_tx_char('-');
329 continue;
330 }
331
332 /* check the checksum */
333 if (checksum != xmitcsum) {
334 gdbstub_proto("### GDB Tx NAK\n");
335 gdbstub_tx_char('-'); /* failed checksum */
336 continue;
337 }
338
339 gdbstub_proto("### GDB Rx '$%s#%02x' ###\n", buffer, checksum);
340 gdbstub_proto("### GDB Tx ACK\n");
341 gdbstub_tx_char('+'); /* successful transfer */
342
343 /* if a sequence char is present, reply the sequence ID */
344 if (buffer[2] == ':') {
345 gdbstub_tx_char(buffer[0]);
346 gdbstub_tx_char(buffer[1]);
347
348 /* remove sequence chars from buffer */
349 count = 0;
350 while (buffer[count]) count++;
351 for (i=3; i <= count; i++)
352 buffer[i - 3] = buffer[i];
353 }
354
355 break;
356 }
357} /* end gdbstub_recv_packet() */
358
359/*****************************************************************************/
360/*
361 * send the packet in buffer.
362 * - return 0 if successfully ACK'd
363 * - return 1 if abandoned due to new incoming packet
364 */
365static int gdbstub_send_packet(char *buffer)
366{
367 unsigned char checksum;
368 int count;
369 unsigned char ch;
370
371 /* $<packet info>#<checksum> */
372 gdbstub_proto("### GDB Tx '%s' ###\n", buffer);
373
374 do {
375 gdbstub_tx_char('$');
376 checksum = 0;
377 count = 0;
378
379 while ((ch = buffer[count]) != 0) {
380 gdbstub_tx_char(ch);
381 checksum += ch;
382 count += 1;
383 }
384
385 gdbstub_tx_char('#');
386 gdbstub_tx_char(hexchars[checksum >> 4]);
387 gdbstub_tx_char(hexchars[checksum & 0xf]);
388
389 } while (gdbstub_rx_char(&ch,0),
390#ifdef GDBSTUB_DEBUG_PROTOCOL
391 ch=='-' && (gdbstub_proto("### GDB Rx NAK\n"),0),
392 ch!='-' && ch!='+' && (gdbstub_proto("### GDB Rx ??? %02x\n",ch),0),
393#endif
394 ch!='+' && ch!='$');
395
396 if (ch=='+') {
397 gdbstub_proto("### GDB Rx ACK\n");
398 return 0;
399 }
400
401 gdbstub_proto("### GDB Tx Abandoned\n");
402 gdbstub_rx_unget = ch;
403 return 1;
404} /* end gdbstub_send_packet() */
405
406/*
407 * While we find nice hex chars, build an int.
408 * Return number of chars processed.
409 */
410static int hexToInt(char **ptr, unsigned long *_value)
411{
412 int count = 0, ch;
413
414 *_value = 0;
415 while (**ptr) {
416 ch = hex(**ptr);
417 if (ch < 0)
418 break;
419
420 *_value = (*_value << 4) | ((uint8_t) ch & 0xf);
421 count++;
422
423 (*ptr)++;
424 }
425
426 return count;
427}
428
429/*****************************************************************************/
430/*
431 * probe an address to see whether it maps to anything
432 */
433static inline int gdbstub_addr_probe(const void *vaddr)
434{
435#ifdef CONFIG_MMU
436 unsigned long paddr;
437
438 asm("lrad %1,%0,#1,#0,#0" : "=r"(paddr) : "r"(vaddr));
439 if (!(paddr & xAMPRx_V))
440 return 0;
441#endif
442
443 return 1;
444} /* end gdbstub_addr_probe() */
445
446#ifdef CONFIG_MMU
447static unsigned long __saved_dampr, __saved_damlr;
448
449static inline unsigned long gdbstub_virt_to_pte(unsigned long vaddr)
450{
451 pgd_t *pgd;
452 pud_t *pud;
453 pmd_t *pmd;
454 pte_t *pte;
455 unsigned long val, dampr5;
456
457 pgd = (pgd_t *) __get_DAMLR(3) + pgd_index(vaddr);
458 pud = pud_offset(pgd, vaddr);
459 pmd = pmd_offset(pud, vaddr);
460
461 if (pmd_bad(*pmd) || !pmd_present(*pmd))
462 return 0;
463
464 /* make sure dampr5 maps to the correct pmd */
465 dampr5 = __get_DAMPR(5);
466 val = pmd_val(*pmd);
467 __set_DAMPR(5, val | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V);
468
469 /* now it's safe to access the pmd */
470 pte = (pte_t *)__get_DAMLR(5) + __pte_index(vaddr);
471 if (pte_present(*pte))
472 val = pte_val(*pte);
473 else
474 val = 0;
475
476 /* restore original dampr5 */
477 __set_DAMPR(5, dampr5);
478
479 return val;
480}
481#endif
482
483static inline int gdbstub_addr_map(const void *vaddr)
484{
485#ifdef CONFIG_MMU
486 unsigned long pte;
487
488 __saved_dampr = __get_DAMPR(2);
489 __saved_damlr = __get_DAMLR(2);
490#endif
491 if (gdbstub_addr_probe(vaddr))
492 return 1;
493#ifdef CONFIG_MMU
494 pte = gdbstub_virt_to_pte((unsigned long) vaddr);
495 if (pte) {
496 __set_DAMPR(2, pte);
497 __set_DAMLR(2, (unsigned long) vaddr & PAGE_MASK);
498 return 1;
499 }
500#endif
501 return 0;
502}
503
504static inline void gdbstub_addr_unmap(void)
505{
506#ifdef CONFIG_MMU
507 __set_DAMPR(2, __saved_dampr);
508 __set_DAMLR(2, __saved_damlr);
509#endif
510}
511
512/*
513 * access potentially dodgy memory through a potentially dodgy pointer
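 * - BRR is zeroed before each access and read back afterwards; a non-zero
 *   value means the access faulted, in which case the helper returns 0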
514 */
515static inline int gdbstub_read_dword(const void *addr, uint32_t *_res)
516{
517 unsigned long brr;
518 uint32_t res;
519
520 if (!gdbstub_addr_map(addr))
521 return 0;
522
523 asm volatile(" movgs gr0,brr \n"
524 " ld%I2 %M2,%0 \n"
525 " movsg brr,%1 \n"
526 : "=r"(res), "=r"(brr)
527 : "m"(*(uint32_t *) addr));
528 *_res = res;
529 gdbstub_addr_unmap();
530 return likely(!brr);
531}
532
533static inline int gdbstub_write_dword(void *addr, uint32_t val)
534{
535 unsigned long brr;
536
537 if (!gdbstub_addr_map(addr))
538 return 0;
539
540 asm volatile(" movgs gr0,brr \n"
541 " st%I2 %1,%M2 \n"
542 " movsg brr,%0 \n"
543 : "=r"(brr)
544 : "r"(val), "m"(*(uint32_t *) addr));
545 gdbstub_addr_unmap();
546 return likely(!brr);
547}
548
549static inline int gdbstub_read_word(const void *addr, uint16_t *_res)
550{
551 unsigned long brr;
552 uint16_t res;
553
554 if (!gdbstub_addr_map(addr))
555 return 0;
556
557 asm volatile(" movgs gr0,brr \n"
558 " lduh%I2 %M2,%0 \n"
559 " movsg brr,%1 \n"
560 : "=r"(res), "=r"(brr)
561 : "m"(*(uint16_t *) addr));
562 *_res = res;
563 gdbstub_addr_unmap();
564 return likely(!brr);
565}
566
567static inline int gdbstub_write_word(void *addr, uint16_t val)
568{
569 unsigned long brr;
570
571 if (!gdbstub_addr_map(addr))
572 return 0;
573
574 asm volatile(" movgs gr0,brr \n"
575 " sth%I2 %1,%M2 \n"
576 " movsg brr,%0 \n"
577 : "=r"(brr)
578 : "r"(val), "m"(*(uint16_t *) addr));
579 gdbstub_addr_unmap();
580 return likely(!brr);
581}
582
583static inline int gdbstub_read_byte(const void *addr, uint8_t *_res)
584{
585 unsigned long brr;
586 uint8_t res;
587
588 if (!gdbstub_addr_map(addr))
589 return 0;
590
591 asm volatile(" movgs gr0,brr \n"
592 " ldub%I2 %M2,%0 \n"
593 " movsg brr,%1 \n"
594 : "=r"(res), "=r"(brr)
595 : "m"(*(uint8_t *) addr));
596 *_res = res;
597 gdbstub_addr_unmap();
598 return likely(!brr);
599}
600
601static inline int gdbstub_write_byte(void *addr, uint8_t val)
602{
603 unsigned long brr;
604
605 if (!gdbstub_addr_map(addr))
606 return 0;
607
608 asm volatile(" movgs gr0,brr \n"
609 " stb%I2 %1,%M2 \n"
610 " movsg brr,%0 \n"
611 : "=r"(brr)
612 : "r"(val), "m"(*(uint8_t *) addr));
613 gdbstub_addr_unmap();
614 return likely(!brr);
615}
616
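/* emit console text as GDB 'O' (console output) packets: each packet carries
 * a handful of characters, hex-encoded, and an extra CR is emitted after each
 * LF
 */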
617static void __gdbstub_console_write(struct console *co, const char *p, unsigned n)
618{
619 char outbuf[26];
620 int qty;
621
622 outbuf[0] = 'O';
623
624 while (n > 0) {
625 qty = 1;
626
627 while (n > 0 && qty < 20) {
628 mem2hex(p, outbuf + qty, 2, 0);
629 qty += 2;
630 if (*p == 0x0a) {
631 outbuf[qty++] = '0';
632 outbuf[qty++] = 'd';
633 }
634 p++;
635 n--;
636 }
637
638 outbuf[qty] = 0;
639 gdbstub_send_packet(outbuf);
640 }
641}
642
643#if 0
644void debug_to_serial(const char *p, int n)
645{
646 gdbstub_console_write(NULL,p,n);
647}
648#endif
649
650#ifdef CONFIG_GDBSTUB_CONSOLE
651
652static kdev_t gdbstub_console_dev(struct console *con)
653{
654 return MKDEV(1,3); /* /dev/null */
655}
656
657static struct console gdbstub_console = {
658 .name = "gdb",
659 .write = gdbstub_console_write, /* in break.S */
660 .device = gdbstub_console_dev,
661 .flags = CON_PRINTBUFFER,
662 .index = -1,
663};
664
665#endif
666
667/*****************************************************************************/
668/*
669 * Convert the memory pointed to by mem into hex, placing result in buf.
670 * - if successful, return a pointer to the last char put in buf (NUL)
671 * - in case of mem fault, return NULL
672 * may_fault is non-zero if we are reading from arbitrary memory, but is currently
673 * not used.
674 */
675static unsigned char *mem2hex(const void *_mem, char *buf, int count, int may_fault)
676{
677 const uint8_t *mem = _mem;
678 uint8_t ch[4] __attribute__((aligned(4)));
679
680 if ((uint32_t)mem&1 && count>=1) {
681 if (!gdbstub_read_byte(mem,ch))
682 return NULL;
683 *buf++ = hexchars[ch[0] >> 4];
684 *buf++ = hexchars[ch[0] & 0xf];
685 mem++;
686 count--;
687 }
688
689 if ((uint32_t)mem&3 && count>=2) {
690 if (!gdbstub_read_word(mem,(uint16_t *)ch))
691 return NULL;
692 *buf++ = hexchars[ch[0] >> 4];
693 *buf++ = hexchars[ch[0] & 0xf];
694 *buf++ = hexchars[ch[1] >> 4];
695 *buf++ = hexchars[ch[1] & 0xf];
696 mem += 2;
697 count -= 2;
698 }
699
700 while (count>=4) {
701 if (!gdbstub_read_dword(mem,(uint32_t *)ch))
702 return NULL;
703 *buf++ = hexchars[ch[0] >> 4];
704 *buf++ = hexchars[ch[0] & 0xf];
705 *buf++ = hexchars[ch[1] >> 4];
706 *buf++ = hexchars[ch[1] & 0xf];
707 *buf++ = hexchars[ch[2] >> 4];
708 *buf++ = hexchars[ch[2] & 0xf];
709 *buf++ = hexchars[ch[3] >> 4];
710 *buf++ = hexchars[ch[3] & 0xf];
711 mem += 4;
712 count -= 4;
713 }
714
715 if (count>=2) {
716 if (!gdbstub_read_word(mem,(uint16_t *)ch))
717 return NULL;
718 *buf++ = hexchars[ch[0] >> 4];
719 *buf++ = hexchars[ch[0] & 0xf];
720 *buf++ = hexchars[ch[1] >> 4];
721 *buf++ = hexchars[ch[1] & 0xf];
722 mem += 2;
723 count -= 2;
724 }
725
726 if (count>=1) {
727 if (!gdbstub_read_byte(mem,ch))
728 return NULL;
729 *buf++ = hexchars[ch[0] >> 4];
730 *buf++ = hexchars[ch[0] & 0xf];
731 }
732
733 *buf = 0;
734
735 return buf;
736} /* end mem2hex() */
737
738/*****************************************************************************/
739/*
740 * convert the hex array pointed to by buf into binary to be placed in mem
741 * return a pointer to the character AFTER the last byte of buffer consumed
742 */
743static char *hex2mem(const char *buf, void *_mem, int count)
744{
745 uint8_t *mem = _mem;
746 union {
747 uint32_t l;
748 uint16_t w;
749 uint8_t b[4];
750 } ch;
751
752 if ((u32)mem&1 && count>=1) {
753 ch.b[0] = hex(*buf++) << 4;
754 ch.b[0] |= hex(*buf++);
755 if (!gdbstub_write_byte(mem,ch.b[0]))
756 return NULL;
757 mem++;
758 count--;
759 }
760
761 if ((u32)mem&3 && count>=2) {
762 ch.b[0] = hex(*buf++) << 4;
763 ch.b[0] |= hex(*buf++);
764 ch.b[1] = hex(*buf++) << 4;
765 ch.b[1] |= hex(*buf++);
766 if (!gdbstub_write_word(mem,ch.w))
767 return NULL;
768 mem += 2;
769 count -= 2;
770 }
771
772 while (count>=4) {
773 ch.b[0] = hex(*buf++) << 4;
774 ch.b[0] |= hex(*buf++);
775 ch.b[1] = hex(*buf++) << 4;
776 ch.b[1] |= hex(*buf++);
777 ch.b[2] = hex(*buf++) << 4;
778 ch.b[2] |= hex(*buf++);
779 ch.b[3] = hex(*buf++) << 4;
780 ch.b[3] |= hex(*buf++);
781 if (!gdbstub_write_dword(mem,ch.l))
782 return NULL;
783 mem += 4;
784 count -= 4;
785 }
786
787 if (count>=2) {
788 ch.b[0] = hex(*buf++) << 4;
789 ch.b[0] |= hex(*buf++);
790 ch.b[1] = hex(*buf++) << 4;
791 ch.b[1] |= hex(*buf++);
792 if (!gdbstub_write_word(mem,ch.w))
793 return NULL;
794 mem += 2;
795 count -= 2;
796 }
797
798 if (count>=1) {
799 ch.b[0] = hex(*buf++) << 4;
800 ch.b[0] |= hex(*buf++);
801 if (!gdbstub_write_byte(mem,ch.b[0]))
802 return NULL;
803 }
804
805 return (char *) buf;
806} /* end hex2mem() */
807
808/*****************************************************************************/
809/*
810 * This table contains the mapping between FRV TBR.TT exception codes,
811 * and signals, which are primarily what GDB understands. It also
812 * indicates which hardware traps we need to commandeer when
813 * initializing the stub.
814 */
815static const struct brr_to_sig_map {
816 unsigned long brr_mask; /* BRR bitmask */
817 unsigned long tbr_tt; /* TBR.TT code (in BRR.EBTT) */
818 unsigned int signo; /* Signal that we map this into */
819} brr_to_sig_map[] = {
820 { BRR_EB, TBR_TT_INSTR_ACC_ERROR, SIGSEGV },
821 { BRR_EB, TBR_TT_ILLEGAL_INSTR, SIGILL },
822 { BRR_EB, TBR_TT_PRIV_INSTR, SIGILL },
823 { BRR_EB, TBR_TT_MP_EXCEPTION, SIGFPE },
824 { BRR_EB, TBR_TT_DATA_ACC_ERROR, SIGSEGV },
825 { BRR_EB, TBR_TT_DATA_STR_ERROR, SIGSEGV },
826 { BRR_EB, TBR_TT_DIVISION_EXCEP, SIGFPE },
827 { BRR_EB, TBR_TT_COMPOUND_EXCEP, SIGSEGV },
828 { BRR_EB, TBR_TT_INTERRUPT_13, SIGALRM }, /* watchdog */
829 { BRR_EB, TBR_TT_INTERRUPT_14, SIGINT }, /* GDB serial */
830 { BRR_EB, TBR_TT_INTERRUPT_15, SIGQUIT }, /* NMI */
831 { BRR_CB, 0, SIGUSR1 },
832 { BRR_TB, 0, SIGUSR2 },
833 { BRR_DBNEx, 0, SIGTRAP },
834 { BRR_DBx, 0, SIGTRAP }, /* h/w watchpoint */
835 { BRR_IBx, 0, SIGTRAP }, /* h/w breakpoint */
836 { BRR_CBB, 0, SIGTRAP },
837 { BRR_SB, 0, SIGTRAP },
838 { BRR_ST, 0, SIGTRAP }, /* single step */
839 { 0, 0, SIGHUP } /* default */
840};
841
842/*****************************************************************************/
843/*
844 * convert the FRV BRR register contents into a UNIX signal number
845 */
846static inline int gdbstub_compute_signal(unsigned long brr)
847{
848 const struct brr_to_sig_map *map;
849 unsigned long tbr = (brr & BRR_EBTT) >> 12;
850
851 for (map = brr_to_sig_map; map->brr_mask; map++)
852 if (map->brr_mask & brr)
853 if (!map->tbr_tt || map->tbr_tt == tbr)
854 break;
855
856 return map->signo;
857} /* end gdbstub_compute_signal() */
858
859/*****************************************************************************/
860/*
861 * set a software breakpoint or a hardware breakpoint or watchpoint
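 * - type 0 plants BREAK instructions over memory (software breakpoint),
 *   type 1 uses an instruction breakpoint register (IBAR0-3), and types
 *   2/3/4 set a data write/read/access watchpoint (DBAR0-1), mirroring the
 *   type codes of the GDB 'Z'/'z' packets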
862 */
863static int gdbstub_set_breakpoint(unsigned long type, unsigned long addr, unsigned long len)
864{
865 unsigned long tmp;
866 int bkpt, loop, xloop;
867
868 union {
869 struct {
870 unsigned long mask0, mask1;
871 };
872 uint8_t bytes[8];
873 } dbmr;
874
875 //gdbstub_printk("setbkpt(%ld,%08lx,%ld)\n", type, addr, len);
876
877 switch (type) {
878 /* set software breakpoint */
879 case 0:
880 if (addr & 3 || len > 7*4)
881 return -EINVAL;
882
883 for (bkpt = 255; bkpt >= 0; bkpt--)
884 if (!gdbstub_bkpts[bkpt].addr)
885 break;
886 if (bkpt < 0)
887 return -ENOSPC;
888
889 for (loop = 0; loop < len/4; loop++)
890 if (!gdbstub_read_dword(&((uint32_t *) addr)[loop],
891 &gdbstub_bkpts[bkpt].originsns[loop]))
892 return -EFAULT;
893
894 for (loop = 0; loop < len/4; loop++)
895 if (!gdbstub_write_dword(&((uint32_t *) addr)[loop],
896 BREAK_INSN)
897 ) {
898 /* need to undo the changes if possible */
899 for (xloop = 0; xloop < loop; xloop++)
900 gdbstub_write_dword(&((uint32_t *) addr)[xloop],
901 gdbstub_bkpts[bkpt].originsns[xloop]);
902 return -EFAULT;
903 }
904
905 gdbstub_bkpts[bkpt].addr = addr;
906 gdbstub_bkpts[bkpt].len = len;
907
908#if 0
909 gdbstub_printk("Set BKPT[%02x]: %08lx #%d {%04x, %04x} -> { %04x, %04x }\n",
910 bkpt,
911 gdbstub_bkpts[bkpt].addr,
912 gdbstub_bkpts[bkpt].len,
913 gdbstub_bkpts[bkpt].originsns[0],
914 gdbstub_bkpts[bkpt].originsns[1],
915 ((uint32_t *) addr)[0],
916 ((uint32_t *) addr)[1]
917 );
918#endif
919 return 0;
920
921 /* set hardware breakpoint */
922 case 1:
923 if (addr & 3 || len != 4)
924 return -EINVAL;
925
926 if (!(__debug_regs->dcr & DCR_IBE0)) {
927 //gdbstub_printk("set h/w break 0: %08lx\n", addr);
928 __debug_regs->dcr |= DCR_IBE0;
929 asm volatile("movgs %0,ibar0" : : "r"(addr));
930 return 0;
931 }
932
933 if (!(__debug_regs->dcr & DCR_IBE1)) {
934 //gdbstub_printk("set h/w break 1: %08lx\n", addr);
935 __debug_regs->dcr |= DCR_IBE1;
936 asm volatile("movgs %0,ibar1" : : "r"(addr));
937 return 0;
938 }
939
940 if (!(__debug_regs->dcr & DCR_IBE2)) {
941 //gdbstub_printk("set h/w break 2: %08lx\n", addr);
942 __debug_regs->dcr |= DCR_IBE2;
943 asm volatile("movgs %0,ibar2" : : "r"(addr));
944 return 0;
945 }
946
947 if (!(__debug_regs->dcr & DCR_IBE3)) {
948 //gdbstub_printk("set h/w break 3: %08lx\n", addr);
949 __debug_regs->dcr |= DCR_IBE3;
950 asm volatile("movgs %0,ibar3" : : "r"(addr));
951 return 0;
952 }
953
954 return -ENOSPC;
955
956 /* set data read/write/access watchpoint */
957 case 2:
958 case 3:
959 case 4:
960 if ((addr & ~7) != ((addr + len - 1) & ~7))
961 return -EINVAL;
962
963 tmp = addr & 7;
964
965 memset(dbmr.bytes, 0xff, sizeof(dbmr.bytes));
966 for (loop = 0; loop < len; loop++)
967 dbmr.bytes[tmp + loop] = 0;
968
969 addr &= ~7;
970
971 if (!(__debug_regs->dcr & (DCR_DRBE0|DCR_DWBE0))) {
972 //gdbstub_printk("set h/w watchpoint 0 type %ld: %08lx\n", type, addr);
973 tmp = type==2 ? DCR_DWBE0 : type==3 ? DCR_DRBE0 : DCR_DRBE0|DCR_DWBE0;
974 __debug_regs->dcr |= tmp;
975 asm volatile(" movgs %0,dbar0 \n"
976 " movgs %1,dbmr00 \n"
977 " movgs %2,dbmr01 \n"
978 " movgs gr0,dbdr00 \n"
979 " movgs gr0,dbdr01 \n"
980 : : "r"(addr), "r"(dbmr.mask0), "r"(dbmr.mask1));
981 return 0;
982 }
983
984 if (!(__debug_regs->dcr & (DCR_DRBE1|DCR_DWBE1))) {
985 //gdbstub_printk("set h/w watchpoint 1 type %ld: %08lx\n", type, addr);
986 tmp = type==2 ? DCR_DWBE1 : type==3 ? DCR_DRBE1 : DCR_DRBE1|DCR_DWBE1;
987 __debug_regs->dcr |= tmp;
988 asm volatile(" movgs %0,dbar1 \n"
989 " movgs %1,dbmr10 \n"
990 " movgs %2,dbmr11 \n"
991 " movgs gr0,dbdr10 \n"
992 " movgs gr0,dbdr11 \n"
993 : : "r"(addr), "r"(dbmr.mask0), "r"(dbmr.mask1));
994 return 0;
995 }
996
997 return -ENOSPC;
998
999 default:
1000 return -EINVAL;
1001 }
1002
1003} /* end gdbstub_set_breakpoint() */
1004
1005/*****************************************************************************/
1006/*
1007 * clear a breakpoint or watchpoint
1008 */
1009int gdbstub_clear_breakpoint(unsigned long type, unsigned long addr, unsigned long len)
1010{
1011 unsigned long tmp;
1012 int bkpt, loop;
1013
1014 union {
1015 struct {
1016 unsigned long mask0, mask1;
1017 };
1018 uint8_t bytes[8];
1019 } dbmr;
1020
1021 //gdbstub_printk("clearbkpt(%ld,%08lx,%ld)\n", type, addr, len);
1022
1023 switch (type) {
1024 /* clear software breakpoint */
1025 case 0:
1026 for (bkpt = 255; bkpt >= 0; bkpt--)
1027 if (gdbstub_bkpts[bkpt].addr == addr && gdbstub_bkpts[bkpt].len == len)
1028 break;
1029 if (bkpt < 0)
1030 return -ENOENT;
1031
1032 gdbstub_bkpts[bkpt].addr = 0;
1033
1034 for (loop = 0; loop < len/4; loop++)
1035 if (!gdbstub_write_dword(&((uint32_t *) addr)[loop],
1036 gdbstub_bkpts[bkpt].originsns[loop]))
1037 return -EFAULT;
1038 return 0;
1039
1040 /* clear hardware breakpoint */
1041 case 1:
1042 if (addr & 3 || len != 4)
1043 return -EINVAL;
1044
1045#define __get_ibar(X) ({ unsigned long x; asm volatile("movsg ibar"#X",%0" : "=r"(x)); x; })
1046
1047 if (__debug_regs->dcr & DCR_IBE0 && __get_ibar(0) == addr) {
1048 //gdbstub_printk("clear h/w break 0: %08lx\n", addr);
1049 __debug_regs->dcr &= ~DCR_IBE0;
1050 asm volatile("movgs gr0,ibar0");
1051 return 0;
1052 }
1053
1054 if (__debug_regs->dcr & DCR_IBE1 && __get_ibar(1) == addr) {
1055 //gdbstub_printk("clear h/w break 1: %08lx\n", addr);
1056 __debug_regs->dcr &= ~DCR_IBE1;
1057 asm volatile("movgs gr0,ibar1");
1058 return 0;
1059 }
1060
1061 if (__debug_regs->dcr & DCR_IBE2 && __get_ibar(2) == addr) {
1062 //gdbstub_printk("clear h/w break 2: %08lx\n", addr);
1063 __debug_regs->dcr &= ~DCR_IBE2;
1064 asm volatile("movgs gr0,ibar2");
1065 return 0;
1066 }
1067
1068 if (__debug_regs->dcr & DCR_IBE3 && __get_ibar(3) == addr) {
1069 //gdbstub_printk("clear h/w break 3: %08lx\n", addr);
1070 __debug_regs->dcr &= ~DCR_IBE3;
1071 asm volatile("movgs gr0,ibar3");
1072 return 0;
1073 }
1074
1075 return -EINVAL;
1076
1077 /* clear data read/write/access watchpoint */
1078 case 2:
1079 case 3:
1080 case 4:
1081 if ((addr & ~7) != ((addr + len - 1) & ~7))
1082 return -EINVAL;
1083
1084 tmp = addr & 7;
1085
1086 memset(dbmr.bytes, 0xff, sizeof(dbmr.bytes));
1087 for (loop = 0; loop < len; loop++)
1088 dbmr.bytes[tmp + loop] = 0;
1089
1090 addr &= ~7;
1091
1092#define __get_dbar(X) ({ unsigned long x; asm volatile("movsg dbar"#X",%0" : "=r"(x)); x; })
1093#define __get_dbmr0(X) ({ unsigned long x; asm volatile("movsg dbmr"#X"0,%0" : "=r"(x)); x; })
1094#define __get_dbmr1(X) ({ unsigned long x; asm volatile("movsg dbmr"#X"1,%0" : "=r"(x)); x; })
1095
1096 /* consider DBAR 0 */
1097 tmp = type==2 ? DCR_DWBE0 : type==3 ? DCR_DRBE0 : DCR_DRBE0|DCR_DWBE0;
1098
1099 if ((__debug_regs->dcr & (DCR_DRBE0|DCR_DWBE0)) != tmp ||
1100 __get_dbar(0) != addr ||
1101 __get_dbmr0(0) != dbmr.mask0 ||
1102 __get_dbmr1(0) != dbmr.mask1)
1103 goto skip_dbar0;
1104
1105 //gdbstub_printk("clear h/w watchpoint 0 type %ld: %08lx\n", type, addr);
1106 __debug_regs->dcr &= ~(DCR_DRBE0|DCR_DWBE0);
1107 asm volatile(" movgs gr0,dbar0 \n"
1108 " movgs gr0,dbmr00 \n"
1109 " movgs gr0,dbmr01 \n"
1110 " movgs gr0,dbdr00 \n"
1111 " movgs gr0,dbdr01 \n");
1112 return 0;
1113
1114 skip_dbar0:
1115 /* consider DBAR 1 */
1116 tmp = type==2 ? DCR_DWBE1 : type==3 ? DCR_DRBE1 : DCR_DRBE1|DCR_DWBE1;
1117
1118 if ((__debug_regs->dcr & (DCR_DRBE1|DCR_DWBE1)) != tmp ||
1119 __get_dbar(1) != addr ||
1120 __get_dbmr0(1) != dbmr.mask0 ||
1121 __get_dbmr1(1) != dbmr.mask1)
1122 goto skip_dbar1;
1123
1124 //gdbstub_printk("clear h/w watchpoint 1 type %ld: %08lx\n", type, addr);
1125 __debug_regs->dcr &= ~(DCR_DRBE1|DCR_DWBE1);
1126 asm volatile(" movgs gr0,dbar1 \n"
1127 " movgs gr0,dbmr10 \n"
1128 " movgs gr0,dbmr11 \n"
1129 " movgs gr0,dbdr10 \n"
1130 " movgs gr0,dbdr11 \n");
1131 return 0;
1132
1133 skip_dbar1:
1134 return -ENOSPC;
1135
1136 default:
1137 return -EINVAL;
1138 }
1139} /* end gdbstub_clear_breakpoint() */
1140
1141/*****************************************************************************/
1142/*
1143 * check for an internal software breakpoint, and wind the PC back if necessary
1144 */
1145static void gdbstub_check_breakpoint(void)
1146{
1147 unsigned long addr = __debug_frame->pc - 4;
1148 int bkpt;
1149
1150 for (bkpt = 255; bkpt >= 0; bkpt--)
1151 if (gdbstub_bkpts[bkpt].addr == addr)
1152 break;
1153 if (bkpt >= 0)
1154 __debug_frame->pc = addr;
1155
1156 //gdbstub_printk("alter pc [%d] %08lx\n", bkpt, __debug_frame->pc);
1157
1158} /* end gdbstub_check_breakpoint() */
1159
1160/*****************************************************************************/
1161/*
1162 * dump the register state saved in __debug_frame
1163 */
1164static void __attribute__((unused)) gdbstub_show_regs(void)
1165{
1166 uint32_t *reg;
1167 int loop;
1168
1169 gdbstub_printk("\n");
1170
1171 gdbstub_printk("Frame: @%p [%s]\n",
1172 __debug_frame,
1173 __debug_frame->psr & PSR_S ? "kernel" : "user");
1174
1175 reg = (uint32_t *) __debug_frame;
1176 for (loop = 0; loop < REG__END; loop++) {
1177 printk("%s %08x", regnames[loop + 0], reg[loop + 0]);
1178
1179 if (loop == REG__END - 1 || loop % 5 == 4)
1180 printk("\n");
1181 else
1182 printk(" | ");
1183 }
1184
1185 gdbstub_printk("Process %s (pid: %d)\n", current->comm, current->pid);
1186} /* end gdbstub_show_regs() */
1187
1188/*****************************************************************************/
1189/*
1190 * dump debugging regs
1191 */
1192static void __attribute__((unused)) gdbstub_dump_debugregs(void)
1193{
1194 unsigned long x;
1195
1196 x = __debug_regs->dcr;
1197 gdbstub_printk("DCR %08lx ", x);
1198
1199 x = __debug_regs->brr;
1200 gdbstub_printk("BRR %08lx\n", x);
1201
1202 gdbstub_printk("IBAR0 %08lx ", __get_ibar(0));
1203 gdbstub_printk("IBAR1 %08lx ", __get_ibar(1));
1204 gdbstub_printk("IBAR2 %08lx ", __get_ibar(2));
1205 gdbstub_printk("IBAR3 %08lx\n", __get_ibar(3));
1206
1207 gdbstub_printk("DBAR0 %08lx ", __get_dbar(0));
1208 gdbstub_printk("DBMR00 %08lx ", __get_dbmr0(0));
1209 gdbstub_printk("DBMR01 %08lx\n", __get_dbmr1(0));
1210
1211 gdbstub_printk("DBAR1 %08lx ", __get_dbar(1));
1212 gdbstub_printk("DBMR10 %08lx ", __get_dbmr0(1));
1213 gdbstub_printk("DBMR11 %08lx\n", __get_dbmr1(1));
1214
1215 gdbstub_printk("\n");
1216} /* end gdbstub_dump_debugregs() */
1217
1218/*****************************************************************************/
1219/*
1220 * dump the MMU state into a structure so that it can be accessed with GDB
1221 */
1222void gdbstub_get_mmu_state(void)
1223{
1224 asm volatile("movsg hsr0,%0" : "=r"(__debug_mmu.regs.hsr0));
1225 asm volatile("movsg pcsr,%0" : "=r"(__debug_mmu.regs.pcsr));
1226 asm volatile("movsg esr0,%0" : "=r"(__debug_mmu.regs.esr0));
1227 asm volatile("movsg ear0,%0" : "=r"(__debug_mmu.regs.ear0));
1228 asm volatile("movsg epcr0,%0" : "=r"(__debug_mmu.regs.epcr0));
1229
1230 /* read the protection / SAT registers */
1231 __debug_mmu.iamr[0].L = __get_IAMLR(0);
1232 __debug_mmu.iamr[0].P = __get_IAMPR(0);
1233 __debug_mmu.iamr[1].L = __get_IAMLR(1);
1234 __debug_mmu.iamr[1].P = __get_IAMPR(1);
1235 __debug_mmu.iamr[2].L = __get_IAMLR(2);
1236 __debug_mmu.iamr[2].P = __get_IAMPR(2);
1237 __debug_mmu.iamr[3].L = __get_IAMLR(3);
1238 __debug_mmu.iamr[3].P = __get_IAMPR(3);
1239 __debug_mmu.iamr[4].L = __get_IAMLR(4);
1240 __debug_mmu.iamr[4].P = __get_IAMPR(4);
1241 __debug_mmu.iamr[5].L = __get_IAMLR(5);
1242 __debug_mmu.iamr[5].P = __get_IAMPR(5);
1243 __debug_mmu.iamr[6].L = __get_IAMLR(6);
1244 __debug_mmu.iamr[6].P = __get_IAMPR(6);
1245 __debug_mmu.iamr[7].L = __get_IAMLR(7);
1246 __debug_mmu.iamr[7].P = __get_IAMPR(7);
1247 __debug_mmu.iamr[8].L = __get_IAMLR(8);
1248 __debug_mmu.iamr[8].P = __get_IAMPR(8);
1249 __debug_mmu.iamr[9].L = __get_IAMLR(9);
1250 __debug_mmu.iamr[9].P = __get_IAMPR(9);
1251 __debug_mmu.iamr[10].L = __get_IAMLR(10);
1252 __debug_mmu.iamr[10].P = __get_IAMPR(10);
1253 __debug_mmu.iamr[11].L = __get_IAMLR(11);
1254 __debug_mmu.iamr[11].P = __get_IAMPR(11);
1255 __debug_mmu.iamr[12].L = __get_IAMLR(12);
1256 __debug_mmu.iamr[12].P = __get_IAMPR(12);
1257 __debug_mmu.iamr[13].L = __get_IAMLR(13);
1258 __debug_mmu.iamr[13].P = __get_IAMPR(13);
1259 __debug_mmu.iamr[14].L = __get_IAMLR(14);
1260 __debug_mmu.iamr[14].P = __get_IAMPR(14);
1261 __debug_mmu.iamr[15].L = __get_IAMLR(15);
1262 __debug_mmu.iamr[15].P = __get_IAMPR(15);
1263
1264 __debug_mmu.damr[0].L = __get_DAMLR(0);
1265 __debug_mmu.damr[0].P = __get_DAMPR(0);
1266 __debug_mmu.damr[1].L = __get_DAMLR(1);
1267 __debug_mmu.damr[1].P = __get_DAMPR(1);
1268 __debug_mmu.damr[2].L = __get_DAMLR(2);
1269 __debug_mmu.damr[2].P = __get_DAMPR(2);
1270 __debug_mmu.damr[3].L = __get_DAMLR(3);
1271 __debug_mmu.damr[3].P = __get_DAMPR(3);
1272 __debug_mmu.damr[4].L = __get_DAMLR(4);
1273 __debug_mmu.damr[4].P = __get_DAMPR(4);
1274 __debug_mmu.damr[5].L = __get_DAMLR(5);
1275 __debug_mmu.damr[5].P = __get_DAMPR(5);
1276 __debug_mmu.damr[6].L = __get_DAMLR(6);
1277 __debug_mmu.damr[6].P = __get_DAMPR(6);
1278 __debug_mmu.damr[7].L = __get_DAMLR(7);
1279 __debug_mmu.damr[7].P = __get_DAMPR(7);
1280 __debug_mmu.damr[8].L = __get_DAMLR(8);
1281 __debug_mmu.damr[8].P = __get_DAMPR(8);
1282 __debug_mmu.damr[9].L = __get_DAMLR(9);
1283 __debug_mmu.damr[9].P = __get_DAMPR(9);
1284 __debug_mmu.damr[10].L = __get_DAMLR(10);
1285 __debug_mmu.damr[10].P = __get_DAMPR(10);
1286 __debug_mmu.damr[11].L = __get_DAMLR(11);
1287 __debug_mmu.damr[11].P = __get_DAMPR(11);
1288 __debug_mmu.damr[12].L = __get_DAMLR(12);
1289 __debug_mmu.damr[12].P = __get_DAMPR(12);
1290 __debug_mmu.damr[13].L = __get_DAMLR(13);
1291 __debug_mmu.damr[13].P = __get_DAMPR(13);
1292 __debug_mmu.damr[14].L = __get_DAMLR(14);
1293 __debug_mmu.damr[14].P = __get_DAMPR(14);
1294 __debug_mmu.damr[15].L = __get_DAMLR(15);
1295 __debug_mmu.damr[15].P = __get_DAMPR(15);
1296
1297#ifdef CONFIG_MMU
1298 do {
1299 /* read the DAT entries from the TLB */
1300 struct __debug_amr *p;
1301 int loop;
1302
1303 asm volatile("movsg tplr,%0" : "=r"(__debug_mmu.regs.tplr));
1304 asm volatile("movsg tppr,%0" : "=r"(__debug_mmu.regs.tppr));
1305 asm volatile("movsg tpxr,%0" : "=r"(__debug_mmu.regs.tpxr));
1306 asm volatile("movsg cxnr,%0" : "=r"(__debug_mmu.regs.cxnr));
1307
1308 p = __debug_mmu.tlb;
1309
1310 /* way 0 */
1311 asm volatile("movgs %0,tpxr" :: "r"(0 << TPXR_WAY_SHIFT));
1312 for (loop = 0; loop < 64; loop++) {
1313 asm volatile("tlbpr %0,gr0,#1,#0" :: "r"(loop << PAGE_SHIFT));
1314 asm volatile("movsg tplr,%0" : "=r"(p->L));
1315 asm volatile("movsg tppr,%0" : "=r"(p->P));
1316 p++;
1317 }
1318
1319 /* way 1 */
1320 asm volatile("movgs %0,tpxr" :: "r"(1 << TPXR_WAY_SHIFT));
1321 for (loop = 0; loop < 64; loop++) {
1322 asm volatile("tlbpr %0,gr0,#1,#0" :: "r"(loop << PAGE_SHIFT));
1323 asm volatile("movsg tplr,%0" : "=r"(p->L));
1324 asm volatile("movsg tppr,%0" : "=r"(p->P));
1325 p++;
1326 }
1327
1328 asm volatile("movgs %0,tplr" :: "r"(__debug_mmu.regs.tplr));
1329 asm volatile("movgs %0,tppr" :: "r"(__debug_mmu.regs.tppr));
1330 asm volatile("movgs %0,tpxr" :: "r"(__debug_mmu.regs.tpxr));
1331 } while(0);
1332#endif
1333
1334} /* end gdbstub_get_mmu_state() */
1335
1336/*****************************************************************************/
1337/*
1338 * handle event interception and GDB remote protocol processing
1339 * - on entry:
1340 * PSR.ET==0, PSR.S==1 and the CPU is in debug mode
1341 * __debug_frame points to the saved registers
1342 * __frame points to the kernel mode exception frame, if it was in kernel
1343 * mode when the break happened
1344 */
1345void gdbstub(int sigval)
1346{
1347 unsigned long addr, length, loop, dbar, temp, temp2, temp3;
1348 uint32_t zero;
1349 char *ptr;
1350 int flush_cache = 0;
1351
1352 LEDS(0x5000);
1353
1354 if (sigval < 0) {
1355#ifndef CONFIG_GDBSTUB_IMMEDIATE
1356 /* return immediately if GDB immediate activation option not set */
1357 return;
1358#else
1359 sigval = SIGINT;
1360#endif
1361 }
1362
1363 save_user_regs(&__break_user_context);
1364
1365#if 0
1366 gdbstub_printk("--> gdbstub() %08x %p %08x %08x\n",
1367 __debug_frame->pc,
1368 __debug_frame,
1369 __debug_regs->brr,
1370 __debug_regs->bpsr);
1371// gdbstub_show_regs();
1372#endif
1373
1374 LEDS(0x5001);
1375
1376 /* if we were interrupted by input on the serial gdbstub serial port,
1377 * restore the context prior to the interrupt so that we return to that
1378 * directly
1379 */
1380 temp = (unsigned long) __entry_kerneltrap_table;
1381 temp2 = (unsigned long) __entry_usertrap_table;
1382 temp3 = __debug_frame->pc & ~15;
1383
1384 if (temp3 == temp + TBR_TT_INTERRUPT_15 ||
1385 temp3 == temp2 + TBR_TT_INTERRUPT_15
1386 ) {
1387 asm volatile("movsg pcsr,%0" : "=r"(__debug_frame->pc));
1388 __debug_frame->psr |= PSR_ET;
1389 __debug_frame->psr &= ~PSR_S;
1390 if (__debug_frame->psr & PSR_PS)
1391 __debug_frame->psr |= PSR_S;
1392 __debug_regs->brr = (__debug_frame->tbr & TBR_TT) << 12;
1393 __debug_regs->brr |= BRR_EB;
1394 sigval = SIGINT;
1395 }
1396
1397 /* handle the decrement timer going off (FR451 only) */
1398 if (temp3 == temp + TBR_TT_DECREMENT_TIMER ||
1399 temp3 == temp2 + TBR_TT_DECREMENT_TIMER
1400 ) {
1401 asm volatile("movgs %0,timerd" :: "r"(10000000));
1402 asm volatile("movsg pcsr,%0" : "=r"(__debug_frame->pc));
1403 __debug_frame->psr |= PSR_ET;
1404 __debug_frame->psr &= ~PSR_S;
1405 if (__debug_frame->psr & PSR_PS)
1406 __debug_frame->psr |= PSR_S;
1407 __debug_regs->brr = (__debug_frame->tbr & TBR_TT) << 12;
1408 __debug_regs->brr |= BRR_EB;
1409 sigval = SIGXCPU;
1410 }
1411
1412 LEDS(0x5002);
1413
1414 /* after a BREAK insn, the PC lands on the far side of it */
1415 if (__debug_regs->brr & BRR_SB)
1416 gdbstub_check_breakpoint();
1417
1418 LEDS(0x5003);
1419
1420 /* handle attempts to write console data via GDB "O" commands */
1421 if (__debug_frame->pc == (unsigned long) gdbstub_console_write + 4) {
1422 __gdbstub_console_write((struct console *) __debug_frame->gr8,
1423 (const char *) __debug_frame->gr9,
1424 (unsigned) __debug_frame->gr10);
1425 goto done;
1426 }
1427
1428 if (gdbstub_rx_unget) {
1429 sigval = SIGINT;
1430 goto packet_waiting;
1431 }
1432
1433 if (!sigval)
1434 sigval = gdbstub_compute_signal(__debug_regs->brr);
1435
1436 LEDS(0x5004);
1437
1438 /* send a message to the debugger's user saying what happened if the cause
1439 * may not be clear-cut (we can't map exceptions onto signals properly)
1440 */
1441 if (sigval != SIGINT && sigval != SIGTRAP && sigval != SIGILL) {
1442 static const char title[] = "Break ";
1443 static const char crlf[] = "\r\n";
1444 unsigned long brr = __debug_regs->brr;
1445 char hx;
1446
1447 ptr = output_buffer;
1448 *ptr++ = 'O';
1449 ptr = mem2hex(title, ptr, sizeof(title) - 1,0);
1450
1451 hx = hexchars[(brr & 0xf0000000) >> 28];
1452 *ptr++ = hexchars[hx >> 4]; *ptr++ = hexchars[hx & 0xf];
1453 hx = hexchars[(brr & 0x0f000000) >> 24];
1454 *ptr++ = hexchars[hx >> 4]; *ptr++ = hexchars[hx & 0xf];
1455 hx = hexchars[(brr & 0x00f00000) >> 20];
1456 *ptr++ = hexchars[hx >> 4]; *ptr++ = hexchars[hx & 0xf];
1457 hx = hexchars[(brr & 0x000f0000) >> 16];
1458 *ptr++ = hexchars[hx >> 4]; *ptr++ = hexchars[hx & 0xf];
1459 hx = hexchars[(brr & 0x0000f000) >> 12];
1460 *ptr++ = hexchars[hx >> 4]; *ptr++ = hexchars[hx & 0xf];
1461 hx = hexchars[(brr & 0x00000f00) >> 8];
1462 *ptr++ = hexchars[hx >> 4]; *ptr++ = hexchars[hx & 0xf];
1463 hx = hexchars[(brr & 0x000000f0) >> 4];
1464 *ptr++ = hexchars[hx >> 4]; *ptr++ = hexchars[hx & 0xf];
1465 hx = hexchars[(brr & 0x0000000f)];
1466 *ptr++ = hexchars[hx >> 4]; *ptr++ = hexchars[hx & 0xf];
1467
1468 ptr = mem2hex(crlf, ptr, sizeof(crlf) - 1, 0);
1469 *ptr = 0;
1470 gdbstub_send_packet(output_buffer); /* send it off... */
1471 }
1472
1473 LEDS(0x5005);
1474
1475 /* tell the debugger that an exception has occurred */
1476 ptr = output_buffer;
1477
1478 /* Send trap type (converted to signal) */
1479 *ptr++ = 'T';
1480 *ptr++ = hexchars[sigval >> 4];
1481 *ptr++ = hexchars[sigval & 0xf];
1482
1483 /* Send Error PC */
1484 *ptr++ = hexchars[GDB_REG_PC >> 4];
1485 *ptr++ = hexchars[GDB_REG_PC & 0xf];
1486 *ptr++ = ':';
1487 ptr = mem2hex(&__debug_frame->pc, ptr, 4, 0);
1488 *ptr++ = ';';
1489
1490 /*
1491 * Send frame pointer
1492 */
1493 *ptr++ = hexchars[GDB_REG_FP >> 4];
1494 *ptr++ = hexchars[GDB_REG_FP & 0xf];
1495 *ptr++ = ':';
1496 ptr = mem2hex(&__debug_frame->fp, ptr, 4, 0);
1497 *ptr++ = ';';
1498
1499 /*
1500 * Send stack pointer
1501 */
1502 *ptr++ = hexchars[GDB_REG_SP >> 4];
1503 *ptr++ = hexchars[GDB_REG_SP & 0xf];
1504 *ptr++ = ':';
1505 ptr = mem2hex(&__debug_frame->sp, ptr, 4, 0);
1506 *ptr++ = ';';
1507
1508 *ptr++ = 0;
1509 gdbstub_send_packet(output_buffer); /* send it off... */
1510
1511 LEDS(0x5006);
1512
1513 packet_waiting:
1514 gdbstub_get_mmu_state();
1515
1516 /* wait for input from remote GDB */
1517 while (1) {
1518 output_buffer[0] = 0;
1519
1520 LEDS(0x5007);
1521 gdbstub_recv_packet(input_buffer);
1522 LEDS(0x5600 | input_buffer[0]);
1523
1524 switch (input_buffer[0]) {
1525 /* request repeat of last signal number */
1526 case '?':
1527 output_buffer[0] = 'S';
1528 output_buffer[1] = hexchars[sigval >> 4];
1529 output_buffer[2] = hexchars[sigval & 0xf];
1530 output_buffer[3] = 0;
1531 break;
1532
1533 case 'd':
1534 /* toggle debug flag */
1535 break;
1536
1537 /* return the value of the CPU registers
1538 * - GR0, GR1, GR2, GR3, GR4, GR5, GR6, GR7,
1539 * - GR8, GR9, GR10, GR11, GR12, GR13, GR14, GR15,
1540 * - GR16, GR17, GR18, GR19, GR20, GR21, GR22, GR23,
1541 * - GR24, GR25, GR26, GR27, GR28, GR29, GR30, GR31,
1542 * - GR32, GR33, GR34, GR35, GR36, GR37, GR38, GR39,
1543 * - GR40, GR41, GR42, GR43, GR44, GR45, GR46, GR47,
1544 * - GR48, GR49, GR50, GR51, GR52, GR53, GR54, GR55,
1545 * - GR56, GR57, GR58, GR59, GR60, GR61, GR62, GR63,
1546 * - FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
1547 * - FP8, FP9, FP10, FP11, FP12, FP13, FP14, FP15,
1548 * - FP16, FP17, FP18, FP19, FP20, FP21, FP22, FP23,
1549 * - FP24, FP25, FP26, FP27, FP28, FP29, FP30, FP31,
1550 * - FP32, FP33, FP34, FP35, FP36, FP37, FP38, FP39,
1551 * - FP40, FP41, FP42, FP43, FP44, FP45, FP46, FP47,
1552 * - FP48, FP49, FP50, FP51, FP52, FP53, FP54, FP55,
1553 * - FP56, FP57, FP58, FP59, FP60, FP61, FP62, FP63,
1554 * - PC, PSR, CCR, CCCR,
1555 * - _X132, _X133, _X134
1556 * - TBR, BRR, DBAR0, DBAR1, DBAR2, DBAR3,
1557 * - _X141, _X142, _X143, _X144,
1558 * - LR, LCR
1559 */
1560 case 'g':
1561 zero = 0;
1562 ptr = output_buffer;
1563
1564 /* deal with GR0, GR1-GR27, GR28-GR31, GR32-GR63 */
1565 ptr = mem2hex(&zero, ptr, 4, 0);
1566
1567 for (loop = 1; loop <= 27; loop++)
1568 ptr = mem2hex((unsigned long *)__debug_frame + REG_GR(loop),
1569 ptr, 4, 0);
1570 temp = (unsigned long) __frame;
1571 ptr = mem2hex(&temp, ptr, 4, 0);
1572 ptr = mem2hex((unsigned long *)__debug_frame + REG_GR(29), ptr, 4, 0);
1573 ptr = mem2hex((unsigned long *)__debug_frame + REG_GR(30), ptr, 4, 0);
1574#ifdef CONFIG_MMU
1575 ptr = mem2hex((unsigned long *)__debug_frame + REG_GR(31), ptr, 4, 0);
1576#else
1577 temp = (unsigned long) __debug_frame;
1578 ptr = mem2hex(&temp, ptr, 4, 0);
1579#endif
1580
1581 for (loop = 32; loop <= 63; loop++)
1582 ptr = mem2hex((unsigned long *)__debug_frame + REG_GR(loop),
1583 ptr, 4, 0);
1584
1585 /* deal with FR0-FR63 */
1586 for (loop = 0; loop <= 63; loop++)
1587 ptr = mem2hex((unsigned long *)&__break_user_context +
1588 __FPMEDIA_FR(loop),
1589 ptr, 4, 0);
1590
1591 /* deal with special registers */
1592 ptr = mem2hex(&__debug_frame->pc, ptr, 4, 0);
1593 ptr = mem2hex(&__debug_frame->psr, ptr, 4, 0);
1594 ptr = mem2hex(&__debug_frame->ccr, ptr, 4, 0);
1595 ptr = mem2hex(&__debug_frame->cccr, ptr, 4, 0);
1596 ptr = mem2hex(&zero, ptr, 4, 0);
1597 ptr = mem2hex(&zero, ptr, 4, 0);
1598 ptr = mem2hex(&zero, ptr, 4, 0);
1599 ptr = mem2hex(&__debug_frame->tbr, ptr, 4, 0);
1600 ptr = mem2hex(&__debug_regs->brr , ptr, 4, 0);
1601
1602 asm volatile("movsg dbar0,%0" : "=r"(dbar));
1603 ptr = mem2hex(&dbar, ptr, 4, 0);
1604 asm volatile("movsg dbar1,%0" : "=r"(dbar));
1605 ptr = mem2hex(&dbar, ptr, 4, 0);
1606 asm volatile("movsg dbar2,%0" : "=r"(dbar));
1607 ptr = mem2hex(&dbar, ptr, 4, 0);
1608 asm volatile("movsg dbar3,%0" : "=r"(dbar));
1609 ptr = mem2hex(&dbar, ptr, 4, 0);
1610
1611 asm volatile("movsg scr0,%0" : "=r"(dbar));
1612 ptr = mem2hex(&dbar, ptr, 4, 0);
1613 asm volatile("movsg scr1,%0" : "=r"(dbar));
1614 ptr = mem2hex(&dbar, ptr, 4, 0);
1615 asm volatile("movsg scr2,%0" : "=r"(dbar));
1616 ptr = mem2hex(&dbar, ptr, 4, 0);
1617 asm volatile("movsg scr3,%0" : "=r"(dbar));
1618 ptr = mem2hex(&dbar, ptr, 4, 0);
1619
1620 ptr = mem2hex(&__debug_frame->lr, ptr, 4, 0);
1621 ptr = mem2hex(&__debug_frame->lcr, ptr, 4, 0);
1622
1623 ptr = mem2hex(&__debug_frame->iacc0, ptr, 8, 0);
1624
1625 ptr = mem2hex(&__break_user_context.f.fsr[0], ptr, 4, 0);
1626
1627 for (loop = 0; loop <= 7; loop++)
1628 ptr = mem2hex(&__break_user_context.f.acc[loop], ptr, 4, 0);
1629
1630 ptr = mem2hex(&__break_user_context.f.accg, ptr, 8, 0);
1631
1632 for (loop = 0; loop <= 1; loop++)
1633 ptr = mem2hex(&__break_user_context.f.msr[loop], ptr, 4, 0);
1634
1635 ptr = mem2hex(&__debug_frame->gner0, ptr, 4, 0);
1636 ptr = mem2hex(&__debug_frame->gner1, ptr, 4, 0);
1637
1638 ptr = mem2hex(&__break_user_context.f.fner[0], ptr, 4, 0);
1639 ptr = mem2hex(&__break_user_context.f.fner[1], ptr, 4, 0);
1640
1641 break;
1642
1643 /* set the values of the CPU registers */
1644 case 'G':
1645 ptr = &input_buffer[1];
1646
1647 /* deal with GR0, GR1-GR27, GR28-GR31, GR32-GR63 */
1648 ptr = hex2mem(ptr, &temp, 4);
1649
1650 for (loop = 1; loop <= 27; loop++)
1651 ptr = hex2mem(ptr, (unsigned long *)__debug_frame + REG_GR(loop),
1652 4);
1653
1654 ptr = hex2mem(ptr, &temp, 4);
1655 __frame = (struct pt_regs *) temp;
1656 ptr = hex2mem(ptr, &__debug_frame->gr29, 4);
1657 ptr = hex2mem(ptr, &__debug_frame->gr30, 4);
1658#ifdef CONFIG_MMU
1659 ptr = hex2mem(ptr, &__debug_frame->gr31, 4);
1660#else
1661 ptr = hex2mem(ptr, &temp, 4);
1662#endif
1663
1664 for (loop = 32; loop <= 63; loop++)
1665 ptr = hex2mem(ptr, (unsigned long *)__debug_frame + REG_GR(loop),
1666 4);
1667
1668 /* deal with FR0-FR63 */
1669 for (loop = 0; loop <= 63; loop++)
1670 ptr = hex2mem(ptr, (unsigned long *)&__break_user_context +
1671 __FPMEDIA_FR(loop),
1672 4);
1673
1674 /* deal with special registers */
1675 ptr = hex2mem(ptr, &__debug_frame->pc, 4);
1676 ptr = hex2mem(ptr, &__debug_frame->psr, 4);
1677 ptr = hex2mem(ptr, &__debug_frame->ccr, 4);
1678 ptr = hex2mem(ptr, &__debug_frame->cccr,4);
1679
1680 for (loop = 132; loop <= 140; loop++)
1681 ptr = hex2mem(ptr, &temp, 4);
1682
1683 ptr = hex2mem(ptr, &temp, 4);
1684 asm volatile("movgs %0,scr0" :: "r"(temp));
1685 ptr = hex2mem(ptr, &temp, 4);
1686 asm volatile("movgs %0,scr1" :: "r"(temp));
1687 ptr = hex2mem(ptr, &temp, 4);
1688 asm volatile("movgs %0,scr2" :: "r"(temp));
1689 ptr = hex2mem(ptr, &temp, 4);
1690 asm volatile("movgs %0,scr3" :: "r"(temp));
1691
1692 ptr = hex2mem(ptr, &__debug_frame->lr, 4);
1693 ptr = hex2mem(ptr, &__debug_frame->lcr, 4);
1694
1695 ptr = hex2mem(ptr, &__debug_frame->iacc0, 8);
1696
1697 ptr = hex2mem(ptr, &__break_user_context.f.fsr[0], 4);
1698
1699 for (loop = 0; loop <= 7; loop++)
1700 ptr = hex2mem(ptr, &__break_user_context.f.acc[loop], 4);
1701
1702 ptr = hex2mem(ptr, &__break_user_context.f.accg, 8);
1703
1704 for (loop = 0; loop <= 1; loop++)
1705 ptr = hex2mem(ptr, &__break_user_context.f.msr[loop], 4);
1706
1707 ptr = hex2mem(ptr, &__debug_frame->gner0, 4);
1708 ptr = hex2mem(ptr, &__debug_frame->gner1, 4);
1709
1710 ptr = hex2mem(ptr, &__break_user_context.f.fner[0], 4);
1711 ptr = hex2mem(ptr, &__break_user_context.f.fner[1], 4);
1712
1713 gdbstub_strcpy(output_buffer,"OK");
1714 break;
1715
1716 /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
1717 case 'm':
1718 ptr = &input_buffer[1];
1719
1720 if (hexToInt(&ptr, &addr) &&
1721 *ptr++ == ',' &&
1722 hexToInt(&ptr, &length)
1723 ) {
1724 if (mem2hex((char *)addr, output_buffer, length, 1))
1725 break;
1726 gdbstub_strcpy (output_buffer, "E03");
1727 }
1728 else {
1729 gdbstub_strcpy(output_buffer,"E01");
1730 }
1731 break;
1732
1733 /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA, return OK */
1734 case 'M':
1735 ptr = &input_buffer[1];
1736
1737 if (hexToInt(&ptr, &addr) &&
1738 *ptr++ == ',' &&
1739 hexToInt(&ptr, &length) &&
1740 *ptr++ == ':'
1741 ) {
1742 if (hex2mem(ptr, (char *)addr, length)) {
1743 gdbstub_strcpy(output_buffer, "OK");
1744 }
1745 else {
1746 gdbstub_strcpy(output_buffer, "E03");
1747 }
1748 }
1749 else
1750 gdbstub_strcpy(output_buffer, "E02");
1751
1752 flush_cache = 1;
1753 break;
1754
1755 /* PNN,=RRRRRRRR: Write value R to reg N return OK */
1756 case 'P':
1757 ptr = &input_buffer[1];
1758
1759 if (!hexToInt(&ptr, &addr) ||
1760 *ptr++ != '=' ||
1761 !hexToInt(&ptr, &temp)
1762 ) {
1763 gdbstub_strcpy(output_buffer, "E01");
1764 break;
1765 }
1766
1767 temp2 = 1;
1768 switch (addr) {
1769 case GDB_REG_GR(0):
1770 break;
1771 case GDB_REG_GR(1) ... GDB_REG_GR(63):
1772 __break_user_context.i.gr[addr - GDB_REG_GR(0)] = temp;
1773 break;
1774 case GDB_REG_FR(0) ... GDB_REG_FR(63):
1775 __break_user_context.f.fr[addr - GDB_REG_FR(0)] = temp;
1776 break;
1777 case GDB_REG_PC:
1778 __break_user_context.i.pc = temp;
1779 break;
1780 case GDB_REG_PSR:
1781 __break_user_context.i.psr = temp;
1782 break;
1783 case GDB_REG_CCR:
1784 __break_user_context.i.ccr = temp;
1785 break;
1786 case GDB_REG_CCCR:
1787 __break_user_context.i.cccr = temp;
1788 break;
1789 case GDB_REG_BRR:
1790 __debug_regs->brr = temp;
1791 break;
1792 case GDB_REG_LR:
1793 __break_user_context.i.lr = temp;
1794 break;
1795 case GDB_REG_LCR:
1796 __break_user_context.i.lcr = temp;
1797 break;
1798 case GDB_REG_FSR0:
1799 __break_user_context.f.fsr[0] = temp;
1800 break;
1801 case GDB_REG_ACC(0) ... GDB_REG_ACC(7):
1802 __break_user_context.f.acc[addr - GDB_REG_ACC(0)] = temp;
1803 break;
1804 case GDB_REG_ACCG(0):
1805 *(uint32_t *) &__break_user_context.f.accg[0] = temp;
1806 break;
1807 case GDB_REG_ACCG(4):
1808 *(uint32_t *) &__break_user_context.f.accg[4] = temp;
1809 break;
1810 case GDB_REG_MSR(0) ... GDB_REG_MSR(1):
1811 __break_user_context.f.msr[addr - GDB_REG_MSR(0)] = temp;
1812 break;
1813 case GDB_REG_GNER(0) ... GDB_REG_GNER(1):
1814 __break_user_context.i.gner[addr - GDB_REG_GNER(0)] = temp;
1815 break;
1816 case GDB_REG_FNER(0) ... GDB_REG_FNER(1):
1817 __break_user_context.f.fner[addr - GDB_REG_FNER(0)] = temp;
1818 break;
1819 default:
1820 temp2 = 0;
1821 break;
1822 }
1823
1824 if (temp2) {
1825 gdbstub_strcpy(output_buffer, "OK");
1826 }
1827 else {
1828 gdbstub_strcpy(output_buffer, "E02");
1829 }
1830 break;
1831
1832 /* cAA..AA Continue at address AA..AA (optional) */
1833 case 'c':
1834 /* try to read optional parameter, pc unchanged if no parm */
1835 ptr = &input_buffer[1];
1836 if (hexToInt(&ptr, &addr))
1837 __debug_frame->pc = addr;
1838 goto done;
1839
1840 /* kill the program */
1841 case 'k' :
1842 goto done; /* just continue */
1843
1844
1845 /* reset the whole machine (FIXME: system dependent) */
1846 case 'r':
1847 break;
1848
1849
1850 /* step to next instruction */
1851 case 's':
1852 __debug_regs->dcr |= DCR_SE;
1853 goto done;
1854
1855 /* set baud rate (bBB) */
1856 case 'b':
1857 ptr = &input_buffer[1];
1858 if (!hexToInt(&ptr, &temp)) {
1859 gdbstub_strcpy(output_buffer,"B01");
1860 break;
1861 }
1862
1863 if (temp) {
1864 /* ack before changing speed */
1865 gdbstub_send_packet("OK");
1866 gdbstub_set_baud(temp);
1867 }
1868 break;
1869
1870 /* set breakpoint */
1871 case 'Z':
1872 ptr = &input_buffer[1];
1873
1874 if (!hexToInt(&ptr,&temp) || *ptr++ != ',' ||
1875 !hexToInt(&ptr,&addr) || *ptr++ != ',' ||
1876 !hexToInt(&ptr,&length)
1877 ) {
1878 gdbstub_strcpy(output_buffer,"E01");
1879 break;
1880 }
1881
1882 if (temp >= 5) {
1883 gdbstub_strcpy(output_buffer,"E03");
1884 break;
1885 }
1886
1887 if (gdbstub_set_breakpoint(temp, addr, length) < 0) {
1888 gdbstub_strcpy(output_buffer,"E03");
1889 break;
1890 }
1891
1892 if (temp == 0)
1893 flush_cache = 1; /* soft bkpt by modified memory */
1894
1895 gdbstub_strcpy(output_buffer,"OK");
1896 break;
1897
1898 /* clear breakpoint */
1899 case 'z':
1900 ptr = &input_buffer[1];
1901
1902 if (!hexToInt(&ptr,&temp) || *ptr++ != ',' ||
1903 !hexToInt(&ptr,&addr) || *ptr++ != ',' ||
1904 !hexToInt(&ptr,&length)
1905 ) {
1906 gdbstub_strcpy(output_buffer,"E01");
1907 break;
1908 }
1909
1910 if (temp >= 5) {
1911 gdbstub_strcpy(output_buffer,"E03");
1912 break;
1913 }
1914
1915 if (gdbstub_clear_breakpoint(temp, addr, length) < 0) {
1916 gdbstub_strcpy(output_buffer,"E03");
1917 break;
1918 }
1919
1920 if (temp == 0)
1921 flush_cache = 1; /* soft bkpt by modified memory */
1922
1923 gdbstub_strcpy(output_buffer,"OK");
1924 break;
1925
1926 default:
1927 gdbstub_proto("### GDB Unsupported Cmd '%s'\n",input_buffer);
1928 break;
1929 }
1930
1931 /* reply to the request */
1932 LEDS(0x5009);
1933 gdbstub_send_packet(output_buffer);
1934 }
1935
1936 done:
1937 restore_user_regs(&__break_user_context);
1938
1939 //gdbstub_dump_debugregs();
1940 //gdbstub_printk("<-- gdbstub() %08x\n", __debug_frame->pc);
1941
1942 /* need to flush the instruction cache before resuming, as we may have
1943 * deposited a breakpoint, and the icache probably has no way of
1944 * knowing that a data ref to some location may have changed something
1945 * that is in the instruction cache. NB: We flush both caches, just to
1946 * be sure...
1947 */
1948
1949 /* note: flushing the icache will clobber EAR0 on the FR451 */
1950 if (flush_cache)
1951 gdbstub_purge_cache();
1952
1953 LEDS(0x5666);
1954
1955} /* end gdbstub() */
1956
1957/*****************************************************************************/
1958/*
1959 * initialise the GDB stub
1960 */
1961void __init gdbstub_init(void)
1962{
1963#ifdef CONFIG_GDBSTUB_IMMEDIATE
1964 unsigned char ch;
1965 int ret;
1966#endif
1967
1968 gdbstub_printk("%s", gdbstub_banner);
1969 gdbstub_printk("DCR: %x\n", __debug_regs->dcr);
1970
1971 gdbstub_io_init();
1972
1973 /* try to talk to GDB (or anyone insane enough to want to type GDB protocol by hand) */
1974 gdbstub_proto("### GDB Tx ACK\n");
1975 gdbstub_tx_char('+'); /* 'hello world' */
1976
1977#ifdef CONFIG_GDBSTUB_IMMEDIATE
1978 gdbstub_printk("GDB Stub waiting for packet\n");
1979
1980 /*
1981 * In case GDB is started before us, ack any packets
1982 * (presumably "$?#xx") sitting there.
1983 */
1984 do { gdbstub_rx_char(&ch, 0); } while (ch != '$');
1985 do { gdbstub_rx_char(&ch, 0); } while (ch != '#');
1986 do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat first csum byte */
1987 do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat second csum byte */
1988
1989 gdbstub_proto("### GDB Tx NAK\n");
1990 gdbstub_tx_char('-'); /* nak it */
1991
1992#else
1993 gdbstub_printk("GDB Stub set\n");
1994#endif
1995
1996#if 0
1997 /* send banner */
1998 ptr = output_buffer;
1999 *ptr++ = 'O';
2000 ptr = mem2hex(gdbstub_banner, ptr, sizeof(gdbstub_banner) - 1, 0);
2001 gdbstub_send_packet(output_buffer);
2002#endif
2003#if defined(CONFIG_GDBSTUB_CONSOLE) && defined(CONFIG_GDBSTUB_IMMEDIATE)
2004 register_console(&gdbstub_console);
2005#endif
2006
2007} /* end gdbstub_init() */
2008
2009/*****************************************************************************/
2010/*
2011 * register the console at a more appropriate time
2012 */
2013#if defined (CONFIG_GDBSTUB_CONSOLE) && !defined(CONFIG_GDBSTUB_IMMEDIATE)
2014static int __init gdbstub_postinit(void)
2015{
2016 printk("registering console\n");
2017 register_console(&gdbstub_console);
2018 return 0;
2019} /* end gdbstub_postinit() */
2020
2021__initcall(gdbstub_postinit);
2022#endif
2023
2024/*****************************************************************************/
2025/*
2026 * send an exit message to GDB
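 * - the 'Wxx' reply tells the remote GDB that the target exited with status xx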
2027 */
2028void gdbstub_exit(int status)
2029{
2030 unsigned char checksum;
2031 int count;
2032 unsigned char ch;
2033
2034 sprintf(output_buffer,"W%02x",status&0xff);
2035
2036 gdbstub_tx_char('$');
2037 checksum = 0;
2038 count = 0;
2039
2040 while ((ch = output_buffer[count]) != 0) {
2041 gdbstub_tx_char(ch);
2042 checksum += ch;
2043 count += 1;
2044 }
2045
2046 gdbstub_tx_char('#');
2047 gdbstub_tx_char(hexchars[checksum >> 4]);
2048 gdbstub_tx_char(hexchars[checksum & 0xf]);
2049
2050 /* make sure the output is flushed, or else RedBoot might clobber it */
2051 gdbstub_tx_char('-');
2052 gdbstub_tx_flush();
2053
2054} /* end gdbstub_exit() */
2055
2056/*****************************************************************************/
2057/*
2058 * GDB wants to call malloc() and free() to allocate memory for calling kernel
2059 * functions directly from its command line
2060 */
2061static void *malloc(size_t size) __attribute__((unused));
2062static void *malloc(size_t size)
2063{
2064 return kmalloc(size, GFP_ATOMIC);
2065}
2066
2067static void free(void *p) __attribute__((unused));
2068static void free(void *p)
2069{
2070 kfree(p);
2071}
2072
2073static uint32_t ___get_HSR0(void) __attribute__((unused));
2074static uint32_t ___get_HSR0(void)
2075{
2076 return __get_HSR(0);
2077}
2078
2079static uint32_t ___set_HSR0(uint32_t x) __attribute__((unused));
2080static uint32_t ___set_HSR0(uint32_t x)
2081{
2082 __set_HSR(0, x);
2083 return __get_HSR(0);
2084}
diff --git a/arch/frv/kernel/head-mmu-fr451.S b/arch/frv/kernel/head-mmu-fr451.S
new file mode 100644
index 000000000000..a143c2f66ee5
--- /dev/null
+++ b/arch/frv/kernel/head-mmu-fr451.S
@@ -0,0 +1,374 @@
1/* head-mmu-fr451.S: FR451 mmu-linux specific bits of initialisation
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/threads.h>
14#include <linux/linkage.h>
15#include <asm/ptrace.h>
16#include <asm/page.h>
17#include <asm/mem-layout.h>
18#include <asm/spr-regs.h>
19#include <asm/mb86943a.h>
20#include "head.inc"
21
22
23#define __400_DBR0 0xfe000e00
24#define __400_DBR1 0xfe000e08
25#define __400_DBR2 0xfe000e10
26#define __400_DBR3 0xfe000e18
27#define __400_DAM0 0xfe000f00
28#define __400_DAM1 0xfe000f08
29#define __400_DAM2 0xfe000f10
30#define __400_DAM3 0xfe000f18
31#define __400_LGCR 0xfe000010
32#define __400_LCR 0xfe000100
33#define __400_LSBR 0xfe000c00
34
35 .section .text.init,"ax"
36 .balign 4
37
38###############################################################################
39#
40# describe the position and layout of the SDRAM controller registers
41#
42# ENTRY: EXIT:
43# GR5 - cacheline size
44# GR11 - displacement of 2nd SDRAM addr reg from GR14
45# GR12 - displacement of 3rd SDRAM addr reg from GR14
46# GR13 - displacement of 4th SDRAM addr reg from GR14
47# GR14 - address of 1st SDRAM addr reg
48# GR15 - amount to shift address by to match SDRAM addr reg
49# GR26 &__head_reference [saved]
50# GR30 LED address [saved]
51# CC0 - T if DBR0 is present
52# CC1 - T if DBR1 is present
53# CC2 - T if DBR2 is present
54# CC3 - T if DBR3 is present
55#
56###############################################################################
57 .globl __head_fr451_describe_sdram
58__head_fr451_describe_sdram:
59 sethi.p %hi(__400_DBR0),gr14
60 setlo %lo(__400_DBR0),gr14
61 setlos.p #__400_DBR1-__400_DBR0,gr11
62 setlos #__400_DBR2-__400_DBR0,gr12
63 setlos.p #__400_DBR3-__400_DBR0,gr13
64 setlos #32,gr5 ; cacheline size
65 setlos.p #0,gr15 ; amount to shift addr reg by
66 setlos #0x00ff,gr4
67 movgs gr4,cccr ; extant DARS/DAMK regs
68 bralr
69
70###############################################################################
71#
72# rearrange the bus controller registers
73#
74# ENTRY: EXIT:
75# GR26 &__head_reference [saved]
76# GR30 LED address revised LED address
77#
78###############################################################################
79 .globl __head_fr451_set_busctl
80__head_fr451_set_busctl:
81 sethi.p %hi(__400_LGCR),gr4
82 setlo %lo(__400_LGCR),gr4
83 sethi.p %hi(__400_LSBR),gr10
84 setlo %lo(__400_LSBR),gr10
85 sethi.p %hi(__400_LCR),gr11
86 setlo %lo(__400_LCR),gr11
87
88 # set the bus controller
89 ldi @(gr4,#0),gr5
90 ori gr5,#0xff,gr5 ; make sure all chip-selects are enabled
91 sti gr5,@(gr4,#0)
92
93 sethi.p %hi(__region_CS1),gr4
94 setlo %lo(__region_CS1),gr4
95 sethi.p %hi(__region_CS1_M),gr5
96 setlo %lo(__region_CS1_M),gr5
97 sethi.p %hi(__region_CS1_C),gr6
98 setlo %lo(__region_CS1_C),gr6
99 sti gr4,@(gr10,#1*0x08)
100 sti gr5,@(gr10,#1*0x08+0x100)
101 sti gr6,@(gr11,#1*0x08)
102 sethi.p %hi(__region_CS2),gr4
103 setlo %lo(__region_CS2),gr4
104 sethi.p %hi(__region_CS2_M),gr5
105 setlo %lo(__region_CS2_M),gr5
106 sethi.p %hi(__region_CS2_C),gr6
107 setlo %lo(__region_CS2_C),gr6
108 sti gr4,@(gr10,#2*0x08)
109 sti gr5,@(gr10,#2*0x08+0x100)
110 sti gr6,@(gr11,#2*0x08)
111 sethi.p %hi(__region_CS3),gr4
112 setlo %lo(__region_CS3),gr4
113 sethi.p %hi(__region_CS3_M),gr5
114 setlo %lo(__region_CS3_M),gr5
115 sethi.p %hi(__region_CS3_C),gr6
116 setlo %lo(__region_CS3_C),gr6
117 sti gr4,@(gr10,#3*0x08)
118 sti gr5,@(gr10,#3*0x08+0x100)
119 sti gr6,@(gr11,#3*0x08)
120 sethi.p %hi(__region_CS4),gr4
121 setlo %lo(__region_CS4),gr4
122 sethi.p %hi(__region_CS4_M),gr5
123 setlo %lo(__region_CS4_M),gr5
124 sethi.p %hi(__region_CS4_C),gr6
125 setlo %lo(__region_CS4_C),gr6
126 sti gr4,@(gr10,#4*0x08)
127 sti gr5,@(gr10,#4*0x08+0x100)
128 sti gr6,@(gr11,#4*0x08)
129 sethi.p %hi(__region_CS5),gr4
130 setlo %lo(__region_CS5),gr4
131 sethi.p %hi(__region_CS5_M),gr5
132 setlo %lo(__region_CS5_M),gr5
133 sethi.p %hi(__region_CS5_C),gr6
134 setlo %lo(__region_CS5_C),gr6
135 sti gr4,@(gr10,#5*0x08)
136 sti gr5,@(gr10,#5*0x08+0x100)
137 sti gr6,@(gr11,#5*0x08)
138 sethi.p %hi(__region_CS6),gr4
139 setlo %lo(__region_CS6),gr4
140 sethi.p %hi(__region_CS6_M),gr5
141 setlo %lo(__region_CS6_M),gr5
142 sethi.p %hi(__region_CS6_C),gr6
143 setlo %lo(__region_CS6_C),gr6
144 sti gr4,@(gr10,#6*0x08)
145 sti gr5,@(gr10,#6*0x08+0x100)
146 sti gr6,@(gr11,#6*0x08)
147 sethi.p %hi(__region_CS7),gr4
148 setlo %lo(__region_CS7),gr4
149 sethi.p %hi(__region_CS7_M),gr5
150 setlo %lo(__region_CS7_M),gr5
151 sethi.p %hi(__region_CS7_C),gr6
152 setlo %lo(__region_CS7_C),gr6
153 sti gr4,@(gr10,#7*0x08)
154 sti gr5,@(gr10,#7*0x08+0x100)
155 sti gr6,@(gr11,#7*0x08)
156 membar
157 bar
158
159 # adjust LED bank address
160#ifdef CONFIG_MB93091_VDK
161 sethi.p %hi(__region_CS2 + 0x01200004),gr30
162 setlo %lo(__region_CS2 + 0x01200004),gr30
163#endif
164 bralr
165
166###############################################################################
167#
168# determine the total SDRAM size
169#
170# ENTRY: EXIT:
171# GR25 - SDRAM size
172# GR26 &__head_reference [saved]
173# GR30 LED address [saved]
174#
175###############################################################################
176 .globl __head_fr451_survey_sdram
177__head_fr451_survey_sdram:
178 sethi.p %hi(__400_DAM0),gr11
179 setlo %lo(__400_DAM0),gr11
180 sethi.p %hi(__400_DBR0),gr12
181 setlo %lo(__400_DBR0),gr12
182
183 sethi.p %hi(0xfe000000),gr17 ; unused SDRAM DBR value
184 setlo %lo(0xfe000000),gr17
185 setlos #0,gr25
186
187 ldi @(gr12,#0x00),gr4 ; DAR0
188 subcc gr4,gr17,gr0,icc0
189 beq icc0,#0,__head_no_DCS0
190 ldi @(gr11,#0x00),gr6 ; DAM0: bits 31:20 match addr 31:20
191 add gr25,gr6,gr25
192 addi gr25,#1,gr25
193__head_no_DCS0:
194
195 ldi @(gr12,#0x08),gr4 ; DAR1
196 subcc gr4,gr17,gr0,icc0
197 beq icc0,#0,__head_no_DCS1
198 ldi @(gr11,#0x08),gr6 ; DAM1: bits 31:20 match addr 31:20
199 add gr25,gr6,gr25
200 addi gr25,#1,gr25
201__head_no_DCS1:
202
203 ldi @(gr12,#0x10),gr4 ; DAR2
204 subcc gr4,gr17,gr0,icc0
205 beq icc0,#0,__head_no_DCS2
206 ldi @(gr11,#0x10),gr6 ; DAM2: bits 31:20 match addr 31:20
207 add gr25,gr6,gr25
208 addi gr25,#1,gr25
209__head_no_DCS2:
210
211 ldi @(gr12,#0x18),gr4 ; DAR3
212 subcc gr4,gr17,gr0,icc0
213 beq icc0,#0,__head_no_DCS3
214 ldi @(gr11,#0x18),gr6 ; DAM3: bits 31:20 match addr 31:20
215 add gr25,gr6,gr25
216 addi gr25,#1,gr25
217__head_no_DCS3:
218 bralr
219
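
__head_fr451_survey_sdram above (like the FR401 and FR555 variants later in this diff) adds up the sizes of whichever SDRAM chip selects are populated: a DBR still holding 0xfe000000 marks an unused bank, otherwise the code treats the matching DAM value as the bank size minus one and accumulates DAM + 1. The FR555 flavour does the same arithmetic in megabyte units, testing each DAMK against 0xfff and shifting the total left by 20 at the end. A C rendering of the FR451-style logic, purely as an illustration (the register arrays are invented stand-ins for the __400_DBR0-3 and __400_DAM0-3 locations read by the assembly), might look like:

#include <stdint.h>

#define SDRAM_BANKS		4
#define UNUSED_DBR_VALUE	0xfe000000U	/* marker tested by the code above */

/* Illustrative sketch only: sum the sizes of the populated SDRAM banks. */
unsigned long survey_sdram(const uint32_t *dbr, const uint32_t *dam)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < SDRAM_BANKS; i++)
		if (dbr[i] != UNUSED_DBR_VALUE)
			total += (unsigned long) dam[i] + 1;	/* DAM = bank size - 1 */

	return total;	/* lands in GR25 in the real code */
}
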
220###############################################################################
221#
222# set the protection map with the I/DAMPR registers
223#
224# ENTRY: EXIT:
225# GR25 SDRAM size [saved]
226# GR26 &__head_reference [saved]
227# GR30 LED address [saved]
228#
229#
230# Using this map:
231# REGISTERS ADDRESS RANGE VIEW
232# =============== ====================== ===============================
233# IAMPR0/DAMPR0 0xC0000000-0xCFFFFFFF Cached kernel RAM Window
234# DAMPR11 0xE0000000-0xFFFFFFFF Uncached I/O
235#
236###############################################################################
237 .globl __head_fr451_set_protection
238__head_fr451_set_protection:
239 movsg lr,gr27
240
241 # set the I/O region protection registers for FR451 in MMU mode
242#define PGPROT_IO xAMPRx_L|xAMPRx_M|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V
243
244 sethi.p %hi(__region_IO),gr5
245 setlo %lo(__region_IO),gr5
246 setlos #PGPROT_IO|xAMPRx_SS_512Mb,gr4
247 or gr4,gr5,gr4
248 movgs gr5,damlr11 ; General I/O tile
249 movgs gr4,dampr11
250
251 # need to open a window onto at least part of the RAM for the kernel's use
252 sethi.p %hi(__sdram_base),gr8
253 setlo %lo(__sdram_base),gr8 ; physical address
254 sethi.p %hi(__page_offset),gr9
255 setlo %lo(__page_offset),gr9 ; virtual address
256
257 setlos #xAMPRx_L|xAMPRx_M|xAMPRx_SS_256Mb|xAMPRx_S_KERNEL|xAMPRx_V,gr11
258 or gr8,gr11,gr8
259
260 movgs gr9,iamlr0 ; mapped from real address 0
261 movgs gr8,iampr0 ; cached kernel memory at 0xC0000000
262 movgs gr9,damlr0
263 movgs gr8,dampr0
264
265 # set a temporary mapping for the kernel running at address 0 until we've turned on the MMU
266 sethi.p %hi(__sdram_base),gr9
267 setlo %lo(__sdram_base),gr9 ; virtual address
268
269 and.p gr4,gr11,gr4
270 and gr5,gr11,gr5
271 or.p gr4,gr11,gr4
272 or gr5,gr11,gr5
273
274 movgs gr9,iamlr1 ; mapped from real address 0
275 movgs gr8,iampr1 ; cached kernel memory at 0x00000000
276 movgs gr9,damlr1
277 movgs gr8,dampr1
278
279 # we use DAMR2-10 for kmap_atomic(), cache flush and TLB management
280 # since the DAMLR regs are not going to change, we can set them now
281 # also set up IAMLR2 to the same as DAMLR5
282 sethi.p %hi(KMAP_ATOMIC_PRIMARY_FRAME),gr4
283 setlo %lo(KMAP_ATOMIC_PRIMARY_FRAME),gr4
284 sethi.p %hi(PAGE_SIZE),gr5
285 setlo %lo(PAGE_SIZE),gr5
286
287 movgs gr4,damlr2
288 movgs gr4,iamlr2
289 add gr4,gr5,gr4
290 movgs gr4,damlr3
291 add gr4,gr5,gr4
292 movgs gr4,damlr4
293 add gr4,gr5,gr4
294 movgs gr4,damlr5
295 add gr4,gr5,gr4
296 movgs gr4,damlr6
297 add gr4,gr5,gr4
298 movgs gr4,damlr7
299 add gr4,gr5,gr4
300 movgs gr4,damlr8
301 add gr4,gr5,gr4
302 movgs gr4,damlr9
303 add gr4,gr5,gr4
304 movgs gr4,damlr10
305
306 movgs gr0,dampr2
307 movgs gr0,dampr4
308 movgs gr0,dampr5
309 movgs gr0,dampr6
310 movgs gr0,dampr7
311 movgs gr0,dampr8
312 movgs gr0,dampr9
313 movgs gr0,dampr10
314
315 movgs gr0,iamlr3
316 movgs gr0,iamlr4
317 movgs gr0,iamlr5
318 movgs gr0,iamlr6
319 movgs gr0,iamlr7
320
321 movgs gr0,iampr2
322 movgs gr0,iampr3
323 movgs gr0,iampr4
324 movgs gr0,iampr5
325 movgs gr0,iampr6
326 movgs gr0,iampr7
327
328 # start in TLB context 0 with the swapper's page tables
329 movgs gr0,cxnr
330
331 sethi.p %hi(swapper_pg_dir),gr4
332 setlo %lo(swapper_pg_dir),gr4
333 sethi.p %hi(__page_offset),gr5
334 setlo %lo(__page_offset),gr5
335 sub gr4,gr5,gr4
336 movgs gr4,ttbr
337 setlos #xAMPRx_L|xAMPRx_M|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr5
338 or gr4,gr5,gr4
339 movgs gr4,dampr3
340
341 # the FR451 also has an extra trap base register
342 movsg tbr,gr4
343 movgs gr4,btbr
344
345 LEDS 0x3300
346 jmpl @(gr27,gr0)
347
348###############################################################################
349#
350# finish setting up the protection registers
351#
352###############################################################################
353 .globl __head_fr451_finalise_protection
354__head_fr451_finalise_protection:
355 # turn on the timers as appropriate
356 movgs gr0,timerh
357 movgs gr0,timerl
358 movgs gr0,timerd
359 movsg hsr0,gr4
360 sethi.p %hi(HSR0_ETMI),gr5
361 setlo %lo(HSR0_ETMI),gr5
362 or gr4,gr5,gr4
363 movgs gr4,hsr0
364
365 # clear the TLB entry cache
366 movgs gr0,iamlr1
367 movgs gr0,iampr1
368 movgs gr0,damlr1
369 movgs gr0,dampr1
370
371 # clear the PGE cache
372 sethi.p %hi(__flush_tlb_all),gr4
373 setlo %lo(__flush_tlb_all),gr4
374 jmpl @(gr4,gr0)
diff --git a/arch/frv/kernel/head-uc-fr401.S b/arch/frv/kernel/head-uc-fr401.S
new file mode 100644
index 000000000000..4ccf8414ae44
--- /dev/null
+++ b/arch/frv/kernel/head-uc-fr401.S
@@ -0,0 +1,311 @@
1/* head-uc-fr401.S: FR401/3/5 uc-linux specific bits of initialisation
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/threads.h>
14#include <linux/linkage.h>
15#include <asm/ptrace.h>
16#include <asm/page.h>
17#include <asm/spr-regs.h>
18#include <asm/mb86943a.h>
19#include "head.inc"
20
21
22#define __400_DBR0 0xfe000e00
23#define __400_DBR1 0xfe000e08
24#define __400_DBR2 0xfe000e10 /* not on FR401 */
25#define __400_DBR3 0xfe000e18 /* not on FR401 */
26#define __400_DAM0 0xfe000f00
27#define __400_DAM1 0xfe000f08
28#define __400_DAM2 0xfe000f10 /* not on FR401 */
29#define __400_DAM3 0xfe000f18 /* not on FR401 */
30#define __400_LGCR 0xfe000010
31#define __400_LCR 0xfe000100
32#define __400_LSBR 0xfe000c00
33
34 .section .text.init,"ax"
35 .balign 4
36
37###############################################################################
38#
39# describe the position and layout of the SDRAM controller registers
40#
41# ENTRY: EXIT:
42# GR5 - cacheline size
43# GR11 - displacement of 2nd SDRAM addr reg from GR14
44# GR12 - displacement of 3rd SDRAM addr reg from GR14
45# GR13 - displacement of 4th SDRAM addr reg from GR14
46# GR14 - address of 1st SDRAM addr reg
47# GR15 - amount to shift address by to match SDRAM addr reg
48# GR26 &__head_reference [saved]
49# GR30 LED address [saved]
50# CC0 - T if DBR0 is present
51# CC1 - T if DBR1 is present
52# CC2 - T if DBR2 is present (not FR401/FR401A)
53# CC3 - T if DBR3 is present (not FR401/FR401A)
54#
55###############################################################################
56 .globl __head_fr401_describe_sdram
57__head_fr401_describe_sdram:
58 sethi.p %hi(__400_DBR0),gr14
59 setlo %lo(__400_DBR0),gr14
60 setlos.p #__400_DBR1-__400_DBR0,gr11
61 setlos #__400_DBR2-__400_DBR0,gr12
62 setlos.p #__400_DBR3-__400_DBR0,gr13
63 setlos #32,gr5 ; cacheline size
64 setlos.p #0,gr15 ; amount to shift addr reg by
65
66 # specify which DBR regs are present
67 setlos #0x00ff,gr4
68 movgs gr4,cccr
69 movsg psr,gr3 ; check for FR401/FR401A
70 srli gr3,#25,gr3
71 subicc gr3,#0x20>>1,gr0,icc0
72 bnelr icc0,#1
73 setlos #0x000f,gr4
74 movgs gr4,cccr
75 bralr
76
77###############################################################################
78#
79# rearrange the bus controller registers
80#
81# ENTRY: EXIT:
82# GR26 &__head_reference [saved]
83# GR30 LED address revised LED address
84#
85###############################################################################
86 .globl __head_fr401_set_busctl
87__head_fr401_set_busctl:
88 sethi.p %hi(__400_LGCR),gr4
89 setlo %lo(__400_LGCR),gr4
90 sethi.p %hi(__400_LSBR),gr10
91 setlo %lo(__400_LSBR),gr10
92 sethi.p %hi(__400_LCR),gr11
93 setlo %lo(__400_LCR),gr11
94
95 # set the bus controller
96 ldi @(gr4,#0),gr5
97 ori gr5,#0xff,gr5 ; make sure all chip-selects are enabled
98 sti gr5,@(gr4,#0)
99
100 sethi.p %hi(__region_CS1),gr4
101 setlo %lo(__region_CS1),gr4
102 sethi.p %hi(__region_CS1_M),gr5
103 setlo %lo(__region_CS1_M),gr5
104 sethi.p %hi(__region_CS1_C),gr6
105 setlo %lo(__region_CS1_C),gr6
106 sti gr4,@(gr10,#1*0x08)
107 sti gr5,@(gr10,#1*0x08+0x100)
108 sti gr6,@(gr11,#1*0x08)
109 sethi.p %hi(__region_CS2),gr4
110 setlo %lo(__region_CS2),gr4
111 sethi.p %hi(__region_CS2_M),gr5
112 setlo %lo(__region_CS2_M),gr5
113 sethi.p %hi(__region_CS2_C),gr6
114 setlo %lo(__region_CS2_C),gr6
115 sti gr4,@(gr10,#2*0x08)
116 sti gr5,@(gr10,#2*0x08+0x100)
117 sti gr6,@(gr11,#2*0x08)
118 sethi.p %hi(__region_CS3),gr4
119 setlo %lo(__region_CS3),gr4
120 sethi.p %hi(__region_CS3_M),gr5
121 setlo %lo(__region_CS3_M),gr5
122 sethi.p %hi(__region_CS3_C),gr6
123 setlo %lo(__region_CS3_C),gr6
124 sti gr4,@(gr10,#3*0x08)
125 sti gr5,@(gr10,#3*0x08+0x100)
126 sti gr6,@(gr11,#3*0x08)
127 sethi.p %hi(__region_CS4),gr4
128 setlo %lo(__region_CS4),gr4
129 sethi.p %hi(__region_CS4_M),gr5
130 setlo %lo(__region_CS4_M),gr5
131 sethi.p %hi(__region_CS4_C),gr6
132 setlo %lo(__region_CS4_C),gr6
133 sti gr4,@(gr10,#4*0x08)
134 sti gr5,@(gr10,#4*0x08+0x100)
135 sti gr6,@(gr11,#4*0x08)
136 sethi.p %hi(__region_CS5),gr4
137 setlo %lo(__region_CS5),gr4
138 sethi.p %hi(__region_CS5_M),gr5
139 setlo %lo(__region_CS5_M),gr5
140 sethi.p %hi(__region_CS5_C),gr6
141 setlo %lo(__region_CS5_C),gr6
142 sti gr4,@(gr10,#5*0x08)
143 sti gr5,@(gr10,#5*0x08+0x100)
144 sti gr6,@(gr11,#5*0x08)
145 sethi.p %hi(__region_CS6),gr4
146 setlo %lo(__region_CS6),gr4
147 sethi.p %hi(__region_CS6_M),gr5
148 setlo %lo(__region_CS6_M),gr5
149 sethi.p %hi(__region_CS6_C),gr6
150 setlo %lo(__region_CS6_C),gr6
151 sti gr4,@(gr10,#6*0x08)
152 sti gr5,@(gr10,#6*0x08+0x100)
153 sti gr6,@(gr11,#6*0x08)
154 sethi.p %hi(__region_CS7),gr4
155 setlo %lo(__region_CS7),gr4
156 sethi.p %hi(__region_CS7_M),gr5
157 setlo %lo(__region_CS7_M),gr5
158 sethi.p %hi(__region_CS7_C),gr6
159 setlo %lo(__region_CS7_C),gr6
160 sti gr4,@(gr10,#7*0x08)
161 sti gr5,@(gr10,#7*0x08+0x100)
162 sti gr6,@(gr11,#7*0x08)
163 membar
164 bar
165
166 # adjust LED bank address
167 sethi.p %hi(LED_ADDR - 0x20000000 +__region_CS2),gr30
168 setlo %lo(LED_ADDR - 0x20000000 +__region_CS2),gr30
169 bralr
170
171###############################################################################
172#
173# determine the total SDRAM size
174#
175# ENTRY: EXIT:
176# GR25 - SDRAM size
177# GR26 &__head_reference [saved]
178# GR30 LED address [saved]
179#
180###############################################################################
181 .globl __head_fr401_survey_sdram
182__head_fr401_survey_sdram:
183 sethi.p %hi(__400_DAM0),gr11
184 setlo %lo(__400_DAM0),gr11
185 sethi.p %hi(__400_DBR0),gr12
186 setlo %lo(__400_DBR0),gr12
187
188 sethi.p %hi(0xfe000000),gr17 ; unused SDRAM DBR value
189 setlo %lo(0xfe000000),gr17
190 setlos #0,gr25
191
192 ldi @(gr12,#0x00),gr4 ; DAR0
193 subcc gr4,gr17,gr0,icc0
194 beq icc0,#0,__head_no_DCS0
195 ldi @(gr11,#0x00),gr6 ; DAM0: bits 31:20 match addr 31:20
196 add gr25,gr6,gr25
197 addi gr25,#1,gr25
198__head_no_DCS0:
199
200 ldi @(gr12,#0x08),gr4 ; DAR1
201 subcc gr4,gr17,gr0,icc0
202 beq icc0,#0,__head_no_DCS1
203 ldi @(gr11,#0x08),gr6 ; DAM1: bits 31:20 match addr 31:20
204 add gr25,gr6,gr25
205 addi gr25,#1,gr25
206__head_no_DCS1:
207
208 # FR401/FR401A does not have DCS2/3
209 movsg psr,gr3
210 srli gr3,#25,gr3
211 subicc gr3,#0x20>>1,gr0,icc0
212 beq icc0,#0,__head_no_DCS3
213
214 ldi @(gr12,#0x10),gr4 ; DAR2
215 subcc gr4,gr17,gr0,icc0
216 beq icc0,#0,__head_no_DCS2
217 ldi @(gr11,#0x10),gr6 ; DAM2: bits 31:20 match addr 31:20
218 add gr25,gr6,gr25
219 addi gr25,#1,gr25
220__head_no_DCS2:
221
222 ldi @(gr12,#0x18),gr4 ; DAR3
223 subcc gr4,gr17,gr0,icc0
224 beq icc0,#0,__head_no_DCS3
225 ldi @(gr11,#0x18),gr6 ; DAM3: bits 31:20 match addr 31:20
226 add gr25,gr6,gr25
227 addi gr25,#1,gr25
228__head_no_DCS3:
229 bralr
230
231###############################################################################
232#
233# set the protection map with the I/DAMPR registers
234#
235# ENTRY: EXIT:
236# GR25 SDRAM size [saved]
237# GR26 &__head_reference [saved]
238# GR30 LED address [saved]
239#
240###############################################################################
241 .globl __head_fr401_set_protection
242__head_fr401_set_protection:
243 movsg lr,gr27
244
245 # set the I/O region protection registers for FR401/3/5
246 sethi.p %hi(__region_IO),gr5
247 setlo %lo(__region_IO),gr5
248 ori gr5,#xAMPRx_SS_512Mb|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V,gr5
249 movgs gr0,iampr7
250 movgs gr5,dampr7 ; General I/O tile
251
252 # need to tile the remaining IAMPR/DAMPR registers to cover as much of the RAM as possible
253 # - start with the highest numbered registers
254 sethi.p %hi(__kernel_image_end),gr8
255 setlo %lo(__kernel_image_end),gr8
256 sethi.p %hi(32768),gr4 ; allow for a maximal allocator bitmap
257 setlo %lo(32768),gr4
258 add gr8,gr4,gr8
259 sethi.p %hi(1024*2048-1),gr4 ; round up to nearest 2MiB
260 setlo %lo(1024*2048-1),gr4
261 add.p gr8,gr4,gr8
262 not gr4,gr4
263 and gr8,gr4,gr8
264
265 sethi.p %hi(__page_offset),gr9
266 setlo %lo(__page_offset),gr9
267 add gr9,gr25,gr9
268
269 # GR8 = base of uncovered RAM
270 # GR9 = top of uncovered RAM
271
272#ifdef CONFIG_MB93093_PDK
273 sethi.p %hi(__region_CS2),gr4
274 setlo %lo(__region_CS2),gr4
275 ori gr4,#xAMPRx_SS_1Mb|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V,gr4
276 movgs gr4,dampr6
277 movgs gr0,iampr6
278#else
279 call __head_split_region
280 movgs gr4,iampr6
281 movgs gr5,dampr6
282#endif
283 call __head_split_region
284 movgs gr4,iampr5
285 movgs gr5,dampr5
286 call __head_split_region
287 movgs gr4,iampr4
288 movgs gr5,dampr4
289 call __head_split_region
290 movgs gr4,iampr3
291 movgs gr5,dampr3
292 call __head_split_region
293 movgs gr4,iampr2
294 movgs gr5,dampr2
295 call __head_split_region
296 movgs gr4,iampr1
297 movgs gr5,dampr1
298
299 # cover kernel core image with kernel-only segment
300 sethi.p %hi(__page_offset),gr8
301 setlo %lo(__page_offset),gr8
302 call __head_split_region
303
304#ifdef CONFIG_PROTECT_KERNEL
305 ori.p gr4,#xAMPRx_S_KERNEL,gr4
306 ori gr5,#xAMPRx_S_KERNEL,gr5
307#endif
308
309 movgs gr4,iampr0
310 movgs gr5,dampr0
311 jmpl @(gr27,gr0)
diff --git a/arch/frv/kernel/head-uc-fr451.S b/arch/frv/kernel/head-uc-fr451.S
new file mode 100644
index 000000000000..31cb54a6f080
--- /dev/null
+++ b/arch/frv/kernel/head-uc-fr451.S
@@ -0,0 +1,174 @@
1/* head-uc-fr451.S: FR451 uc-linux specific bits of initialisation
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/threads.h>
14#include <linux/linkage.h>
15#include <asm/ptrace.h>
16#include <asm/page.h>
17#include <asm/spr-regs.h>
18#include <asm/mb86943a.h>
19#include "head.inc"
20
21
22#define __400_DBR0 0xfe000e00
23#define __400_DBR1 0xfe000e08
24#define __400_DBR2 0xfe000e10
25#define __400_DBR3 0xfe000e18
26#define __400_DAM0 0xfe000f00
27#define __400_DAM1 0xfe000f08
28#define __400_DAM2 0xfe000f10
29#define __400_DAM3 0xfe000f18
30#define __400_LGCR 0xfe000010
31#define __400_LCR 0xfe000100
32#define __400_LSBR 0xfe000c00
33
34 .section .text.init,"ax"
35 .balign 4
36
37###############################################################################
38#
39# set the protection map with the I/DAMPR registers
40#
41# ENTRY: EXIT:
42# GR25 SDRAM size [saved]
43# GR26 &__head_reference [saved]
44# GR30 LED address [saved]
45#
46###############################################################################
47 .globl __head_fr451_set_protection
48__head_fr451_set_protection:
49 movsg lr,gr27
50
51 movgs gr0,dampr10
52 movgs gr0,damlr10
53 movgs gr0,dampr9
54 movgs gr0,damlr9
55 movgs gr0,dampr8
56 movgs gr0,damlr8
57
58 # set the I/O region protection registers for the FR451
59 sethi.p %hi(__region_IO),gr5
60 setlo %lo(__region_IO),gr5
61 sethi.p %hi(0x1fffffff),gr7
62 setlo %lo(0x1fffffff),gr7
63 ori gr5,#xAMPRx_SS_512Mb|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V,gr5
64 movgs gr5,dampr11 ; General I/O tile
65 movgs gr7,damlr11
66
67 # need to tile the remaining IAMPR/DAMPR registers to cover as much of the RAM as possible
68 # - start with the highest numbered registers
69 sethi.p %hi(__kernel_image_end),gr8
70 setlo %lo(__kernel_image_end),gr8
71 sethi.p %hi(32768),gr4 ; allow for a maximal allocator bitmap
72 setlo %lo(32768),gr4
73 add gr8,gr4,gr8
74 sethi.p %hi(1024*2048-1),gr4 ; round up to nearest 2MiB
75 setlo %lo(1024*2048-1),gr4
76 add.p gr8,gr4,gr8
77 not gr4,gr4
78 and gr8,gr4,gr8
79
80 sethi.p %hi(__page_offset),gr9
81 setlo %lo(__page_offset),gr9
82 add gr9,gr25,gr9
83
84 sethi.p %hi(0xffffc000),gr11
85 setlo %lo(0xffffc000),gr11
86
87 # GR8 = base of uncovered RAM
88 # GR9 = top of uncovered RAM
89 # GR11 = xAMLR mask
90 LEDS 0x3317
91 call __head_split_region
92 movgs gr4,iampr7
93 movgs gr6,iamlr7
94 movgs gr5,dampr7
95 movgs gr7,damlr7
96
97 LEDS 0x3316
98 call __head_split_region
99 movgs gr4,iampr6
100 movgs gr6,iamlr6
101 movgs gr5,dampr6
102 movgs gr7,damlr6
103
104 LEDS 0x3315
105 call __head_split_region
106 movgs gr4,iampr5
107 movgs gr6,iamlr5
108 movgs gr5,dampr5
109 movgs gr7,damlr5
110
111 LEDS 0x3314
112 call __head_split_region
113 movgs gr4,iampr4
114 movgs gr6,iamlr4
115 movgs gr5,dampr4
116 movgs gr7,damlr4
117
118 LEDS 0x3313
119 call __head_split_region
120 movgs gr4,iampr3
121 movgs gr6,iamlr3
122 movgs gr5,dampr3
123 movgs gr7,damlr3
124
125 LEDS 0x3312
126 call __head_split_region
127 movgs gr4,iampr2
128 movgs gr6,iamlr2
129 movgs gr5,dampr2
130 movgs gr7,damlr2
131
132 LEDS 0x3311
133 call __head_split_region
134 movgs gr4,iampr1
135 movgs gr6,iamlr1
136 movgs gr5,dampr1
137 movgs gr7,damlr1
138
139 # cover kernel core image with kernel-only segment
140 LEDS 0x3310
141 sethi.p %hi(__page_offset),gr8
142 setlo %lo(__page_offset),gr8
143 call __head_split_region
144
145#ifdef CONFIG_PROTECT_KERNEL
146 ori.p gr4,#xAMPRx_S_KERNEL,gr4
147 ori gr5,#xAMPRx_S_KERNEL,gr5
148#endif
149
150 movgs gr4,iampr0
151 movgs gr6,iamlr0
152 movgs gr5,dampr0
153 movgs gr7,damlr0
154
155 # start in TLB context 0 with no page tables
156 movgs gr0,cxnr
157 movgs gr0,ttbr
158
159 # the FR451 also has an extra trap base register
160 movsg tbr,gr4
161 movgs gr4,btbr
162
163 # turn on the timers as appropriate
164 movgs gr0,timerh
165 movgs gr0,timerl
166 movgs gr0,timerd
167 movsg hsr0,gr4
168 sethi.p %hi(HSR0_ETMI),gr5
169 setlo %lo(HSR0_ETMI),gr5
170 or gr4,gr5,gr4
171 movgs gr4,hsr0
172
173 LEDS 0x3300
174 jmpl @(gr27,gr0)
diff --git a/arch/frv/kernel/head-uc-fr555.S b/arch/frv/kernel/head-uc-fr555.S
new file mode 100644
index 000000000000..d088db2699bf
--- /dev/null
+++ b/arch/frv/kernel/head-uc-fr555.S
@@ -0,0 +1,347 @@
1/* head-uc-fr555.S: FR555 uc-linux specific bits of initialisation
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/threads.h>
14#include <linux/linkage.h>
15#include <asm/ptrace.h>
16#include <asm/page.h>
17#include <asm/spr-regs.h>
18#include <asm/mb86943a.h>
19#include "head.inc"
20
21
22#define __551_DARS0 0xfeff0100
23#define __551_DARS1 0xfeff0104
24#define __551_DARS2 0xfeff0108
25#define __551_DARS3 0xfeff010c
26#define __551_DAMK0 0xfeff0110
27#define __551_DAMK1 0xfeff0114
28#define __551_DAMK2 0xfeff0118
29#define __551_DAMK3 0xfeff011c
30#define __551_LCR 0xfeff1100
31#define __551_LSBR 0xfeff1c00
32
33 .section .text.init,"ax"
34 .balign 4
35
36###############################################################################
37#
38# describe the position and layout of the SDRAM controller registers
39#
40# ENTRY: EXIT:
41# GR5 - cacheline size
42# GR11 - displacement of 2nd SDRAM addr reg from GR14
43# GR12 - displacement of 3rd SDRAM addr reg from GR14
44# GR13 - displacement of 4th SDRAM addr reg from GR14
45# GR14 - address of 1st SDRAM addr reg
46# GR15 - amount to shift address by to match SDRAM addr reg
47# GR26 &__head_reference [saved]
48# GR30 LED address [saved]
49# CC0 - T if DARS0 is present
50# CC1 - T if DARS1 is present
51# CC2 - T if DARS2 is present
52# CC3 - T if DARS3 is present
53#
54###############################################################################
55 .globl __head_fr555_describe_sdram
56__head_fr555_describe_sdram:
57 sethi.p %hi(__551_DARS0),gr14
58 setlo %lo(__551_DARS0),gr14
59 setlos.p #__551_DARS1-__551_DARS0,gr11
60 setlos #__551_DARS2-__551_DARS0,gr12
61 setlos.p #__551_DARS3-__551_DARS0,gr13
62 setlos #64,gr5 ; cacheline size
63 setlos #20,gr15 ; amount to shift addr by
64 setlos #0x00ff,gr4
65 movgs gr4,cccr ; extant DARS/DAMK regs
66 bralr
67
68###############################################################################
69#
70# rearrange the bus controller registers
71#
72# ENTRY: EXIT:
73# GR26 &__head_reference [saved]
74# GR30 LED address revised LED address
75#
76###############################################################################
77 .globl __head_fr555_set_busctl
78__head_fr555_set_busctl:
79 LEDS 0x100f
80 sethi.p %hi(__551_LSBR),gr10
81 setlo %lo(__551_LSBR),gr10
82 sethi.p %hi(__551_LCR),gr11
83 setlo %lo(__551_LCR),gr11
84
85 # set the bus controller
86 sethi.p %hi(__region_CS1),gr4
87 setlo %lo(__region_CS1),gr4
88 sethi.p %hi(__region_CS1_M),gr5
89 setlo %lo(__region_CS1_M),gr5
90 sethi.p %hi(__region_CS1_C),gr6
91 setlo %lo(__region_CS1_C),gr6
92 sti gr4,@(gr10,#1*0x08)
93 sti gr5,@(gr10,#1*0x08+0x100)
94 sti gr6,@(gr11,#1*0x08)
95 sethi.p %hi(__region_CS2),gr4
96 setlo %lo(__region_CS2),gr4
97 sethi.p %hi(__region_CS2_M),gr5
98 setlo %lo(__region_CS2_M),gr5
99 sethi.p %hi(__region_CS2_C),gr6
100 setlo %lo(__region_CS2_C),gr6
101 sti gr4,@(gr10,#2*0x08)
102 sti gr5,@(gr10,#2*0x08+0x100)
103 sti gr6,@(gr11,#2*0x08)
104 sethi.p %hi(__region_CS3),gr4
105 setlo %lo(__region_CS3),gr4
106 sethi.p %hi(__region_CS3_M),gr5
107 setlo %lo(__region_CS3_M),gr5
108 sethi.p %hi(__region_CS3_C),gr6
109 setlo %lo(__region_CS3_C),gr6
110 sti gr4,@(gr10,#3*0x08)
111 sti gr5,@(gr10,#3*0x08+0x100)
112 sti gr6,@(gr11,#3*0x08)
113 sethi.p %hi(__region_CS4),gr4
114 setlo %lo(__region_CS4),gr4
115 sethi.p %hi(__region_CS4_M),gr5
116 setlo %lo(__region_CS4_M),gr5
117 sethi.p %hi(__region_CS4_C),gr6
118 setlo %lo(__region_CS4_C),gr6
119 sti gr4,@(gr10,#4*0x08)
120 sti gr5,@(gr10,#4*0x08+0x100)
121 sti gr6,@(gr11,#4*0x08)
122 sethi.p %hi(__region_CS5),gr4
123 setlo %lo(__region_CS5),gr4
124 sethi.p %hi(__region_CS5_M),gr5
125 setlo %lo(__region_CS5_M),gr5
126 sethi.p %hi(__region_CS5_C),gr6
127 setlo %lo(__region_CS5_C),gr6
128 sti gr4,@(gr10,#5*0x08)
129 sti gr5,@(gr10,#5*0x08+0x100)
130 sti gr6,@(gr11,#5*0x08)
131 sethi.p %hi(__region_CS6),gr4
132 setlo %lo(__region_CS6),gr4
133 sethi.p %hi(__region_CS6_M),gr5
134 setlo %lo(__region_CS6_M),gr5
135 sethi.p %hi(__region_CS6_C),gr6
136 setlo %lo(__region_CS6_C),gr6
137 sti gr4,@(gr10,#6*0x08)
138 sti gr5,@(gr10,#6*0x08+0x100)
139 sti gr6,@(gr11,#6*0x08)
140 sethi.p %hi(__region_CS7),gr4
141 setlo %lo(__region_CS7),gr4
142 sethi.p %hi(__region_CS7_M),gr5
143 setlo %lo(__region_CS7_M),gr5
144 sethi.p %hi(__region_CS7_C),gr6
145 setlo %lo(__region_CS7_C),gr6
146 sti gr4,@(gr10,#7*0x08)
147 sti gr5,@(gr10,#7*0x08+0x100)
148 sti gr6,@(gr11,#7*0x08)
149 membar
150 bar
151
152 # adjust LED bank address
153#ifdef CONFIG_MB93091_VDK
154 sethi.p %hi(LED_ADDR - 0x20000000 +__region_CS2),gr30
155 setlo %lo(LED_ADDR - 0x20000000 +__region_CS2),gr30
156#endif
157 bralr
158
159###############################################################################
160#
161# determine the total SDRAM size
162#
163# ENTRY: EXIT:
164# GR25 - SDRAM size
165# GR26 &__head_reference [saved]
166# GR30 LED address [saved]
167#
168###############################################################################
169 .globl __head_fr555_survey_sdram
170__head_fr555_survey_sdram:
171 sethi.p %hi(__551_DAMK0),gr11
172 setlo %lo(__551_DAMK0),gr11
173 sethi.p %hi(__551_DARS0),gr12
174 setlo %lo(__551_DARS0),gr12
175
176 sethi.p %hi(0xfff),gr17 ; unused SDRAM AMK value
177 setlo %lo(0xfff),gr17
178 setlos #0,gr25
179
180 ldi @(gr11,#0x00),gr6 ; DAMK0: bits 11:0 match addr 11:0
181 subcc gr6,gr17,gr0,icc0
182 beq icc0,#0,__head_no_DCS0
183 ldi @(gr12,#0x00),gr4 ; DARS0
184 add gr25,gr6,gr25
185 addi gr25,#1,gr25
186__head_no_DCS0:
187
188 ldi @(gr11,#0x04),gr6 ; DAMK1: bits 11:0 match addr 11:0
189 subcc gr6,gr17,gr0,icc0
190 beq icc0,#0,__head_no_DCS1
191 ldi @(gr12,#0x04),gr4 ; DARS1
192 add gr25,gr6,gr25
193 addi gr25,#1,gr25
194__head_no_DCS1:
195
196 ldi @(gr11,#0x8),gr6 ; DAMK2: bits 11:0 match addr 11:0
197 subcc gr6,gr17,gr0,icc0
198 beq icc0,#0,__head_no_DCS2
199 ldi @(gr12,#0x8),gr4 ; DARS2
200 add gr25,gr6,gr25
201 addi gr25,#1,gr25
202__head_no_DCS2:
203
204 ldi @(gr11,#0xc),gr6 ; DAMK3: bits 11:0 match addr 11:0
205 subcc gr6,gr17,gr0,icc0
206 beq icc0,#0,__head_no_DCS3
207 ldi @(gr12,#0xc),gr4 ; DARS3
208 add gr25,gr6,gr25
209 addi gr25,#1,gr25
210__head_no_DCS3:
211
212 slli gr25,#20,gr25 ; shift [11:0] -> [31:20]
213 bralr
214
215###############################################################################
216#
217# set the protection map with the I/DAMPR registers
218#
219# ENTRY: EXIT:
220# GR25 SDRAM size [saved]
221# GR30 LED address [saved]
222#
223###############################################################################
224 .globl __head_fr555_set_protection
225__head_fr555_set_protection:
226 movsg lr,gr27
227
228 sethi.p %hi(0xfff00000),gr11
229 setlo %lo(0xfff00000),gr11
230
231 # set the I/O region protection registers for FR555
232 sethi.p %hi(__region_IO),gr7
233 setlo %lo(__region_IO),gr7
234 ori gr7,#xAMPRx_SS_512Mb|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V,gr5
235 movgs gr0,iampr15
236 movgs gr0,iamlr15
237 movgs gr5,dampr15
238 movgs gr7,damlr15
239
240 # need to tile the remaining IAMPR/DAMPR registers to cover as much of the RAM as possible
241 # - start with the highest numbered registers
242 sethi.p %hi(__kernel_image_end),gr8
243 setlo %lo(__kernel_image_end),gr8
244 sethi.p %hi(32768),gr4 ; allow for a maximal allocator bitmap
245 setlo %lo(32768),gr4
246 add gr8,gr4,gr8
247 sethi.p %hi(1024*2048-1),gr4 ; round up to nearest 2MiB
248 setlo %lo(1024*2048-1),gr4
249 add.p gr8,gr4,gr8
250 not gr4,gr4
251 and gr8,gr4,gr8
252
253 sethi.p %hi(__page_offset),gr9
254 setlo %lo(__page_offset),gr9
255 add gr9,gr25,gr9
256
257 # GR8 = base of uncovered RAM
258 # GR9 = top of uncovered RAM
259 # GR11 - mask for DAMLR/IAMLR regs
260 #
261 call __head_split_region
262 movgs gr4,iampr14
263 movgs gr6,iamlr14
264 movgs gr5,dampr14
265 movgs gr7,damlr14
266 call __head_split_region
267 movgs gr4,iampr13
268 movgs gr6,iamlr13
269 movgs gr5,dampr13
270 movgs gr7,damlr13
271 call __head_split_region
272 movgs gr4,iampr12
273 movgs gr6,iamlr12
274 movgs gr5,dampr12
275 movgs gr7,damlr12
276 call __head_split_region
277 movgs gr4,iampr11
278 movgs gr6,iamlr11
279 movgs gr5,dampr11
280 movgs gr7,damlr11
281 call __head_split_region
282 movgs gr4,iampr10
283 movgs gr6,iamlr10
284 movgs gr5,dampr10
285 movgs gr7,damlr10
286 call __head_split_region
287 movgs gr4,iampr9
288 movgs gr6,iamlr9
289 movgs gr5,dampr9
290 movgs gr7,damlr9
291 call __head_split_region
292 movgs gr4,iampr8
293 movgs gr6,iamlr8
294 movgs gr5,dampr8
295 movgs gr7,damlr8
296
297 call __head_split_region
298 movgs gr4,iampr7
299 movgs gr6,iamlr7
300 movgs gr5,dampr7
301 movgs gr7,damlr7
302 call __head_split_region
303 movgs gr4,iampr6
304 movgs gr6,iamlr6
305 movgs gr5,dampr6
306 movgs gr7,damlr6
307 call __head_split_region
308 movgs gr4,iampr5
309 movgs gr6,iamlr5
310 movgs gr5,dampr5
311 movgs gr7,damlr5
312 call __head_split_region
313 movgs gr4,iampr4
314 movgs gr6,iamlr4
315 movgs gr5,dampr4
316 movgs gr7,damlr4
317 call __head_split_region
318 movgs gr4,iampr3
319 movgs gr6,iamlr3
320 movgs gr5,dampr3
321 movgs gr7,damlr3
322 call __head_split_region
323 movgs gr4,iampr2
324 movgs gr6,iamlr2
325 movgs gr5,dampr2
326 movgs gr7,damlr2
327 call __head_split_region
328 movgs gr4,iampr1
329 movgs gr6,iamlr1
330 movgs gr5,dampr1
331 movgs gr7,damlr1
332
333 # cover kernel core image with kernel-only segment
334 sethi.p %hi(__page_offset),gr8
335 setlo %lo(__page_offset),gr8
336 call __head_split_region
337
338#ifdef CONFIG_PROTECT_KERNEL
339 ori.p gr4,#xAMPRx_S_KERNEL,gr4
340 ori gr5,#xAMPRx_S_KERNEL,gr5
341#endif
342
343 movgs gr4,iampr0
344 movgs gr6,iamlr0
345 movgs gr5,dampr0
346 movgs gr7,damlr0
347 jmpl @(gr27,gr0)
diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S
new file mode 100644
index 000000000000..c73b4fe9f6ca
--- /dev/null
+++ b/arch/frv/kernel/head.S
@@ -0,0 +1,639 @@
1/* head.S: kernel entry point for FR-V kernel
2 *
3 * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/threads.h>
14#include <linux/linkage.h>
15#include <asm/ptrace.h>
16#include <asm/page.h>
17#include <asm/spr-regs.h>
18#include <asm/mb86943a.h>
19#include <asm/cache.h>
20#include "head.inc"
21
22###############################################################################
23#
24# void _boot(unsigned long magic, char *command_line) __attribute__((noreturn))
25#
26# - if magic is 0xdead1eaf, then command_line is assumed to point to the kernel
27# command line string
28#
29###############################################################################
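
The contract described above is the whole interface a boot loader needs: per the FRV calling convention the magic number arrives in GR8 and the command-line pointer in GR9, which is how the code below picks them up before relocating the image. A hedged illustration of a caller follows; the prototype comes from the comment, everything else is invented for the example.

/* Illustrative caller only; "bootloader_cmdline" and the wrapper are made up. */
extern void _boot(unsigned long magic, char *command_line)
	__attribute__((noreturn));

static char bootloader_cmdline[] = "console=ttyS0,115200 root=/dev/mtdblock2";

void hand_over_to_kernel(void)
{
	/* magic 0xdead1eaf tells head.S that a command line is being passed */
	_boot(0xdead1eaf, bootloader_cmdline);
}
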
30 .section .text.head,"ax"
31 .balign 4
32
33 .globl _boot, __head_reference
34 .type _boot,@function
35_boot:
36__head_reference:
37 sethi.p %hi(LED_ADDR),gr30
38 setlo %lo(LED_ADDR),gr30
39
40 LEDS 0x0000
41
42 # calculate reference address for PC-relative stuff
43 call 0f
440: movsg lr,gr26
45 addi gr26,#__head_reference-0b,gr26
46
47 # invalidate and disable both of the caches and turn off the memory access checking
48 dcef @(gr0,gr0),1
49 bar
50
51 sethi.p %hi(~(HSR0_ICE|HSR0_DCE|HSR0_CBM|HSR0_EIMMU|HSR0_EDMMU)),gr4
52 setlo %lo(~(HSR0_ICE|HSR0_DCE|HSR0_CBM|HSR0_EIMMU|HSR0_EDMMU)),gr4
53 movsg hsr0,gr5
54 and gr4,gr5,gr5
55 movgs gr5,hsr0
56 movsg hsr0,gr5
57
58 LEDS 0x0001
59
60 icei @(gr0,gr0),1
61 dcei @(gr0,gr0),1
62 bar
63
64 # turn the instruction cache back on
65 sethi.p %hi(HSR0_ICE),gr4
66 setlo %lo(HSR0_ICE),gr4
67 movsg hsr0,gr5
68 or gr4,gr5,gr5
69 movgs gr5,hsr0
70 movsg hsr0,gr5
71
72 bar
73
74 LEDS 0x0002
75
76 # retrieve the parameters (including command line) before we overwrite them
77 sethi.p %hi(0xdead1eaf),gr7
78 setlo %lo(0xdead1eaf),gr7
79 subcc gr7,gr8,gr0,icc0
80 bne icc0,#0,__head_no_parameters
81
82 sethi.p %hi(redboot_command_line-1),gr6
83 setlo %lo(redboot_command_line-1),gr6
84 sethi.p %hi(__head_reference),gr4
85 setlo %lo(__head_reference),gr4
86 sub gr6,gr4,gr6
87 add.p gr6,gr26,gr6
88 subi gr9,#1,gr9
89 setlos.p #511,gr4
90 setlos #1,gr5
91
92__head_copy_cmdline:
93 ldubu.p @(gr9,gr5),gr16
94 subicc gr4,#1,gr4,icc0
95 stbu.p gr16,@(gr6,gr5)
96 subicc gr16,#0,gr0,icc1
97 bls icc0,#0,__head_end_cmdline
98 bne icc1,#1,__head_copy_cmdline
99__head_end_cmdline:
100 stbu gr0,@(gr6,gr5)
101__head_no_parameters:
102
103###############################################################################
104#
105# we need to relocate the SDRAM to 0x00000000 (linux) or 0xC0000000 (uClinux)
106# - note that we're going to have to run entirely out of the icache whilst
107# fiddling with the SDRAM controller registers
108#
109###############################################################################
110#ifdef CONFIG_MMU
111 call __head_fr451_describe_sdram
112
113#else
114 movsg psr,gr5
115 srli gr5,#28,gr5
116 subicc gr5,#3,gr0,icc0
117 beq icc0,#0,__head_fr551_sdram
118
119 call __head_fr401_describe_sdram
120 bra __head_do_sdram
121
122__head_fr551_sdram:
123 call __head_fr555_describe_sdram
124 LEDS 0x000d
125
126__head_do_sdram:
127#endif
128
129 # preload the registers with invalid values in case any DBR/DARS are marked not present
130 sethi.p %hi(0xfe000000),gr17 ; unused SDRAM DBR value
131 setlo %lo(0xfe000000),gr17
132 or.p gr17,gr0,gr20
133 or gr17,gr0,gr21
134 or.p gr17,gr0,gr22
135 or gr17,gr0,gr23
136
137 # consult the SDRAM controller CS address registers
138 cld @(gr14,gr0 ),gr20, cc0,#1 ; DBR0 / DARS0
139 cld @(gr14,gr11),gr21, cc1,#1 ; DBR1 / DARS1
140 cld @(gr14,gr12),gr22, cc2,#1 ; DBR2 / DARS2
141 cld.p @(gr14,gr13),gr23, cc3,#1 ; DBR3 / DARS3
142
143 sll gr20,gr15,gr20 ; shift values up for FR551
144 sll gr21,gr15,gr21
145 sll gr22,gr15,gr22
146 sll gr23,gr15,gr23
147
148 LEDS 0x0003
149
150 # assume the lowest valid CS line to be the SDRAM base and get its address
151 subcc gr20,gr17,gr0,icc0
152 subcc.p gr21,gr17,gr0,icc1
153 subcc gr22,gr17,gr0,icc2
154 subcc.p gr23,gr17,gr0,icc3
155 ckne icc0,cc4 ; T if DBR0 != 0xfe000000
156 ckne icc1,cc5
157 ckne icc2,cc6
158 ckne icc3,cc7
159 cor gr23,gr0,gr24, cc7,#1 ; GR24 = SDRAM base
160 cor gr22,gr0,gr24, cc6,#1
161 cor gr21,gr0,gr24, cc5,#1
162 cor gr20,gr0,gr24, cc4,#1
163
164 # calculate the displacement required to get the SDRAM into the right place in memory
165 sethi.p %hi(__sdram_base),gr16
166 setlo %lo(__sdram_base),gr16
167 sub gr16,gr24,gr16 ; delta = __sdram_base - DBRx
168
169 # calculate the new values to go in the controller regs
170 cadd.p gr20,gr16,gr20, cc4,#1 ; DCS#0 (new) = DCS#0 (old) + delta
171 cadd gr21,gr16,gr21, cc5,#1
172 cadd.p gr22,gr16,gr22, cc6,#1
173 cadd gr23,gr16,gr23, cc7,#1
174
175 srl gr20,gr15,gr20 ; shift values down for FR551
176 srl gr21,gr15,gr21
177 srl gr22,gr15,gr22
178 srl gr23,gr15,gr23
179
180 # work out the address at which the reg updater resides and lock it into icache
181 # also work out the address the updater will jump to when finished
182 sethi.p %hi(__head_move_sdram-__head_reference),gr18
183 setlo %lo(__head_move_sdram-__head_reference),gr18
184 sethi.p %hi(__head_sdram_moved-__head_reference),gr19
185 setlo %lo(__head_sdram_moved-__head_reference),gr19
186 add.p gr18,gr26,gr18
187 add gr19,gr26,gr19
188 add.p gr19,gr16,gr19 ; moved = addr + (__sdram_base - DBRx)
189 add gr18,gr5,gr4 ; two cachelines probably required
190
191 icpl gr18,gr0,#1 ; load and lock the cachelines
192 icpl gr4,gr0,#1
193 LEDS 0x0004
194 membar
195 bar
196 jmpl @(gr18,gr0)
197
198 .balign L1_CACHE_BYTES
199__head_move_sdram:
200 cst gr20,@(gr14,gr0 ), cc4,#1
201 cst gr21,@(gr14,gr11), cc5,#1
202 cst gr22,@(gr14,gr12), cc6,#1
203 cst gr23,@(gr14,gr13), cc7,#1
204 cld @(gr14,gr0 ),gr20, cc4,#1
205 cld @(gr14,gr11),gr21, cc5,#1
206 cld @(gr14,gr12),gr22, cc6,#1
207 cld @(gr14,gr13),gr23, cc7,#1
208 bar
209 membar
210 jmpl @(gr19,gr0)
211
212 .balign L1_CACHE_BYTES
213__head_sdram_moved:
214 icul gr18
215 add gr18,gr5,gr4
216 icul gr4
217 icei @(gr0,gr0),1
218 dcei @(gr0,gr0),1
219
220 LEDS 0x0005
221
222 # recalculate reference address
223 call 0f
2240: movsg lr,gr26
225 addi gr26,#__head_reference-0b,gr26
226
227
228###############################################################################
229#
230# move the kernel image down to the bottom of the SDRAM
231#
232###############################################################################
233 sethi.p %hi(__kernel_image_size_no_bss+15),gr4
234 setlo %lo(__kernel_image_size_no_bss+15),gr4
235 srli.p gr4,#4,gr4 ; count
236 or gr26,gr26,gr16 ; source
237
238 sethi.p %hi(__sdram_base),gr17 ; destination
239 setlo %lo(__sdram_base),gr17
240
241 setlos #8,gr5
242 sub.p gr16,gr5,gr16 ; adjust src for LDDU
243 sub gr17,gr5,gr17 ; adjust dst for LDDU
244
245 sethi.p %hi(__head_move_kernel-__head_reference),gr18
246 setlo %lo(__head_move_kernel-__head_reference),gr18
247 sethi.p %hi(__head_kernel_moved-__head_reference+__sdram_base),gr19
248 setlo %lo(__head_kernel_moved-__head_reference+__sdram_base),gr19
249 add gr18,gr26,gr18
250 icpl gr18,gr0,#1
251 jmpl @(gr18,gr0)
252
253 .balign 32
254__head_move_kernel:
255 lddu @(gr16,gr5),gr10
256 lddu @(gr16,gr5),gr12
257 stdu.p gr10,@(gr17,gr5)
258 subicc gr4,#1,gr4,icc0
259 stdu.p gr12,@(gr17,gr5)
260 bhi icc0,#0,__head_move_kernel
261 jmpl @(gr19,gr0)
262
263 .balign 32
264__head_kernel_moved:
265 icul gr18
266 icei @(gr0,gr0),1
267 dcei @(gr0,gr0),1
268
269 LEDS 0x0006
270
271 # recalculate reference address
272 call 0f
2730: movsg lr,gr26
274 addi gr26,#__head_reference-0b,gr26
275
276
277###############################################################################
278#
279# rearrange the iomem map and set the protection registers
280#
281###############################################################################
282
283#ifdef CONFIG_MMU
284 LEDS 0x3301
285 call __head_fr451_set_busctl
286 LEDS 0x3303
287 call __head_fr451_survey_sdram
288 LEDS 0x3305
289 call __head_fr451_set_protection
290
291#else
292 movsg psr,gr5
293 srli gr5,#PSR_IMPLE_SHIFT,gr5
294 subicc gr5,#PSR_IMPLE_FR551,gr0,icc0
295 beq icc0,#0,__head_fr555_memmap
296 subicc gr5,#PSR_IMPLE_FR451,gr0,icc0
297 beq icc0,#0,__head_fr451_memmap
298
299 LEDS 0x3101
300 call __head_fr401_set_busctl
301 LEDS 0x3103
302 call __head_fr401_survey_sdram
303 LEDS 0x3105
304 call __head_fr401_set_protection
305 bra __head_done_memmap
306
307__head_fr451_memmap:
308 LEDS 0x3301
309 call __head_fr401_set_busctl
310 LEDS 0x3303
311 call __head_fr401_survey_sdram
312 LEDS 0x3305
313 call __head_fr451_set_protection
314 bra __head_done_memmap
315
316__head_fr555_memmap:
317 LEDS 0x3501
318 call __head_fr555_set_busctl
319 LEDS 0x3503
320 call __head_fr555_survey_sdram
321 LEDS 0x3505
322 call __head_fr555_set_protection
323
324__head_done_memmap:
325#endif
326 LEDS 0x0007
327
328###############################################################################
329#
330# turn the data cache and MMU on
331# - for the FR451 this'll mean that the window through which the kernel is
332# viewed will change
333#
334###############################################################################
335
336#ifdef CONFIG_MMU
337#define MMUMODE HSR0_EIMMU|HSR0_EDMMU|HSR0_EXMMU|HSR0_EDAT|HSR0_XEDAT
338#else
339#define MMUMODE HSR0_EIMMU|HSR0_EDMMU
340#endif
341
342 movsg hsr0,gr5
343
344 sethi.p %hi(MMUMODE),gr4
345 setlo %lo(MMUMODE),gr4
346 or gr4,gr5,gr5
347
348#if defined(CONFIG_FRV_DEFL_CACHE_WTHRU)
349 sethi.p %hi(HSR0_DCE|HSR0_CBM_WRITE_THRU),gr4
350 setlo %lo(HSR0_DCE|HSR0_CBM_WRITE_THRU),gr4
351#elif defined(CONFIG_FRV_DEFL_CACHE_WBACK)
352 sethi.p %hi(HSR0_DCE|HSR0_CBM_COPY_BACK),gr4
353 setlo %lo(HSR0_DCE|HSR0_CBM_COPY_BACK),gr4
354#elif defined(CONFIG_FRV_DEFL_CACHE_WBEHIND)
355 sethi.p %hi(HSR0_DCE|HSR0_CBM_COPY_BACK),gr4
356 setlo %lo(HSR0_DCE|HSR0_CBM_COPY_BACK),gr4
357
358 movsg psr,gr6
359 srli gr6,#24,gr6
360 cmpi gr6,#0x50,icc0 // FR451
361 beq icc0,#0,0f
362 cmpi gr6,#0x40,icc0 // FR405
363 bne icc0,#0,1f
3640:
365 # turn off write-allocate
366 sethi.p %hi(HSR0_NWA),gr6
367 setlo %lo(HSR0_NWA),gr6
368 or gr4,gr6,gr4
3691:
370
371#else
372#error No default cache configuration set
373#endif
374
375 or gr4,gr5,gr5
376 movgs gr5,hsr0
377 bar
378
379 LEDS 0x0008
380
381 sethi.p %hi(__head_mmu_enabled),gr19
382 setlo %lo(__head_mmu_enabled),gr19
383 jmpl @(gr19,gr0)
384
385__head_mmu_enabled:
386 icei @(gr0,gr0),#1
387 dcei @(gr0,gr0),#1
388
389 LEDS 0x0009
390
391#ifdef CONFIG_MMU
392 call __head_fr451_finalise_protection
393#endif
394
395 LEDS 0x000a
396
397###############################################################################
398#
399# set up the runtime environment
400#
401###############################################################################
402
403 # clear the BSS area
404 sethi.p %hi(__bss_start),gr4
405 setlo %lo(__bss_start),gr4
406 sethi.p %hi(_end),gr5
407 setlo %lo(_end),gr5
408 or.p gr0,gr0,gr18
409 or gr0,gr0,gr19
410
4110:
412 stdi gr18,@(gr4,#0)
413 stdi gr18,@(gr4,#8)
414 stdi gr18,@(gr4,#16)
415 stdi.p gr18,@(gr4,#24)
416 addi gr4,#24,gr4
417 subcc gr5,gr4,gr0,icc0
418 bhi icc0,#2,0b
419
420 LEDS 0x000b
421
422 # save the SDRAM details
423 sethi.p %hi(__sdram_old_base),gr4
424 setlo %lo(__sdram_old_base),gr4
425 st gr24,@(gr4,gr0)
426
427 sethi.p %hi(__sdram_base),gr5
428 setlo %lo(__sdram_base),gr5
429 sethi.p %hi(memory_start),gr4
430 setlo %lo(memory_start),gr4
431 st gr5,@(gr4,gr0)
432
433 add gr25,gr5,gr25
434 sethi.p %hi(memory_end),gr4
435 setlo %lo(memory_end),gr4
436 st gr25,@(gr4,gr0)
437
438 # point the TBR at the kernel trap table
439 sethi.p %hi(__entry_kerneltrap_table),gr4
440 setlo %lo(__entry_kerneltrap_table),gr4
441 movgs gr4,tbr
442
443 # set up the exception frame for init
444 sethi.p %hi(__kernel_frame0_ptr),gr28
445 setlo %lo(__kernel_frame0_ptr),gr28
446 sethi.p %hi(_gp),gr16
447 setlo %lo(_gp),gr16
448 sethi.p %hi(__entry_usertrap_table),gr4
449 setlo %lo(__entry_usertrap_table),gr4
450
451 lddi @(gr28,#0),gr28 ; load __frame & current
452 ldi.p @(gr29,#4),gr15 ; set current_thread
453
454 or gr0,gr0,fp
455 or gr28,gr0,sp
456
457 sti.p gr4,@(gr28,REG_TBR)
458 setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
459 movgs gr5,isr
460
461 # turn on and off various CPU services
462 movsg psr,gr22
463 sethi.p %hi(#PSR_EM|PSR_EF|PSR_CM|PSR_NEM),gr4
464 setlo %lo(#PSR_EM|PSR_EF|PSR_CM|PSR_NEM),gr4
465 or gr22,gr4,gr22
466 movgs gr22,psr
467
468 andi gr22,#~(PSR_PIL|PSR_PS|PSR_S),gr22
469 ori gr22,#PSR_ET,gr22
470 sti gr22,@(gr28,REG_PSR)
471
472
473###############################################################################
474#
475# set up the registers and jump into the kernel
476#
477###############################################################################
478
479 LEDS 0x000c
480
481 # initialise the processor and the peripherals
482 #call SYMBOL_NAME(processor_init)
483 #call SYMBOL_NAME(unit_init)
484 #LEDS 0x0aff
485
486 sethi.p #0xe5e5,gr3
487 setlo #0xe5e5,gr3
488 or.p gr3,gr0,gr4
489 or gr3,gr0,gr5
490 or.p gr3,gr0,gr6
491 or gr3,gr0,gr7
492 or.p gr3,gr0,gr8
493 or gr3,gr0,gr9
494 or.p gr3,gr0,gr10
495 or gr3,gr0,gr11
496 or.p gr3,gr0,gr12
497 or gr3,gr0,gr13
498 or.p gr3,gr0,gr14
499 or gr3,gr0,gr17
500 or.p gr3,gr0,gr18
501 or gr3,gr0,gr19
502 or.p gr3,gr0,gr20
503 or gr3,gr0,gr21
504 or.p gr3,gr0,gr23
505 or gr3,gr0,gr24
506 or.p gr3,gr0,gr25
507 or gr3,gr0,gr26
508 or.p gr3,gr0,gr27
509# or gr3,gr0,gr30
510 or gr3,gr0,gr31
511 movgs gr0,lr
512 movgs gr0,lcr
513 movgs gr0,ccr
514 movgs gr0,cccr
515
516#ifdef CONFIG_MMU
517 movgs gr3,scr2
518 movgs gr3,scr3
519#endif
520
521 LEDS 0x0fff
522
523 # invoke the debugging stub if present
524 # - arch/frv/kernel/debug-stub.c will shift control directly to init/main.c
525 # (it will not return here)
526 break
527 .globl __debug_stub_init_break
528__debug_stub_init_break:
529
530 # however, if you need to use an ICE, and don't care about using any userspace
531 # debugging tools (such as the ptrace syscall), you can just step over the break
532 # above and get to the kernel this way
533 # look at arch/frv/kernel/debug-stub.c: debug_stub_init() to see what you've missed
534 call start_kernel
535
536 .globl __head_end
537__head_end:
538 .size _boot, .-_boot
539
540 # provide a point for GDB to place a break
541 .section .text.start,"ax"
542 .globl _start
543 .balign 4
544_start:
545 call _boot
546
547 .previous
548###############################################################################
549#
550# split a tile off of the region defined by GR8-GR9
551#
552# ENTRY: EXIT:
553# GR4 - IAMPR value representing tile
554# GR5 - DAMPR value representing tile
555# GR6 - IAMLR value representing tile
556# GR7 - DAMLR value representing tile
557# GR8 region base pointer [saved]
558# GR9 region top pointer updated to exclude new tile
559# GR11 xAMLR mask [saved]
560# GR25 SDRAM size [saved]
561# GR30 LED address [saved]
562#
563# - GR8 and GR9 should be rounded up/down to the nearest megabyte before calling
564#
565###############################################################################
566 .globl __head_split_region
567 .type __head_split_region,@function
568__head_split_region:
569 subcc.p gr9,gr8,gr4,icc0
570 setlos #31,gr5
571 scan.p gr4,gr0,gr6
572 beq icc0,#0,__head_region_empty
573 sub.p gr5,gr6,gr6 ; bit number of highest set bit (1MB=>20)
574 setlos #1,gr4
575 sll.p gr4,gr6,gr4 ; size of region (1 << bitno)
576 subi gr6,#17,gr6 ; 1MB => 0x03
577 slli.p gr6,#4,gr6 ; 1MB => 0x30
578 sub gr9,gr4,gr9 ; move uncovered top down
579
580 or gr9,gr6,gr4
581 ori gr4,#xAMPRx_S_USER|xAMPRx_C_CACHED|xAMPRx_V,gr4
582 or.p gr4,gr0,gr5
583
584 and gr4,gr11,gr6
585 and.p gr5,gr11,gr7
586 bralr
587
588__head_region_empty:
589 or.p gr0,gr0,gr4
590 or gr0,gr0,gr5
591 or.p gr0,gr0,gr6
592 or gr0,gr0,gr7
593 bralr
594 .size __head_split_region, .-__head_split_region
595
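
__head_split_region above carves the largest power-of-two tile that still fits off the top of the uncovered region [GR8, GR9) and encodes it as an xAMPR value: the tile size becomes the SS field (1MiB maps to 0x30, each doubling adds 0x10), the lowered region top becomes the tile base address, and the xAMLR value is the same word masked with GR11. A C sketch of that calculation follows, assuming a 32-bit target; the flag constant and function name are inventions for the example, and __builtin_clz stands in for the FRV 'scan' instruction used above.

#include <stdint.h>

#define xAMPRx_FLAGS	0x000000c1U	/* stand-in for xAMPRx_S_USER|C_CACHED|V */

struct tile {
	uint32_t ampr;	/* value written to both IAMPRx and DAMPRx */
	uint32_t amlr;	/* value for IAMLRx/DAMLRx (FR451/FR555 only) */
};

/* Carve the largest power-of-two tile off the top of [*base, *top). */
struct tile split_region(const uint32_t *base, uint32_t *top, uint32_t amlr_mask)
{
	struct tile t = { 0, 0 };
	uint32_t bit, size;

	if (*top == *base)
		return t;			/* region exhausted */

	/* bit number of the highest set bit of the remaining size */
	bit = 31 - __builtin_clz(*top - *base);
	size = 1U << bit;

	*top -= size;				/* new top excludes the tile */

	/* SS size field: bit 20 (1MiB) encodes as 0x30, i.e. (bit - 17) << 4 */
	t.ampr = *top | ((bit - 17) << 4) | xAMPRx_FLAGS;
	t.amlr = t.ampr & amlr_mask;
	return t;
}
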
596###############################################################################
597#
598# write the 32-bit hex number in GR8 to ttyS0
599#
600###############################################################################
601#if 0
602 .globl __head_write_to_ttyS0
603 .type __head_write_to_ttyS0,@function
604__head_write_to_ttyS0:
605 sethi.p %hi(0xfeff9c00),gr31
606 setlo %lo(0xfeff9c00),gr31
607 setlos #8,gr20
608
6090: ldubi @(gr31,#5*8),gr21
610 andi gr21,#0x60,gr21
611 subicc gr21,#0x60,gr21,icc0
612 bne icc0,#0,0b
613
6141: srli gr8,#28,gr21
615 slli gr8,#4,gr8
616
617 addi gr21,#'0',gr21
618 subicc gr21,#'9',gr0,icc0
619 bls icc0,#2,2f
620 addi gr21,#'A'-'0'-10,gr21
6212:
622 stbi gr21,@(gr31,#0*8)
623 subicc gr20,#1,gr20,icc0
624 bhi icc0,#2,1b
625
626 setlos #'\r',gr21
627 stbi gr21,@(gr31,#0*8)
628
629 setlos #'\n',gr21
630 stbi gr21,@(gr31,#0*8)
631
6323: ldubi @(gr31,#5*8),gr21
633 andi gr21,#0x60,gr21
634 subicc gr21,#0x60,gr21,icc0
635 bne icc0,#0,3b
636 bralr
637
638 .size __head_write_to_ttyS0, .-__head_write_to_ttyS0
639#endif
diff --git a/arch/frv/kernel/head.inc b/arch/frv/kernel/head.inc
new file mode 100644
index 000000000000..d424cd2eb213
--- /dev/null
+++ b/arch/frv/kernel/head.inc
@@ -0,0 +1,50 @@
1/* head.inc: head common definitions -*- asm -*-
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12
13#if defined(CONFIG_MB93090_MB00)
14#define LED_ADDR (0x21200000+4)
15
16.macro LEDS val
17 sethi.p %hi(0xFFC00030),gr3
18 setlo %lo(0xFFC00030),gr3
19 lduh @(gr3,gr0),gr3
20 andicc gr3,#0x100,gr0,icc0
21 bne icc0,0,999f
22
23 setlos #~\val,gr3
24 st gr3,@(gr30,gr0)
25 membar
26 dcf @(gr30,gr0)
27 999:
28.endm
29
30#elif defined(CONFIG_MB93093_PDK)
31#define LED_ADDR (0x20000023)
32
33.macro LEDS val
34 setlos #\val,gr3
35 stb gr3,@(gr30,gr0)
36 membar
37.endm
38
39#else
40#define LED_ADDR 0
41
42.macro LEDS val
43.endm
44#endif
45
46#ifdef CONFIG_MMU
47__sdram_base = 0x00000000 /* base address to which SDRAM relocated */
48#else
49__sdram_base = 0xc0000000 /* base address to which SDRAM relocated */
50#endif
diff --git a/arch/frv/kernel/init_task.c b/arch/frv/kernel/init_task.c
new file mode 100644
index 000000000000..22993932b3fc
--- /dev/null
+++ b/arch/frv/kernel/init_task.c
@@ -0,0 +1,39 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init.h>
5#include <linux/init_task.h>
6#include <linux/fs.h>
7#include <linux/mqueue.h>
8
9#include <asm/uaccess.h>
10#include <asm/pgtable.h>
11
12
13static struct fs_struct init_fs = INIT_FS;
14static struct files_struct init_files = INIT_FILES;
15static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
16static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17struct mm_struct init_mm = INIT_MM(init_mm);
18
19EXPORT_SYMBOL(init_mm);
20
21/*
22 * Initial thread structure.
23 *
24 * We need to make sure that this is THREAD_SIZE aligned due to the
25 * way process stacks are handled. This is done by having a special
26 * "init_task" linker map entry..
27 */
28union thread_union init_thread_union
29 __attribute__((__section__(".data.init_task"))) =
30 { INIT_THREAD_INFO(init_task) };
31
32/*
33 * Initial task structure.
34 *
35 * All other task structs will be allocated on slabs in fork.c
36 */
37struct task_struct init_task = INIT_TASK(init_task);
38
39EXPORT_SYMBOL(init_task);
diff --git a/arch/frv/kernel/irq-mb93091.c b/arch/frv/kernel/irq-mb93091.c
new file mode 100644
index 000000000000..9778e0ff7c1c
--- /dev/null
+++ b/arch/frv/kernel/irq-mb93091.c
@@ -0,0 +1,116 @@
1/* irq-mb93091.c: MB93091 FPGA interrupt handling
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/ptrace.h>
14#include <linux/errno.h>
15#include <linux/signal.h>
16#include <linux/sched.h>
17#include <linux/ioport.h>
18#include <linux/interrupt.h>
19#include <linux/init.h>
20#include <linux/irq.h>
21
22#include <asm/io.h>
23#include <asm/system.h>
24#include <asm/bitops.h>
25#include <asm/delay.h>
26#include <asm/irq.h>
27#include <asm/irc-regs.h>
28#include <asm/irq-routing.h>
29
30#define __reg16(ADDR) (*(volatile unsigned short *)(ADDR))
31
32#define __get_IMR() ({ __reg16(0xffc00004); })
33#define __set_IMR(M) do { __reg16(0xffc00004) = (M); wmb(); } while(0)
34#define __get_IFR() ({ __reg16(0xffc0000c); })
35#define __clr_IFR(M) do { __reg16(0xffc0000c) = ~(M); wmb(); } while(0)
36
37static void frv_fpga_doirq(struct irq_source *source);
38static void frv_fpga_control(struct irq_group *group, int irq, int on);
39
40/*****************************************************************************/
41/*
42 * FPGA IRQ multiplexor
43 */
44static struct irq_source frv_fpga[4] = {
45#define __FPGA(X, M) \
46 [X] = { \
47 .muxname = "fpga."#X, \
48 .irqmask = M, \
49 .doirq = frv_fpga_doirq, \
50 }
51
52 __FPGA(0, 0x0028),
53 __FPGA(1, 0x0050),
54 __FPGA(2, 0x1c00),
55 __FPGA(3, 0x6386),
56};
57
58static struct irq_group frv_fpga_irqs = {
59 .first_irq = IRQ_BASE_FPGA,
60 .control = frv_fpga_control,
61 .sources = {
62 [ 1] = &frv_fpga[3],
63 [ 2] = &frv_fpga[3],
64 [ 3] = &frv_fpga[0],
65 [ 4] = &frv_fpga[1],
66 [ 5] = &frv_fpga[0],
67 [ 6] = &frv_fpga[1],
68 [ 7] = &frv_fpga[3],
69 [ 8] = &frv_fpga[3],
70 [ 9] = &frv_fpga[3],
71 [10] = &frv_fpga[2],
72 [11] = &frv_fpga[2],
73 [12] = &frv_fpga[2],
74 [13] = &frv_fpga[3],
75 [14] = &frv_fpga[3],
76 },
77};
78
79
80static void frv_fpga_control(struct irq_group *group, int index, int on)
81{
82 uint16_t imr = __get_IMR();
83
84 if (on)
85 imr &= ~(1 << index);
86 else
87 imr |= 1 << index;
88
89 __set_IMR(imr);
90}
91
92static void frv_fpga_doirq(struct irq_source *source)
93{
94 uint16_t mask, imr;
95
96 imr = __get_IMR();
97 mask = source->irqmask & ~imr & __get_IFR();
98 if (mask) {
99 __set_IMR(imr | mask);
100 __clr_IFR(mask);
101 distribute_irqs(&frv_fpga_irqs, mask);
102 __set_IMR(imr);
103 }
104}
105
106void __init fpga_init(void)
107{
108 __set_IMR(0x7ffe);
109 __clr_IFR(0x0000);
110
111 frv_irq_route_external(&frv_fpga[0], IRQ_CPU_EXTERNAL0);
112 frv_irq_route_external(&frv_fpga[1], IRQ_CPU_EXTERNAL1);
113 frv_irq_route_external(&frv_fpga[2], IRQ_CPU_EXTERNAL2);
114 frv_irq_route_external(&frv_fpga[3], IRQ_CPU_EXTERNAL3);
115 frv_irq_set_group(&frv_fpga_irqs);
116}
diff --git a/arch/frv/kernel/irq-mb93093.c b/arch/frv/kernel/irq-mb93093.c
new file mode 100644
index 000000000000..21ca2b298247
--- /dev/null
+++ b/arch/frv/kernel/irq-mb93093.c
@@ -0,0 +1,99 @@
1/* irq-mb93093.c: MB93093 FPGA interrupt handling
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/ptrace.h>
14#include <linux/errno.h>
15#include <linux/signal.h>
16#include <linux/sched.h>
17#include <linux/ioport.h>
18#include <linux/interrupt.h>
19#include <linux/init.h>
20#include <linux/irq.h>
21
22#include <asm/io.h>
23#include <asm/system.h>
24#include <asm/bitops.h>
25#include <asm/delay.h>
26#include <asm/irq.h>
27#include <asm/irc-regs.h>
28#include <asm/irq-routing.h>
29
30#define __reg16(ADDR) (*(volatile unsigned short *)(__region_CS2 + (ADDR)))
31
32#define __get_IMR() ({ __reg16(0x0a); })
33#define __set_IMR(M) do { __reg16(0x0a) = (M); wmb(); } while(0)
34#define __get_IFR() ({ __reg16(0x02); })
35#define __clr_IFR(M) do { __reg16(0x02) = ~(M); wmb(); } while(0)
36
37static void frv_fpga_doirq(struct irq_source *source);
38static void frv_fpga_control(struct irq_group *group, int irq, int on);
39
40/*****************************************************************************/
41/*
42 * FPGA IRQ multiplexor
43 */
44static struct irq_source frv_fpga[4] = {
45#define __FPGA(X, M) \
46 [X] = { \
47 .muxname = "fpga."#X, \
48 .irqmask = M, \
49 .doirq = frv_fpga_doirq, \
50 }
51
52 __FPGA(0, 0x0700),
53};
54
55static struct irq_group frv_fpga_irqs = {
56 .first_irq = IRQ_BASE_FPGA,
57 .control = frv_fpga_control,
58 .sources = {
59 [ 8] = &frv_fpga[0],
60 [ 9] = &frv_fpga[0],
61 [10] = &frv_fpga[0],
62 },
63};
64
65
66static void frv_fpga_control(struct irq_group *group, int index, int on)
67{
68 uint16_t imr = __get_IMR();
69
70 if (on)
71 imr &= ~(1 << index);
72 else
73 imr |= 1 << index;
74
75 __set_IMR(imr);
76}
77
78static void frv_fpga_doirq(struct irq_source *source)
79{
80 uint16_t mask, imr;
81
82 imr = __get_IMR();
83 mask = source->irqmask & ~imr & __get_IFR();
84 if (mask) {
85 __set_IMR(imr | mask);
86 __clr_IFR(mask);
87 distribute_irqs(&frv_fpga_irqs, mask);
88 __set_IMR(imr);
89 }
90}
91
92void __init fpga_init(void)
93{
94 __set_IMR(0x0700);
95 __clr_IFR(0x0000);
96
97 frv_irq_route_external(&frv_fpga[0], IRQ_CPU_EXTERNAL2);
98 frv_irq_set_group(&frv_fpga_irqs);
99}
diff --git a/arch/frv/kernel/irq-mb93493.c b/arch/frv/kernel/irq-mb93493.c
new file mode 100644
index 000000000000..c003ae5e2b30
--- /dev/null
+++ b/arch/frv/kernel/irq-mb93493.c
@@ -0,0 +1,108 @@
1/* irq-mb93493.c: MB93493 companion chip interrupt handler
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/ptrace.h>
14#include <linux/errno.h>
15#include <linux/signal.h>
16#include <linux/sched.h>
17#include <linux/ioport.h>
18#include <linux/interrupt.h>
19#include <linux/init.h>
20#include <linux/irq.h>
21
22#include <asm/io.h>
23#include <asm/system.h>
24#include <asm/bitops.h>
25#include <asm/delay.h>
26#include <asm/irq.h>
27#include <asm/irc-regs.h>
28#include <asm/irq-routing.h>
29#include <asm/mb93493-irqs.h>
30
31static void frv_mb93493_doirq(struct irq_source *source);
32
33/*****************************************************************************/
34/*
35 * MB93493 companion chip IRQ multiplexor
36 */
37static struct irq_source frv_mb93493[2] = {
38 [0] = {
39 .muxname = "mb93493.0",
40 .muxdata = __region_CS3 + 0x3d0,
41 .doirq = frv_mb93493_doirq,
42 .irqmask = 0x0000,
43 },
44 [1] = {
45 .muxname = "mb93493.1",
46 .muxdata = __region_CS3 + 0x3d4,
47 .doirq = frv_mb93493_doirq,
48 .irqmask = 0x0000,
49 },
50};
51
52static void frv_mb93493_control(struct irq_group *group, int index, int on)
53{
54 struct irq_source *source;
55 uint32_t iqsr;
56
57 if ((frv_mb93493[0].irqmask & (1 << index)))
58 source = &frv_mb93493[0];
59 else
60 source = &frv_mb93493[1];
61
62 iqsr = readl(source->muxdata);
63 if (on)
64 iqsr |= 1 << (index + 16);
65 else
66 iqsr &= ~(1 << (index + 16));
67
68 writel(iqsr, source->muxdata);
69}
70
71static struct irq_group frv_mb93493_irqs = {
72 .first_irq = IRQ_BASE_MB93493,
73 .control = frv_mb93493_control,
74};
75
76static void frv_mb93493_doirq(struct irq_source *source)
77{
78 uint32_t mask = readl(source->muxdata);
79 mask = mask & (mask >> 16) & 0xffff;
80
81 if (mask)
82 distribute_irqs(&frv_mb93493_irqs, mask);
83}
84
85static void __init mb93493_irq_route(int irq, int source)
86{
87 frv_mb93493[source].irqmask |= 1 << (irq - IRQ_BASE_MB93493);
88 frv_mb93493_irqs.sources[irq - IRQ_BASE_MB93493] = &frv_mb93493[source];
89}
90
91void __init route_mb93493_irqs(void)
92{
93 frv_irq_route_external(&frv_mb93493[0], IRQ_CPU_MB93493_0);
94 frv_irq_route_external(&frv_mb93493[1], IRQ_CPU_MB93493_1);
95
96 frv_irq_set_group(&frv_mb93493_irqs);
97
98 mb93493_irq_route(IRQ_MB93493_VDC, IRQ_MB93493_VDC_ROUTE);
99 mb93493_irq_route(IRQ_MB93493_VCC, IRQ_MB93493_VCC_ROUTE);
100 mb93493_irq_route(IRQ_MB93493_AUDIO_IN, IRQ_MB93493_AUDIO_IN_ROUTE);
101 mb93493_irq_route(IRQ_MB93493_I2C_0, IRQ_MB93493_I2C_0_ROUTE);
102 mb93493_irq_route(IRQ_MB93493_I2C_1, IRQ_MB93493_I2C_1_ROUTE);
103 mb93493_irq_route(IRQ_MB93493_USB, IRQ_MB93493_USB_ROUTE);
104 mb93493_irq_route(IRQ_MB93493_LOCAL_BUS, IRQ_MB93493_LOCAL_BUS_ROUTE);
105 mb93493_irq_route(IRQ_MB93493_PCMCIA, IRQ_MB93493_PCMCIA_ROUTE);
106 mb93493_irq_route(IRQ_MB93493_GPIO, IRQ_MB93493_GPIO_ROUTE);
107 mb93493_irq_route(IRQ_MB93493_AUDIO_OUT, IRQ_MB93493_AUDIO_OUT_ROUTE);
108}
diff --git a/arch/frv/kernel/irq-routing.c b/arch/frv/kernel/irq-routing.c
new file mode 100644
index 000000000000..d4776d1f4e82
--- /dev/null
+++ b/arch/frv/kernel/irq-routing.c
@@ -0,0 +1,291 @@
1/* irq-routing.c: IRQ routing
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/sched.h>
13#include <linux/random.h>
14#include <linux/init.h>
15#include <linux/serial_reg.h>
16#include <asm/io.h>
17#include <asm/irq-routing.h>
18#include <asm/irc-regs.h>
19#include <asm/serial-regs.h>
20#include <asm/dma.h>
21
22struct irq_level frv_irq_levels[16] = {
23 [0 ... 15] = {
24 .lock = SPIN_LOCK_UNLOCKED,
25 }
26};
27
28struct irq_group *irq_groups[NR_IRQ_GROUPS];
29
30extern struct irq_group frv_cpu_irqs;
31
32void __init frv_irq_route(struct irq_source *source, int irqlevel)
33{
34 source->level = &frv_irq_levels[irqlevel];
35 source->next = frv_irq_levels[irqlevel].sources;
36 frv_irq_levels[irqlevel].sources = source;
37}
38
39void __init frv_irq_route_external(struct irq_source *source, int irq)
40{
41 int irqlevel = 0;
42
43 switch (irq) {
44 case IRQ_CPU_EXTERNAL0: irqlevel = IRQ_XIRQ0_LEVEL; break;
45 case IRQ_CPU_EXTERNAL1: irqlevel = IRQ_XIRQ1_LEVEL; break;
46 case IRQ_CPU_EXTERNAL2: irqlevel = IRQ_XIRQ2_LEVEL; break;
47 case IRQ_CPU_EXTERNAL3: irqlevel = IRQ_XIRQ3_LEVEL; break;
48 case IRQ_CPU_EXTERNAL4: irqlevel = IRQ_XIRQ4_LEVEL; break;
49 case IRQ_CPU_EXTERNAL5: irqlevel = IRQ_XIRQ5_LEVEL; break;
50 case IRQ_CPU_EXTERNAL6: irqlevel = IRQ_XIRQ6_LEVEL; break;
51 case IRQ_CPU_EXTERNAL7: irqlevel = IRQ_XIRQ7_LEVEL; break;
52 default: BUG();
53 }
54
55 source->level = &frv_irq_levels[irqlevel];
56 source->next = frv_irq_levels[irqlevel].sources;
57 frv_irq_levels[irqlevel].sources = source;
58}
59
60void __init frv_irq_set_group(struct irq_group *group)
61{
62 irq_groups[group->first_irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP] = group;
63}
64
65void distribute_irqs(struct irq_group *group, unsigned long irqmask)
66{
67 struct irqaction *action;
68 int irq;
69
70 while (irqmask) {
71 asm("scan %1,gr0,%0" : "=r"(irq) : "r"(irqmask));
72 if (irq < 0 || irq > 31)
73 asm volatile("break");
74 irq = 31 - irq;
75
76 irqmask &= ~(1 << irq);
77 action = group->actions[irq];
78
79 irq += group->first_irq;
80
81 if (action) {
82 int status = 0;
83
84// if (!(action->flags & SA_INTERRUPT))
85// local_irq_enable();
86
87 do {
88 status |= action->flags;
89 action->handler(irq, action->dev_id, __frame);
90 action = action->next;
91 } while (action);
92
93 if (status & SA_SAMPLE_RANDOM)
94 add_interrupt_randomness(irq);
95 local_irq_disable();
96 }
97 }
98}
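/* Illustrative sketch: a portable C rendering of the scan-based dispatch
 * loop in distribute_irqs() above.  As used here with gr0 as the second
 * operand, the FRV "scan" instruction behaves like a count-leading-zeros,
 * so 31 - scan gives the bit index of the highest pending source.
 * handle_one() is a hypothetical callback standing in for the irqaction
 * walk.
 */
static inline void distribute_irqs_sketch(struct irq_group *group,
                                          unsigned long irqmask,
                                          void (*handle_one)(int irq))
{
        while (irqmask) {
                int irq = 31 - __builtin_clz(irqmask);  /* highest set bit */

                irqmask &= ~(1UL << irq);               /* mark it handled */
                handle_one(group->first_irq + irq);     /* dispatch it */
        }
}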
99
100/*****************************************************************************/
101/*
102 * CPU UART interrupts
103 */
104static void frv_cpuuart_doirq(struct irq_source *source)
105{
106// uint8_t iir = readb(source->muxdata + UART_IIR * 8);
107// if ((iir & 0x0f) != UART_IIR_NO_INT)
108 distribute_irqs(&frv_cpu_irqs, source->irqmask);
109}
110
111struct irq_source frv_cpuuart[2] = {
112#define __CPUUART(X, A) \
113 [X] = { \
114 .muxname = "uart", \
115 .muxdata = (volatile void __iomem *) A, \
116 .irqmask = 1 << IRQ_CPU_UART##X, \
117 .doirq = frv_cpuuart_doirq, \
118 }
119
120 __CPUUART(0, UART0_BASE),
121 __CPUUART(1, UART1_BASE),
122};
123
124/*****************************************************************************/
125/*
126 * CPU DMA interrupts
127 */
128static void frv_cpudma_doirq(struct irq_source *source)
129{
130 uint32_t cstr = readl(source->muxdata + DMAC_CSTRx);
131 if (cstr & DMAC_CSTRx_INT)
132 distribute_irqs(&frv_cpu_irqs, source->irqmask);
133}
134
135struct irq_source frv_cpudma[8] = {
136#define __CPUDMA(X, A) \
137 [X] = { \
138 .muxname = "dma", \
139 .muxdata = (volatile void __iomem *) A, \
140 .irqmask = 1 << IRQ_CPU_DMA##X, \
141 .doirq = frv_cpudma_doirq, \
142 }
143
144 __CPUDMA(0, 0xfe000900),
145 __CPUDMA(1, 0xfe000980),
146 __CPUDMA(2, 0xfe000a00),
147 __CPUDMA(3, 0xfe000a80),
148 __CPUDMA(4, 0xfe001000),
149 __CPUDMA(5, 0xfe001080),
150 __CPUDMA(6, 0xfe001100),
151 __CPUDMA(7, 0xfe001180),
152};
153
154/*****************************************************************************/
155/*
156 * CPU timer interrupts - can't tell whether they've generated an interrupt or not
157 */
158static void frv_cputimer_doirq(struct irq_source *source)
159{
160 distribute_irqs(&frv_cpu_irqs, source->irqmask);
161}
162
163struct irq_source frv_cputimer[3] = {
164#define __CPUTIMER(X) \
165 [X] = { \
166 .muxname = "timer", \
167 .muxdata = 0, \
168 .irqmask = 1 << IRQ_CPU_TIMER##X, \
169 .doirq = frv_cputimer_doirq, \
170 }
171
172 __CPUTIMER(0),
173 __CPUTIMER(1),
174 __CPUTIMER(2),
175};
176
177/*****************************************************************************/
178/*
179 * external CPU interrupts - can't tell directly whether they've generated an interrupt or not
180 */
181static void frv_cpuexternal_doirq(struct irq_source *source)
182{
183 distribute_irqs(&frv_cpu_irqs, source->irqmask);
184}
185
186struct irq_source frv_cpuexternal[8] = {
187#define __CPUEXTERNAL(X) \
188 [X] = { \
189 .muxname = "ext", \
190 .muxdata = 0, \
191 .irqmask = 1 << IRQ_CPU_EXTERNAL##X, \
192 .doirq = frv_cpuexternal_doirq, \
193 }
194
195 __CPUEXTERNAL(0),
196 __CPUEXTERNAL(1),
197 __CPUEXTERNAL(2),
198 __CPUEXTERNAL(3),
199 __CPUEXTERNAL(4),
200 __CPUEXTERNAL(5),
201 __CPUEXTERNAL(6),
202 __CPUEXTERNAL(7),
203};
204
205#define set_IRR(N,A,B,C,D) __set_IRR(N, (A << 28) | (B << 24) | (C << 20) | (D << 16))
206
207struct irq_group frv_cpu_irqs = {
208 .sources = {
209 [IRQ_CPU_UART0] = &frv_cpuuart[0],
210 [IRQ_CPU_UART1] = &frv_cpuuart[1],
211 [IRQ_CPU_TIMER0] = &frv_cputimer[0],
212 [IRQ_CPU_TIMER1] = &frv_cputimer[1],
213 [IRQ_CPU_TIMER2] = &frv_cputimer[2],
214 [IRQ_CPU_DMA0] = &frv_cpudma[0],
215 [IRQ_CPU_DMA1] = &frv_cpudma[1],
216 [IRQ_CPU_DMA2] = &frv_cpudma[2],
217 [IRQ_CPU_DMA3] = &frv_cpudma[3],
218 [IRQ_CPU_DMA4] = &frv_cpudma[4],
219 [IRQ_CPU_DMA5] = &frv_cpudma[5],
220 [IRQ_CPU_DMA6] = &frv_cpudma[6],
221 [IRQ_CPU_DMA7] = &frv_cpudma[7],
222 [IRQ_CPU_EXTERNAL0] = &frv_cpuexternal[0],
223 [IRQ_CPU_EXTERNAL1] = &frv_cpuexternal[1],
224 [IRQ_CPU_EXTERNAL2] = &frv_cpuexternal[2],
225 [IRQ_CPU_EXTERNAL3] = &frv_cpuexternal[3],
226 [IRQ_CPU_EXTERNAL4] = &frv_cpuexternal[4],
227 [IRQ_CPU_EXTERNAL5] = &frv_cpuexternal[5],
228 [IRQ_CPU_EXTERNAL6] = &frv_cpuexternal[6],
229 [IRQ_CPU_EXTERNAL7] = &frv_cpuexternal[7],
230 },
231};
232
233/*****************************************************************************/
234/*
235 * route the CPU's interrupt sources
236 */
237void __init route_cpu_irqs(void)
238{
239 frv_irq_set_group(&frv_cpu_irqs);
240
241 __set_IITMR(0, 0x003f0000); /* DMA0-3, TIMER0-2 IRQ detect levels */
242 __set_IITMR(1, 0x20000000); /* ERR0-1, UART0-1, DMA4-7 IRQ detect levels */
243
244 /* route UART and error interrupts */
245 frv_irq_route(&frv_cpuuart[0], IRQ_UART0_LEVEL);
246 frv_irq_route(&frv_cpuuart[1], IRQ_UART1_LEVEL);
247
248 set_IRR(6, IRQ_GDBSTUB_LEVEL, IRQ_GDBSTUB_LEVEL, IRQ_UART1_LEVEL, IRQ_UART0_LEVEL);
249
250 /* route DMA channel interrupts */
251 frv_irq_route(&frv_cpudma[0], IRQ_DMA0_LEVEL);
252 frv_irq_route(&frv_cpudma[1], IRQ_DMA1_LEVEL);
253 frv_irq_route(&frv_cpudma[2], IRQ_DMA2_LEVEL);
254 frv_irq_route(&frv_cpudma[3], IRQ_DMA3_LEVEL);
255 frv_irq_route(&frv_cpudma[4], IRQ_DMA4_LEVEL);
256 frv_irq_route(&frv_cpudma[5], IRQ_DMA5_LEVEL);
257 frv_irq_route(&frv_cpudma[6], IRQ_DMA6_LEVEL);
258 frv_irq_route(&frv_cpudma[7], IRQ_DMA7_LEVEL);
259
260 set_IRR(4, IRQ_DMA3_LEVEL, IRQ_DMA2_LEVEL, IRQ_DMA1_LEVEL, IRQ_DMA0_LEVEL);
261 set_IRR(7, IRQ_DMA7_LEVEL, IRQ_DMA6_LEVEL, IRQ_DMA5_LEVEL, IRQ_DMA4_LEVEL);
262
263 /* route timer interrupts */
264 frv_irq_route(&frv_cputimer[0], IRQ_TIMER0_LEVEL);
265 frv_irq_route(&frv_cputimer[1], IRQ_TIMER1_LEVEL);
266 frv_irq_route(&frv_cputimer[2], IRQ_TIMER2_LEVEL);
267
268 set_IRR(5, 0, IRQ_TIMER2_LEVEL, IRQ_TIMER1_LEVEL, IRQ_TIMER0_LEVEL);
269
270 /* route external interrupts */
271 frv_irq_route(&frv_cpuexternal[0], IRQ_XIRQ0_LEVEL);
272 frv_irq_route(&frv_cpuexternal[1], IRQ_XIRQ1_LEVEL);
273 frv_irq_route(&frv_cpuexternal[2], IRQ_XIRQ2_LEVEL);
274 frv_irq_route(&frv_cpuexternal[3], IRQ_XIRQ3_LEVEL);
275 frv_irq_route(&frv_cpuexternal[4], IRQ_XIRQ4_LEVEL);
276 frv_irq_route(&frv_cpuexternal[5], IRQ_XIRQ5_LEVEL);
277 frv_irq_route(&frv_cpuexternal[6], IRQ_XIRQ6_LEVEL);
278 frv_irq_route(&frv_cpuexternal[7], IRQ_XIRQ7_LEVEL);
279
280 set_IRR(2, IRQ_XIRQ7_LEVEL, IRQ_XIRQ6_LEVEL, IRQ_XIRQ5_LEVEL, IRQ_XIRQ4_LEVEL);
281 set_IRR(3, IRQ_XIRQ3_LEVEL, IRQ_XIRQ2_LEVEL, IRQ_XIRQ1_LEVEL, IRQ_XIRQ0_LEVEL);
282
283#if defined(CONFIG_MB93091_VDK)
284 __set_TM1(0x55550000); /* XIRQ7-0 all active low */
285#elif defined(CONFIG_MB93093_PDK)
286 __set_TM1(0x15550000); /* XIRQ7 active high, 6-0 all active low */
287#else
288#error cannot determine external IRQ trigger levels for this setup
289#endif
290
291} /* end route_cpu_irqs() */
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
new file mode 100644
index 000000000000..8c524cdd2717
--- /dev/null
+++ b/arch/frv/kernel/irq.c
@@ -0,0 +1,764 @@
1/* irq.c: FRV IRQ handling
2 *
3 * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12/*
13 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
14 *
15 * IRQs are in fact implemented a bit like signal handlers for the kernel.
16 * Naturally it's not a 1:1 relation, but there are similarities.
17 */
18
19#include <linux/config.h>
20#include <linux/ptrace.h>
21#include <linux/errno.h>
22#include <linux/signal.h>
23#include <linux/sched.h>
24#include <linux/ioport.h>
25#include <linux/interrupt.h>
26#include <linux/timex.h>
27#include <linux/slab.h>
28#include <linux/random.h>
29#include <linux/smp_lock.h>
30#include <linux/init.h>
31#include <linux/kernel_stat.h>
32#include <linux/irq.h>
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
35
36#include <asm/atomic.h>
37#include <asm/io.h>
38#include <asm/smp.h>
39#include <asm/system.h>
40#include <asm/bitops.h>
41#include <asm/uaccess.h>
42#include <asm/pgalloc.h>
43#include <asm/delay.h>
44#include <asm/irq.h>
45#include <asm/irc-regs.h>
46#include <asm/irq-routing.h>
47#include <asm/gdb-stub.h>
48
49extern void __init fpga_init(void);
50extern void __init route_mb93493_irqs(void);
51
52static void register_irq_proc (unsigned int irq);
53
54/*
55 * Special irq handlers.
56 */
57
58irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs) { return IRQ_HANDLED; }
59
60atomic_t irq_err_count;
61
62/*
63 * Generic, controller-independent functions:
64 */
65int show_interrupts(struct seq_file *p, void *v)
66{
67 struct irqaction *action;
68 struct irq_group *group;
69 unsigned long flags;
70 int level, grp, ix, i, j;
71
72 i = *(loff_t *) v;
73
74 switch (i) {
75 case 0:
76 seq_printf(p, " ");
77 for (j = 0; j < NR_CPUS; j++)
78 if (cpu_online(j))
79 seq_printf(p, "CPU%d ",j);
80
81 seq_putc(p, '\n');
82 break;
83
84 case 1 ... NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP:
85 local_irq_save(flags);
86
87 grp = (i - 1) / NR_IRQ_ACTIONS_PER_GROUP;
88 group = irq_groups[grp];
89 if (!group)
90 goto skip;
91
92 ix = (i - 1) % NR_IRQ_ACTIONS_PER_GROUP;
93 action = group->actions[ix];
94 if (!action)
95 goto skip;
96
97 seq_printf(p, "%3d: ", i - 1);
98
99#ifndef CONFIG_SMP
100 seq_printf(p, "%10u ", kstat_irqs(i));
101#else
102 for (j = 0; j < NR_CPUS; j++)
103 if (cpu_online(j))
104 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
105#endif
106
107 level = group->sources[ix]->level - frv_irq_levels;
108
109 seq_printf(p, " %12s@%x", group->sources[ix]->muxname, level);
110 seq_printf(p, " %s", action->name);
111
112 for (action = action->next; action; action = action->next)
113 seq_printf(p, ", %s", action->name);
114
115 seq_putc(p, '\n');
116skip:
117 local_irq_restore(flags);
118 break;
119
120 case NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP + 1:
121 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
122 break;
123
124 default:
125 break;
126 }
127
128 return 0;
129}
130
131
132/*
133 * Generic enable/disable code: this just calls
134 * down into the PIC-specific version for the actual
135 * hardware disable after having gotten the irq
136 * controller lock.
137 */
138
139/**
140 * disable_irq_nosync - disable an irq without waiting
141 * @irq: Interrupt to disable
142 *
143 * Disable the selected interrupt line. Disables and Enables are
144 * nested.
145 * Unlike disable_irq(), this function does not ensure existing
146 * instances of the IRQ handler have completed before returning.
147 *
148 * This function may be called from IRQ context.
149 */
150
151void disable_irq_nosync(unsigned int irq)
152{
153 struct irq_source *source;
154 struct irq_group *group;
155 struct irq_level *level;
156 unsigned long flags;
157 int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);
158
159 group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
160 if (!group)
161 BUG();
162
163 source = group->sources[idx];
164 if (!source)
165 BUG();
166
167 level = source->level;
168
169 spin_lock_irqsave(&level->lock, flags);
170
171 if (group->control) {
172 if (!group->disable_cnt[idx]++)
173 group->control(group, idx, 0);
174 } else if (!level->disable_count++) {
175 __set_MASK(level - frv_irq_levels);
176 }
177
178 spin_unlock_irqrestore(&level->lock, flags);
179}
180
181/**
182 * disable_irq - disable an irq and wait for completion
183 * @irq: Interrupt to disable
184 *
185 * Disable the selected interrupt line. Enables and Disables are
186 * nested.
187 * This function waits for any pending IRQ handlers for this interrupt
188 * to complete before returning. If you use this function while
189 * holding a resource the IRQ handler may need, you will deadlock.
190 *
191 * This function may be called - with care - from IRQ context.
192 */
193
194void disable_irq(unsigned int irq)
195{
196 disable_irq_nosync(irq);
197
198#ifdef CONFIG_SMP
199 if (!local_irq_count(smp_processor_id())) {
200 do {
201 barrier();
202 } while (irq_desc[irq].status & IRQ_INPROGRESS);
203 }
204#endif
205}
206
207/**
208 * enable_irq - enable handling of an irq
209 * @irq: Interrupt to enable
210 *
211 * Undoes the effect of one call to disable_irq(). If this
212 * matches the last disable, processing of interrupts on this
213 * IRQ line is re-enabled.
214 *
215 * This function may be called from IRQ context.
216 */
217
218void enable_irq(unsigned int irq)
219{
220 struct irq_source *source;
221 struct irq_group *group;
222 struct irq_level *level;
223 unsigned long flags;
224 int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);
225 int count;
226
227 group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
228 if (!group)
229 BUG();
230
231 source = group->sources[idx];
232 if (!source)
233 BUG();
234
235 level = source->level;
236
237 spin_lock_irqsave(&level->lock, flags);
238
239 if (group->control)
240 count = group->disable_cnt[idx];
241 else
242 count = level->disable_count;
243
244 switch (count) {
245 case 1:
246 if (group->control) {
247 if (group->actions[idx])
248 group->control(group, idx, 1);
249 } else {
250 if (level->usage)
251 __clr_MASK(level - frv_irq_levels);
252 }
253 /* fall-through */
254
255 default:
256 count--;
257 break;
258
259 case 0:
260 printk("enable_irq(%u) unbalanced from %p\n", irq, __builtin_return_address(0));
261 }
262
263 if (group->control)
264 group->disable_cnt[idx] = count;
265 else
266 level->disable_count = count;
267
268 spin_unlock_irqrestore(&level->lock, flags);
269}
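/* Illustrative sketch of the nested disable/enable semantics documented
 * above.  mydev_irq is a hypothetical IRQ number; the line only becomes
 * active again once every disable has been paired with an enable.
 */
static void mydev_pause_irq_sketch(unsigned int mydev_irq)
{
        disable_irq(mydev_irq);         /* depth 1 - line masked */
        disable_irq(mydev_irq);         /* depth 2 - still masked */

        enable_irq(mydev_irq);          /* depth 1 - still masked */
        enable_irq(mydev_irq);          /* depth 0 - line unmasked again */
}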
270
271/*****************************************************************************/
272/*
273 * handles all normal device IRQ's
274 * - registers are referred to by the __frame variable (GR28)
275 * - IRQ distribution is complicated in this arch because of the many PICs, the
276 * way they work and the way they cascade
277 */
278asmlinkage void do_IRQ(void)
279{
280 struct irq_source *source;
281 int level, cpu;
282
283 level = (__frame->tbr >> 4) & 0xf;
284 cpu = smp_processor_id();
285
286#if 0
287 {
288 static u32 irqcount;
289 *(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level);
290 *(volatile u16 *) 0xffc00100 = (u16) ~0x9999;
291 mb();
292 }
293#endif
294
295 if ((unsigned long) __frame - (unsigned long) (current + 1) < 512)
296 BUG();
297
298 __set_MASK(level);
299 __clr_RC(level);
300 __clr_IRL();
301
302 kstat_this_cpu.irqs[level]++;
303
304 irq_enter();
305
306 for (source = frv_irq_levels[level].sources; source; source = source->next)
307 source->doirq(source);
308
309 irq_exit();
310
311 __clr_MASK(level);
312
313 /* only process softirqs if we didn't interrupt another interrupt handler */
314 if ((__frame->psr & PSR_PIL) == PSR_PIL_0)
315 if (local_softirq_pending())
316 do_softirq();
317
318#ifdef CONFIG_PREEMPT
319 local_irq_disable();
320 while (--current->preempt_count == 0) {
321 if (!(__frame->psr & PSR_S) ||
322 current->need_resched == 0 ||
323 in_interrupt())
324 break;
325 current->preempt_count++;
326 local_irq_enable();
327 preempt_schedule();
328 local_irq_disable();
329 }
330#endif
331
332#if 0
333 {
334 *(volatile u16 *) 0xffc00100 = (u16) ~0x6666;
335 mb();
336 }
337#endif
338
339} /* end do_IRQ() */
340
341/*****************************************************************************/
342/*
343 * handles all NMIs when not co-opted by the debugger
344 * - registers are referred to by the __frame variable (GR28)
345 */
346asmlinkage void do_NMI(void)
347{
348} /* end do_NMI() */
349
350/*****************************************************************************/
351/**
352 * request_irq - allocate an interrupt line
353 * @irq: Interrupt line to allocate
354 * @handler: Function to be called when the IRQ occurs
355 * @irqflags: Interrupt type flags
356 * @devname: An ascii name for the claiming device
357 * @dev_id: A cookie passed back to the handler function
358 *
359 * This call allocates interrupt resources and enables the
360 * interrupt line and IRQ handling. From the point this
361 * call is made your handler function may be invoked. Since
362 * your handler function must clear any interrupt the board
363 * raises, you must take care both to initialise your hardware
364 * and to set up the interrupt handler in the right order.
365 *
366 * Dev_id must be globally unique. Normally the address of the
367 * device data structure is used as the cookie. Since the handler
368 * receives this value it makes sense to use it.
369 *
370 * If your interrupt is shared you must pass a non NULL dev_id
371 * as this is required when freeing the interrupt.
372 *
373 * Flags:
374 *
375 * SA_SHIRQ Interrupt is shared
376 *
377 * SA_INTERRUPT Disable local interrupts while processing
378 *
379 * SA_SAMPLE_RANDOM The interrupt can be used for entropy
380 *
381 */
382
383int request_irq(unsigned int irq,
384 irqreturn_t (*handler)(int, void *, struct pt_regs *),
385 unsigned long irqflags,
386 const char * devname,
387 void *dev_id)
388{
389 int retval;
390 struct irqaction *action;
391
392#if 1
393 /*
394 * Sanity-check: shared interrupts should REALLY pass in
395 * a real dev-ID, otherwise we'll have trouble later trying
396 * to figure out which interrupt is which (messes up the
397 * interrupt freeing logic etc).
398 */
399 if (irqflags & SA_SHIRQ) {
400 if (!dev_id)
401 printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n",
402 devname, (&irq)[-1]);
403 }
404#endif
405
406 if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
407 return -EINVAL;
408 if (!handler)
409 return -EINVAL;
410
411 action = (struct irqaction *) kmalloc(sizeof(struct irqaction), GFP_KERNEL);
412 if (!action)
413 return -ENOMEM;
414
415 action->handler = handler;
416 action->flags = irqflags;
417 action->mask = CPU_MASK_NONE;
418 action->name = devname;
419 action->next = NULL;
420 action->dev_id = dev_id;
421
422 retval = setup_irq(irq, action);
423 if (retval)
424 kfree(action);
425 return retval;
426}
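/* Illustrative sketch of the request_irq()/free_irq() pairing described in
 * the comment above.  mydev_interrupt, mydev_irq and struct mydev_sketch are
 * hypothetical stand-ins for a real driver; a shared line requires a unique,
 * non-NULL dev_id, which is also what selects the handler when it is freed.
 */
struct mydev_sketch { int dummy; };

static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct mydev_sketch *dev = dev_id;

        /* acknowledge the device's interrupt source here */
        (void) dev;
        return IRQ_HANDLED;
}

static int mydev_attach_sketch(struct mydev_sketch *dev, unsigned int mydev_irq)
{
        int ret = request_irq(mydev_irq, mydev_interrupt, SA_SHIRQ,
                              "mydev", dev);
        if (ret < 0)
                return ret;

        /* ... device operates, handler may now be invoked ... */

        free_irq(mydev_irq, dev);       /* same dev_id removes this handler */
        return 0;
}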
427
428/**
429 * free_irq - free an interrupt
430 * @irq: Interrupt line to free
431 * @dev_id: Device identity to free
432 *
433 * Remove an interrupt handler. The handler is removed and if the
434 * interrupt line is no longer in use by any driver it is disabled.
435 * On a shared IRQ the caller must ensure the interrupt is disabled
436 * on the card it drives before calling this function. The function
437 * does not return until any executing interrupts for this IRQ
438 * have completed.
439 *
440 * This function may be called from interrupt context.
441 *
442 * Bugs: Attempting to free an irq in a handler for the same irq hangs
443 * the machine.
444 */
445
446void free_irq(unsigned int irq, void *dev_id)
447{
448 struct irq_source *source;
449 struct irq_group *group;
450 struct irq_level *level;
451 struct irqaction **p, **pp;
452 unsigned long flags;
453
454 if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
455 return;
456
457 group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
458 if (!group)
459 BUG();
460
461 source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
462 if (!source)
463 BUG();
464
465 level = source->level;
466 p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
467
468 spin_lock_irqsave(&level->lock, flags);
469
470 for (pp = p; *pp; pp = &(*pp)->next) {
471 struct irqaction *action = *pp;
472
473 if (action->dev_id != dev_id)
474 continue;
475
476 /* found it - remove from the list of entries */
477 *pp = action->next;
478
479 level->usage--;
480
481 if (p == pp && group->control)
482 group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 0);
483
484 if (level->usage == 0)
485 __set_MASK(level - frv_irq_levels);
486
487 spin_unlock_irqrestore(&level->lock,flags);
488
489#ifdef CONFIG_SMP
490 /* Wait to make sure it's not being used on another CPU */
491 while (desc->status & IRQ_INPROGRESS)
492 barrier();
493#endif
494 kfree(action);
495 return;
496 }
497}
498
499/*
500 * IRQ autodetection code..
501 *
502 * This depends on the fact that any interrupt that comes in on to an
503 * unassigned IRQ will cause GxICR_DETECT to be set
504 */
505
506static DECLARE_MUTEX(probe_sem);
507
508/**
509 * probe_irq_on - begin an interrupt autodetect
510 *
511 * Commence probing for an interrupt. The interrupts are scanned
512 * and a mask of potential interrupt lines is returned.
513 *
514 */
515
516unsigned long probe_irq_on(void)
517{
518 down(&probe_sem);
519 return 0;
520}
521
522/*
523 * Return a mask of triggered interrupts (this
524 * can handle only legacy ISA interrupts).
525 */
526
527/**
528 * probe_irq_mask - scan a bitmap of interrupt lines
529 * @xmask: mask of interrupts to consider
530 *
531 * Scan the ISA bus interrupt lines and return a bitmap of
532 * active interrupts. The interrupt probe logic state is then
533 * returned to its previous value.
534 *
535 * Note: we need to scan all the irq's even though we will
536 * only return ISA irq numbers - just so that we reset them
537 * all to a known state.
538 */
539unsigned int probe_irq_mask(unsigned long xmask)
540{
541 up(&probe_sem);
542 return 0;
543}
544
545/*
546 * Return the one interrupt that triggered (this can
547 * handle any interrupt source).
548 */
549
550/**
551 * probe_irq_off - end an interrupt autodetect
552 * @xmask: mask of potential interrupts (unused)
553 *
554 * Scans the unused interrupt lines and returns the line which
555 * appears to have triggered the interrupt. If no interrupt was
556 * found then zero is returned. If more than one interrupt is
557 * found then the negative of the first candidate is returned to
558 * indicate there is doubt.
559 *
560 * The interrupt probe logic state is returned to its previous
561 * value.
562 *
563 * BUGS: When used in a module (which arguably shouldn't happen)
564 * nothing prevents two IRQ probe callers from overlapping. The
565 * results of this are non-optimal.
566 */
567
568int probe_irq_off(unsigned long xmask)
569{
570 up(&probe_sem);
571 return -1;
572}
573
574/* this was setup_x86_irq but it seems pretty generic */
575int setup_irq(unsigned int irq, struct irqaction *new)
576{
577 struct irq_source *source;
578 struct irq_group *group;
579 struct irq_level *level;
580 struct irqaction **p, **pp;
581 unsigned long flags;
582
583 group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
584 if (!group)
585 BUG();
586
587 source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
588 if (!source)
589 BUG();
590
591 level = source->level;
592
593 p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
594
595 /*
596 * Some drivers like serial.c use request_irq() heavily,
597 * so we have to be careful not to interfere with a
598 * running system.
599 */
600 if (new->flags & SA_SAMPLE_RANDOM) {
601 /*
602 * This function might sleep, we want to call it first,
603 * outside of the atomic block.
604 * Yes, this might clear the entropy pool if the wrong
605 * driver is attempted to be loaded without actually
606 * installing a new handler, but that is not really a
607 * problem: only the sysadmin is able to do this.
608 */
609 rand_initialize_irq(irq);
610 }
611
612 /* must juggle the interrupt processing stuff with interrupts disabled */
613 spin_lock_irqsave(&level->lock, flags);
614
615 /* can't share interrupts unless all parties agree to */
616 if (level->usage != 0 && !(level->flags & new->flags & SA_SHIRQ)) {
617 spin_unlock_irqrestore(&level->lock,flags);
618 return -EBUSY;
619 }
620
621 /* add new interrupt at end of irq queue */
622 pp = p;
623 while (*pp)
624 pp = &(*pp)->next;
625
626 *pp = new;
627
628 level->usage++;
629 level->flags = new->flags;
630
631 /* turn the interrupts on */
632 if (level->usage == 1)
633 __clr_MASK(level - frv_irq_levels);
634
635 if (p == pp && group->control)
636 group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 1);
637
638 spin_unlock_irqrestore(&level->lock, flags);
639 register_irq_proc(irq);
640 return 0;
641}
642
643static struct proc_dir_entry * root_irq_dir;
644static struct proc_dir_entry * irq_dir [NR_IRQS];
645
646#define HEX_DIGITS 8
647
648static unsigned int parse_hex_value (const char *buffer,
649 unsigned long count, unsigned long *ret)
650{
651 unsigned char hexnum [HEX_DIGITS];
652 unsigned long value;
653 int i;
654
655 if (!count)
656 return -EINVAL;
657 if (count > HEX_DIGITS)
658 count = HEX_DIGITS;
659 if (copy_from_user(hexnum, buffer, count))
660 return -EFAULT;
661
662 /*
663 * Parse the first 8 characters as a hex string, any non-hex char
664 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
665 */
666 value = 0;
667
668 for (i = 0; i < count; i++) {
669 unsigned int c = hexnum[i];
670
671 switch (c) {
672 case '0' ... '9': c -= '0'; break;
673 case 'a' ... 'f': c -= 'a'-10; break;
674 case 'A' ... 'F': c -= 'A'-10; break;
675 default:
676 goto out;
677 }
678 value = (value << 4) | c;
679 }
680out:
681 *ret = value;
682 return 0;
683}
684
685
686static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
687 int count, int *eof, void *data)
688{
689 unsigned long *mask = (unsigned long *) data;
690 if (count < HEX_DIGITS+1)
691 return -EINVAL;
692 return sprintf (page, "%08lx\n", *mask);
693}
694
695static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
696 unsigned long count, void *data)
697{
698 unsigned long *mask = (unsigned long *) data, full_count = count, err;
699 unsigned long new_value;
700
701 show_state();
702 err = parse_hex_value(buffer, count, &new_value);
703 if (err)
704 return err;
705
706 *mask = new_value;
707 return full_count;
708}
709
710#define MAX_NAMELEN 10
711
712static void register_irq_proc (unsigned int irq)
713{
714 char name [MAX_NAMELEN];
715
716 if (!root_irq_dir || irq_dir[irq])
717 return;
718
719 memset(name, 0, MAX_NAMELEN);
720 sprintf(name, "%d", irq);
721
722 /* create /proc/irq/1234 */
723 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
724}
725
726unsigned long prof_cpu_mask = -1;
727
728void init_irq_proc (void)
729{
730 struct proc_dir_entry *entry;
731 int i;
732
733 /* create /proc/irq */
734 root_irq_dir = proc_mkdir("irq", 0);
735
736 /* create /proc/irq/prof_cpu_mask */
737 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
738 if (!entry)
739 return;
740
741 entry->nlink = 1;
742 entry->data = (void *)&prof_cpu_mask;
743 entry->read_proc = prof_cpu_mask_read_proc;
744 entry->write_proc = prof_cpu_mask_write_proc;
745
746 /*
747 * Create entries for all existing IRQs.
748 */
749 for (i = 0; i < NR_IRQS; i++)
750 register_irq_proc(i);
751}
752
753/*****************************************************************************/
754/*
755 * initialise the interrupt system
756 */
757void __init init_IRQ(void)
758{
759 route_cpu_irqs();
760 fpga_init();
761#ifdef CONFIG_FUJITSU_MB93493
762 route_mb93493_irqs();
763#endif
764} /* end init_IRQ() */
diff --git a/arch/frv/kernel/kernel_thread.S b/arch/frv/kernel/kernel_thread.S
new file mode 100644
index 000000000000..4531c830d20b
--- /dev/null
+++ b/arch/frv/kernel/kernel_thread.S
@@ -0,0 +1,77 @@
1/* kernel_thread.S: kernel thread creation
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/linkage.h>
13#include <asm/unistd.h>
14
15#define CLONE_VM 0x00000100 /* set if VM shared between processes */
16#define KERN_ERR "<3>"
17
18 .section .rodata
19kernel_thread_emsg:
20 .asciz KERN_ERR "failed to create kernel thread: error=%d\n"
21
22 .text
23 .balign 4
24
25###############################################################################
26#
27# Create a kernel thread
28#
29# int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
30#
31###############################################################################
32 .globl kernel_thread
33 .type kernel_thread,@function
34kernel_thread:
35 or.p gr8,gr0,gr4
36 or gr9,gr0,gr5
37
38 # start by forking the current process, but with shared VM
39 setlos.p #__NR_clone,gr7 ; syscall number
40 ori gr10,#CLONE_VM,gr8 ; first syscall arg [clone_flags]
41 sethi.p #0xe4e4,gr9 ; second syscall arg [newsp]
42 setlo #0xe4e4,gr9
43 setlos.p #0,gr10 ; third syscall arg [parent_tidptr]
44 setlos #0,gr11 ; fourth syscall arg [child_tidptr]
45 tira gr0,#0
46 setlos.p #4095,gr7
47 andcc gr8,gr8,gr0,icc0
48 addcc.p gr8,gr7,gr0,icc1
49 bnelr icc0,#2
50 bc icc1,#0,kernel_thread_error
51
52 # now invoke the work function
53 or gr5,gr0,gr8
54 calll @(gr4,gr0)
55
56 # and finally exit the thread
57 setlos #__NR_exit,gr7 ; syscall number
58 tira gr0,#0
59
60kernel_thread_error:
61 subi sp,#8,sp
62 movsg lr,gr4
63 sti gr8,@(sp,#0)
64 sti.p gr4,@(sp,#4)
65
66 or gr8,gr0,gr9
67 sethi.p %hi(kernel_thread_emsg),gr8
68 setlo %lo(kernel_thread_emsg),gr8
69
70 call printk
71
72 ldi @(sp,#4),gr4
73 ldi @(sp,#0),gr8
74 subi sp,#8,sp
75 jmpl @(gr4,gr0)
76
77 .size kernel_thread,.-kernel_thread
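/* Illustrative C-level sketch of calling the kernel_thread() routine
 * implemented above.  my_worker_thread is a hypothetical thread function;
 * CLONE_VM is ORed in by the routine itself, so callers pass only the
 * additional clone flags they want.
 */
static int my_worker_thread(void *arg)
{
        /* do background work; the return value becomes the exit code */
        return 0;
}

static void start_worker_sketch(void)
{
        int pid = kernel_thread(my_worker_thread, NULL, CLONE_FS | CLONE_FILES);

        if (pid < 0)
                printk(KERN_ERR "failed to start worker: %d\n", pid);
}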
diff --git a/arch/frv/kernel/local.h b/arch/frv/kernel/local.h
new file mode 100644
index 000000000000..e9471761d78b
--- /dev/null
+++ b/arch/frv/kernel/local.h
@@ -0,0 +1,56 @@
1/* local.h: local definitions
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _FRV_LOCAL_H
13#define _FRV_LOCAL_H
14
15#include <asm/sections.h>
16
17#ifndef __ASSEMBLY__
18
19/* dma.c */
20extern unsigned long frv_dma_inprogress;
21
22extern void frv_dma_pause_all(void);
23extern void frv_dma_resume_all(void);
24
25/* sleep.S */
26extern asmlinkage void frv_cpu_suspend(unsigned long);
27extern asmlinkage void frv_cpu_core_sleep(void);
28
29/* setup.c */
30extern unsigned long __nongprelbss pdm_suspend_mode;
31extern void determine_clocks(int verbose);
32extern int __nongprelbss clock_p0_current;
33extern int __nongprelbss clock_cm_current;
34extern int __nongprelbss clock_cmode_current;
35
36#ifdef CONFIG_PM
37extern int __nongprelbss clock_cmodes_permitted;
38extern unsigned long __nongprelbss clock_bits_settable;
39#define CLOCK_BIT_CM 0x0000000f
40#define CLOCK_BIT_CM_H 0x00000001 /* CLKC.CM can be set to 0 */
41#define CLOCK_BIT_CM_M 0x00000002 /* CLKC.CM can be set to 1 */
42#define CLOCK_BIT_CM_L 0x00000004 /* CLKC.CM can be set to 2 */
43#define CLOCK_BIT_P0 0x00000010 /* CLKC.P0 can be changed */
44#define CLOCK_BIT_CMODE 0x00000020 /* CLKC.CMODE can be changed */
45
46extern void (*__power_switch_wake_setup)(void);
47extern int (*__power_switch_wake_check)(void);
48extern void (*__power_switch_wake_cleanup)(void);
49#endif
50
51/* time.c */
52extern void time_divisor_init(void);
53
54
55#endif /* __ASSEMBLY__ */
56#endif /* _FRV_LOCAL_H */
diff --git a/arch/frv/kernel/pm-mb93093.c b/arch/frv/kernel/pm-mb93093.c
new file mode 100644
index 000000000000..34d01d7dcc3b
--- /dev/null
+++ b/arch/frv/kernel/pm-mb93093.c
@@ -0,0 +1,66 @@
1/*
2 * FR-V MB93093 Power Management Routines
3 *
4 * Copyright (c) 2004 Red Hat, Inc.
5 *
6 * Written by: msalter@redhat.com
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License.
10 *
11 */
12
13#include <linux/config.h>
14#include <linux/init.h>
15#include <linux/pm.h>
16#include <linux/sched.h>
17#include <linux/interrupt.h>
18#include <linux/sysctl.h>
19#include <linux/errno.h>
20#include <linux/delay.h>
21#include <asm/uaccess.h>
22
23#include <asm/mb86943a.h>
24
25#include "local.h"
26
27static unsigned long imask;
28/*
29 * Setup interrupt masks, etc to enable wakeup by power switch
30 */
31static void mb93093_power_switch_setup(void)
32{
33 /* mask all but FPGA interrupt sources. */
34 imask = *(volatile unsigned long *)0xfeff9820;
35 *(volatile unsigned long *)0xfeff9820 = ~(1 << (IRQ_XIRQ2_LEVEL + 16)) & 0xfffe0000;
36}
37
38/*
39 * Cleanup interrupt masks, etc after wakeup by power switch
40 */
41static void mb93093_power_switch_cleanup(void)
42{
43 *(volatile unsigned long *)0xfeff9820 = imask;
44}
45
46/*
47 * Return non-zero if wakeup irq was caused by power switch
48 */
49static int mb93093_power_switch_check(void)
50{
51 return 1;
52}
53
54/*
55 * Initialize power interface
56 */
57static int __init mb93093_pm_init(void)
58{
59 __power_switch_wake_setup = mb93093_power_switch_setup;
60 __power_switch_wake_check = mb93093_power_switch_check;
61 __power_switch_wake_cleanup = mb93093_power_switch_cleanup;
62 return 0;
63}
64
65__initcall(mb93093_pm_init);
66
diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c
new file mode 100644
index 000000000000..1a1e8a119c3d
--- /dev/null
+++ b/arch/frv/kernel/pm.c
@@ -0,0 +1,432 @@
1/*
2 * FR-V Power Management Routines
3 *
4 * Copyright (c) 2004 Red Hat, Inc.
5 *
6 * Based on SA1100 version:
7 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License.
11 *
12 */
13
14#include <linux/config.h>
15#include <linux/init.h>
16#include <linux/pm.h>
17#include <linux/sched.h>
18#include <linux/interrupt.h>
19#include <linux/sysctl.h>
20#include <linux/errno.h>
21#include <linux/delay.h>
22#include <asm/uaccess.h>
23
24#include <asm/mb86943a.h>
25
26#include "local.h"
27
28void (*pm_power_off)(void);
29
30extern void frv_change_cmode(int);
31
32/*
33 * Debug macros
34 */
35#define DEBUG
36
37int pm_do_suspend(void)
38{
39 local_irq_disable();
40
41 __set_LEDS(0xb1);
42
43 /* go zzz */
44 frv_cpu_suspend(pdm_suspend_mode);
45
46 __set_LEDS(0xb2);
47
48 local_irq_enable();
49
50 return 0;
51}
52
53static unsigned long __irq_mask;
54
55/*
56 * Setup interrupt masks, etc to enable wakeup by power switch
57 */
58static void __default_power_switch_setup(void)
59{
60 /* default is to mask all interrupt sources. */
61 __irq_mask = *(unsigned long *)0xfeff9820;
62 *(unsigned long *)0xfeff9820 = 0xfffe0000;
63}
64
65/*
66 * Cleanup interrupt masks, etc after wakeup by power switch
67 */
68static void __default_power_switch_cleanup(void)
69{
70 *(unsigned long *)0xfeff9820 = __irq_mask;
71}
72
73/*
74 * Return non-zero if wakeup irq was caused by power switch
75 */
76static int __default_power_switch_check(void)
77{
78 return 1;
79}
80
81void (*__power_switch_wake_setup)(void) = __default_power_switch_setup;
82int (*__power_switch_wake_check)(void) = __default_power_switch_check;
83void (*__power_switch_wake_cleanup)(void) = __default_power_switch_cleanup;
84
85int pm_do_bus_sleep(void)
86{
87 local_irq_disable();
88
89 /*
90 * Here is where we need some platform-dependent setup
91 * of the interrupt state so that appropriate wakeup
92 * sources are allowed and all others are masked.
93 */
94 __power_switch_wake_setup();
95
96 __set_LEDS(0xa1);
97
98 /* go zzz
99 *
100 * This is in a loop in case the power switch shares an irq with other
101 * devices. The wake_check() tells us if we need to finish waking
102 * or go back to sleep.
103 */
104 do {
105 frv_cpu_suspend(HSR0_PDM_BUS_SLEEP);
106 } while (__power_switch_wake_check && !__power_switch_wake_check());
107
108 __set_LEDS(0xa2);
109
110 /*
111 * Here is where we need some platform-dependent restoration
112 * of the interrupt state to what it was prior to being called.
113 */
114 __power_switch_wake_cleanup();
115
116 local_irq_enable();
117
118 return 0;
119}
120
121unsigned long sleep_phys_sp(void *sp)
122{
123 return virt_to_phys(sp);
124}
125
126#ifdef CONFIG_SYSCTL
127/*
128 * Use a temporary sysctl number. Horrid, but will be cleaned up in 2.6
129 * when all the PM interfaces exist nicely.
130 */
131#define CTL_PM 9899
132#define CTL_PM_SUSPEND 1
133#define CTL_PM_CMODE 2
134#define CTL_PM_P0 4
135#define CTL_PM_CM 5
136
137static int user_atoi(char *ubuf, size_t len)
138{
139 char buf[16];
140 unsigned long ret;
141
142 if (len > 15)
143 return -EINVAL;
144
145 if (copy_from_user(buf, ubuf, len))
146 return -EFAULT;
147
148 buf[len] = 0;
149 ret = simple_strtoul(buf, NULL, 0);
150 if (ret > INT_MAX)
151 return -ERANGE;
152 return ret;
153}
154
155/*
156 * Send us to sleep.
157 */
158static int sysctl_pm_do_suspend(ctl_table *ctl, int write, struct file *filp,
159 void *buffer, size_t *lenp, loff_t *fpos)
160{
161 int retval, mode;
162
163 if (*lenp <= 0)
164 return -EIO;
165
166 mode = user_atoi(buffer, *lenp);
167 if ((mode != 1) && (mode != 5))
168 return -EINVAL;
169
170 retval = pm_send_all(PM_SUSPEND, (void *)3);
171
172 if (retval == 0) {
173 if (mode == 5)
174 retval = pm_do_bus_sleep();
175 else
176 retval = pm_do_suspend();
177 pm_send_all(PM_RESUME, (void *)0);
178 }
179
180 return retval;
181}
182
183static int try_set_cmode(int new_cmode)
184{
185 if (new_cmode > 15)
186 return -EINVAL;
187 if (!(clock_cmodes_permitted & (1<<new_cmode)))
188 return -EINVAL;
189
190 /* tell all the drivers we're suspending */
191 pm_send_all(PM_SUSPEND, (void *)3);
192
193 /* now change cmode */
194 local_irq_disable();
195 frv_dma_pause_all();
196
197 frv_change_cmode(new_cmode);
198
199 determine_clocks(0);
200 time_divisor_init();
201
202#ifdef DEBUG
203 determine_clocks(1);
204#endif
205 frv_dma_resume_all();
206 local_irq_enable();
207
208 /* tell all the drivers we're resuming */
209 pm_send_all(PM_RESUME, (void *)0);
210 return 0;
211}
212
213
214static int cmode_procctl(ctl_table *ctl, int write, struct file *filp,
215 void *buffer, size_t *lenp, loff_t *fpos)
216{
217 int new_cmode;
218
219 if (!write)
220 return proc_dointvec(ctl, write, filp, buffer, lenp, fpos);
221
222 new_cmode = user_atoi(buffer, *lenp);
223
224 return try_set_cmode(new_cmode)?:*lenp;
225}
226
227static int cmode_sysctl(ctl_table *table, int *name, int nlen,
228 void *oldval, size_t *oldlenp,
229 void *newval, size_t newlen, void **context)
230{
231 if (oldval && oldlenp) {
232 size_t oldlen;
233
234 if (get_user(oldlen, oldlenp))
235 return -EFAULT;
236
237 if (oldlen != sizeof(int))
238 return -EINVAL;
239
240 if (put_user(clock_cmode_current, (unsigned int *)oldval) ||
241 put_user(sizeof(int), oldlenp))
242 return -EFAULT;
243 }
244 if (newval && newlen) {
245 int new_cmode;
246
247 if (newlen != sizeof(int))
248 return -EINVAL;
249
250 if (get_user(new_cmode, (int *)newval))
251 return -EFAULT;
252
253 return try_set_cmode(new_cmode)?:1;
254 }
255 return 1;
256}
257
258static int try_set_p0(int new_p0)
259{
260 unsigned long flags, clkc;
261
262 if (new_p0 < 0 || new_p0 > 1)
263 return -EINVAL;
264
265 local_irq_save(flags);
266 __set_PSR(flags & ~PSR_ET);
267
268 frv_dma_pause_all();
269
270 clkc = __get_CLKC();
271 if (new_p0)
272 clkc |= CLKC_P0;
273 else
274 clkc &= ~CLKC_P0;
275 __set_CLKC(clkc);
276
277 determine_clocks(0);
278 time_divisor_init();
279
280#ifdef DEBUG
281 determine_clocks(1);
282#endif
283 frv_dma_resume_all();
284 local_irq_restore(flags);
285 return 0;
286}
287
288static int try_set_cm(int new_cm)
289{
290 unsigned long flags, clkc;
291
292 if (new_cm < 0 || new_cm > 1)
293 return -EINVAL;
294
295 local_irq_save(flags);
296 __set_PSR(flags & ~PSR_ET);
297
298 frv_dma_pause_all();
299
300 clkc = __get_CLKC();
301 clkc &= ~CLKC_CM;
302 clkc |= new_cm;
303 __set_CLKC(clkc);
304
305 determine_clocks(0);
306 time_divisor_init();
307
308#if 1 //def DEBUG
309 determine_clocks(1);
310#endif
311
312 frv_dma_resume_all();
313 local_irq_restore(flags);
314 return 0;
315}
316
317static int p0_procctl(ctl_table *ctl, int write, struct file *filp,
318 void *buffer, size_t *lenp, loff_t *fpos)
319{
320 int new_p0;
321
322 if (!write)
323 return proc_dointvec(ctl, write, filp, buffer, lenp, fpos);
324
325 new_p0 = user_atoi(buffer, *lenp);
326
327 return try_set_p0(new_p0)?:*lenp;
328}
329
330static int p0_sysctl(ctl_table *table, int *name, int nlen,
331 void *oldval, size_t *oldlenp,
332 void *newval, size_t newlen, void **context)
333{
334 if (oldval && oldlenp) {
335 size_t oldlen;
336
337 if (get_user(oldlen, oldlenp))
338 return -EFAULT;
339
340 if (oldlen != sizeof(int))
341 return -EINVAL;
342
343 if (put_user(clock_p0_current, (unsigned int *)oldval) ||
344 put_user(sizeof(int), oldlenp))
345 return -EFAULT;
346 }
347 if (newval && newlen) {
348 int new_p0;
349
350 if (newlen != sizeof(int))
351 return -EINVAL;
352
353 if (get_user(new_p0, (int *)newval))
354 return -EFAULT;
355
356 return try_set_p0(new_p0)?:1;
357 }
358 return 1;
359}
360
361static int cm_procctl(ctl_table *ctl, int write, struct file *filp,
362 void *buffer, size_t *lenp, loff_t *fpos)
363{
364 int new_cm;
365
366 if (!write)
367 return proc_dointvec(ctl, write, filp, buffer, lenp, fpos);
368
369 new_cm = user_atoi(buffer, *lenp);
370
371 return try_set_cm(new_cm)?:*lenp;
372}
373
374static int cm_sysctl(ctl_table *table, int *name, int nlen,
375 void *oldval, size_t *oldlenp,
376 void *newval, size_t newlen, void **context)
377{
378 if (oldval && oldlenp) {
379 size_t oldlen;
380
381 if (get_user(oldlen, oldlenp))
382 return -EFAULT;
383
384 if (oldlen != sizeof(int))
385 return -EINVAL;
386
387 if (put_user(clock_cm_current, (unsigned int *)oldval) ||
388 put_user(sizeof(int), oldlenp))
389 return -EFAULT;
390 }
391 if (newval && newlen) {
392 int new_cm;
393
394 if (newlen != sizeof(int))
395 return -EINVAL;
396
397 if (get_user(new_cm, (int *)newval))
398 return -EFAULT;
399
400 return try_set_cm(new_cm)?:1;
401 }
402 return 1;
403}
404
405
406static struct ctl_table pm_table[] =
407{
408 {CTL_PM_SUSPEND, "suspend", NULL, 0, 0200, NULL, &sysctl_pm_do_suspend},
409 {CTL_PM_CMODE, "cmode", &clock_cmode_current, sizeof(int), 0644, NULL, &cmode_procctl, &cmode_sysctl, NULL},
410 {CTL_PM_P0, "p0", &clock_p0_current, sizeof(int), 0644, NULL, &p0_procctl, &p0_sysctl, NULL},
411 {CTL_PM_CM, "cm", &clock_cm_current, sizeof(int), 0644, NULL, &cm_procctl, &cm_sysctl, NULL},
412 {0}
413};
414
415static struct ctl_table pm_dir_table[] =
416{
417 {CTL_PM, "pm", NULL, 0, 0555, pm_table},
418 {0}
419};
420
421/*
422 * Initialize power interface
423 */
424static int __init pm_init(void)
425{
426 register_sysctl_table(pm_dir_table, 1);
427 return 0;
428}
429
430__initcall(pm_init);
431
432#endif
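/* Illustrative userspace sketch driving the /proc/sys/pm/suspend file
 * registered by the table above.  Writing "1" requests a normal suspend and
 * "5" a bus sleep, matching the checks in sysctl_pm_do_suspend(); the helper
 * name is hypothetical and the file is writable by root only (mode 0200).
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int frv_request_suspend_sketch(int bus_sleep)
{
        const char *arg = bus_sleep ? "5" : "1";
        int ret = -1;
        int fd = open("/proc/sys/pm/suspend", O_WRONLY);

        if (fd >= 0) {
                if (write(fd, arg, strlen(arg)) == (ssize_t) strlen(arg))
                        ret = 0;
                close(fd);
        }
        return ret;
}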
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
new file mode 100644
index 000000000000..3001b82b1514
--- /dev/null
+++ b/arch/frv/kernel/process.c
@@ -0,0 +1,388 @@
1/* process.c: FRV specific parts of process handling
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/process.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/stddef.h>
21#include <linux/unistd.h>
22#include <linux/ptrace.h>
23#include <linux/slab.h>
24#include <linux/user.h>
25#include <linux/elf.h>
26#include <linux/reboot.h>
27#include <linux/interrupt.h>
28
29#include <asm/uaccess.h>
30#include <asm/system.h>
31#include <asm/setup.h>
32#include <asm/pgtable.h>
33#include <asm/gdb-stub.h>
34#include <asm/mb-regs.h>
35
36#include "local.h"
37
38asmlinkage void ret_from_fork(void);
39
40#include <asm/pgalloc.h>
41
42struct task_struct *alloc_task_struct(void)
43{
44 struct task_struct *p = kmalloc(THREAD_SIZE, GFP_KERNEL);
45 if (p)
46 atomic_set((atomic_t *)(p+1), 1);
47 return p;
48}
49
50void free_task_struct(struct task_struct *p)
51{
52 if (atomic_dec_and_test((atomic_t *)(p+1)))
53 kfree(p);
54}
55
56static void core_sleep_idle(void)
57{
58#ifdef LED_DEBUG_SLEEP
59 /* Show that we're sleeping... */
60 __set_LEDS(0x55aa);
61#endif
62 frv_cpu_core_sleep();
63#ifdef LED_DEBUG_SLEEP
64 /* ... and that we woke up */
65 __set_LEDS(0);
66#endif
67 mb();
68}
69
70void (*idle)(void) = core_sleep_idle;
71
72/*
73 * The idle thread. There's no useful work to be
74 * done, so just try to conserve power and have a
75 * low exit latency (ie sit in a loop waiting for
76 * somebody to say that they'd like to reschedule)
77 */
78void cpu_idle(void)
79{
80 /* endless idle loop with no priority at all */
81 while (1) {
82 while (!need_resched()) {
83 irq_stat[smp_processor_id()].idle_timestamp = jiffies;
84
85 if (!frv_dma_inprogress && idle)
86 idle();
87 }
88
89 schedule();
90 }
91}
92
93void machine_restart(char * __unused)
94{
95 unsigned long reset_addr;
96#ifdef CONFIG_GDBSTUB
97 gdbstub_exit(0);
98#endif
99
100 if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551)
101 reset_addr = 0xfefff500;
102 else
103 reset_addr = 0xfeff0500;
104
105 /* Software reset. */
106 asm volatile(" dcef @(gr0,gr0),1 ! membar !"
107 " sti %1,@(%0,0) !"
108 " nop ! nop ! nop ! nop ! nop ! "
109 " nop ! nop ! nop ! nop ! nop ! "
110 " nop ! nop ! nop ! nop ! nop ! "
111 " nop ! nop ! nop ! nop ! nop ! "
112 : : "r" (reset_addr), "r" (1) );
113
114 for (;;)
115 ;
116}
117
118void machine_halt(void)
119{
120#ifdef CONFIG_GDBSTUB
121 gdbstub_exit(0);
122#endif
123
124 for (;;);
125}
126
127void machine_power_off(void)
128{
129#ifdef CONFIG_GDBSTUB
130 gdbstub_exit(0);
131#endif
132
133 for (;;);
134}
135
136void flush_thread(void)
137{
138#if 0 //ndef NO_FPU
139 unsigned long zero = 0;
140#endif
141 set_fs(USER_DS);
142}
143
144inline unsigned long user_stack(const struct pt_regs *regs)
145{
146 while (regs->next_frame)
147 regs = regs->next_frame;
148 return user_mode(regs) ? regs->sp : 0;
149}
150
151asmlinkage int sys_fork(void)
152{
153#ifndef CONFIG_MMU
154 /* fork almost works, enough to trick you into looking elsewhere:-( */
155 return -EINVAL;
156#else
157 return do_fork(SIGCHLD, user_stack(__frame), __frame, 0, NULL, NULL);
158#endif
159}
160
161asmlinkage int sys_vfork(void)
162{
163 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, user_stack(__frame), __frame, 0,
164 NULL, NULL);
165}
166
167/*****************************************************************************/
168/*
169 * clone a process
170 * - tlsptr is retrieved by copy_thread()
171 */
172asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
173 int __user *parent_tidptr, int __user *child_tidptr,
174 int __user *tlsptr)
175{
176 if (!newsp)
177 newsp = user_stack(__frame);
178 return do_fork(clone_flags, newsp, __frame, 0, parent_tidptr, child_tidptr);
179} /* end sys_clone() */
180
181/*****************************************************************************/
182/*
183 * This gets called before we allocate a new thread and copy
184 * the current task into it.
185 */
186void prepare_to_copy(struct task_struct *tsk)
187{
188 //unlazy_fpu(tsk);
189} /* end prepare_to_copy() */
190
191/*****************************************************************************/
192/*
193 * set up the kernel stack and exception frames for a new process
194 */
195int copy_thread(int nr, unsigned long clone_flags,
196 unsigned long usp, unsigned long topstk,
197 struct task_struct *p, struct pt_regs *regs)
198{
199 struct pt_regs *childregs0, *childregs, *regs0;
200
201 regs0 = __kernel_frame0_ptr;
202 childregs0 = (struct pt_regs *)
203 ((unsigned long) p->thread_info + THREAD_SIZE - USER_CONTEXT_SIZE);
204 childregs = childregs0;
205
206 /* set up the userspace frame (the only place that the USP is stored) */
207 *childregs0 = *regs0;
208
209 childregs0->gr8 = 0;
210 childregs0->sp = usp;
211 childregs0->next_frame = NULL;
212
213 /* set up the return kernel frame if called from kernel_thread() */
214 if (regs != regs0) {
215 childregs--;
216 *childregs = *regs;
217 childregs->sp = (unsigned long) childregs0;
218 childregs->next_frame = childregs0;
219 childregs->gr15 = (unsigned long) p->thread_info;
220 childregs->gr29 = (unsigned long) p;
221 }
222
223 p->set_child_tid = p->clear_child_tid = NULL;
224
225 p->thread.frame = childregs;
226 p->thread.curr = p;
227 p->thread.sp = (unsigned long) childregs;
228 p->thread.fp = 0;
229 p->thread.lr = 0;
230 p->thread.pc = (unsigned long) ret_from_fork;
231 p->thread.frame0 = childregs0;
232
233 /* the new TLS pointer is passed in as arg #5 to sys_clone() */
234 if (clone_flags & CLONE_SETTLS)
235 childregs->gr29 = childregs->gr12;
236
237 save_user_regs(p->thread.user);
238
239 return 0;
240} /* end copy_thread() */
241
242/*
243 * fill in the user structure for a core dump..
244 */
245void dump_thread(struct pt_regs *regs, struct user *dump)
246{
247#if 0
248 /* changed the size calculations - should hopefully work better. lbt */
249 dump->magic = CMAGIC;
250 dump->start_code = 0;
251 dump->start_stack = user_stack(regs) & ~(PAGE_SIZE - 1);
252 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
253 dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
254 dump->u_dsize -= dump->u_tsize;
255 dump->u_ssize = 0;
256
257 if (dump->start_stack < TASK_SIZE)
258 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
259
260 dump->regs = *(struct user_context *) regs;
261#endif
262}
263
264/*
265 * sys_execve() executes a new program.
266 */
267asmlinkage int sys_execve(char *name, char **argv, char **envp)
268{
269 int error;
270 char * filename;
271
272 lock_kernel();
273 filename = getname(name);
274 error = PTR_ERR(filename);
275 if (IS_ERR(filename))
276 goto out;
277 error = do_execve(filename, argv, envp, __frame);
278 putname(filename);
279 out:
280 unlock_kernel();
281 return error;
282}
283
284unsigned long get_wchan(struct task_struct *p)
285{
286 struct pt_regs *regs0;
287 unsigned long fp, pc;
288 unsigned long stack_limit;
289 int count = 0;
290 if (!p || p == current || p->state == TASK_RUNNING)
291 return 0;
292
293 stack_limit = (unsigned long) (p + 1);
294 fp = p->thread.fp;
295 regs0 = p->thread.frame0;
296
297 do {
298 if (fp < stack_limit || fp >= (unsigned long) regs0 || fp & 3)
299 return 0;
300
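		/* this walker assumes the kernel's FRV frame layout: the first
		 * word of a frame links to the caller's frame and the third
		 * word ([2]) holds the saved return address */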
301 pc = ((unsigned long *) fp)[2];
302
303 /* FIXME: This depends on the order of these functions. */
304 if (!in_sched_functions(pc))
305 return pc;
306
307 fp = *(unsigned long *) fp;
308 } while (count++ < 16);
309
310 return 0;
311}
312
313unsigned long thread_saved_pc(struct task_struct *tsk)
314{
315 /* Check whether the thread is blocked in resume() */
316 if (in_sched_functions(tsk->thread.pc))
317 return ((unsigned long *)tsk->thread.fp)[2];
318 else
319 return tsk->thread.pc;
320}
321
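/* Check an ELF header against the capabilities of the CPU we are running on:
 * the GPR/FPR bank requirements in e_flags are tested against the HSR0
 * GRN/FRN fields, and the media (MULADD) flag and CPU-type field against the
 * PSR implementation code.
 */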
322int elf_check_arch(const struct elf32_hdr *hdr)
323{
324 unsigned long hsr0 = __get_HSR(0);
325 unsigned long psr = __get_PSR();
326
327 if (hdr->e_machine != EM_FRV)
328 return 0;
329
330 switch (hdr->e_flags & EF_FRV_GPR_MASK) {
331 case EF_FRV_GPR64:
332 if ((hsr0 & HSR0_GRN) == HSR0_GRN_32)
333 return 0;
334 case EF_FRV_GPR32:
335 case 0:
336 break;
337 default:
338 return 0;
339 }
340
341 switch (hdr->e_flags & EF_FRV_FPR_MASK) {
342 case EF_FRV_FPR64:
343 if ((hsr0 & HSR0_FRN) == HSR0_FRN_32)
344 return 0;
345 case EF_FRV_FPR32:
346 case EF_FRV_FPR_NONE:
347 case 0:
348 break;
349 default:
350 return 0;
351 }
352
353 if ((hdr->e_flags & EF_FRV_MULADD) == EF_FRV_MULADD)
354 if (PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
355 PSR_IMPLE(psr) != PSR_IMPLE_FR451)
356 return 0;
357
358 switch (hdr->e_flags & EF_FRV_CPU_MASK) {
359 case EF_FRV_CPU_GENERIC:
360 break;
361 case EF_FRV_CPU_FR300:
362 case EF_FRV_CPU_SIMPLE:
363 case EF_FRV_CPU_TOMCAT:
364 default:
365 return 0;
366 case EF_FRV_CPU_FR400:
367 if (PSR_IMPLE(psr) != PSR_IMPLE_FR401 &&
368 PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
369 PSR_IMPLE(psr) != PSR_IMPLE_FR451 &&
370 PSR_IMPLE(psr) != PSR_IMPLE_FR551)
371 return 0;
372 break;
373 case EF_FRV_CPU_FR450:
374 if (PSR_IMPLE(psr) != PSR_IMPLE_FR451)
375 return 0;
376 break;
377 case EF_FRV_CPU_FR500:
378 if (PSR_IMPLE(psr) != PSR_IMPLE_FR501)
379 return 0;
380 break;
381 case EF_FRV_CPU_FR550:
382 if (PSR_IMPLE(psr) != PSR_IMPLE_FR551)
383 return 0;
384 break;
385 }
386
387 return 1;
388}
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
new file mode 100644
index 000000000000..2a0efb739adc
--- /dev/null
+++ b/arch/frv/kernel/ptrace.c
@@ -0,0 +1,764 @@
1/* ptrace.c: FRV specific parts of process tracing
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/ptrace.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/smp_lock.h>
18#include <linux/errno.h>
19#include <linux/ptrace.h>
20#include <linux/user.h>
21#include <linux/config.h>
22#include <linux/security.h>
23
24#include <asm/uaccess.h>
25#include <asm/page.h>
26#include <asm/pgtable.h>
27#include <asm/system.h>
28#include <asm/processor.h>
29#include <asm/unistd.h>
30
31/*
32 * This does not yet catch signals sent when the child dies;
33 * that would need to be handled in exit.c or in signal.c.
34 */
35
36/*
37 * Get contents of register REGNO in task TASK.
38 */
39static inline long get_reg(struct task_struct *task, int regno)
40{
41 struct user_context *user = task->thread.user;
42
43 if (regno < 0 || regno >= PT__END)
44 return 0;
45
46 return ((unsigned long *) user)[regno];
47}
48
49/*
50 * Write contents of register REGNO in task TASK.
51 */
52static inline int put_reg(struct task_struct *task, int regno,
53 unsigned long data)
54{
55 struct user_context *user = task->thread.user;
56
57 if (regno < 0 || regno >= PT__END)
58 return -EIO;
59
60 switch (regno) {
61 case PT_GR(0):
62 return 0;
63 case PT_PSR:
64 case PT__STATUS:
65 return -EIO;
66 default:
67 ((unsigned long *) user)[regno] = data;
68 return 0;
69 }
70}
71
72/*
73 * check that an address falls within the bounds of the target process's memory mappings
74 */
75static inline int is_user_addr_valid(struct task_struct *child,
76 unsigned long start, unsigned long len)
77{
78#ifdef CONFIG_MMU
79 if (start >= PAGE_OFFSET || len > PAGE_OFFSET - start)
80 return -EIO;
81 return 0;
82#else
83 struct vm_list_struct *vml;
84
85 for (vml = child->mm->context.vmlist; vml; vml = vml->next)
86 if (start >= vml->vma->vm_start && start + len <= vml->vma->vm_end)
87 return 0;
88
89 return -EIO;
90#endif
91}
92
93/*
94 * Called by kernel/ptrace.c when detaching.
95 *
96 * Control h/w single stepping
97 */
98void ptrace_disable(struct task_struct *child)
99{
100 child->thread.frame0->__status &= ~REG__STATUS_STEP;
101}
102
103void ptrace_enable(struct task_struct *child)
104{
105 child->thread.frame0->__status |= REG__STATUS_STEP;
106}
107
108asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
109{
110 struct task_struct *child;
111 unsigned long tmp;
112 int ret;
113
114 lock_kernel();
115 ret = -EPERM;
116 if (request == PTRACE_TRACEME) {
117 /* are we already being traced? */
118 if (current->ptrace & PT_PTRACED)
119 goto out;
120 ret = security_ptrace(current->parent, current);
121 if (ret)
122 goto out;
123 /* set the ptrace bit in the process flags. */
124 current->ptrace |= PT_PTRACED;
125 ret = 0;
126 goto out;
127 }
128 ret = -ESRCH;
129 read_lock(&tasklist_lock);
130 child = find_task_by_pid(pid);
131 if (child)
132 get_task_struct(child);
133 read_unlock(&tasklist_lock);
134 if (!child)
135 goto out;
136
137 ret = -EPERM;
138 if (pid == 1) /* you may not mess with init */
139 goto out_tsk;
140
141 if (request == PTRACE_ATTACH) {
142 ret = ptrace_attach(child);
143 goto out_tsk;
144 }
145
146 ret = ptrace_check_attach(child, request == PTRACE_KILL);
147 if (ret < 0)
148 goto out_tsk;
149
150 switch (request) {
151 /* when I and D space are separate, these will need to be fixed. */
152 case PTRACE_PEEKTEXT: /* read word at location addr. */
153 case PTRACE_PEEKDATA: {
154 int copied;
155
156 ret = -EIO;
157 if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
158 break;
159
160 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
161 if (copied != sizeof(tmp))
162 break;
163
164 ret = put_user(tmp,(unsigned long *) data);
165 break;
166 }
167
168 /* read the word at location addr in the USER area. */
169 case PTRACE_PEEKUSR: {
170 tmp = 0;
171 ret = -EIO;
172 if ((addr & 3) || addr < 0)
173 break;
174
175 ret = 0;
176 switch (addr >> 2) {
177 case 0 ... PT__END - 1:
178 tmp = get_reg(child, addr >> 2);
179 break;
180
181 case PT__END + 0:
182 tmp = child->mm->end_code - child->mm->start_code;
183 break;
184
185 case PT__END + 1:
186 tmp = child->mm->end_data - child->mm->start_data;
187 break;
188
189 case PT__END + 2:
190 tmp = child->mm->start_stack - child->mm->start_brk;
191 break;
192
193 case PT__END + 3:
194 tmp = child->mm->start_code;
195 break;
196
197 case PT__END + 4:
198 tmp = child->mm->start_stack;
199 break;
200
201 default:
202 ret = -EIO;
203 break;
204 }
205
206 if (ret == 0)
207 ret = put_user(tmp, (unsigned long *) data);
208 break;
209 }
210
211 /* when I and D space are separate, this will have to be fixed. */
212 case PTRACE_POKETEXT: /* write the word at location addr. */
213 case PTRACE_POKEDATA:
214 ret = -EIO;
215 if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
216 break;
217 if (access_process_vm(child, addr, &data, sizeof(data), 1) != sizeof(data))
218 break;
219 ret = 0;
220 break;
221
222 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
223 ret = -EIO;
224 if ((addr & 3) || addr < 0)
225 break;
226
227 ret = 0;
228 switch (addr >> 2) {
229 case 0 ... PT__END-1:
230 ret = put_reg(child, addr >> 2, data);
231 break;
232
233 default:
234 ret = -EIO;
235 break;
236 }
237 break;
238
239 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
240 case PTRACE_CONT: /* restart after signal. */
241 ret = -EIO;
242 if ((unsigned long) data > _NSIG)
243 break;
244 if (request == PTRACE_SYSCALL)
245 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
246 else
247 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
248 child->exit_code = data;
249 ptrace_disable(child);
250 wake_up_process(child);
251 ret = 0;
252 break;
253
254 /* make the child exit. Best I can do is send it a sigkill.
255 * perhaps it should be put in the status that it wants to
256 * exit.
257 */
258 case PTRACE_KILL:
259 ret = 0;
260 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
261 break;
262 child->exit_code = SIGKILL;
263 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
264 ptrace_disable(child);
265 wake_up_process(child);
266 break;
267
268 case PTRACE_SINGLESTEP: /* set the trap flag. */
269 ret = -EIO;
270 if ((unsigned long) data > _NSIG)
271 break;
272 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
273 ptrace_enable(child);
274 child->exit_code = data;
275 wake_up_process(child);
276 ret = 0;
277 break;
278
279 case PTRACE_DETACH: /* detach a process that was attached. */
280 ret = ptrace_detach(child, data);
281 break;
282
283 case PTRACE_GETREGS: { /* Get all integer regs from the child. */
284 int i;
285 for (i = 0; i < PT__GPEND; i++) {
286 tmp = get_reg(child, i);
287 if (put_user(tmp, (unsigned long *) data)) {
288 ret = -EFAULT;
289 break;
290 }
291 data += sizeof(long);
292 }
293 ret = 0;
294 break;
295 }
296
297 case PTRACE_SETREGS: { /* Set all integer regs in the child. */
298 int i;
299 for (i = 0; i < PT__GPEND; i++) {
300 if (get_user(tmp, (unsigned long *) data)) {
301 ret = -EFAULT;
302 break;
303 }
304 put_reg(child, i, tmp);
305 data += sizeof(long);
306 }
307 ret = 0;
308 break;
309 }
310
311 case PTRACE_GETFPREGS: { /* Get the child FP/Media state. */
312 ret = 0;
313 if (copy_to_user((void *) data,
314 &child->thread.user->f,
315 sizeof(child->thread.user->f)))
316 ret = -EFAULT;
317 break;
318 }
319
320 case PTRACE_SETFPREGS: { /* Set the child FP/Media state. */
321 ret = 0;
322 if (copy_from_user(&child->thread.user->f,
323 (void *) data,
324 sizeof(child->thread.user->f)))
325 ret = -EFAULT;
326 break;
327 }
328
329 case PTRACE_GETFDPIC:
330 tmp = 0;
331 switch (addr) {
332 case PTRACE_GETFDPIC_EXEC:
333 tmp = child->mm->context.exec_fdpic_loadmap;
334 break;
335 case PTRACE_GETFDPIC_INTERP:
336 tmp = child->mm->context.interp_fdpic_loadmap;
337 break;
338 default:
339 break;
340 }
341
342 ret = 0;
343 if (put_user(tmp, (unsigned long *) data)) {
344 ret = -EFAULT;
345 break;
346 }
347 break;
348
349 default:
350 ret = -EIO;
351 break;
352 }
353out_tsk:
354 put_task_struct(child);
355out:
356 unlock_kernel();
357 return ret;
358}
359
360int __nongprelbss kstrace;
361
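/* Table of syscall names and argument-format masks used by the (normally
 * compiled-out) kstrace code in do_syscall_trace() below.  Each nibble of
 * argmask, starting from the least significant, says how the corresponding
 * argument register (gr8, gr9, ...) should be printed: 1=%ld, 2=%lo, 3=%lx,
 * 4=%p, 5=quoted string; a zero nibble ends the list, and an argmask of
 * 0xffffff means the syscall is printed with no arguments.  For example,
 * read(fd, buf, count) has argmask 0x000141: fd as %ld, buf as %p and count
 * as %ld.
 */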
362static const struct {
363 const char *name;
364 unsigned argmask;
365} __syscall_name_table[NR_syscalls] = {
366 [0] = { "restart_syscall" },
367 [1] = { "exit", 0x000001 },
368 [2] = { "fork", 0xffffff },
369 [3] = { "read", 0x000141 },
370 [4] = { "write", 0x000141 },
371 [5] = { "open", 0x000235 },
372 [6] = { "close", 0x000001 },
373 [7] = { "waitpid", 0x000141 },
374 [8] = { "creat", 0x000025 },
375 [9] = { "link", 0x000055 },
376 [10] = { "unlink", 0x000005 },
377 [11] = { "execve", 0x000445 },
378 [12] = { "chdir", 0x000005 },
379 [13] = { "time", 0x000004 },
380 [14] = { "mknod", 0x000325 },
381 [15] = { "chmod", 0x000025 },
382 [16] = { "lchown", 0x000025 },
383 [17] = { "break" },
384 [18] = { "oldstat", 0x000045 },
385 [19] = { "lseek", 0x000131 },
386 [20] = { "getpid", 0xffffff },
387 [21] = { "mount", 0x043555 },
388 [22] = { "umount", 0x000005 },
389 [23] = { "setuid", 0x000001 },
390 [24] = { "getuid", 0xffffff },
391 [25] = { "stime", 0x000004 },
392 [26] = { "ptrace", 0x004413 },
393 [27] = { "alarm", 0x000001 },
394 [28] = { "oldfstat", 0x000041 },
395 [29] = { "pause", 0xffffff },
396 [30] = { "utime", 0x000045 },
397 [31] = { "stty" },
398 [32] = { "gtty" },
399 [33] = { "access", 0x000025 },
400 [34] = { "nice", 0x000001 },
401 [35] = { "ftime" },
402 [36] = { "sync", 0xffffff },
403 [37] = { "kill", 0x000011 },
404 [38] = { "rename", 0x000055 },
405 [39] = { "mkdir", 0x000025 },
406 [40] = { "rmdir", 0x000005 },
407 [41] = { "dup", 0x000001 },
408 [42] = { "pipe", 0x000004 },
409 [43] = { "times", 0x000004 },
410 [44] = { "prof" },
411 [45] = { "brk", 0x000004 },
412 [46] = { "setgid", 0x000001 },
413 [47] = { "getgid", 0xffffff },
414 [48] = { "signal", 0x000041 },
415 [49] = { "geteuid", 0xffffff },
416 [50] = { "getegid", 0xffffff },
417 [51] = { "acct", 0x000005 },
418 [52] = { "umount2", 0x000035 },
419 [53] = { "lock" },
420 [54] = { "ioctl", 0x000331 },
421 [55] = { "fcntl", 0x000331 },
422 [56] = { "mpx" },
423 [57] = { "setpgid", 0x000011 },
424 [58] = { "ulimit" },
425 [60] = { "umask", 0x000002 },
426 [61] = { "chroot", 0x000005 },
427 [62] = { "ustat", 0x000043 },
428 [63] = { "dup2", 0x000011 },
429 [64] = { "getppid", 0xffffff },
430 [65] = { "getpgrp", 0xffffff },
431 [66] = { "setsid", 0xffffff },
432 [67] = { "sigaction" },
433 [68] = { "sgetmask" },
434 [69] = { "ssetmask" },
435 [70] = { "setreuid" },
436 [71] = { "setregid" },
437 [72] = { "sigsuspend" },
438 [73] = { "sigpending" },
439 [74] = { "sethostname" },
440 [75] = { "setrlimit" },
441 [76] = { "getrlimit" },
442 [77] = { "getrusage" },
443 [78] = { "gettimeofday" },
444 [79] = { "settimeofday" },
445 [80] = { "getgroups" },
446 [81] = { "setgroups" },
447 [82] = { "select" },
448 [83] = { "symlink" },
449 [84] = { "oldlstat" },
450 [85] = { "readlink" },
451 [86] = { "uselib" },
452 [87] = { "swapon" },
453 [88] = { "reboot" },
454 [89] = { "readdir" },
455 [91] = { "munmap", 0x000034 },
456 [92] = { "truncate" },
457 [93] = { "ftruncate" },
458 [94] = { "fchmod" },
459 [95] = { "fchown" },
460 [96] = { "getpriority" },
461 [97] = { "setpriority" },
462 [99] = { "statfs" },
463 [100] = { "fstatfs" },
464 [102] = { "socketcall" },
465 [103] = { "syslog" },
466 [104] = { "setitimer" },
467 [105] = { "getitimer" },
468 [106] = { "stat" },
469 [107] = { "lstat" },
470 [108] = { "fstat" },
471 [111] = { "vhangup" },
472 [114] = { "wait4" },
473 [115] = { "swapoff" },
474 [116] = { "sysinfo" },
475 [117] = { "ipc" },
476 [118] = { "fsync" },
477 [119] = { "sigreturn" },
478 [120] = { "clone" },
479 [121] = { "setdomainname" },
480 [122] = { "uname" },
481 [123] = { "modify_ldt" },
482	[123] = { "cacheflush" }, /* duplicate of index 123 above; this later initializer takes effect */
483 [124] = { "adjtimex" },
484 [125] = { "mprotect" },
485 [126] = { "sigprocmask" },
486 [127] = { "create_module" },
487 [128] = { "init_module" },
488 [129] = { "delete_module" },
489 [130] = { "get_kernel_syms" },
490 [131] = { "quotactl" },
491 [132] = { "getpgid" },
492 [133] = { "fchdir" },
493 [134] = { "bdflush" },
494 [135] = { "sysfs" },
495 [136] = { "personality" },
496 [137] = { "afs_syscall" },
497 [138] = { "setfsuid" },
498 [139] = { "setfsgid" },
499 [140] = { "_llseek", 0x014331 },
500 [141] = { "getdents" },
501 [142] = { "_newselect", 0x000141 },
502 [143] = { "flock" },
503 [144] = { "msync" },
504 [145] = { "readv" },
505 [146] = { "writev" },
506 [147] = { "getsid", 0x000001 },
507 [148] = { "fdatasync", 0x000001 },
508 [149] = { "_sysctl", 0x000004 },
509 [150] = { "mlock" },
510 [151] = { "munlock" },
511 [152] = { "mlockall" },
512 [153] = { "munlockall" },
513 [154] = { "sched_setparam" },
514 [155] = { "sched_getparam" },
515 [156] = { "sched_setscheduler" },
516 [157] = { "sched_getscheduler" },
517 [158] = { "sched_yield" },
518 [159] = { "sched_get_priority_max" },
519 [160] = { "sched_get_priority_min" },
520 [161] = { "sched_rr_get_interval" },
521 [162] = { "nanosleep", 0x000044 },
522 [163] = { "mremap" },
523 [164] = { "setresuid" },
524 [165] = { "getresuid" },
525 [166] = { "vm86" },
526 [167] = { "query_module" },
527 [168] = { "poll" },
528 [169] = { "nfsservctl" },
529 [170] = { "setresgid" },
530 [171] = { "getresgid" },
531 [172] = { "prctl", 0x333331 },
532 [173] = { "rt_sigreturn", 0xffffff },
533 [174] = { "rt_sigaction", 0x001441 },
534 [175] = { "rt_sigprocmask", 0x001441 },
535 [176] = { "rt_sigpending", 0x000014 },
536 [177] = { "rt_sigtimedwait", 0x001444 },
537 [178] = { "rt_sigqueueinfo", 0x000411 },
538 [179] = { "rt_sigsuspend", 0x000014 },
539 [180] = { "pread", 0x003341 },
540 [181] = { "pwrite", 0x003341 },
541 [182] = { "chown", 0x000115 },
542 [183] = { "getcwd" },
543 [184] = { "capget" },
544 [185] = { "capset" },
545 [186] = { "sigaltstack" },
546 [187] = { "sendfile" },
547 [188] = { "getpmsg" },
548 [189] = { "putpmsg" },
549 [190] = { "vfork", 0xffffff },
550 [191] = { "ugetrlimit" },
551 [192] = { "mmap2", 0x313314 },
552 [193] = { "truncate64" },
553 [194] = { "ftruncate64" },
554 [195] = { "stat64", 0x000045 },
555 [196] = { "lstat64", 0x000045 },
556 [197] = { "fstat64", 0x000041 },
557 [198] = { "lchown32" },
558 [199] = { "getuid32", 0xffffff },
559 [200] = { "getgid32", 0xffffff },
560 [201] = { "geteuid32", 0xffffff },
561 [202] = { "getegid32", 0xffffff },
562 [203] = { "setreuid32" },
563 [204] = { "setregid32" },
564 [205] = { "getgroups32" },
565 [206] = { "setgroups32" },
566 [207] = { "fchown32" },
567 [208] = { "setresuid32" },
568 [209] = { "getresuid32" },
569 [210] = { "setresgid32" },
570 [211] = { "getresgid32" },
571 [212] = { "chown32" },
572 [213] = { "setuid32" },
573 [214] = { "setgid32" },
574 [215] = { "setfsuid32" },
575 [216] = { "setfsgid32" },
576 [217] = { "pivot_root" },
577 [218] = { "mincore" },
578 [219] = { "madvise" },
579 [220] = { "getdents64" },
580 [221] = { "fcntl64" },
581 [223] = { "security" },
582 [224] = { "gettid" },
583 [225] = { "readahead" },
584 [226] = { "setxattr" },
585 [227] = { "lsetxattr" },
586 [228] = { "fsetxattr" },
587 [229] = { "getxattr" },
588 [230] = { "lgetxattr" },
589 [231] = { "fgetxattr" },
590 [232] = { "listxattr" },
591 [233] = { "llistxattr" },
592 [234] = { "flistxattr" },
593 [235] = { "removexattr" },
594 [236] = { "lremovexattr" },
595 [237] = { "fremovexattr" },
596 [238] = { "tkill" },
597 [239] = { "sendfile64" },
598 [240] = { "futex" },
599 [241] = { "sched_setaffinity" },
600 [242] = { "sched_getaffinity" },
601 [243] = { "set_thread_area" },
602 [244] = { "get_thread_area" },
603 [245] = { "io_setup" },
604 [246] = { "io_destroy" },
605 [247] = { "io_getevents" },
606 [248] = { "io_submit" },
607 [249] = { "io_cancel" },
608 [250] = { "fadvise64" },
609 [252] = { "exit_group", 0x000001 },
610 [253] = { "lookup_dcookie" },
611 [254] = { "epoll_create" },
612 [255] = { "epoll_ctl" },
613 [256] = { "epoll_wait" },
614 [257] = { "remap_file_pages" },
615 [258] = { "set_tid_address" },
616 [259] = { "timer_create" },
617 [260] = { "timer_settime" },
618 [261] = { "timer_gettime" },
619 [262] = { "timer_getoverrun" },
620 [263] = { "timer_delete" },
621 [264] = { "clock_settime" },
622 [265] = { "clock_gettime" },
623 [266] = { "clock_getres" },
624 [267] = { "clock_nanosleep" },
625 [268] = { "statfs64" },
626 [269] = { "fstatfs64" },
627 [270] = { "tgkill" },
628 [271] = { "utimes" },
629 [272] = { "fadvise64_64" },
630 [273] = { "vserver" },
631 [274] = { "mbind" },
632 [275] = { "get_mempolicy" },
633 [276] = { "set_mempolicy" },
634 [277] = { "mq_open" },
635 [278] = { "mq_unlink" },
636 [279] = { "mq_timedsend" },
637 [280] = { "mq_timedreceive" },
638 [281] = { "mq_notify" },
639 [282] = { "mq_getsetattr" },
640 [283] = { "sys_kexec_load" },
641};
642
643asmlinkage void do_syscall_trace(int leaving)
644{
645#if 0
646 unsigned long *argp;
647 const char *name;
648 unsigned argmask;
649 char buffer[16];
650
651 if (!kstrace)
652 return;
653
654 if (!current->mm)
655 return;
656
657 if (__frame->gr7 == __NR_close)
658 return;
659
660#if 0
661 if (__frame->gr7 != __NR_mmap2 &&
662 __frame->gr7 != __NR_vfork &&
663 __frame->gr7 != __NR_execve &&
664 __frame->gr7 != __NR_exit)
665 return;
666#endif
667
668 argmask = 0;
669 name = NULL;
670 if (__frame->gr7 < NR_syscalls) {
671 name = __syscall_name_table[__frame->gr7].name;
672 argmask = __syscall_name_table[__frame->gr7].argmask;
673 }
674 if (!name) {
675 sprintf(buffer, "sys_%lx", __frame->gr7);
676 name = buffer;
677 }
678
679 if (!leaving) {
680 if (!argmask) {
681 printk(KERN_CRIT "[%d] %s(%lx,%lx,%lx,%lx,%lx,%lx)\n",
682 current->pid,
683 name,
684 __frame->gr8,
685 __frame->gr9,
686 __frame->gr10,
687 __frame->gr11,
688 __frame->gr12,
689 __frame->gr13);
690 }
691 else if (argmask == 0xffffff) {
692 printk(KERN_CRIT "[%d] %s()\n",
693 current->pid,
694 name);
695 }
696 else {
697 printk(KERN_CRIT "[%d] %s(",
698 current->pid,
699 name);
700
701 argp = &__frame->gr8;
702
703 do {
704 switch (argmask & 0xf) {
705 case 1:
706 printk("%ld", (long) *argp);
707 break;
708 case 2:
709 printk("%lo", *argp);
710 break;
711 case 3:
712 printk("%lx", *argp);
713 break;
714 case 4:
715 printk("%p", (void *) *argp);
716 break;
717 case 5:
718 printk("\"%s\"", (char *) *argp);
719 break;
720 }
721
722 argp++;
723 argmask >>= 4;
724 if (argmask)
725 printk(",");
726
727 } while (argmask);
728
729 printk(")\n");
730 }
731 }
732 else {
733 if ((int)__frame->gr8 > -4096 && (int)__frame->gr8 < 4096)
734 printk(KERN_CRIT "[%d] %s() = %ld\n", current->pid, name, __frame->gr8);
735 else
736 printk(KERN_CRIT "[%d] %s() = %lx\n", current->pid, name, __frame->gr8);
737 }
738 return;
739#endif
740
741 if (!test_thread_flag(TIF_SYSCALL_TRACE))
742 return;
743
744 if (!(current->ptrace & PT_PTRACED))
745 return;
746
747 /* we need to indicate entry or exit to strace */
748 if (leaving)
749 __frame->__status |= REG__STATUS_SYSC_EXIT;
750 else
751 __frame->__status |= REG__STATUS_SYSC_ENTRY;
752
753 ptrace_notify(SIGTRAP);
754
755 /*
756 * this isn't the same as continuing with a signal, but it will do
757 * for normal use. strace only continues with a signal if the
758 * stopping signal is not SIGTRAP. -brl
759 */
760 if (current->exit_code) {
761 send_sig(current->exit_code, current, 1);
762 current->exit_code = 0;
763 }
764}
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
new file mode 100644
index 000000000000..5cba9c1f2b3d
--- /dev/null
+++ b/arch/frv/kernel/semaphore.c
@@ -0,0 +1,156 @@
1/* semaphore.c: FR-V semaphores
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from lib/rwsem-spinlock.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/sched.h>
15#include <linux/module.h>
16#include <asm/semaphore.h>
17
18struct sem_waiter {
19 struct list_head list;
20 struct task_struct *task;
21};
22
23#if SEM_DEBUG
24void semtrace(struct semaphore *sem, const char *str)
25{
26 if (sem->debug)
27 printk("[%d] %s({%d,%d})\n",
28 current->pid,
29 str,
30 sem->counter,
31 list_empty(&sem->wait_list) ? 0 : 1);
32}
33#else
34#define semtrace(SEM,STR) do { } while(0)
35#endif
36
37/*
38 * wait for a token to be granted from a semaphore
39 * - entered with lock held and interrupts disabled
40 */
41void __down(struct semaphore *sem, unsigned long flags)
42{
43 struct task_struct *tsk = current;
44 struct sem_waiter waiter;
45
46 semtrace(sem, "Entering __down");
47
48 /* set up my own style of waitqueue */
49 waiter.task = tsk;
50 get_task_struct(tsk);
51
52 list_add_tail(&waiter.list, &sem->wait_list);
53
54 /* we don't need to touch the semaphore struct anymore */
55 spin_unlock_irqrestore(&sem->wait_lock, flags);
56
57 /* wait to be given the semaphore */
58 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
59
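	/* __up() deletes us from the wait list when it grants us the token,
	 * so an empty waiter.list means we now own the semaphore */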
60 for (;;) {
61 if (list_empty(&waiter.list))
62 break;
63 schedule();
64 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
65 }
66
67 tsk->state = TASK_RUNNING;
68 semtrace(sem, "Leaving __down");
69}
70
71EXPORT_SYMBOL(__down);
72
73/*
74 * interruptibly wait for a token to be granted from a semaphore
75 * - entered with lock held and interrupts disabled
76 */
77int __down_interruptible(struct semaphore *sem, unsigned long flags)
78{
79 struct task_struct *tsk = current;
80 struct sem_waiter waiter;
81 int ret;
82
83 semtrace(sem,"Entering __down_interruptible");
84
85 /* set up my own style of waitqueue */
86 waiter.task = tsk;
87 get_task_struct(tsk);
88
89 list_add_tail(&waiter.list, &sem->wait_list);
90
91 /* we don't need to touch the semaphore struct anymore */
92 set_task_state(tsk, TASK_INTERRUPTIBLE);
93
94 spin_unlock_irqrestore(&sem->wait_lock, flags);
95
96 /* wait to be given the semaphore */
97 ret = 0;
98 for (;;) {
99 if (list_empty(&waiter.list))
100 break;
101 if (unlikely(signal_pending(current)))
102 goto interrupted;
103 schedule();
104 set_task_state(tsk, TASK_INTERRUPTIBLE);
105 }
106
107 out:
108 tsk->state = TASK_RUNNING;
109 semtrace(sem, "Leaving __down_interruptible");
110 return ret;
111
112 interrupted:
113 spin_lock_irqsave(&sem->wait_lock, flags);
114
115 if (!list_empty(&waiter.list)) {
116 list_del(&waiter.list);
117 ret = -EINTR;
118 }
119
120 spin_unlock_irqrestore(&sem->wait_lock, flags);
121 if (ret == -EINTR)
122 put_task_struct(current);
123 goto out;
124}
125
126EXPORT_SYMBOL(__down_interruptible);
127
128/*
129 * release a single token back to a semaphore
130 * - entered with lock held and interrupts disabled
131 */
132void __up(struct semaphore *sem)
133{
134 struct task_struct *tsk;
135 struct sem_waiter *waiter;
136
137 semtrace(sem,"Entering __up");
138
139 /* grant the token to the process at the front of the queue */
140 waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
141
142 /* We must be careful not to touch 'waiter' after we set ->task = NULL.
143 * It is allocated on the waiter's stack and may become invalid at
144 * any time after that point (due to a wakeup from another source).
145 */
146 list_del_init(&waiter->list);
147 tsk = waiter->task;
148 mb();
149 waiter->task = NULL;
150 wake_up_process(tsk);
151 put_task_struct(tsk);
152
153 semtrace(sem,"Leaving __up");
154}
155
156EXPORT_SYMBOL(__up);
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
new file mode 100644
index 000000000000..ef6865f0b979
--- /dev/null
+++ b/arch/frv/kernel/setup.c
@@ -0,0 +1,1194 @@
1/* setup.c: FRV specific setup
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/setup.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/version.h>
15#include <linux/kernel.h>
16#include <linux/sched.h>
17#include <linux/delay.h>
18#include <linux/interrupt.h>
19#include <linux/fs.h>
20#include <linux/mm.h>
21#include <linux/fb.h>
22#include <linux/console.h>
23#include <linux/genhd.h>
24#include <linux/errno.h>
25#include <linux/string.h>
26#include <linux/major.h>
27#include <linux/bootmem.h>
28#include <linux/highmem.h>
29#include <linux/seq_file.h>
30#include <linux/serial.h>
31#include <linux/serial_core.h>
32#include <linux/serial_reg.h>
33
34#include <asm/setup.h>
35#include <asm/serial.h>
36#include <asm/irq.h>
37#include <asm/sections.h>
38#include <asm/pgalloc.h>
39#include <asm/busctl-regs.h>
40#include <asm/serial-regs.h>
41#include <asm/timer-regs.h>
42#include <asm/irc-regs.h>
43#include <asm/spr-regs.h>
44#include <asm/mb-regs.h>
45#include <asm/mb93493-regs.h>
46#include <asm/gdb-stub.h>
47#include <asm/irq-routing.h>
48#include <asm/io.h>
49
50#ifdef CONFIG_BLK_DEV_INITRD
51#include <linux/blk.h>
52#include <asm/pgtable.h>
53#endif
54
55#include "local.h"
56
57#ifdef CONFIG_MB93090_MB00
58static void __init mb93090_display(void);
59#endif
60#ifdef CONFIG_MMU
61static void __init setup_linux_memory(void);
62#else
63static void __init setup_uclinux_memory(void);
64#endif
65
66#ifdef CONFIG_CONSOLE
67extern struct consw *conswitchp;
68#endif
69
70#ifdef CONFIG_MB93090_MB00
71static char __initdata mb93090_banner[] = "FJ/RH FR-V Linux";
72static char __initdata mb93090_version[] = UTS_RELEASE;
73
74int __nongprelbss mb93090_mb00_detected;
75#endif
76
77const char __frv_unknown_system[] = "unknown";
78const char __frv_mb93091_cb10[] = "mb93091-cb10";
79const char __frv_mb93091_cb11[] = "mb93091-cb11";
80const char __frv_mb93091_cb30[] = "mb93091-cb30";
81const char __frv_mb93091_cb41[] = "mb93091-cb41";
82const char __frv_mb93091_cb60[] = "mb93091-cb60";
83const char __frv_mb93091_cb70[] = "mb93091-cb70";
84const char __frv_mb93091_cb451[] = "mb93091-cb451";
85const char __frv_mb93090_mb00[] = "mb93090-mb00";
86
87const char __frv_mb93493[] = "mb93493";
88
89const char __frv_mb93093[] = "mb93093";
90
91static const char *__nongprelbss cpu_series;
92static const char *__nongprelbss cpu_core;
93static const char *__nongprelbss cpu_silicon;
94static const char *__nongprelbss cpu_mmu;
95static const char *__nongprelbss cpu_system;
96static const char *__nongprelbss cpu_board1;
97static const char *__nongprelbss cpu_board2;
98
99static unsigned long __nongprelbss cpu_psr_all;
100static unsigned long __nongprelbss cpu_hsr0_all;
101
102unsigned long __nongprelbss pdm_suspend_mode;
103
104unsigned long __nongprelbss rom_length;
105unsigned long __nongprelbss memory_start;
106unsigned long __nongprelbss memory_end;
107
108unsigned long __nongprelbss dma_coherent_mem_start;
109unsigned long __nongprelbss dma_coherent_mem_end;
110
111unsigned long __initdata __sdram_old_base;
112unsigned long __initdata num_mappedpages;
113
114struct cpuinfo_frv __nongprelbss boot_cpu_data;
115
116char command_line[COMMAND_LINE_SIZE];
117char __initdata redboot_command_line[COMMAND_LINE_SIZE];
118
119#ifdef CONFIG_PM
120#define __pminit
121#define __pminitdata
122#else
123#define __pminit __init
124#define __pminitdata __initdata
125#endif
126
127struct clock_cmode {
128 uint8_t xbus, sdram, corebus, core, dsu;
129};
130
131#define _frac(N,D) ((N)<<4 | (D))
132#define _x0_16 _frac(1,6)
133#define _x0_25 _frac(1,4)
134#define _x0_33 _frac(1,3)
135#define _x0_375 _frac(3,8)
136#define _x0_5 _frac(1,2)
137#define _x0_66 _frac(2,3)
138#define _x0_75 _frac(3,4)
139#define _x1 _frac(1,1)
140#define _x1_5 _frac(3,2)
141#define _x2 _frac(2,1)
142#define _x3 _frac(3,1)
143#define _x4 _frac(4,1)
144#define _x4_5 _frac(9,2)
145#define _x6 _frac(6,1)
146#define _x8 _frac(8,1)
147#define _x9 _frac(9,1)
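/* each ratio above is packed as (numerator << 4 | denominator), so, for
 * example, _x1_5 == _frac(3,2) == 0x32 and _x0_25 == _frac(1,4) == 0x14; the
 * CLOCK() macro in determine_clocks() unpacks the two nibbles again
 */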
148
149int __nongprelbss clock_p0_current;
150int __nongprelbss clock_cm_current;
151int __nongprelbss clock_cmode_current;
152#ifdef CONFIG_PM
153int __nongprelbss clock_cmodes_permitted;
154unsigned long __nongprelbss clock_bits_settable;
155#endif
156
157static struct clock_cmode __pminitdata undef_clock_cmode = { _x1, _x1, _x1, _x1, _x1 };
158
159static struct clock_cmode __pminitdata clock_cmodes_fr401_fr403[16] = {
160 [4] = { _x1, _x1, _x2, _x2, _x0_25 },
161 [5] = { _x1, _x2, _x4, _x4, _x0_5 },
162 [8] = { _x1, _x1, _x1, _x2, _x0_25 },
163 [9] = { _x1, _x2, _x2, _x4, _x0_5 },
164 [11] = { _x1, _x4, _x4, _x8, _x1 },
165 [12] = { _x1, _x1, _x2, _x4, _x0_5 },
166 [13] = { _x1, _x2, _x4, _x8, _x1 },
167};
168
169static struct clock_cmode __pminitdata clock_cmodes_fr405[16] = {
170 [0] = { _x1, _x1, _x1, _x1, _x0_5 },
171 [1] = { _x1, _x1, _x1, _x3, _x0_25 },
172 [2] = { _x1, _x1, _x2, _x6, _x0_5 },
173 [3] = { _x1, _x2, _x2, _x6, _x0_5 },
174 [4] = { _x1, _x1, _x2, _x2, _x0_16 },
175 [8] = { _x1, _x1, _x1, _x2, _x0_16 },
176 [9] = { _x1, _x2, _x2, _x4, _x0_33 },
177 [12] = { _x1, _x1, _x2, _x4, _x0_33 },
178 [14] = { _x1, _x3, _x3, _x9, _x0_75 },
179 [15] = { _x1, _x1_5, _x1_5, _x4_5, _x0_375 },
180
181#define CLOCK_CMODES_PERMITTED_FR405 0xd31f
182};
183
184static struct clock_cmode __pminitdata clock_cmodes_fr555[16] = {
185 [0] = { _x1, _x2, _x2, _x4, _x0_33 },
186 [1] = { _x1, _x3, _x3, _x6, _x0_5 },
187 [2] = { _x1, _x2, _x4, _x8, _x0_66 },
188 [3] = { _x1, _x1_5, _x3, _x6, _x0_5 },
189 [4] = { _x1, _x3, _x3, _x9, _x0_75 },
190 [5] = { _x1, _x2, _x2, _x6, _x0_5 },
191 [6] = { _x1, _x1_5, _x1_5, _x4_5, _x0_375 },
192};
193
194static const struct clock_cmode __pminitdata *clock_cmodes;
195static int __pminitdata clock_doubled;
196
197static struct uart_port __initdata __frv_uart0 = {
198 .uartclk = 0,
199 .membase = (char *) UART0_BASE,
200 .irq = IRQ_CPU_UART0,
201 .regshift = 3,
202 .iotype = UPIO_MEM,
203 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
204};
205
206static struct uart_port __initdata __frv_uart1 = {
207 .uartclk = 0,
208 .membase = (char *) UART1_BASE,
209 .irq = IRQ_CPU_UART1,
210 .regshift = 3,
211 .iotype = UPIO_MEM,
212 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
213};
214
215#if 0
216static void __init printk_xampr(unsigned long ampr, unsigned long amlr, char i_d, int n)
217{
218 unsigned long phys, virt, cxn, size;
219
220#ifdef CONFIG_MMU
221 virt = amlr & 0xffffc000;
222 cxn = amlr & 0x3fff;
223#else
224 virt = ampr & 0xffffc000;
225 cxn = 0;
226#endif
227 phys = ampr & xAMPRx_PPFN;
228 size = 1 << (((ampr & xAMPRx_SS) >> 4) + 17);
229
230 printk("%cAMPR%d: va %08lx-%08lx [pa %08lx] %c%c%c%c [cxn:%04lx]\n",
231 i_d, n,
232 virt, virt + size - 1,
233 phys,
234 ampr & xAMPRx_S ? 'S' : '-',
235 ampr & xAMPRx_C ? 'C' : '-',
236 ampr & DAMPRx_WP ? 'W' : '-',
237 ampr & xAMPRx_V ? 'V' : '-',
238 cxn
239 );
240}
241#endif
242
243/*****************************************************************************/
244/*
245 * dump the memory map
246 */
247static void __init dump_memory_map(void)
248{
249
250#if 0
251 /* dump the protection map */
252 printk_xampr(__get_IAMPR(0), __get_IAMLR(0), 'I', 0);
253 printk_xampr(__get_IAMPR(1), __get_IAMLR(1), 'I', 1);
254 printk_xampr(__get_IAMPR(2), __get_IAMLR(2), 'I', 2);
255 printk_xampr(__get_IAMPR(3), __get_IAMLR(3), 'I', 3);
256 printk_xampr(__get_IAMPR(4), __get_IAMLR(4), 'I', 4);
257 printk_xampr(__get_IAMPR(5), __get_IAMLR(5), 'I', 5);
258 printk_xampr(__get_IAMPR(6), __get_IAMLR(6), 'I', 6);
259 printk_xampr(__get_IAMPR(7), __get_IAMLR(7), 'I', 7);
260 printk_xampr(__get_IAMPR(8), __get_IAMLR(8), 'I', 8);
261 printk_xampr(__get_IAMPR(9), __get_IAMLR(9), 'i', 9);
262 printk_xampr(__get_IAMPR(10), __get_IAMLR(10), 'I', 10);
263 printk_xampr(__get_IAMPR(11), __get_IAMLR(11), 'I', 11);
264 printk_xampr(__get_IAMPR(12), __get_IAMLR(12), 'I', 12);
265 printk_xampr(__get_IAMPR(13), __get_IAMLR(13), 'I', 13);
266 printk_xampr(__get_IAMPR(14), __get_IAMLR(14), 'I', 14);
267 printk_xampr(__get_IAMPR(15), __get_IAMLR(15), 'I', 15);
268
269 printk_xampr(__get_DAMPR(0), __get_DAMLR(0), 'D', 0);
270 printk_xampr(__get_DAMPR(1), __get_DAMLR(1), 'D', 1);
271 printk_xampr(__get_DAMPR(2), __get_DAMLR(2), 'D', 2);
272 printk_xampr(__get_DAMPR(3), __get_DAMLR(3), 'D', 3);
273 printk_xampr(__get_DAMPR(4), __get_DAMLR(4), 'D', 4);
274 printk_xampr(__get_DAMPR(5), __get_DAMLR(5), 'D', 5);
275 printk_xampr(__get_DAMPR(6), __get_DAMLR(6), 'D', 6);
276 printk_xampr(__get_DAMPR(7), __get_DAMLR(7), 'D', 7);
277 printk_xampr(__get_DAMPR(8), __get_DAMLR(8), 'D', 8);
278 printk_xampr(__get_DAMPR(9), __get_DAMLR(9), 'D', 9);
279 printk_xampr(__get_DAMPR(10), __get_DAMLR(10), 'D', 10);
280 printk_xampr(__get_DAMPR(11), __get_DAMLR(11), 'D', 11);
281 printk_xampr(__get_DAMPR(12), __get_DAMLR(12), 'D', 12);
282 printk_xampr(__get_DAMPR(13), __get_DAMLR(13), 'D', 13);
283 printk_xampr(__get_DAMPR(14), __get_DAMLR(14), 'D', 14);
284 printk_xampr(__get_DAMPR(15), __get_DAMLR(15), 'D', 15);
285#endif
286
287#if 0
288 /* dump the bus controller registers */
289 printk("LGCR: %08lx\n", __get_LGCR());
290 printk("Master: %08lx-%08lx CR=%08lx\n",
291 __get_LEMBR(), __get_LEMBR() + __get_LEMAM(),
292 __get_LMAICR());
293
294 int loop;
295 for (loop = 1; loop <= 7; loop++) {
296 unsigned long lcr = __get_LCR(loop), lsbr = __get_LSBR(loop);
297 printk("CS#%d: %08lx-%08lx %c%c%c%c%c%c%c%c%c\n",
298 loop,
299 lsbr, lsbr + __get_LSAM(loop),
300 lcr & 0x80000000 ? 'r' : '-',
301 lcr & 0x40000000 ? 'w' : '-',
302 lcr & 0x08000000 ? 'b' : '-',
303 lcr & 0x04000000 ? 'B' : '-',
304 lcr & 0x02000000 ? 'C' : '-',
305 lcr & 0x01000000 ? 'D' : '-',
306 lcr & 0x00800000 ? 'W' : '-',
307 lcr & 0x00400000 ? 'R' : '-',
308 (lcr & 0x00030000) == 0x00000000 ? '4' :
309 (lcr & 0x00030000) == 0x00010000 ? '2' :
310 (lcr & 0x00030000) == 0x00020000 ? '1' :
311 '-'
312 );
313 }
314#endif
315
316#if 0
317 printk("\n");
318#endif
319} /* end dump_memory_map() */
320
321/*****************************************************************************/
322/*
323 * attempt to detect a VDK motherboard and DAV daughter board on an MB93091 system
324 */
325#ifdef CONFIG_MB93091_VDK
326static void __init detect_mb93091(void)
327{
328#ifdef CONFIG_MB93090_MB00
329 /* Detect CB70 without motherboard */
330 if (!(cpu_system == __frv_mb93091_cb70 && ((*(unsigned short *)0xffc00030) & 0x100))) {
331 cpu_board1 = __frv_mb93090_mb00;
332 mb93090_mb00_detected = 1;
333 }
334#endif
335
336#ifdef CONFIG_FUJITSU_MB93493
337 cpu_board2 = __frv_mb93493;
338#endif
339
340} /* end detect_mb93091() */
341#endif
342
343/*****************************************************************************/
344/*
345 * determine the CPU type and set appropriate parameters
346 *
347 * Family Series CPU Core Silicon Imple Vers
348 * ----------------------------------------------------------
349 * FR-V --+-> FR400 --+-> FR401 --+-> MB93401 02 00 [1]
350 * | | |
351 * | | +-> MB93401/A 02 01
352 * | | |
353 * | | +-> MB93403 02 02
354 * | |
355 * | +-> FR405 ----> MB93405 04 00
356 * |
357 * +-> FR450 ----> FR451 ----> MB93451 05 00
358 * |
359 * +-> FR500 ----> FR501 --+-> MB93501 01 01 [2]
360 * | |
361 * | +-> MB93501/A 01 02
362 * |
363 * +-> FR550 --+-> FR551 ----> MB93555 03 01
364 *
365 * [1] The MB93401 is an obsolete CPU replaced by the MB93401A
366 * [2] The MB93501 is an obsolete CPU replaced by the MB93501A
367 *
368 * Imple is PSR(Processor Status Register)[31:28].
369 * Vers is PSR(Processor Status Register)[27:24].
370 *
371 * A "Silicon" consists of CPU core and some on-chip peripherals.
372 */
373static void __init determine_cpu(void)
374{
375 unsigned long hsr0 = __get_HSR(0);
376 unsigned long psr = __get_PSR();
377
378 /* work out what selectable services the CPU supports */
379 __set_PSR(psr | PSR_EM | PSR_EF | PSR_CM | PSR_NEM);
380 cpu_psr_all = __get_PSR();
381 __set_PSR(psr);
382
383 __set_HSR(0, hsr0 | HSR0_GRLE | HSR0_GRHE | HSR0_FRLE | HSR0_FRHE);
384 cpu_hsr0_all = __get_HSR(0);
385 __set_HSR(0, hsr0);
386
387 /* derive other service specs from the CPU type */
388 cpu_series = "unknown";
389 cpu_core = "unknown";
390 cpu_silicon = "unknown";
391 cpu_mmu = "Prot";
392 cpu_system = __frv_unknown_system;
393 clock_cmodes = NULL;
394 clock_doubled = 0;
395#ifdef CONFIG_PM
396 clock_bits_settable = CLOCK_BIT_CM_H | CLOCK_BIT_CM_M | CLOCK_BIT_P0;
397#endif
398
399 switch (PSR_IMPLE(psr)) {
400 case PSR_IMPLE_FR401:
401 cpu_series = "fr400";
402 cpu_core = "fr401";
403 pdm_suspend_mode = HSR0_PDM_PLL_RUN;
404
405 switch (PSR_VERSION(psr)) {
406 case PSR_VERSION_FR401_MB93401:
407 cpu_silicon = "mb93401";
408 cpu_system = __frv_mb93091_cb10;
409 clock_cmodes = clock_cmodes_fr401_fr403;
410 clock_doubled = 1;
411 break;
412 case PSR_VERSION_FR401_MB93401A:
413 cpu_silicon = "mb93401/A";
414 cpu_system = __frv_mb93091_cb11;
415 clock_cmodes = clock_cmodes_fr401_fr403;
416 break;
417 case PSR_VERSION_FR401_MB93403:
418 cpu_silicon = "mb93403";
419#ifndef CONFIG_MB93093_PDK
420 cpu_system = __frv_mb93091_cb30;
421#else
422 cpu_system = __frv_mb93093;
423#endif
424 clock_cmodes = clock_cmodes_fr401_fr403;
425 break;
426 default:
427 break;
428 }
429 break;
430
431 case PSR_IMPLE_FR405:
432 cpu_series = "fr400";
433 cpu_core = "fr405";
434 pdm_suspend_mode = HSR0_PDM_PLL_STOP;
435
436 switch (PSR_VERSION(psr)) {
437 case PSR_VERSION_FR405_MB93405:
438 cpu_silicon = "mb93405";
439 cpu_system = __frv_mb93091_cb60;
440 clock_cmodes = clock_cmodes_fr405;
441#ifdef CONFIG_PM
442 clock_bits_settable |= CLOCK_BIT_CMODE;
443 clock_cmodes_permitted = CLOCK_CMODES_PERMITTED_FR405;
444#endif
445
446 /* the FPGA on the CB70 has extra registers
447 * - it has 0x0046 in the VDK_ID FPGA register at 0x1a0, which is
448 * how we tell the difference between it and a CB60
449 */
450 if (*(volatile unsigned short *) 0xffc001a0 == 0x0046)
451 cpu_system = __frv_mb93091_cb70;
452 break;
453 default:
454 break;
455 }
456 break;
457
458 case PSR_IMPLE_FR451:
459 cpu_series = "fr450";
460 cpu_core = "fr451";
461 pdm_suspend_mode = HSR0_PDM_PLL_STOP;
462#ifdef CONFIG_PM
463 clock_bits_settable |= CLOCK_BIT_CMODE;
464 clock_cmodes_permitted = CLOCK_CMODES_PERMITTED_FR405;
465#endif
466 switch (PSR_VERSION(psr)) {
467 case PSR_VERSION_FR451_MB93451:
468 cpu_silicon = "mb93451";
469 cpu_mmu = "Prot, SAT, xSAT, DAT";
470 cpu_system = __frv_mb93091_cb451;
471 clock_cmodes = clock_cmodes_fr405;
472 break;
473 default:
474 break;
475 }
476 break;
477
478 case PSR_IMPLE_FR501:
479 cpu_series = "fr500";
480 cpu_core = "fr501";
481 pdm_suspend_mode = HSR0_PDM_PLL_STOP;
482
483 switch (PSR_VERSION(psr)) {
484 case PSR_VERSION_FR501_MB93501: cpu_silicon = "mb93501"; break;
485 case PSR_VERSION_FR501_MB93501A: cpu_silicon = "mb93501/A"; break;
486 default:
487 break;
488 }
489 break;
490
491 case PSR_IMPLE_FR551:
492 cpu_series = "fr550";
493 cpu_core = "fr551";
494 pdm_suspend_mode = HSR0_PDM_PLL_RUN;
495
496 switch (PSR_VERSION(psr)) {
497 case PSR_VERSION_FR551_MB93555:
498 cpu_silicon = "mb93555";
499 cpu_mmu = "Prot, SAT";
500 cpu_system = __frv_mb93091_cb41;
501 clock_cmodes = clock_cmodes_fr555;
502 clock_doubled = 1;
503 break;
504 default:
505 break;
506 }
507 break;
508
509 default:
510 break;
511 }
512
513 printk("- Series:%s CPU:%s Silicon:%s\n",
514 cpu_series, cpu_core, cpu_silicon);
515
516#ifdef CONFIG_MB93091_VDK
517 detect_mb93091();
518#endif
519
520#if defined(CONFIG_MB93093_PDK) && defined(CONFIG_FUJITSU_MB93493)
521 cpu_board2 = __frv_mb93493;
522#endif
523
524} /* end determine_cpu() */
525
526/*****************************************************************************/
527/*
528 * calculate the bus clock speed
529 */
530void __pminit determine_clocks(int verbose)
531{
532 const struct clock_cmode *mode, *tmode;
533 unsigned long clkc, psr, quot;
534
535 clkc = __get_CLKC();
536 psr = __get_PSR();
537
538 clock_p0_current = !!(clkc & CLKC_P0);
539 clock_cm_current = clkc & CLKC_CM;
540 clock_cmode_current = (clkc & CLKC_CMODE) >> CLKC_CMODE_s;
541
542 if (verbose)
543 printk("psr=%08lx hsr0=%08lx clkc=%08lx\n", psr, __get_HSR(0), clkc);
544
545 /* the CB70 has some alternative ways of setting the clock speed through switches accessed
546 * through the FPGA. */
547 if (cpu_system == __frv_mb93091_cb70) {
548 unsigned short clkswr = *(volatile unsigned short *) 0xffc00104UL & 0x1fffUL;
549
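		/* bit 12 of the switch value selects a fixed 60MHz CLKIN;
		 * otherwise the low three nibbles give tens, units and tenths
		 * of MHz (e.g. 0x0664 decodes to 66.4MHz) */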
550 if (clkswr & 0x1000)
551 __clkin_clock_speed_HZ = 60000000UL;
552 else
553 __clkin_clock_speed_HZ =
554 ((clkswr >> 8) & 0xf) * 10000000 +
555 ((clkswr >> 4) & 0xf) * 1000000 +
556 ((clkswr ) & 0xf) * 100000;
557 }
558	/* the FR451-based CB451 board also reads the clock switches through the FPGA
559	 * (a fixed 24MHz setting is commented out below) */
559 else if (cpu_system == __frv_mb93091_cb451) {
560 //__clkin_clock_speed_HZ = 24000000UL; // CB451-FPGA
561 unsigned short clkswr = *(volatile unsigned short *) 0xffc00104UL & 0x1fffUL;
562
563 if (clkswr & 0x1000)
564 __clkin_clock_speed_HZ = 60000000UL;
565 else
566 __clkin_clock_speed_HZ =
567 ((clkswr >> 8) & 0xf) * 10000000 +
568 ((clkswr >> 4) & 0xf) * 1000000 +
569 ((clkswr ) & 0xf) * 100000;
570 }
571 /* otherwise determine the clockspeed from VDK or other registers */
572 else {
573 __clkin_clock_speed_HZ = __get_CLKIN();
574 }
575
576 /* look up the appropriate clock relationships table entry */
577 mode = &undef_clock_cmode;
578 if (clock_cmodes) {
579 tmode = &clock_cmodes[(clkc & CLKC_CMODE) >> CLKC_CMODE_s];
580 if (tmode->xbus)
581 mode = tmode;
582 }
583
584#define CLOCK(SRC,RATIO) ((SRC) * (((RATIO) >> 4) & 0x0f) / ((RATIO) & 0x0f))
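/* e.g. with a 50MHz source and a ratio of _x3 (0x31), CLOCK() yields
 * 50MHz * 3 / 1 = 150MHz */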
585
586 if (clock_doubled)
587 __clkin_clock_speed_HZ <<= 1;
588
589 __ext_bus_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->xbus);
590 __sdram_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->sdram);
591 __dsu_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->dsu);
592
593 switch (clkc & CLKC_CM) {
594 case 0: /* High */
595 __core_bus_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->corebus);
596 __core_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->core);
597 break;
598 case 1: /* Medium */
599 __core_bus_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->sdram);
600 __core_clock_speed_HZ = CLOCK(__clkin_clock_speed_HZ, mode->sdram);
601 break;
602 case 2: /* Low; not supported */
603 case 3: /* UNDEF */
604 printk("Unsupported CLKC CM %ld\n", clkc & CLKC_CM);
605 panic("Bye");
606 }
607
608 __res_bus_clock_speed_HZ = __ext_bus_clock_speed_HZ;
609 if (clkc & CLKC_P0)
610 __res_bus_clock_speed_HZ >>= 1;
611
612 if (verbose) {
613 printk("CLKIN: %lu.%3.3luMHz\n",
614 __clkin_clock_speed_HZ / 1000000,
615 (__clkin_clock_speed_HZ / 1000) % 1000);
616
617 printk("CLKS:"
618 " ext=%luMHz res=%luMHz sdram=%luMHz cbus=%luMHz core=%luMHz dsu=%luMHz\n",
619 __ext_bus_clock_speed_HZ / 1000000,
620 __res_bus_clock_speed_HZ / 1000000,
621 __sdram_clock_speed_HZ / 1000000,
622 __core_bus_clock_speed_HZ / 1000000,
623 __core_clock_speed_HZ / 1000000,
624 __dsu_clock_speed_HZ / 1000000
625 );
626 }
627
628	/* calculate the number of __delay() loop iterations per microsecond (2 insn loop) */
629 __delay_loops_MHz = __core_clock_speed_HZ / (1000000 * 2);
630
631 /* set the serial prescaler */
632 __serial_clock_speed_HZ = __res_bus_clock_speed_HZ;
633 quot = 1;
634 while (__serial_clock_speed_HZ / quot / 16 / 65536 > 3000)
635 quot += 1;
636
637 /* double the divisor if P0 is clear, so that if/when P0 is set, it's still achievable
638 * - we have to be careful - dividing too much can mean we can't get 115200 baud
639 */
640 if (__serial_clock_speed_HZ > 32000000 && !(clkc & CLKC_P0))
641 quot <<= 1;
642
643 __serial_clock_speed_HZ /= quot;
644 __frv_uart0.uartclk = __serial_clock_speed_HZ;
645 __frv_uart1.uartclk = __serial_clock_speed_HZ;
646
647 if (verbose)
648 printk(" uart=%luMHz\n", __serial_clock_speed_HZ / 1000000 * quot);
649
650 while (!(__get_UART0_LSR() & UART_LSR_TEMT))
651 continue;
652
653 while (!(__get_UART1_LSR() & UART_LSR_TEMT))
654 continue;
655
656 __set_UCPVR(quot);
657 __set_UCPSR(0);
658} /* end determine_clocks() */
659
660/*****************************************************************************/
661/*
662 * reserve some DMA consistent memory
663 */
664#ifdef CONFIG_RESERVE_DMA_COHERENT
665static void __init reserve_dma_coherent(void)
666{
667 unsigned long ampr;
668
669 /* find the first non-kernel memory tile and steal it */
670#define __steal_AMPR(r) \
671 if (__get_DAMPR(r) & xAMPRx_V) { \
672 ampr = __get_DAMPR(r); \
673 __set_DAMPR(r, ampr | xAMPRx_S | xAMPRx_C); \
674 __set_IAMPR(r, 0); \
675 goto found; \
676 }
677
678 __steal_AMPR(1);
679 __steal_AMPR(2);
680 __steal_AMPR(3);
681 __steal_AMPR(4);
682 __steal_AMPR(5);
683 __steal_AMPR(6);
684
685 if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551) {
686 __steal_AMPR(7);
687 __steal_AMPR(8);
688 __steal_AMPR(9);
689 __steal_AMPR(10);
690 __steal_AMPR(11);
691 __steal_AMPR(12);
692 __steal_AMPR(13);
693 __steal_AMPR(14);
694 }
695
696 /* unable to grant any DMA consistent memory */
697 printk("No DMA consistent memory reserved\n");
698 return;
699
700 found:
701 dma_coherent_mem_start = ampr & xAMPRx_PPFN;
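	/* the xAMPRx_SS field encodes the tile size as a power of two: a
	 * value of n gives 1 << (n + 17) bytes, so n == 3 is 1MB and n == 7
	 * is 16MB */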
702 ampr &= xAMPRx_SS;
703 ampr >>= 4;
704 ampr = 1 << (ampr - 3 + 20);
705 dma_coherent_mem_end = dma_coherent_mem_start + ampr;
706
707 printk("DMA consistent memory reserved %lx-%lx\n",
708 dma_coherent_mem_start, dma_coherent_mem_end);
709
710} /* end reserve_dma_coherent() */
711#endif
712
713/*****************************************************************************/
714/*
715 * calibrate the delay loop
716 */
717void __init calibrate_delay(void)
718{
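	/* no timed calibration loop is needed: determine_clocks() has already
	 * set __delay_loops_MHz to the delay-loop rate in loops per
	 * microsecond (two instructions per loop), so multiplying by the
	 * number of microseconds per jiffy gives loops_per_jiffy directly */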
719 loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ);
720
721 printk("Calibrating delay loop... %lu.%02lu BogoMIPS\n",
722 loops_per_jiffy / (500000 / HZ),
723 (loops_per_jiffy / (5000 / HZ)) % 100);
724
725} /* end calibrate_delay() */
726
727/*****************************************************************************/
728/*
729 * look through the command line for some things we need to know immediately
730 */
731static void __init parse_cmdline_early(char *cmdline)
732{
733 if (!cmdline)
734 return;
735
736 while (*cmdline) {
737 if (*cmdline == ' ')
738 cmdline++;
739
740 /* "mem=XXX[kKmM]" sets SDRAM size to <mem>, overriding the value we worked
741 * out from the SDRAM controller mask register
742 */
743 if (!memcmp(cmdline, "mem=", 4)) {
744 unsigned long long mem_size;
745
746 mem_size = memparse(cmdline + 4, &cmdline);
747 memory_end = memory_start + mem_size;
748 }
749
750 while (*cmdline && *cmdline != ' ')
751 cmdline++;
752 }
753
754} /* end parse_cmdline_early() */
755
756/*****************************************************************************/
757/*
758 * set up the FR-V specific parts of the system
759 */
760void __init setup_arch(char **cmdline_p)
761{
762#ifdef CONFIG_MMU
763 printk("Linux FR-V port done by Red Hat Inc <dhowells@redhat.com>\n");
764#else
765 printk("uClinux FR-V port done by Red Hat Inc <dhowells@redhat.com>\n");
766#endif
767
768 memcpy(saved_command_line, redboot_command_line, COMMAND_LINE_SIZE);
769
770 determine_cpu();
771 determine_clocks(1);
772
773 /* For printk-directly-beats-on-serial-hardware hack */
774 console_set_baud(115200);
775#ifdef CONFIG_GDBSTUB
776 gdbstub_set_baud(115200);
777#endif
778
779#ifdef CONFIG_RESERVE_DMA_COHERENT
780 reserve_dma_coherent();
781#endif
782 dump_memory_map();
783
784#ifdef CONFIG_MB93090_MB00
785 if (mb93090_mb00_detected)
786 mb93090_display();
787#endif
788
789 /* register those serial ports that are available */
790#ifndef CONFIG_GDBSTUB_UART0
791 __reg(UART0_BASE + UART_IER * 8) = 0;
792 early_serial_setup(&__frv_uart0);
793// register_serial(&__frv_uart0);
794#endif
795#ifndef CONFIG_GDBSTUB_UART1
796 __reg(UART1_BASE + UART_IER * 8) = 0;
797 early_serial_setup(&__frv_uart1);
798// register_serial(&__frv_uart1);
799#endif
800
801#if defined(CONFIG_CHR_DEV_FLASH) || defined(CONFIG_BLK_DEV_FLASH)
802 /* we need to initialize the Flashrom device here since we might
803 * do things with flash early on in the boot
804 */
805 flash_probe();
806#endif
807
808 /* deal with the command line - RedBoot may have passed one to the kernel */
809 memcpy(command_line, saved_command_line, sizeof(command_line));
810 *cmdline_p = &command_line[0];
811 parse_cmdline_early(command_line);
812
813 /* set up the memory description
814 * - by now the stack is part of the init task */
815 printk("Memory %08lx-%08lx\n", memory_start, memory_end);
816
817 if (memory_start == memory_end) BUG();
818
819 init_mm.start_code = (unsigned long) &_stext;
820 init_mm.end_code = (unsigned long) &_etext;
821 init_mm.end_data = (unsigned long) &_edata;
822#if 0 /* DAVIDM - don't set brk just in case someone decides to use it */
823 init_mm.brk = (unsigned long) &_end;
824#else
825 init_mm.brk = (unsigned long) 0;
826#endif
827
828#ifdef DEBUG
829 printk("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x BSS=0x%06x-0x%06x\n",
830 (int) &_stext, (int) &_etext,
831 (int) &_sdata, (int) &_edata,
832 (int) &_sbss, (int) &_ebss);
833#endif
834
835#ifdef CONFIG_VT
836#if defined(CONFIG_VGA_CONSOLE)
837 conswitchp = &vga_con;
838#elif defined(CONFIG_DUMMY_CONSOLE)
839 conswitchp = &dummy_con;
840#endif
841#endif
842
843#ifdef CONFIG_BLK_DEV_BLKMEM
844 ROOT_DEV = MKDEV(BLKMEM_MAJOR,0);
845#endif
846 /*rom_length = (unsigned long)&_flashend - (unsigned long)&_romvec;*/
847
848#ifdef CONFIG_MMU
849 setup_linux_memory();
850#else
851 setup_uclinux_memory();
852#endif
853
854 /* get kmalloc into gear */
855 paging_init();
856
857 /* init DMA */
858 frv_dma_init();
859#ifdef DEBUG
860 printk("Done setup_arch\n");
861#endif
862
863 /* start the decrement timer running */
864// asm volatile("movgs %0,timerd" :: "r"(10000000));
865// __set_HSR(0, __get_HSR(0) | HSR0_ETMD);
866
867} /* end setup_arch() */
868
869#if 0
870/*****************************************************************************/
871/*
872 *
873 */
874static int __devinit setup_arch_serial(void)
875{
876 /* register those serial ports that are available */
877#ifndef CONFIG_GDBSTUB_UART0
878 early_serial_setup(&__frv_uart0);
879#endif
880#ifndef CONFIG_GDBSTUB_UART1
881 early_serial_setup(&__frv_uart1);
882#endif
883
884 return 0;
885} /* end setup_arch_serial() */
886
887late_initcall(setup_arch_serial);
888#endif
889
890/*****************************************************************************/
891/*
892 * set up the memory map for normal MMU linux
893 */
894#ifdef CONFIG_MMU
895static void __init setup_linux_memory(void)
896{
897 unsigned long bootmap_size, low_top_pfn, kstart, kend, high_mem;
898
899 kstart = (unsigned long) &__kernel_image_start - PAGE_OFFSET;
900 kend = (unsigned long) &__kernel_image_end - PAGE_OFFSET;
901
902 kstart = kstart & PAGE_MASK;
903 kend = (kend + PAGE_SIZE - 1) & PAGE_MASK;
904
905 /* give all the memory to the bootmap allocator, tell it to put the
906 * boot mem_map immediately following the kernel image
907 */
908 bootmap_size = init_bootmem_node(NODE_DATA(0),
909 kend >> PAGE_SHIFT, /* map addr */
910 memory_start >> PAGE_SHIFT, /* start of RAM */
911 memory_end >> PAGE_SHIFT /* end of RAM */
912 );
913
914 /* pass the memory that the kernel can immediately use over to the bootmem allocator */
915 max_mapnr = num_physpages = (memory_end - memory_start) >> PAGE_SHIFT;
916 low_top_pfn = (KERNEL_LOWMEM_END - KERNEL_LOWMEM_START) >> PAGE_SHIFT;
917 high_mem = 0;
918
919 if (num_physpages > low_top_pfn) {
920#ifdef CONFIG_HIGHMEM
921 high_mem = num_physpages - low_top_pfn;
922#else
923 max_mapnr = num_physpages = low_top_pfn;
924#endif
925 }
926 else {
927 low_top_pfn = num_physpages;
928 }
929
930 min_low_pfn = memory_start >> PAGE_SHIFT;
931 max_low_pfn = low_top_pfn;
932 max_pfn = memory_end >> PAGE_SHIFT;
933
934 num_mappedpages = low_top_pfn;
935
936 printk(KERN_NOTICE "%ldMB LOWMEM available.\n", low_top_pfn >> (20 - PAGE_SHIFT));
937
938 free_bootmem(memory_start, low_top_pfn << PAGE_SHIFT);
939
940#ifdef CONFIG_HIGHMEM
941 if (high_mem)
942 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", high_mem >> (20 - PAGE_SHIFT));
943#endif
944
945 /* take back the memory occupied by the kernel image and the bootmem alloc map */
946 reserve_bootmem(kstart, kend - kstart + bootmap_size);
947
948 /* reserve the memory occupied by the initial ramdisk */
949#ifdef CONFIG_BLK_DEV_INITRD
950 if (LOADER_TYPE && INITRD_START) {
951 if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) {
952 reserve_bootmem(INITRD_START, INITRD_SIZE);
953 initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
954 initrd_end = initrd_start + INITRD_SIZE;
955 }
956 else {
957 printk(KERN_ERR
958 "initrd extends beyond end of memory (0x%08lx > 0x%08lx)\n"
959 "disabling initrd\n",
960 INITRD_START + INITRD_SIZE,
961 low_top_pfn << PAGE_SHIFT);
962 initrd_start = 0;
963 }
964 }
965#endif
966
967} /* end setup_linux_memory() */
968#endif
969
970/*****************************************************************************/
971/*
972 * set up the memory map for uClinux
973 */
974#ifndef CONFIG_MMU
975static void __init setup_uclinux_memory(void)
976{
977#ifdef CONFIG_PROTECT_KERNEL
978 unsigned long dampr;
979#endif
980 unsigned long kend;
981 int bootmap_size;
982
983 kend = (unsigned long) &__kernel_image_end;
984 kend = (kend + PAGE_SIZE - 1) & PAGE_MASK;
985
986 /* give all the memory to the bootmap allocator, tell it to put the
987 * boot mem_map immediately following the kernel image
988 */
989 bootmap_size = init_bootmem_node(NODE_DATA(0),
990 kend >> PAGE_SHIFT, /* map addr */
991 memory_start >> PAGE_SHIFT, /* start of RAM */
992 memory_end >> PAGE_SHIFT /* end of RAM */
993 );
994
995 /* free all the usable memory */
996 free_bootmem(memory_start, memory_end - memory_start);
997
998 high_memory = (void *) (memory_end & PAGE_MASK);
999 max_mapnr = num_physpages = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
1000
1001 min_low_pfn = memory_start >> PAGE_SHIFT;
1002 max_low_pfn = memory_end >> PAGE_SHIFT;
1003 max_pfn = max_low_pfn;
1004
1005 /* now take back the bits the core kernel is occupying */
1006#ifndef CONFIG_PROTECT_KERNEL
1007 reserve_bootmem(kend, bootmap_size);
1008 reserve_bootmem((unsigned long) &__kernel_image_start,
1009 kend - (unsigned long) &__kernel_image_start);
1010
1011#else
1012 dampr = __get_DAMPR(0);
1013 dampr &= xAMPRx_SS;
1014 dampr = (dampr >> 4) + 17;
1015 dampr = 1 << dampr;
1016
1017 reserve_bootmem(__get_DAMPR(0) & xAMPRx_PPFN, dampr);
1018#endif
1019
1020 /* reserve some memory to do uncached DMA through if requested */
1021#ifdef CONFIG_RESERVE_DMA_COHERENT
1022 if (dma_coherent_mem_start)
1023 reserve_bootmem(dma_coherent_mem_start,
1024 dma_coherent_mem_end - dma_coherent_mem_start);
1025#endif
1026
1027} /* end setup_uclinux_memory() */
1028#endif
1029
1030/*****************************************************************************/
1031/*
1032 * get CPU information for use by procfs
1033 */
1034static int show_cpuinfo(struct seq_file *m, void *v)
1035{
1036 const char *gr, *fr, *fm, *fp, *cm, *nem, *ble;
1037#ifdef CONFIG_PM
1038 const char *sep;
1039#endif
1040
1041 gr = cpu_hsr0_all & HSR0_GRHE ? "gr0-63" : "gr0-31";
1042 fr = cpu_hsr0_all & HSR0_FRHE ? "fr0-63" : "fr0-31";
1043 fm = cpu_psr_all & PSR_EM ? ", Media" : "";
1044 fp = cpu_psr_all & PSR_EF ? ", FPU" : "";
1045 cm = cpu_psr_all & PSR_CM ? ", CCCR" : "";
1046 nem = cpu_psr_all & PSR_NEM ? ", NE" : "";
1047 ble = cpu_psr_all & PSR_BE ? "BE" : "LE";
1048
1049 seq_printf(m,
1050 "CPU-Series:\t%s\n"
1051 "CPU-Core:\t%s, %s, %s%s%s\n"
1052 "CPU:\t\t%s\n"
1053 "MMU:\t\t%s\n"
1054 "FP-Media:\t%s%s%s\n"
1055 "System:\t\t%s",
1056 cpu_series,
1057 cpu_core, gr, ble, cm, nem,
1058 cpu_silicon,
1059 cpu_mmu,
1060 fr, fm, fp,
1061 cpu_system);
1062
1063 if (cpu_board1)
1064 seq_printf(m, ", %s", cpu_board1);
1065
1066 if (cpu_board2)
1067 seq_printf(m, ", %s", cpu_board2);
1068
1069 seq_printf(m, "\n");
1070
1071#ifdef CONFIG_PM
1072 seq_printf(m, "PM-Controls:");
1073 sep = "\t";
1074
1075 if (clock_bits_settable & CLOCK_BIT_CMODE) {
1076 seq_printf(m, "%scmode=0x%04hx", sep, clock_cmodes_permitted);
1077 sep = ", ";
1078 }
1079
1080 if (clock_bits_settable & CLOCK_BIT_CM) {
1081 seq_printf(m, "%scm=0x%lx", sep, clock_bits_settable & CLOCK_BIT_CM);
1082 sep = ", ";
1083 }
1084
1085 if (clock_bits_settable & CLOCK_BIT_P0) {
1086 seq_printf(m, "%sp0=0x3", sep);
1087 sep = ", ";
1088 }
1089
1090 seq_printf(m, "%ssuspend=0x22\n", sep);
1091#endif
1092
1093 seq_printf(m,
1094 "PM-Status:\tcmode=%d, cm=%d, p0=%d\n",
1095 clock_cmode_current, clock_cm_current, clock_p0_current);
1096
1097#define print_clk(TAG, VAR) \
1098 seq_printf(m, "Clock-" TAG ":\t%lu.%2.2lu MHz\n", VAR / 1000000, (VAR / 10000) % 100)
1099
1100 print_clk("In", __clkin_clock_speed_HZ);
1101 print_clk("Core", __core_clock_speed_HZ);
1102 print_clk("SDRAM", __sdram_clock_speed_HZ);
1103 print_clk("CBus", __core_bus_clock_speed_HZ);
1104 print_clk("Res", __res_bus_clock_speed_HZ);
1105 print_clk("Ext", __ext_bus_clock_speed_HZ);
1106 print_clk("DSU", __dsu_clock_speed_HZ);
1107
1108 seq_printf(m,
1109 "BogoMips:\t%lu.%02lu\n",
1110 (loops_per_jiffy * HZ) / 500000, ((loops_per_jiffy * HZ) / 5000) % 100);
1111
1112 return 0;
1113} /* end show_cpuinfo() */
1114
1115static void *c_start(struct seq_file *m, loff_t *pos)
1116{
1117 return *pos < NR_CPUS ? (void *) 0x12345678 : NULL;
1118}
1119
1120static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1121{
1122 ++*pos;
1123 return c_start(m, pos);
1124}
1125
1126static void c_stop(struct seq_file *m, void *v)
1127{
1128}
1129
1130struct seq_operations cpuinfo_op = {
1131 .start = c_start,
1132 .next = c_next,
1133 .stop = c_stop,
1134 .show = show_cpuinfo,
1135};
1136
1137void arch_gettod(int *year, int *mon, int *day, int *hour,
1138 int *min, int *sec)
1139{
1140 *year = *mon = *day = *hour = *min = *sec = 0;
1141}
1142
1143/*****************************************************************************/
1144/*
1145 * send a command to the LCD panel on the MB93090 motherboard
1146 */
1147#ifdef CONFIG_MB93090_MB00
1148static void __init mb93090_sendlcdcmd(uint32_t cmd)
1149{
1150 unsigned long base = __addr_LCD();
1151 int loop;
1152
1153 /* request reading of the busy flag */
1154 __set_LCD(base, LCD_CMD_READ_BUSY);
1155 __set_LCD(base, LCD_CMD_READ_BUSY & ~LCD_E);
1156
1157 /* wait for the busy flag to become clear */
1158 for (loop = 10000; loop > 0; loop--)
1159 if (!(__get_LCD(base) & 0x80))
1160 break;
1161
1162 /* send the command */
1163 __set_LCD(base, cmd);
1164 __set_LCD(base, cmd & ~LCD_E);
1165
1166} /* end mb93090_sendlcdcmd() */
1167
1168/*****************************************************************************/
1169/*
1170 * write to the MB93090 LEDs and LCD
1171 */
1172static void __init mb93090_display(void)
1173{
1174 const char *p;
1175
1176 __set_LEDS(0);
1177
1178 /* set up the LCD */
1179 mb93090_sendlcdcmd(LCD_CMD_CLEAR);
1180 mb93090_sendlcdcmd(LCD_CMD_FUNCSET(1,1,0));
1181 mb93090_sendlcdcmd(LCD_CMD_ON(0,0));
1182 mb93090_sendlcdcmd(LCD_CMD_HOME);
1183
1184 mb93090_sendlcdcmd(LCD_CMD_SET_DD_ADDR(0));
1185 for (p = mb93090_banner; *p; p++)
1186 mb93090_sendlcdcmd(LCD_DATA_WRITE(*p));
1187
1188 mb93090_sendlcdcmd(LCD_CMD_SET_DD_ADDR(64));
1189 for (p = mb93090_version; *p; p++)
1190 mb93090_sendlcdcmd(LCD_DATA_WRITE(*p));
1191
1192} /* end mb93090_display() */
1193
1194#endif // CONFIG_MB93090_MB00
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
new file mode 100644
index 000000000000..d8d8f3d4304d
--- /dev/null
+++ b/arch/frv/kernel/signal.c
@@ -0,0 +1,588 @@
1/* signal.c: FRV specific bits of signal handling
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/signal.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/smp.h>
16#include <linux/smp_lock.h>
17#include <linux/kernel.h>
18#include <linux/signal.h>
19#include <linux/errno.h>
20#include <linux/wait.h>
21#include <linux/ptrace.h>
22#include <linux/unistd.h>
23#include <linux/personality.h>
24#include <linux/suspend.h>
25#include <asm/ucontext.h>
26#include <asm/uaccess.h>
27#include <asm/cacheflush.h>
28
29#define DEBUG_SIG 0
30
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
32
33struct fdpic_func_descriptor {
34 unsigned long text;
35 unsigned long GOT;
36};
37
38asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
39
40/*
41 * Atomically swap in the new signal mask, and wait for a signal.
42 */
43asmlinkage int sys_sigsuspend(int history0, int history1, old_sigset_t mask)
44{
45 sigset_t saveset;
46
47 mask &= _BLOCKABLE;
48 spin_lock_irq(&current->sighand->siglock);
49 saveset = current->blocked;
50 siginitset(&current->blocked, mask);
51 recalc_sigpending();
52 spin_unlock_irq(&current->sighand->siglock);
53
54 __frame->gr8 = -EINTR;
55 while (1) {
56 current->state = TASK_INTERRUPTIBLE;
57 schedule();
58 if (do_signal(__frame, &saveset))
59 /* return the signal number as the return value of this function
60 * - this is an utterly evil hack. syscalls should not invoke do_signal()
61 * as entry.S sets regs->gr8 to the return value of the system call
62 * - we can't just use sigpending() as we'd have to discard SIG_IGN signals
63 * and call waitpid() if SIGCHLD needed discarding
64 * - this only works on the i386 because it passes arguments to the signal
65 * handler on the stack, and the return value in EAX is effectively
66 * discarded
67 */
68 return __frame->gr8;
69 }
70}
71
72asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
73{
74 sigset_t saveset, newset;
75
76 /* XXX: Don't preclude handling different sized sigset_t's. */
77 if (sigsetsize != sizeof(sigset_t))
78 return -EINVAL;
79
80 if (copy_from_user(&newset, unewset, sizeof(newset)))
81 return -EFAULT;
82 sigdelsetmask(&newset, ~_BLOCKABLE);
83
84 spin_lock_irq(&current->sighand->siglock);
85 saveset = current->blocked;
86 current->blocked = newset;
87 recalc_sigpending();
88 spin_unlock_irq(&current->sighand->siglock);
89
90 __frame->gr8 = -EINTR;
91 while (1) {
92 current->state = TASK_INTERRUPTIBLE;
93 schedule();
94 if (do_signal(__frame, &saveset))
95 /* return the signal number as the return value of this function
96 * - this is an utterly evil hack. syscalls should not invoke do_signal()
97 * as entry.S sets regs->gr8 to the return value of the system call
98 * - we can't just use sigpending() as we'd have to discard SIG_IGN signals
99 * and call waitpid() if SIGCHLD needed discarding
100 * - this only works on the i386 because it passes arguments to the signal
101 * handler on the stack, and the return value in EAX is effectively
102 * discarded
103 */
104 return __frame->gr8;
105 }
106}
107
108asmlinkage int sys_sigaction(int sig,
109 const struct old_sigaction __user *act,
110 struct old_sigaction __user *oact)
111{
112 struct k_sigaction new_ka, old_ka;
113 int ret;
114
115 if (act) {
116 old_sigset_t mask;
117 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
118 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
119 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
120 return -EFAULT;
121 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
122 __get_user(mask, &act->sa_mask);
123 siginitset(&new_ka.sa.sa_mask, mask);
124 }
125
126 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
127
128 if (!ret && oact) {
129 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
130 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
131 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
132 return -EFAULT;
133 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
134 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
135 }
136
137 return ret;
138}
139
140asmlinkage
141int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
142{
143 return do_sigaltstack(uss, uoss, __frame->sp);
144}
145
146
147/*
148 * Do a signal return; undo the signal stack.
149 */
150
151struct sigframe
152{
153 void (*pretcode)(void);
154 int sig;
155 struct sigcontext sc;
156 unsigned long extramask[_NSIG_WORDS-1];
157 uint32_t retcode[2];
158};
159
160struct rt_sigframe
161{
162 void (*pretcode)(void);
163 int sig;
164 struct siginfo *pinfo;
165 void *puc;
166 struct siginfo info;
167 struct ucontext uc;
168 uint32_t retcode[2];
169};
170
171static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
172{
173 struct user_context *user = current->thread.user;
174 unsigned long tbr, psr;
175
176 tbr = user->i.tbr;
177 psr = user->i.psr;
178 if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
179 goto badframe;
180 user->i.tbr = tbr;
181 user->i.psr = psr;
182
183 restore_user_regs(user);
184
185 user->i.syscallno = -1; /* disable syscall checks */
186
187 *_gr8 = user->i.gr[8];
188 return 0;
189
190 badframe:
191 return 1;
192}
193
194asmlinkage int sys_sigreturn(void)
195{
196 struct sigframe __user *frame = (struct sigframe __user *) __frame->sp;
197 sigset_t set;
198 int gr8;
199
200 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
201 goto badframe;
202 if (__get_user(set.sig[0], &frame->sc.sc_oldmask))
203 goto badframe;
204
205 if (_NSIG_WORDS > 1 &&
206 __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))
207 goto badframe;
208
209 sigdelsetmask(&set, ~_BLOCKABLE);
210 spin_lock_irq(&current->sighand->siglock);
211 current->blocked = set;
212 recalc_sigpending();
213 spin_unlock_irq(&current->sighand->siglock);
214
215 if (restore_sigcontext(&frame->sc, &gr8))
216 goto badframe;
217 return gr8;
218
219 badframe:
220 force_sig(SIGSEGV, current);
221 return 0;
222}
223
224asmlinkage int sys_rt_sigreturn(void)
225{
226 struct rt_sigframe __user *frame = (struct rt_sigframe __user *) __frame->sp;
227 sigset_t set;
228 int gr8;
229
230 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
231 goto badframe;
232 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
233 goto badframe;
234
235 sigdelsetmask(&set, ~_BLOCKABLE);
236 spin_lock_irq(&current->sighand->siglock);
237 current->blocked = set;
238 recalc_sigpending();
239 spin_unlock_irq(&current->sighand->siglock);
240
241 if (restore_sigcontext(&frame->uc.uc_mcontext, &gr8))
242 goto badframe;
243
244 if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT)
245 goto badframe;
246
247 return gr8;
248
249badframe:
250 force_sig(SIGSEGV, current);
251 return 0;
252}
253
254/*
255 * Set up a signal frame
256 */
257static int setup_sigcontext(struct sigcontext __user *sc, unsigned long mask)
258{
259 save_user_regs(current->thread.user);
260
261 if (copy_to_user(&sc->sc_context, current->thread.user, sizeof(sc->sc_context)) != 0)
262 goto badframe;
263
264 /* non-iBCS2 extensions.. */
265 if (__put_user(mask, &sc->sc_oldmask) < 0)
266 goto badframe;
267
268 return 0;
269
270 badframe:
271 return 1;
272}
273
274/*****************************************************************************/
275/*
276 * Determine which stack to use..
277 */
278static inline void __user *get_sigframe(struct k_sigaction *ka,
279 struct pt_regs *regs,
280 size_t frame_size)
281{
282 unsigned long sp;
283
284 /* Default to using normal stack */
285 sp = regs->sp;
286
287 /* This is the X/Open sanctioned signal stack switching. */
288 if (ka->sa.sa_flags & SA_ONSTACK) {
289 if (! on_sig_stack(sp))
290 sp = current->sas_ss_sp + current->sas_ss_size;
291 }
292
293 return (void __user *) ((sp - frame_size) & ~7UL);
294} /* end get_sigframe() */
295
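The final expression in get_sigframe() reserves room for the frame and then rounds the address down to an 8-byte boundary. A tiny worked example with made-up numbers:

/* Illustrative only: the 8-byte rounding performed by get_sigframe() above,
 * with arbitrary example values for the user SP and frame size. */
#include <stdio.h>

int main(void)
{
	unsigned long sp = 0x7fff1234UL;	/* arbitrary example user SP */
	unsigned long frame_size = 0x1d8;	/* arbitrary example frame size */

	/* 0x7fff1234 - 0x1d8 = 0x7fff105c, rounded down to 0x7fff1058 */
	printf("frame at %#lx\n", (sp - frame_size) & ~7UL);
	return 0;
}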
296/*****************************************************************************/
297/*
298 * set up a stack frame for delivering a traditional (non-RT) signal
299 */
300static void setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs)
301{
302 struct sigframe __user *frame;
303 int rsig;
304
305 frame = get_sigframe(ka, regs, sizeof(*frame));
306
307 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
308 goto give_sigsegv;
309
310 rsig = sig;
311 if (sig < 32 &&
312 __current_thread_info->exec_domain &&
313 __current_thread_info->exec_domain->signal_invmap)
314 rsig = __current_thread_info->exec_domain->signal_invmap[sig];
315
316 if (__put_user(rsig, &frame->sig) < 0)
317 goto give_sigsegv;
318
319 if (setup_sigcontext(&frame->sc, set->sig[0]))
320 goto give_sigsegv;
321
322 if (_NSIG_WORDS > 1) {
323 if (__copy_to_user(frame->extramask, &set->sig[1],
324 sizeof(frame->extramask)))
325 goto give_sigsegv;
326 }
327
328 /* Set up to return from userspace. If provided, use a stub
329 * already in userspace. */
330 if (ka->sa.sa_flags & SA_RESTORER) {
331 if (__put_user(ka->sa.sa_restorer, &frame->pretcode) < 0)
332 goto give_sigsegv;
333 }
334 else {
335 /* Set up the following code on the stack:
336 * setlos #__NR_sigreturn,gr7
337 * tira gr0,0
338 */
339 if (__put_user((void (*)(void))frame->retcode, &frame->pretcode) ||
340 __put_user(0x8efc0000|__NR_sigreturn, &frame->retcode[0]) ||
341 __put_user(0xc0700000, &frame->retcode[1]))
342 goto give_sigsegv;
343
344 flush_icache_range((unsigned long) frame->retcode,
345 (unsigned long) (frame->retcode + 2));
346 }
347
348 /* set up registers for signal handler */
349 regs->sp = (unsigned long) frame;
350 regs->lr = (unsigned long) &frame->retcode;
351 regs->gr8 = sig;
352
353 if (get_personality & FDPIC_FUNCPTRS) {
354 struct fdpic_func_descriptor __user *funcptr =
355 (struct fdpic_func_descriptor *) ka->sa.sa_handler;
356 __get_user(regs->pc, &funcptr->text);
357 __get_user(regs->gr15, &funcptr->GOT);
358 } else {
359 regs->pc = (unsigned long) ka->sa.sa_handler;
360 regs->gr15 = 0;
361 }
362
363 set_fs(USER_DS);
364
365#if DEBUG_SIG
366 printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n",
367 sig, current->comm, current->pid, frame, regs->pc, frame->pretcode);
368#endif
369
370 return;
371
372give_sigsegv:
373 if (sig == SIGSEGV)
374 ka->sa.sa_handler = SIG_DFL;
375
376 force_sig(SIGSEGV, current);
377} /* end setup_frame() */
378
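When no SA_RESTORER stub is supplied, setup_frame() writes a two-instruction trampoline into the frame: the syscall number is OR'd into the immediate field of a `setlos ...,gr7` opcode template, followed by a `tira gr0,0` trap. A standalone sketch of that encoding (the __NR_sigreturn value below is an assumed example, not taken from this tree):

/* Illustrative only: show how the sigreturn trampoline words above are
 * built.  The syscall number used here is an assumed example value. */
#include <stdio.h>

int main(void)
{
	unsigned int nr_sigreturn = 119;	/* assumed example, not from this source */
	unsigned int retcode[2];

	retcode[0] = 0x8efc0000 | nr_sigreturn;	/* setlos #__NR_sigreturn,gr7 */
	retcode[1] = 0xc0700000;		/* tira gr0,0 - trap into the kernel */

	printf("%08x %08x\n", retcode[0], retcode[1]);
	return 0;
}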
379/*****************************************************************************/
380/*
381 * set up a stack frame for delivering a realtime (SA_SIGINFO) signal
382 */
383static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
384 sigset_t *set, struct pt_regs * regs)
385{
386 struct rt_sigframe __user *frame;
387 int rsig;
388
389 frame = get_sigframe(ka, regs, sizeof(*frame));
390
391 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
392 goto give_sigsegv;
393
394 rsig = sig;
395 if (sig < 32 &&
396 __current_thread_info->exec_domain &&
397 __current_thread_info->exec_domain->signal_invmap)
398 rsig = __current_thread_info->exec_domain->signal_invmap[sig];
399
400 if (__put_user(rsig, &frame->sig) ||
401 __put_user(&frame->info, &frame->pinfo) ||
402 __put_user(&frame->uc, &frame->puc))
403 goto give_sigsegv;
404
405 if (copy_siginfo_to_user(&frame->info, info))
406 goto give_sigsegv;
407
408 /* Create the ucontext. */
409 if (__put_user(0, &frame->uc.uc_flags) ||
410 __put_user(0, &frame->uc.uc_link) ||
411 __put_user((void*)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) ||
412 __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags) ||
413 __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size))
414 goto give_sigsegv;
415
416 if (setup_sigcontext(&frame->uc.uc_mcontext, set->sig[0]))
417 goto give_sigsegv;
418
419 if (__copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)))
420 goto give_sigsegv;
421
422 /* Set up to return from userspace. If provided, use a stub
423 * already in userspace. */
424 if (ka->sa.sa_flags & SA_RESTORER) {
425 if (__put_user(ka->sa.sa_restorer, &frame->pretcode))
426 goto give_sigsegv;
427 }
428 else {
429 /* Set up the following code on the stack:
430 * setlos #__NR_sigreturn,gr7
431 * tira gr0,0
432 */
433 if (__put_user((void (*)(void))frame->retcode, &frame->pretcode) ||
434 __put_user(0x8efc0000|__NR_rt_sigreturn, &frame->retcode[0]) ||
435 __put_user(0xc0700000, &frame->retcode[1]))
436 goto give_sigsegv;
437
438 flush_icache_range((unsigned long) frame->retcode,
439 (unsigned long) (frame->retcode + 2));
440 }
441
442 /* Set up registers for signal handler */
443 regs->sp = (unsigned long) frame;
444 regs->lr = (unsigned long) &frame->retcode;
445 regs->gr8 = sig;
446 regs->gr9 = (unsigned long) &frame->info;
447
448 if (get_personality & FDPIC_FUNCPTRS) {
449 struct fdpic_func_descriptor *funcptr =
450 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
451 __get_user(regs->pc, &funcptr->text);
452 __get_user(regs->gr15, &funcptr->GOT);
453 } else {
454 regs->pc = (unsigned long) ka->sa.sa_handler;
455 regs->gr15 = 0;
456 }
457
458 set_fs(USER_DS);
459
460#if DEBUG_SIG
461 printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n",
462 sig, current->comm, current->pid, frame, regs->pc, frame->pretcode);
463#endif
464
465 return;
466
467give_sigsegv:
468 if (sig == SIGSEGV)
469 ka->sa.sa_handler = SIG_DFL;
470 force_sig(SIGSEGV, current);
471
472} /* end setup_rt_frame() */
473
474/*****************************************************************************/
475/*
476 * OK, we're invoking a handler
477 */
478static void handle_signal(unsigned long sig, siginfo_t *info,
479 struct k_sigaction *ka, sigset_t *oldset,
480 struct pt_regs *regs)
481{
482 /* Are we from a system call? */
483 if (in_syscall(regs)) {
484 /* If so, check system call restarting.. */
485 switch (regs->gr8) {
486 case -ERESTART_RESTARTBLOCK:
487 case -ERESTARTNOHAND:
488 regs->gr8 = -EINTR;
489 break;
490
491 case -ERESTARTSYS:
492 if (!(ka->sa.sa_flags & SA_RESTART)) {
493 regs->gr8 = -EINTR;
494 break;
495 }
496 /* fallthrough */
497 case -ERESTARTNOINTR:
498 regs->gr8 = regs->orig_gr8;
499 regs->pc -= 4;
500 }
501 }
502
503 /* Set up the stack frame */
504 if (ka->sa.sa_flags & SA_SIGINFO)
505 setup_rt_frame(sig, ka, info, oldset, regs);
506 else
507 setup_frame(sig, ka, oldset, regs);
508
509 if (!(ka->sa.sa_flags & SA_NODEFER)) {
510 spin_lock_irq(&current->sighand->siglock);
511 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
512 sigaddset(&current->blocked, sig);
513 recalc_sigpending();
514 spin_unlock_irq(&current->sighand->siglock);
515 }
516} /* end handle_signal() */
517
518/*****************************************************************************/
519/*
520 * Note that 'init' is a special process: it doesn't get signals it doesn't
521 * want to handle. Thus you cannot kill init even with a SIGKILL even by
522 * mistake.
523 */
524int do_signal(struct pt_regs *regs, sigset_t *oldset)
525{
526 struct k_sigaction ka;
527 siginfo_t info;
528 int signr;
529
530 /*
531 * We want the common case to go fast, which
532 * is why we may in certain cases get here from
533 * kernel mode. Just return without doing anything
534 * if so.
535 */
536 if (!user_mode(regs))
537 return 1;
538
539 if (current->flags & PF_FREEZE) {
540 refrigerator(0);
541 goto no_signal;
542 }
543
544 if (!oldset)
545 oldset = &current->blocked;
546
547 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
548 if (signr > 0) {
549 handle_signal(signr, &info, &ka, oldset, regs);
550 return 1;
551 }
552
553 no_signal:
554 /* Did we come from a system call? */
555 if (regs->syscallno >= 0) {
556 /* Restart the system call - no handlers present */
557 if (regs->gr8 == -ERESTARTNOHAND ||
558 regs->gr8 == -ERESTARTSYS ||
559 regs->gr8 == -ERESTARTNOINTR) {
560 regs->gr8 = regs->orig_gr8;
561 regs->pc -= 4;
562 }
563
564 if (regs->gr8 == -ERESTART_RESTARTBLOCK){
565 regs->gr8 = __NR_restart_syscall;
566 regs->pc -= 4;
567 }
568 }
569
570 return 0;
571} /* end do_signal() */
572
573/*****************************************************************************/
574/*
575 * notification of userspace execution resumption
576 * - triggered by current->work.notify_resume
577 */
578asmlinkage void do_notify_resume(__u32 thread_info_flags)
579{
580 /* pending single-step? */
581 if (thread_info_flags & _TIF_SINGLESTEP)
582 clear_thread_flag(TIF_SINGLESTEP);
583
584 /* deal with pending signal delivery */
585 if (thread_info_flags & _TIF_SIGPENDING)
586 do_signal(__frame, NULL);
587
588} /* end do_notify_resume() */
diff --git a/arch/frv/kernel/sleep.S b/arch/frv/kernel/sleep.S
new file mode 100644
index 000000000000..e6079b8cac60
--- /dev/null
+++ b/arch/frv/kernel/sleep.S
@@ -0,0 +1,374 @@
1/* sleep.S: power saving mode entry
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Woodhouse (dwmw2@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/sys.h>
14#include <linux/config.h>
15#include <linux/linkage.h>
16#include <asm/setup.h>
17#include <asm/segment.h>
18#include <asm/page.h>
19#include <asm/ptrace.h>
20#include <asm/errno.h>
21#include <asm/cache.h>
22#include <asm/spr-regs.h>
23
24#define __addr_MASK 0xfeff9820 /* interrupt controller mask */
25
26#define __addr_FR55X_DRCN 0xfeff0218 /* Address of DRCN register */
27#define FR55X_DSTS_OFFSET -4 /* Offset from DRCN to DSTS */
28#define FR55X_SDRAMC_DSTS_SSI 0x00000002 /* indicates that the SDRAM is in self-refresh mode */
29
30#define __addr_FR4XX_DRCN 0xfe000430 /* Address of DRCN register */
31#define FR4XX_DSTS_OFFSET -8 /* Offset from DRCN to DSTS */
32#define FR4XX_SDRAMC_DSTS_SSI 0x00000001 /* indicates that the SDRAM is in self-refresh mode */
33
34#define SDRAMC_DRCN_SR 0x00000001 /* transition SDRAM into self-refresh mode */
35
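The constants above describe the SDRAM controller handshake that the locked-icache code later in this file performs: set the DRCN self-refresh bit, then poll DSTS until the SSI flag confirms the transition (and the reverse on wake-up). A rough C rendering of that protocol for the FR4xx register layout, using volatile accesses as stand-ins for the ld/st instructions; it is only an illustration and is meaningful only on the real hardware:

/* Illustrative only: C rendering of the FR4xx SDRAM self-refresh handshake
 * performed by the locked-icache code below.  Not kernel code. */
#include <stdint.h>

#define FR4XX_DRCN	((volatile uint32_t *) (uintptr_t) 0xfe000430u)	/* __addr_FR4XX_DRCN */
#define FR4XX_DSTS	((volatile uint32_t *) (uintptr_t) 0xfe000428u)	/* DRCN + FR4XX_DSTS_OFFSET */
#define SDRAMC_DRCN_SR		0x00000001
#define FR4XX_SDRAMC_DSTS_SSI	0x00000001

void sdram_enter_self_refresh(void)
{
	*FR4XX_DRCN |= SDRAMC_DRCN_SR;			/* request self-refresh */
	while (!(*FR4XX_DSTS & FR4XX_SDRAMC_DSTS_SSI))	/* wait until the SDRAM */
		;					/* reports self-refresh */
}

void sdram_leave_self_refresh(void)
{
	*FR4XX_DRCN &= ~SDRAMC_DRCN_SR;			/* release self-refresh */
	while (*FR4XX_DSTS & FR4XX_SDRAMC_DSTS_SSI)	/* wait for normal operation */
		;
}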
36 .section .bss
37 .balign 8
38 .globl __sleep_save_area
39__sleep_save_area:
40 .space 16
41
42
43 .text
44 .balign 4
45
46.macro li v r
47 sethi.p %hi(\v),\r
48 setlo %lo(\v),\r
49.endm
50
51#ifdef CONFIG_PM
52###############################################################################
53#
54# CPU suspension routine
55# - void frv_cpu_suspend(unsigned long pdm_mode)
56#
57###############################################################################
58 .globl frv_cpu_suspend
59 .type frv_cpu_suspend,@function
60frv_cpu_suspend:
61
62 #----------------------------------------------------
63 # save hsr0, psr, isr, and lr for resume code
64 #----------------------------------------------------
65 li __sleep_save_area,gr11
66
67 movsg hsr0,gr4
68 movsg psr,gr5
69 movsg isr,gr6
70 movsg lr,gr7
71 stdi gr4,@(gr11,#0)
72 stdi gr6,@(gr11,#8)
73
74 # store the return address from sleep in GR14, and its complement in GR13 as a check
75 li __ramboot_resume,gr14
76#ifdef CONFIG_MMU
77 # Resume via RAMBOOT# will turn MMU off, so bootloader needs a physical address.
78 sethi.p %hi(__page_offset),gr13
79 setlo %lo(__page_offset),gr13
80 sub gr14,gr13,gr14
81#endif
82 not gr14,gr13
83
84 #----------------------------------------------------
85 # preload and lock into icache that code which may have to run
86 # when dram is in self-refresh state.
87 #----------------------------------------------------
88 movsg hsr0, gr3
89 li HSR0_ICE,gr4
90 or gr3,gr4,gr3
91 movgs gr3,hsr0
92 or gr3,gr8,gr7 // add the sleep bits for later
93
94 li #__icache_lock_start,gr3
95 li #__icache_lock_end,gr4
961: icpl gr3,gr0,#1
97 addi gr3,#L1_CACHE_BYTES,gr3
98 cmp gr4,gr3,icc0
99 bhi icc0,#0,1b
100
101 # disable exceptions
102 movsg psr,gr8
103 andi.p gr8,#~PSR_PIL,gr8
104 andi gr8,~PSR_ET,gr8
105 movgs gr8,psr
106 ori gr8,#PSR_ET,gr8
107
108 srli gr8,#28,gr4
109 subicc gr4,#3,gr0,icc0
110 beq icc0,#0,1f
111 # FR4xx
112 li __addr_FR4XX_DRCN,gr4
113 li FR4XX_SDRAMC_DSTS_SSI,gr5
114 li FR4XX_DSTS_OFFSET,gr6
115 bra __icache_lock_start
1161:
117 # FR5xx
118 li __addr_FR55X_DRCN,gr4
119 li FR55X_SDRAMC_DSTS_SSI,gr5
120 li FR55X_DSTS_OFFSET,gr6
121 bra __icache_lock_start
122
123 .size frv_cpu_suspend, .-frv_cpu_suspend
124
125#
126# the final part of the sleep sequence...
127# - we want it to be cacheline aligned so we can lock it into the icache easily
128# On entry: gr7 holds desired hsr0 sleep value
129# gr8 holds desired psr sleep value
130#
131 .balign L1_CACHE_BYTES
132 .type __icache_lock_start,@function
133__icache_lock_start:
134
135 #----------------------------------------------------
136 # put SDRAM in self-refresh mode
137 #----------------------------------------------------
138
139 # Flush all data in the cache using the DCEF instruction.
140 dcef @(gr0,gr0),#1
141
142 # Stop DMAC transfer
143
144 # Execute dummy load from SDRAM
145 ldi @(gr11,#0),gr11
146
147 # put the SDRAM into self-refresh mode
148 ld @(gr4,gr0),gr11
149 ori gr11,#SDRAMC_DRCN_SR,gr11
150 st gr11,@(gr4,gr0)
151 membar
152
153 # wait for SDRAM to reach self-refresh mode
1541: ld @(gr4,gr6),gr11
155 andcc gr11,gr5,gr11,icc0
156 beq icc0,#0,1b
157
158 # Set the GPIO register so that the IRQ[3:0] pins become valid, as required.
159 # Set the clock mode (CLKC register) as required.
160 # - At this time, also set the CLKC register P0 bit.
161
162 # Set the HSR0 register PDM field.
163 movgs gr7,hsr0
164
165 # Execute NOP 32 times.
166 .rept 32
167 nop
168 .endr
169
170#if 0 // Fujitsu recommends skipping this and will update the docs.
171 # Release the interrupt mask setting of the MASK register of the
172 # interrupt controller if necessary.
173 sti gr10,@(gr9,#0)
174 membar
175#endif
176
177 # Set the PSR register ET bit to 1 to enable interrupts.
178 movgs gr8,psr
179
180 ###################################################
181 # this is only reached if waking up via interrupt
182 ###################################################
183
184 # Execute NOP 32 times.
185 .rept 32
186 nop
187 .endr
188
189 #----------------------------------------------------
190 # wake SDRAM from self-refresh mode
191 #----------------------------------------------------
192 ld @(gr4,gr0),gr11
193 andi gr11,#~SDRAMC_DRCN_SR,gr11
194 st gr11,@(gr4,gr0)
195 membar
1962:
197 ld @(gr4,gr6),gr11 // Wait for it to come back...
198 andcc gr11,gr5,gr0,icc0
199 bne icc0,0,2b
200
201 # wait for the SDRAM to stabilise
202 li 0x0100000,gr3
2033: subicc gr3,#1,gr3,icc0
204 bne icc0,#0,3b
205
206 # now that DRAM is back, this is the end of the code which gets
207 # locked in icache.
208__icache_lock_end:
209 .size __icache_lock_start, .-__icache_lock_start
210
211 # Fall-through to the RAMBOOT# wakeup path
212
213###############################################################################
214#
215# resume from suspend re-entry point reached via RAMBOOT# and bootloader
216#
217###############################################################################
218__ramboot_resume:
219
220 #----------------------------------------------------
221 # restore hsr0, psr, isr, and leave saved lr in gr7
222 #----------------------------------------------------
223 li __sleep_save_area,gr11
224#ifdef CONFIG_MMU
225 movsg hsr0,gr4
226 sethi.p %hi(HSR0_EXMMU),gr3
227 setlo %lo(HSR0_EXMMU),gr3
228 andcc gr3,gr4,gr0,icc0
229 bne icc0,#0,2f
230
231 # need to use physical address
232 sethi.p %hi(__page_offset),gr3
233 setlo %lo(__page_offset),gr3
234 sub gr11,gr3,gr11
235
236 # flush all tlb entries
237 setlos #64,gr4
238 setlos.p #PAGE_SIZE,gr5
239 setlos #0,gr6
2401:
241 tlbpr gr6,gr0,#6,#0
242 subicc.p gr4,#1,gr4,icc0
243 add gr6,gr5,gr6
244 bne icc0,#2,1b
245
246 # need a temporary mapping for the current physical address we are
247	# using between the time the MMU is enabled and the jump to the
248	# virtual address is made.
249 sethi.p %hi(0x00000000),gr4
250 setlo %lo(0x00000000),gr4 ; physical address
251 setlos #xAMPRx_L|xAMPRx_M|xAMPRx_SS_256Mb|xAMPRx_S_KERNEL|xAMPRx_V,gr5
252 or gr4,gr5,gr5
253
254 movsg cxnr,gr13
255 or gr4,gr13,gr4
256
257 movgs gr4,iamlr1 ; mapped from real address 0
258 movgs gr5,iampr1 ; cached kernel memory at 0x00000000
2592:
260#endif
261
262 lddi @(gr11,#0),gr4 ; hsr0, psr
263 lddi @(gr11,#8),gr6 ; isr, lr
264 movgs gr4,hsr0
265 bar
266
267#ifdef CONFIG_MMU
268 sethi.p %hi(1f),gr11
269 setlo %lo(1f),gr11
270 jmpl @(gr11,gr0)
2711:
272 movgs gr0,iampr1 ; get rid of temporary mapping
273#endif
274 movgs gr5,psr
275 movgs gr6,isr
276
277 #----------------------------------------------------
278 # unlock the icache which was locked before going to sleep
279 #----------------------------------------------------
280 li __icache_lock_start,gr3
281 li __icache_lock_end,gr4
2821: icul gr3
283 addi gr3,#L1_CACHE_BYTES,gr3
284 cmp gr4,gr3,icc0
285 bhi icc0,#0,1b
286
287 #----------------------------------------------------
288 # back to business as usual
289 #----------------------------------------------------
290 jmpl @(gr7,gr0) ;
291
292#endif /* CONFIG_PM */
293
294###############################################################################
295#
296# CPU core sleep mode routine
297#
298###############################################################################
299 .globl frv_cpu_core_sleep
300 .type frv_cpu_core_sleep,@function
301frv_cpu_core_sleep:
302
303 # Preload into icache.
304 li #__core_sleep_icache_lock_start,gr3
305 li #__core_sleep_icache_lock_end,gr4
306
3071: icpl gr3,gr0,#1
308 addi gr3,#L1_CACHE_BYTES,gr3
309 cmp gr4,gr3,icc0
310 bhi icc0,#0,1b
311
312 bra __core_sleep_icache_lock_start
313
314 .balign L1_CACHE_BYTES
315__core_sleep_icache_lock_start:
316
317 # (1) Set the PSR register ET bit to 0 to disable interrupts.
318 movsg psr,gr8
319 andi.p gr8,#~(PSR_PIL),gr8
320 andi gr8,#~(PSR_ET),gr4
321 movgs gr4,psr
322
323#if 0 // Fujitsu recommends skipping this and will update the docs.
324 # (2) Set '1' to all bits in the MASK register of the interrupt
325 # controller and mask interrupts.
326 sethi.p %hi(__addr_MASK),gr9
327 setlo %lo(__addr_MASK),gr9
328 sethi.p %hi(0xffff0000),gr4
329 setlo %lo(0xffff0000),gr4
330 ldi @(gr9,#0),gr10
331 sti gr4,@(gr9,#0)
332#endif
333 # (3) Flush all data in the cache using the DCEF instruction.
334 dcef @(gr0,gr0),#1
335
336 # (4) Execute the memory barrier instruction
337 membar
338
339 # (5) Set the GPIO register so that the IRQ[3:0] pins become valid, as required.
340 # (6) Set the clock mode (CLKC register) as required.
341 # - At this time, also set the CLKC register P0 bit.
342 # (7) Set the HSR0 register PDM field to 001 .
343 movsg hsr0,gr4
344 ori gr4,HSR0_PDM_CORE_SLEEP,gr4
345 movgs gr4,hsr0
346
347 # (8) Execute NOP 32 times.
348 .rept 32
349 nop
350 .endr
351
352#if 0 // Fujitsu recommends skipping this and will update the docs.
353 # (9) Release the interrupt mask setting of the MASK register of the
354 # interrupt controller if necessary.
355 sti gr10,@(gr9,#0)
356 membar
357#endif
358
359 # (10) Set the PSR register ET bit to 1 to enable interrupts.
360 movgs gr8,psr
361
362__core_sleep_icache_lock_end:
363
364 # Unlock from icache
365 li __core_sleep_icache_lock_start,gr3
366 li __core_sleep_icache_lock_end,gr4
3671: icul gr3
368 addi gr3,#L1_CACHE_BYTES,gr3
369 cmp gr4,gr3,icc0
370 bhi icc0,#0,1b
371
372 bralr
373
374 .size frv_cpu_core_sleep, .-frv_cpu_core_sleep
diff --git a/arch/frv/kernel/switch_to.S b/arch/frv/kernel/switch_to.S
new file mode 100644
index 000000000000..1703dc20174e
--- /dev/null
+++ b/arch/frv/kernel/switch_to.S
@@ -0,0 +1,496 @@
1###############################################################################
2#
3# switch_to.S: context switch operation
4#
5# Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
6# Written by David Howells (dhowells@redhat.com)
7#
8# This program is free software; you can redistribute it and/or
9# modify it under the terms of the GNU General Public License
10# as published by the Free Software Foundation; either version
11# 2 of the License, or (at your option) any later version.
12#
13###############################################################################
14#include <linux/config.h>
15#include <linux/linkage.h>
16#include <asm/thread_info.h>
17#include <asm/processor.h>
18#include <asm/registers.h>
19#include <asm/spr-regs.h>
20
21.macro LEDS val
22 setlos #~\val,gr27
23 st gr27,@(gr30,gr0)
24 membar
25 dcf @(gr30,gr0)
26.endm
27
28 .section .sdata
29 .balign 8
30
31 # address of frame 0 (userspace) on current kernel stack
32 .globl __kernel_frame0_ptr
33__kernel_frame0_ptr:
34 .long init_thread_union + THREAD_SIZE - USER_CONTEXT_SIZE
35
36 # address of current task
37 .globl __kernel_current_task
38__kernel_current_task:
39 .long init_task
40
41 .section .text
42 .balign 4
43
44###############################################################################
45#
46# struct task_struct *__switch_to(struct thread_struct *prev_thread,
47# struct thread_struct *next_thread,
48# struct task_struct *prev)
49#
50###############################################################################
51 .globl __switch_to
52__switch_to:
53 # save outgoing process's context
54 sethi.p %hi(__switch_back),gr13
55 setlo %lo(__switch_back),gr13
56 movsg lr,gr12
57
58 stdi gr28,@(gr8,#__THREAD_FRAME)
59 sti sp ,@(gr8,#__THREAD_SP)
60 sti fp ,@(gr8,#__THREAD_FP)
61 stdi gr12,@(gr8,#__THREAD_LR)
62 stdi gr16,@(gr8,#__THREAD_GR(16))
63 stdi gr18,@(gr8,#__THREAD_GR(18))
64 stdi gr20,@(gr8,#__THREAD_GR(20))
65 stdi gr22,@(gr8,#__THREAD_GR(22))
66 stdi gr24,@(gr8,#__THREAD_GR(24))
67 stdi.p gr26,@(gr8,#__THREAD_GR(26))
68
69 or gr8,gr8,gr22
70 ldi.p @(gr8,#__THREAD_USER),gr8
71 call save_user_regs
72 or gr22,gr22,gr8
73
74 # retrieve the new context
75 sethi.p %hi(__kernel_frame0_ptr),gr6
76 setlo %lo(__kernel_frame0_ptr),gr6
77 movsg psr,gr4
78
79 lddi.p @(gr9,#__THREAD_FRAME),gr10
80 or gr10,gr10,gr27 ; save prev for the return value
81
82 ldi @(gr11,#4),gr19 ; get new_current->thread_info
83
84 lddi @(gr9,#__THREAD_SP),gr12
85 ldi @(gr9,#__THREAD_LR),gr14
86 ldi @(gr9,#__THREAD_PC),gr18
87 ldi.p @(gr9,#__THREAD_FRAME0),gr7
88
89 # actually switch kernel contexts with ordinary exceptions disabled
90 andi gr4,#~PSR_ET,gr5
91 movgs gr5,psr
92
93 or.p gr10,gr0,gr28 ; set __frame
94 or gr11,gr0,gr29 ; set __current
95 or.p gr12,gr0,sp
96 or gr13,gr0,fp
97 or gr19,gr0,gr15 ; set __current_thread_info
98
99 sti gr7,@(gr6,#0) ; set __kernel_frame0_ptr
100 sti gr29,@(gr6,#4) ; set __kernel_current_task
101
102 movgs gr14,lr
103 bar
104
105 srli gr15,#28,gr5
106 subicc gr5,#0xc,gr0,icc0
107 beq icc0,#0,111f
108 break
109 nop
110111:
111
112 # jump to __switch_back or ret_from_fork as appropriate
113 # - move prev to GR8
114 movgs gr4,psr
115 jmpl.p @(gr18,gr0)
116 or gr27,gr27,gr8
117
118###############################################################################
119#
120# restore incoming process's context
121# - on entry:
122# - SP, FP, LR, GR15, GR28 and GR29 will have been set up appropriately
123# - GR8 will point to the outgoing task_struct
124# - GR9 will point to the incoming thread_struct
125#
126###############################################################################
127__switch_back:
128 lddi @(gr9,#__THREAD_GR(16)),gr16
129 lddi @(gr9,#__THREAD_GR(18)),gr18
130 lddi @(gr9,#__THREAD_GR(20)),gr20
131 lddi @(gr9,#__THREAD_GR(22)),gr22
132 lddi @(gr9,#__THREAD_GR(24)),gr24
133 lddi @(gr9,#__THREAD_GR(26)),gr26
134
135 # fall through into restore_user_regs()
136 ldi.p @(gr9,#__THREAD_USER),gr8
137 or gr8,gr8,gr9
138
139###############################################################################
140#
141# restore extra general regs and FP/Media regs
142# - void *restore_user_regs(const struct user_context *target, void *retval)
143# - on entry:
144# - GR8 will point to the user context to swap in
145# - GR9 will contain the value to be returned in GR8 (prev task on context switch)
146#
147###############################################################################
148 .globl restore_user_regs
149restore_user_regs:
150 movsg hsr0,gr6
151 ori gr6,#HSR0_GRHE|HSR0_FRLE|HSR0_FRHE,gr6
152 movgs gr6,hsr0
153 movsg hsr0,gr6
154
155 movsg psr,gr7
156 ori gr7,#PSR_EF|PSR_EM,gr7
157 movgs gr7,psr
158 movsg psr,gr7
159 srli gr7,#24,gr7
160 bar
161
162 lddi @(gr8,#__FPMEDIA_MSR(0)),gr4
163
164 movgs gr4,msr0
165 movgs gr5,msr1
166
167 lddfi @(gr8,#__FPMEDIA_ACC(0)),fr16
168 lddfi @(gr8,#__FPMEDIA_ACC(2)),fr18
169 ldbfi @(gr8,#__FPMEDIA_ACCG(0)),fr20
170 ldbfi @(gr8,#__FPMEDIA_ACCG(1)),fr21
171 ldbfi @(gr8,#__FPMEDIA_ACCG(2)),fr22
172 ldbfi @(gr8,#__FPMEDIA_ACCG(3)),fr23
173
174 mwtacc fr16,acc0
175 mwtacc fr17,acc1
176 mwtacc fr18,acc2
177 mwtacc fr19,acc3
178 mwtaccg fr20,accg0
179 mwtaccg fr21,accg1
180 mwtaccg fr22,accg2
181 mwtaccg fr23,accg3
182
183 # some CPUs have extra ACCx and ACCGx regs and maybe FSRx regs
184 subicc.p gr7,#0x50,gr0,icc0
185 subicc gr7,#0x31,gr0,icc1
186 beq icc0,#0,__restore_acc_fr451
187 beq icc1,#0,__restore_acc_fr555
188__restore_acc_cont:
189
190	# some CPUs have GR32-GR63
191 setlos #HSR0_FRHE,gr4
192 andcc gr6,gr4,gr0,icc0
193 beq icc0,#1,__restore_skip_gr32_gr63
194
195 lddi @(gr8,#__INT_GR(32)),gr32
196 lddi @(gr8,#__INT_GR(34)),gr34
197 lddi @(gr8,#__INT_GR(36)),gr36
198 lddi @(gr8,#__INT_GR(38)),gr38
199 lddi @(gr8,#__INT_GR(40)),gr40
200 lddi @(gr8,#__INT_GR(42)),gr42
201 lddi @(gr8,#__INT_GR(44)),gr44
202 lddi @(gr8,#__INT_GR(46)),gr46
203 lddi @(gr8,#__INT_GR(48)),gr48
204 lddi @(gr8,#__INT_GR(50)),gr50
205 lddi @(gr8,#__INT_GR(52)),gr52
206 lddi @(gr8,#__INT_GR(54)),gr54
207 lddi @(gr8,#__INT_GR(56)),gr56
208 lddi @(gr8,#__INT_GR(58)),gr58
209 lddi @(gr8,#__INT_GR(60)),gr60
210 lddi @(gr8,#__INT_GR(62)),gr62
211__restore_skip_gr32_gr63:
212
213	# all CPUs have FR0-FR31
214 lddfi @(gr8,#__FPMEDIA_FR( 0)),fr0
215 lddfi @(gr8,#__FPMEDIA_FR( 2)),fr2
216 lddfi @(gr8,#__FPMEDIA_FR( 4)),fr4
217 lddfi @(gr8,#__FPMEDIA_FR( 6)),fr6
218 lddfi @(gr8,#__FPMEDIA_FR( 8)),fr8
219 lddfi @(gr8,#__FPMEDIA_FR(10)),fr10
220 lddfi @(gr8,#__FPMEDIA_FR(12)),fr12
221 lddfi @(gr8,#__FPMEDIA_FR(14)),fr14
222 lddfi @(gr8,#__FPMEDIA_FR(16)),fr16
223 lddfi @(gr8,#__FPMEDIA_FR(18)),fr18
224 lddfi @(gr8,#__FPMEDIA_FR(20)),fr20
225 lddfi @(gr8,#__FPMEDIA_FR(22)),fr22
226 lddfi @(gr8,#__FPMEDIA_FR(24)),fr24
227 lddfi @(gr8,#__FPMEDIA_FR(26)),fr26
228 lddfi @(gr8,#__FPMEDIA_FR(28)),fr28
229 lddfi.p @(gr8,#__FPMEDIA_FR(30)),fr30
230
231	# some CPUs have FR32-FR63
232 setlos #HSR0_FRHE,gr4
233 andcc gr6,gr4,gr0,icc0
234 beq icc0,#1,__restore_skip_fr32_fr63
235
236 lddfi @(gr8,#__FPMEDIA_FR(32)),fr32
237 lddfi @(gr8,#__FPMEDIA_FR(34)),fr34
238 lddfi @(gr8,#__FPMEDIA_FR(36)),fr36
239 lddfi @(gr8,#__FPMEDIA_FR(38)),fr38
240 lddfi @(gr8,#__FPMEDIA_FR(40)),fr40
241 lddfi @(gr8,#__FPMEDIA_FR(42)),fr42
242 lddfi @(gr8,#__FPMEDIA_FR(44)),fr44
243 lddfi @(gr8,#__FPMEDIA_FR(46)),fr46
244 lddfi @(gr8,#__FPMEDIA_FR(48)),fr48
245 lddfi @(gr8,#__FPMEDIA_FR(50)),fr50
246 lddfi @(gr8,#__FPMEDIA_FR(52)),fr52
247 lddfi @(gr8,#__FPMEDIA_FR(54)),fr54
248 lddfi @(gr8,#__FPMEDIA_FR(56)),fr56
249 lddfi @(gr8,#__FPMEDIA_FR(58)),fr58
250 lddfi @(gr8,#__FPMEDIA_FR(60)),fr60
251 lddfi @(gr8,#__FPMEDIA_FR(62)),fr62
252__restore_skip_fr32_fr63:
253
254 lddi @(gr8,#__FPMEDIA_FNER(0)),gr4
255 movsg fner0,gr4
256 movsg fner1,gr5
257 or.p gr9,gr9,gr8
258 bralr
259
260 # the FR451 also has ACC8-11/ACCG8-11 regs (but not 4-7...)
261__restore_acc_fr451:
262 lddfi @(gr8,#__FPMEDIA_ACC(4)),fr16
263 lddfi @(gr8,#__FPMEDIA_ACC(6)),fr18
264 ldbfi @(gr8,#__FPMEDIA_ACCG(4)),fr20
265 ldbfi @(gr8,#__FPMEDIA_ACCG(5)),fr21
266 ldbfi @(gr8,#__FPMEDIA_ACCG(6)),fr22
267 ldbfi @(gr8,#__FPMEDIA_ACCG(7)),fr23
268
269 mwtacc fr16,acc8
270 mwtacc fr17,acc9
271 mwtacc fr18,acc10
272 mwtacc fr19,acc11
273 mwtaccg fr20,accg8
274 mwtaccg fr21,accg9
275 mwtaccg fr22,accg10
276 mwtaccg fr23,accg11
277 bra __restore_acc_cont
278
279 # the FR555 also has ACC4-7/ACCG4-7 regs and an FSR0 reg
280__restore_acc_fr555:
281 lddfi @(gr8,#__FPMEDIA_ACC(4)),fr16
282 lddfi @(gr8,#__FPMEDIA_ACC(6)),fr18
283 ldbfi @(gr8,#__FPMEDIA_ACCG(4)),fr20
284 ldbfi @(gr8,#__FPMEDIA_ACCG(5)),fr21
285 ldbfi @(gr8,#__FPMEDIA_ACCG(6)),fr22
286 ldbfi @(gr8,#__FPMEDIA_ACCG(7)),fr23
287
288 mnop.p
289 mwtacc fr16,acc4
290 mnop.p
291 mwtacc fr17,acc5
292 mnop.p
293 mwtacc fr18,acc6
294 mnop.p
295 mwtacc fr19,acc7
296 mnop.p
297 mwtaccg fr20,accg4
298 mnop.p
299 mwtaccg fr21,accg5
300 mnop.p
301 mwtaccg fr22,accg6
302 mnop.p
303 mwtaccg fr23,accg7
304
305 ldi @(gr8,#__FPMEDIA_FSR(0)),gr4
306 movgs gr4,fsr0
307
308 bra __restore_acc_cont
309
310
311###############################################################################
312#
313# save extra general regs and FP/Media regs
314# - void save_user_regs(struct user_context *target)
315#
316###############################################################################
317 .globl save_user_regs
318save_user_regs:
319 movsg hsr0,gr6
320 ori gr6,#HSR0_GRHE|HSR0_FRLE|HSR0_FRHE,gr6
321 movgs gr6,hsr0
322 movsg hsr0,gr6
323
324 movsg psr,gr7
325 ori gr7,#PSR_EF|PSR_EM,gr7
326 movgs gr7,psr
327 movsg psr,gr7
328 srli gr7,#24,gr7
329 bar
330
331 movsg fner0,gr4
332 movsg fner1,gr5
333 stdi.p gr4,@(gr8,#__FPMEDIA_FNER(0))
334
335	# some CPUs have GR32-GR63
336 setlos #HSR0_GRHE,gr4
337 andcc gr6,gr4,gr0,icc0
338 beq icc0,#1,__save_skip_gr32_gr63
339
340 stdi gr32,@(gr8,#__INT_GR(32))
341 stdi gr34,@(gr8,#__INT_GR(34))
342 stdi gr36,@(gr8,#__INT_GR(36))
343 stdi gr38,@(gr8,#__INT_GR(38))
344 stdi gr40,@(gr8,#__INT_GR(40))
345 stdi gr42,@(gr8,#__INT_GR(42))
346 stdi gr44,@(gr8,#__INT_GR(44))
347 stdi gr46,@(gr8,#__INT_GR(46))
348 stdi gr48,@(gr8,#__INT_GR(48))
349 stdi gr50,@(gr8,#__INT_GR(50))
350 stdi gr52,@(gr8,#__INT_GR(52))
351 stdi gr54,@(gr8,#__INT_GR(54))
352 stdi gr56,@(gr8,#__INT_GR(56))
353 stdi gr58,@(gr8,#__INT_GR(58))
354 stdi gr60,@(gr8,#__INT_GR(60))
355 stdi gr62,@(gr8,#__INT_GR(62))
356__save_skip_gr32_gr63:
357
358	# all CPUs have FR0-FR31
359 stdfi fr0 ,@(gr8,#__FPMEDIA_FR( 0))
360 stdfi fr2 ,@(gr8,#__FPMEDIA_FR( 2))
361 stdfi fr4 ,@(gr8,#__FPMEDIA_FR( 4))
362 stdfi fr6 ,@(gr8,#__FPMEDIA_FR( 6))
363 stdfi fr8 ,@(gr8,#__FPMEDIA_FR( 8))
364 stdfi fr10,@(gr8,#__FPMEDIA_FR(10))
365 stdfi fr12,@(gr8,#__FPMEDIA_FR(12))
366 stdfi fr14,@(gr8,#__FPMEDIA_FR(14))
367 stdfi fr16,@(gr8,#__FPMEDIA_FR(16))
368 stdfi fr18,@(gr8,#__FPMEDIA_FR(18))
369 stdfi fr20,@(gr8,#__FPMEDIA_FR(20))
370 stdfi fr22,@(gr8,#__FPMEDIA_FR(22))
371 stdfi fr24,@(gr8,#__FPMEDIA_FR(24))
372 stdfi fr26,@(gr8,#__FPMEDIA_FR(26))
373 stdfi fr28,@(gr8,#__FPMEDIA_FR(28))
374 stdfi.p fr30,@(gr8,#__FPMEDIA_FR(30))
375
376	# some CPUs have FR32-FR63
377 setlos #HSR0_FRHE,gr4
378 andcc gr6,gr4,gr0,icc0
379 beq icc0,#1,__save_skip_fr32_fr63
380
381 stdfi fr32,@(gr8,#__FPMEDIA_FR(32))
382 stdfi fr34,@(gr8,#__FPMEDIA_FR(34))
383 stdfi fr36,@(gr8,#__FPMEDIA_FR(36))
384 stdfi fr38,@(gr8,#__FPMEDIA_FR(38))
385 stdfi fr40,@(gr8,#__FPMEDIA_FR(40))
386 stdfi fr42,@(gr8,#__FPMEDIA_FR(42))
387 stdfi fr44,@(gr8,#__FPMEDIA_FR(44))
388 stdfi fr46,@(gr8,#__FPMEDIA_FR(46))
389 stdfi fr48,@(gr8,#__FPMEDIA_FR(48))
390 stdfi fr50,@(gr8,#__FPMEDIA_FR(50))
391 stdfi fr52,@(gr8,#__FPMEDIA_FR(52))
392 stdfi fr54,@(gr8,#__FPMEDIA_FR(54))
393 stdfi fr56,@(gr8,#__FPMEDIA_FR(56))
394 stdfi fr58,@(gr8,#__FPMEDIA_FR(58))
395 stdfi fr60,@(gr8,#__FPMEDIA_FR(60))
396 stdfi fr62,@(gr8,#__FPMEDIA_FR(62))
397__save_skip_fr32_fr63:
398
399 mrdacc acc0 ,fr4
400 mrdacc acc1 ,fr5
401
402 stdfi.p fr4 ,@(gr8,#__FPMEDIA_ACC(0))
403
404 mrdacc acc2 ,fr6
405 mrdacc acc3 ,fr7
406
407 stdfi.p fr6 ,@(gr8,#__FPMEDIA_ACC(2))
408
409 mrdaccg accg0,fr4
410 stbfi.p fr4 ,@(gr8,#__FPMEDIA_ACCG(0))
411
412 mrdaccg accg1,fr5
413 stbfi.p fr5 ,@(gr8,#__FPMEDIA_ACCG(1))
414
415 mrdaccg accg2,fr6
416 stbfi.p fr6 ,@(gr8,#__FPMEDIA_ACCG(2))
417
418 mrdaccg accg3,fr7
419 stbfi fr7 ,@(gr8,#__FPMEDIA_ACCG(3))
420
421 movsg msr0 ,gr4
422 movsg msr1 ,gr5
423
424 stdi gr4 ,@(gr8,#__FPMEDIA_MSR(0))
425
426 # some CPUs have extra ACCx and ACCGx regs and maybe FSRx regs
427 subicc.p gr7,#0x50,gr0,icc0
428 subicc gr7,#0x31,gr0,icc1
429 beq icc0,#0,__save_acc_fr451
430 beq icc1,#0,__save_acc_fr555
431__save_acc_cont:
432
433 lddfi @(gr8,#__FPMEDIA_FR(4)),fr4
434 lddfi.p @(gr8,#__FPMEDIA_FR(6)),fr6
435 bralr
436
437 # the FR451 also has ACC8-11/ACCG8-11 regs (but not 4-7...)
438__save_acc_fr451:
439 mrdacc acc8 ,fr4
440 mrdacc acc9 ,fr5
441
442 stdfi.p fr4 ,@(gr8,#__FPMEDIA_ACC(4))
443
444 mrdacc acc10,fr6
445 mrdacc acc11,fr7
446
447 stdfi.p fr6 ,@(gr8,#__FPMEDIA_ACC(6))
448
449 mrdaccg accg8,fr4
450 stbfi.p fr4 ,@(gr8,#__FPMEDIA_ACCG(4))
451
452 mrdaccg accg9,fr5
453 stbfi.p fr5 ,@(gr8,#__FPMEDIA_ACCG(5))
454
455 mrdaccg accg10,fr6
456 stbfi.p fr6 ,@(gr8,#__FPMEDIA_ACCG(6))
457
458 mrdaccg accg11,fr7
459 stbfi fr7 ,@(gr8,#__FPMEDIA_ACCG(7))
460 bra __save_acc_cont
461
462 # the FR555 also has ACC4-7/ACCG4-7 regs and an FSR0 reg
463__save_acc_fr555:
464 mnop.p
465 mrdacc acc4 ,fr4
466 mnop.p
467 mrdacc acc5 ,fr5
468
469 stdfi fr4 ,@(gr8,#__FPMEDIA_ACC(4))
470
471 mnop.p
472 mrdacc acc6 ,fr6
473 mnop.p
474 mrdacc acc7 ,fr7
475
476 stdfi fr6 ,@(gr8,#__FPMEDIA_ACC(6))
477
478 mnop.p
479 mrdaccg accg4,fr4
480 stbfi fr4 ,@(gr8,#__FPMEDIA_ACCG(4))
481
482 mnop.p
483 mrdaccg accg5,fr5
484 stbfi fr5 ,@(gr8,#__FPMEDIA_ACCG(5))
485
486 mnop.p
487 mrdaccg accg6,fr6
488 stbfi fr6 ,@(gr8,#__FPMEDIA_ACCG(6))
489
490 mnop.p
491 mrdaccg accg7,fr7
492 stbfi fr7 ,@(gr8,#__FPMEDIA_ACCG(7))
493
494 movsg fsr0 ,gr4
495 sti gr4 ,@(gr8,#__FPMEDIA_FSR(0))
496 bra __save_acc_cont
diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
new file mode 100644
index 000000000000..931aa6d895e3
--- /dev/null
+++ b/arch/frv/kernel/sys_frv.c
@@ -0,0 +1,214 @@
1/* sys_frv.c: FRV arch-specific syscall wrappers
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/sys_m68k.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/smp_lock.h>
18#include <linux/sem.h>
19#include <linux/msg.h>
20#include <linux/shm.h>
21#include <linux/stat.h>
22#include <linux/mman.h>
23#include <linux/file.h>
24#include <linux/utsname.h>
25#include <linux/syscalls.h>
26
27#include <asm/setup.h>
28#include <asm/uaccess.h>
29#include <asm/ipc.h>
30
31/*
32 * sys_pipe() is the normal C calling standard for creating
33 * a pipe. It's not the way unix traditionally does this, though.
34 */
35asmlinkage long sys_pipe(unsigned long * fildes)
36{
37 int fd[2];
38 int error;
39
40 error = do_pipe(fd);
41 if (!error) {
42 if (copy_to_user(fildes, fd, 2*sizeof(int)))
43 error = -EFAULT;
44 }
45 return error;
46}
47
48asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
49 unsigned long prot, unsigned long flags,
50 unsigned long fd, unsigned long pgoff)
51{
52 int error = -EBADF;
53 struct file * file = NULL;
54
55 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
56 if (!(flags & MAP_ANONYMOUS)) {
57 file = fget(fd);
58 if (!file)
59 goto out;
60 }
61
62 /* As with sparc32, make sure the shift for mmap2 is constant
63 (12), no matter what PAGE_SIZE we have.... */
64
65 /* But unlike sparc32, don't just silently break if we're
66 trying to map something we can't */
67 if (pgoff & ((1<<(PAGE_SHIFT-12))-1))
68 return -EINVAL;
69
70 pgoff >>= (PAGE_SHIFT - 12);
71
72 down_write(&current->mm->mmap_sem);
73 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
74 up_write(&current->mm->mmap_sem);
75
76 if (file)
77 fput(file);
78out:
79 return error;
80}
81
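The pgoff check in sys_mmap2() rejects any 4KB-unit offset that does not land on a kernel page boundary once PAGE_SHIFT exceeds 12. A worked example, assuming a 16KB page size purely for illustration:

/* Illustrative only: the mmap2 offset check above, worked through for an
 * assumed 16KB kernel page size (PAGE_SHIFT = 14). */
#include <stdio.h>

int main(void)
{
	unsigned long PAGE_SHIFT = 14;	/* assumed for illustration */
	unsigned long pgoff = 6;	/* offset in 4KB units, i.e. 24KB */

	if (pgoff & ((1UL << (PAGE_SHIFT - 12)) - 1))
		printf("rejected: 24KB is not a multiple of the 16KB page size\n");
	else
		printf("accepted: kernel pgoff = %lu\n", pgoff >> (PAGE_SHIFT - 12));
	return 0;
}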
82#if 0 /* DAVIDM - do we want this */
83struct mmap_arg_struct64 {
84 __u32 addr;
85 __u32 len;
86 __u32 prot;
87 __u32 flags;
88 __u64 offset; /* 64 bits */
89 __u32 fd;
90};
91
92asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
93{
94 int error = -EFAULT;
95 struct file * file = NULL;
96 struct mmap_arg_struct64 a;
97 unsigned long pgoff;
98
99 if (copy_from_user(&a, arg, sizeof(a)))
100 return -EFAULT;
101
102 if ((long)a.offset & ~PAGE_MASK)
103 return -EINVAL;
104
105 pgoff = a.offset >> PAGE_SHIFT;
106 if ((a.offset >> PAGE_SHIFT) != pgoff)
107 return -EINVAL;
108
109 if (!(a.flags & MAP_ANONYMOUS)) {
110 error = -EBADF;
111 file = fget(a.fd);
112 if (!file)
113 goto out;
114 }
115 a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
116
117 down_write(&current->mm->mmap_sem);
118 error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
119 up_write(&current->mm->mmap_sem);
120 if (file)
121 fput(file);
122out:
123 return error;
124}
125#endif
126
127/*
128 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
129 *
130 * This is really horribly ugly.
131 */
132asmlinkage long sys_ipc(unsigned long call,
133 unsigned long first,
134 unsigned long second,
135 unsigned long third,
136 void __user *ptr,
137 unsigned long fifth)
138{
139 int version, ret;
140
141 version = call >> 16; /* hack for backward compatibility */
142 call &= 0xffff;
143
144 switch (call) {
145 case SEMOP:
146 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
147 case SEMTIMEDOP:
148 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
149 (const struct timespec __user *)fifth);
150
151 case SEMGET:
152 return sys_semget (first, second, third);
153 case SEMCTL: {
154 union semun fourth;
155 if (!ptr)
156 return -EINVAL;
157 if (get_user(fourth.__pad, (void * __user *) ptr))
158 return -EFAULT;
159 return sys_semctl (first, second, third, fourth);
160 }
161
162 case MSGSND:
163 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
164 second, third);
165 case MSGRCV:
166 switch (version) {
167 case 0: {
168 struct ipc_kludge tmp;
169 if (!ptr)
170 return -EINVAL;
171
172 if (copy_from_user(&tmp,
173 (struct ipc_kludge __user *) ptr,
174 sizeof (tmp)))
175 return -EFAULT;
176 return sys_msgrcv (first, tmp.msgp, second,
177 tmp.msgtyp, third);
178 }
179 default:
180 return sys_msgrcv (first,
181 (struct msgbuf __user *) ptr,
182 second, fifth, third);
183 }
184 case MSGGET:
185 return sys_msgget ((key_t) first, second);
186 case MSGCTL:
187 return sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
188
189 case SHMAT:
190 switch (version) {
191 default: {
192 ulong raddr;
193 ret = do_shmat (first, (char __user *) ptr, second, &raddr);
194 if (ret)
195 return ret;
196 return put_user (raddr, (ulong __user *) third);
197 }
198 case 1: /* iBCS2 emulator entry point */
199 if (!segment_eq(get_fs(), get_ds()))
200 return -EINVAL;
201 /* The "(ulong *) third" is valid _only_ because of the kernel segment thing */
202 return do_shmat (first, (char __user *) ptr, second, (ulong *) third);
203 }
204 case SHMDT:
205 return sys_shmdt ((char __user *)ptr);
206 case SHMGET:
207 return sys_shmget (first, second, third);
208 case SHMCTL:
209 return sys_shmctl (first, second,
210 (struct shmid_ds __user *) ptr);
211 default:
212 return -ENOSYS;
213 }
214}
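
For illustration only (not part of this patch), a userspace sketch of how a C library typically funnels semop() through the single ipc syscall that sys_ipc() demultiplexes above. It assumes the target exposes __NR_ipc (not all architectures do) and uses the kernel's SEMOP call number; the wrapper name is hypothetical.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SEMOP
#define SEMOP 1				/* matches the kernel's SEMOP case label */
#endif

static long my_semop(int semid, struct sembuf *sops, size_t nsops)
{
	/* call=SEMOP, first=semid, second=nsops, third unused, ptr=sops, fifth unused */
	return syscall(__NR_ipc, SEMOP, semid, nsops, 0, sops, 0);
}

int main(void)
{
	struct sembuf op = { 0, -1, IPC_NOWAIT };	/* decrement semaphore 0 */

	/* semid 0 is purely illustrative; expect -1/errno on most systems */
	if (my_semop(0, &op, 1) < 0)
		perror("semop via sys_ipc");
	return 0;
}
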
diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c
new file mode 100644
index 000000000000..408b0f382b42
--- /dev/null
+++ b/arch/frv/kernel/sysctl.c
@@ -0,0 +1,206 @@
1/* sysctl.c: implementation of /proc/sys files relating to FRV specifically
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/slab.h>
14#include <linux/sysctl.h>
15#include <linux/proc_fs.h>
16#include <linux/init.h>
17#include <asm/uaccess.h>
18
19static const char frv_cache_wback[] = "wback";
20static const char frv_cache_wthru[] = "wthru";
21
22static void frv_change_dcache_mode(unsigned long newmode)
23{
24 unsigned long flags, hsr0;
25
26 local_irq_save(flags);
27
28 hsr0 = __get_HSR(0);
29 hsr0 &= ~HSR0_DCE;
30 __set_HSR(0, hsr0);
31
32 asm volatile(" dcef @(gr0,gr0),#1 \n"
33 " membar \n"
34 : : : "memory"
35 );
36
37 hsr0 = (hsr0 & ~HSR0_CBM) | newmode;
38 __set_HSR(0, hsr0);
39 hsr0 |= HSR0_DCE;
40 __set_HSR(0, hsr0);
41
42 local_irq_restore(flags);
43
44 //printk("HSR0 now %08lx\n", hsr0);
45}
46
47/*****************************************************************************/
48/*
49 * handle requests delivered via /proc to dynamically switch the data cache write mode
50 */
51static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
52 void *buffer, size_t *lenp, loff_t *ppos)
53{
54 unsigned long hsr0;
55 char buff[8];
56 int len;
57
58 len = *lenp;
59
60 if (write) {
61 /* potential state change */
62 if (len <= 1 || len > sizeof(buff) - 1)
63 return -EINVAL;
64
65 if (copy_from_user(buff, buffer, len) != 0)
66 return -EFAULT;
67
68 if (buff[len - 1] == '\n')
69 buff[len - 1] = '\0';
70 else
71 buff[len] = '\0';
72
73 if (strcmp(buff, frv_cache_wback) == 0) {
74 /* switch dcache into write-back mode */
75 frv_change_dcache_mode(HSR0_CBM_COPY_BACK);
76 return 0;
77 }
78
79 if (strcmp(buff, frv_cache_wthru) == 0) {
80 /* switch dcache into write-through mode */
81 frv_change_dcache_mode(HSR0_CBM_WRITE_THRU);
82 return 0;
83 }
84
85 return -EINVAL;
86 }
87
88 /* read the state */
89 if (filp->f_pos > 0) {
90 *lenp = 0;
91 return 0;
92 }
93
94 hsr0 = __get_HSR(0);
95 switch (hsr0 & HSR0_CBM) {
96 case HSR0_CBM_WRITE_THRU:
97 memcpy(buff, frv_cache_wthru, sizeof(frv_cache_wthru) - 1);
98 buff[sizeof(frv_cache_wthru) - 1] = '\n';
99 len = sizeof(frv_cache_wthru);
100 break;
101 default:
102 memcpy(buff, frv_cache_wback, sizeof(frv_cache_wback) - 1);
103 buff[sizeof(frv_cache_wback) - 1] = '\n';
104 len = sizeof(frv_cache_wback);
105 break;
106 }
107
108 if (len > *lenp)
109 len = *lenp;
110
111 if (copy_to_user(buffer, buff, len) != 0)
112 return -EFAULT;
113
114 *lenp = len;
115 filp->f_pos = len;
116 return 0;
117
118} /* end procctl_frv_cachemode() */
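
Illustrative only, not from this patch: a userspace sketch of driving this handler through the proc file it backs; the /proc/sys/frv/cache-mode path follows from the "frv" directory table registered at the bottom of this file.

#include <stdio.h>

int main(void)
{
	char mode[16] = "";
	FILE *f = fopen("/proc/sys/frv/cache-mode", "r+");

	if (!f) {
		perror("/proc/sys/frv/cache-mode");
		return 1;
	}
	if (fgets(mode, sizeof(mode), f))
		printf("current mode: %s", mode);	/* "wback\n" or "wthru\n" */

	/* the handler accepts exactly "wback" or "wthru"; a trailing '\n' is allowed */
	rewind(f);
	fputs("wthru\n", f);
	fclose(f);
	return 0;
}
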
119
120/*****************************************************************************/
121/*
122 * permit the mm_struct the nominated process is using to have its MMU context ID pinned
123 */
124#ifdef CONFIG_MMU
125static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
126 void *buffer, size_t *lenp, loff_t *ppos)
127{
128 pid_t pid;
129 char buff[16], *p;
130 int len;
131
132 len = *lenp;
133
134 if (write) {
135 /* potential state change */
136 if (len <= 1 || len > sizeof(buff) - 1)
137 return -EINVAL;
138
139 if (copy_from_user(buff, buffer, len) != 0)
140 return -EFAULT;
141
142 if (buff[len - 1] == '\n')
143 buff[len - 1] = '\0';
144 else
145 buff[len] = '\0';
146
147 pid = simple_strtoul(buff, &p, 10);
148 if (*p)
149 return -EINVAL;
150
151 return cxn_pin_by_pid(pid);
152 }
153
154 /* read the currently pinned CXN */
155 if (filp->f_pos > 0) {
156 *lenp = 0;
157 return 0;
158 }
159
160 len = snprintf(buff, sizeof(buff), "%d\n", cxn_pinned);
161 if (len > *lenp)
162 len = *lenp;
163
164 if (copy_to_user(buffer, buff, len) != 0)
165 return -EFAULT;
166
167 *lenp = len;
168 filp->f_pos = len;
169 return 0;
170
171} /* end procctl_frv_pin_cxnr() */
172#endif
173
174/*
175 * FR-V specific sysctls
176 */
177static struct ctl_table frv_table[] =
178{
179 { 1, "cache-mode", NULL, 0, 0644, NULL, &procctl_frv_cachemode },
180#ifdef CONFIG_MMU
181 { 2, "pin-cxnr", NULL, 0, 0644, NULL, &procctl_frv_pin_cxnr },
182#endif
183 { 0 }
184};
185
186/*
187 * Use a temporary sysctl number. Horrid, but it will do until these
188 * interfaces are assigned proper sysctl numbers.
189 */
190#define CTL_FRV 9898
191static struct ctl_table frv_dir_table[] =
192{
193 {CTL_FRV, "frv", NULL, 0, 0555, frv_table},
194 {0}
195};
196
197/*
198 * Initialise the FR-V sysctl interface
199 */
200static int __init frv_sysctl_init(void)
201{
202 register_sysctl_table(frv_dir_table, 1);
203 return 0;
204}
205
206__initcall(frv_sysctl_init);
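
For illustration only (not part of this patch), a userspace sketch of the pin-cxnr file registered above (present only on CONFIG_MMU kernels): writing a pid pins that process's MMU context, and reading back returns the pinned context number kept in cxn_pinned.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/proc/sys/frv/pin-cxnr", "r+");

	if (!f)
		return 1;			/* no-MMU kernel, or file absent */

	fprintf(f, "%d\n", getpid());		/* ask to pin our own context */
	fflush(f);
	rewind(f);
	if (fgets(buf, sizeof(buf), f))
		printf("pinned cxn: %s", buf);	/* the value of cxn_pinned */
	fclose(f);
	return 0;
}
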
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
new file mode 100644
index 000000000000..075db6644694
--- /dev/null
+++ b/arch/frv/kernel/time.c
@@ -0,0 +1,234 @@
1/* time.c: FRV arch-specific time handling
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/time.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h> /* CONFIG_HEARTBEAT */
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/param.h>
19#include <linux/string.h>
20#include <linux/interrupt.h>
21#include <linux/profile.h>
22#include <linux/irq.h>
23#include <linux/mm.h>
24
25#include <asm/io.h>
26#include <asm/timer-regs.h>
27#include <asm/mb-regs.h>
28#include <asm/mb86943a.h>
29#include <asm/irq-routing.h>
30
31#include <linux/timex.h>
32
33#define TICK_SIZE (tick_nsec / 1000)
34
35extern unsigned long wall_jiffies;
36
37u64 jiffies_64 = INITIAL_JIFFIES;
38EXPORT_SYMBOL(jiffies_64);
39
40unsigned long __nongprelbss __clkin_clock_speed_HZ;
41unsigned long __nongprelbss __ext_bus_clock_speed_HZ;
42unsigned long __nongprelbss __res_bus_clock_speed_HZ;
43unsigned long __nongprelbss __sdram_clock_speed_HZ;
44unsigned long __nongprelbss __core_bus_clock_speed_HZ;
45unsigned long __nongprelbss __core_clock_speed_HZ;
46unsigned long __nongprelbss __dsu_clock_speed_HZ;
47unsigned long __nongprelbss __serial_clock_speed_HZ;
48unsigned long __delay_loops_MHz;
49
50static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs *regs);
51
52static struct irqaction timer_irq = {
53 timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
54};
55
56static inline int set_rtc_mmss(unsigned long nowtime)
57{
58 return -1;
59}
60
61/*
62 * timer_interrupt() needs to keep up the real-time clock,
63 * as well as call the "do_timer()" routine every clocktick
64 */
65static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
66{
67 /* last time the cmos clock got updated */
68 static long last_rtc_update = 0;
69
70 /*
71 * Here we are in the timer irq handler. We just have irqs locally
72 * disabled but we don't know if the timer_bh is running on the other
73 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
74 * the irq version of write_lock because as just said we have irq
75 * locally disabled. -arca
76 */
77 write_seqlock(&xtime_lock);
78
79 do_timer(regs);
80 update_process_times(user_mode(regs));
81 profile_tick(CPU_PROFILING, regs);
82
83 /*
84 * If we have an externally synchronized Linux clock, then update
85 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
86 * called as close as possible to 500 ms before the new second starts.
87 */
88 if ((time_status & STA_UNSYNC) == 0 &&
89 xtime.tv_sec > last_rtc_update + 660 &&
90 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
91 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2
92 ) {
93 if (set_rtc_mmss(xtime.tv_sec) == 0)
94 last_rtc_update = xtime.tv_sec;
95 else
96 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
97 }
98
99#ifdef CONFIG_HEARTBEAT
100 static unsigned short n;
101 n++;
102 __set_LEDS(n);
103#endif /* CONFIG_HEARTBEAT */
104
105 write_sequnlock(&xtime_lock);
106 return IRQ_HANDLED;
107}
108
109void time_divisor_init(void)
110{
111 unsigned short base, pre, prediv;
112
113 /* set the scheduling timer going */
114 pre = 1;
115 prediv = 4;
116 base = __res_bus_clock_speed_HZ / pre / HZ / (1 << prediv);
117
118 __set_TPRV(pre);
119 __set_TxCKSL_DATA(0, prediv);
120 __set_TCTR(TCTR_SC_CTR0 | TCTR_RL_RW_LH8 | TCTR_MODE_2);
121 __set_TCSR_DATA(0, base & 0xff);
122 __set_TCSR_DATA(0, base >> 8);
123}
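
Illustrative only, not from this patch: a worked example of the reload-value computation in time_divisor_init(); the 66 MHz resource-bus clock is hypothetical, not a real board value.

#include <stdio.h>

int main(void)
{
	unsigned long res_bus_hz = 66000000;	/* hypothetical resource-bus clock */
	unsigned int hz = 100, pre = 1, prediv = 4;

	/* same arithmetic as time_divisor_init(): 66,000,000 / 1 / 100 / 16 = 41250 */
	unsigned short base = res_bus_hz / pre / hz / (1 << prediv);

	printf("reload = %u (low byte %#x, high byte %#x)\n",
	       base, base & 0xff, base >> 8);
	return 0;
}
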
124
125void time_init(void)
126{
127 unsigned int year, mon, day, hour, min, sec;
128
129 extern void arch_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec);
130
131 /* FIX by dqg : Set to zero for platforms that don't have tod */
132 /* without this time is undefined and can overflow time_t, causing */
133 /* very strange errors */
134 year = 1980;
135 mon = day = 1;
136 hour = min = sec = 0;
137 arch_gettod (&year, &mon, &day, &hour, &min, &sec);
138
139 if ((year += 1900) < 1970)
140 year += 100;
141 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
142 xtime.tv_nsec = 0;
143
144 /* install scheduling interrupt handler */
145 setup_irq(IRQ_CPU_TIMER0, &timer_irq);
146
147 time_divisor_init();
148}
149
150/*
151 * This version of gettimeofday has jiffy (timer tick) resolution only on FR-V.
152 */
153void do_gettimeofday(struct timeval *tv)
154{
155 unsigned long seq;
156 unsigned long usec, sec;
157 unsigned long max_ntp_tick;
158
159 do {
160 unsigned long lost;
161
162 seq = read_seqbegin(&xtime_lock);
163
164 usec = 0;
165 lost = jiffies - wall_jiffies;
166
167 /*
168 * If time_adjust is negative then NTP is slowing the clock
169 * so make sure not to go into next possible interval.
170 * Better to lose some accuracy than have time go backwards..
171 */
172 if (unlikely(time_adjust < 0)) {
173 max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
174 usec = min(usec, max_ntp_tick);
175
176 if (lost)
177 usec += lost * max_ntp_tick;
178 }
179 else if (unlikely(lost))
180 usec += lost * (USEC_PER_SEC / HZ);
181
182 sec = xtime.tv_sec;
183 usec += (xtime.tv_nsec / 1000);
184 } while (read_seqretry(&xtime_lock, seq));
185
186 while (usec >= 1000000) {
187 usec -= 1000000;
188 sec++;
189 }
190
191 tv->tv_sec = sec;
192 tv->tv_usec = usec;
193}
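
Illustrative only, not from this patch: a standalone sketch of the correction performed above. Ticks not yet folded into xtime (the "lost" jiffies) each add a whole tick's worth of microseconds, and the result is then normalised. HZ and the sample values are purely illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int hz = 100;
	unsigned long xtime_sec = 1000, xtime_nsec = 995000000;	/* illustrative */
	unsigned long lost = 2;					/* two un-accounted ticks */
	unsigned long usec = lost * (1000000 / hz) + xtime_nsec / 1000;
	unsigned long sec = xtime_sec;

	while (usec >= 1000000) {	/* same normalisation loop as above */
		usec -= 1000000;
		sec++;
	}
	printf("%lu.%06lu\n", sec, usec);	/* prints 1001.015000 */
	return 0;
}
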
194
195int do_settimeofday(struct timespec *tv)
196{
197 time_t wtm_sec, sec = tv->tv_sec;
198 long wtm_nsec, nsec = tv->tv_nsec;
199
200 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
201 return -EINVAL;
202
203 write_seqlock_irq(&xtime_lock);
204 /*
205 * This is revolting. We need to set "xtime" correctly. However, the
206 * value in this location is the value at the most recent update of
207 * wall time. Discover what correction gettimeofday() would have
208 * made, and then undo it!
209 */
210 nsec -= 0 * NSEC_PER_USEC; /* FR-V has no sub-jiffy time source, so the gettimeofday offset is zero */
211 nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
212
213 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
214 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
215
216 set_normalized_timespec(&xtime, sec, nsec);
217 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
218
219 time_adjust = 0; /* stop active adjtime() */
220 time_status |= STA_UNSYNC;
221 time_maxerror = NTP_PHASE_LIMIT;
222 time_esterror = NTP_PHASE_LIMIT;
223 write_sequnlock_irq(&xtime_lock);
224 clock_was_set();
225 return 0;
226}
227
228/*
229 * Scheduler clock - returns current time in nanosec units.
230 */
231unsigned long long sched_clock(void)
232{
233 return jiffies_64 * (1000000000 / HZ);
234}
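
Illustrative only, not from this patch: a standalone sketch of the sched_clock() scaling above. With HZ=100 each jiffy contributes 10,000,000 ns, so this scheduler clock advances only in whole-tick steps.

#include <stdio.h>

int main(void)
{
	unsigned long long jiffies_64 = 123456;	/* illustrative tick count */
	unsigned int hz = 100;
	unsigned long long ns = jiffies_64 * (1000000000ULL / hz);

	printf("%llu jiffies -> %llu ns (%llu.%02llu s)\n",
	       jiffies_64, ns, ns / 1000000000ULL, (ns % 1000000000ULL) / 10000000ULL);
	return 0;
}
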
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
new file mode 100644
index 000000000000..89073cae4b5d
--- /dev/null
+++ b/arch/frv/kernel/traps.c
@@ -0,0 +1,431 @@
1/* traps.c: high-level exception handler for FR-V
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/sched.h>
14#include <linux/signal.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/types.h>
18#include <linux/user.h>
19#include <linux/string.h>
20#include <linux/linkage.h>
21#include <linux/init.h>
22
23#include <asm/setup.h>
24#include <asm/fpu.h>
25#include <asm/system.h>
26#include <asm/uaccess.h>
27#include <asm/pgtable.h>
28#include <asm/siginfo.h>
29#include <asm/unaligned.h>
30
31void show_backtrace(struct pt_regs *, unsigned long);
32
33extern asmlinkage void __break_hijack_kernel_event(void);
34
35/*****************************************************************************/
36/*
37 * instruction access error
38 */
39asmlinkage void insn_access_error(unsigned long esfr1, unsigned long epcr0, unsigned long esr0)
40{
41 siginfo_t info;
42
43 die_if_kernel("-- Insn Access Error --\n"
44 "EPCR0 : %08lx\n"
45 "ESR0 : %08lx\n",
46 epcr0, esr0);
47
48 info.si_signo = SIGSEGV;
49 info.si_code = SEGV_ACCERR;
50 info.si_errno = 0;
51 info.si_addr = (void *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
52
53 force_sig_info(info.si_signo, &info, current);
54} /* end insn_access_error() */
55
56/*****************************************************************************/
57/*
58 * handler for:
59 * - illegal instruction
60 * - privileged instruction
61 * - unsupported trap
62 * - debug exceptions
63 */
64asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, unsigned long esr0)
65{
66 siginfo_t info;
67
68 die_if_kernel("-- Illegal Instruction --\n"
69 "EPCR0 : %08lx\n"
70 "ESR0 : %08lx\n"
71 "ESFR1 : %08lx\n",
72 epcr0, esr0, esfr1);
73
74 info.si_errno = 0;
75 info.si_addr = (void *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
76
77 switch (__frame->tbr & TBR_TT) {
78 case TBR_TT_ILLEGAL_INSTR:
79 info.si_signo = SIGILL;
80 info.si_code = ILL_ILLOPC;
81 break;
82 case TBR_TT_PRIV_INSTR:
83 info.si_signo = SIGILL;
84 info.si_code = ILL_PRVOPC;
85 break;
86 case TBR_TT_TRAP2 ... TBR_TT_TRAP126:
87 info.si_signo = SIGILL;
88 info.si_code = ILL_ILLTRP;
89 break;
90 /* GDB uses "tira gr0, #1" as a breakpoint instruction. */
91 case TBR_TT_TRAP1:
92 case TBR_TT_BREAK:
93 info.si_signo = SIGTRAP;
94 info.si_code =
95 (__frame->__status & REG__STATUS_STEPPED) ? TRAP_TRACE : TRAP_BRKPT;
96 break;
97 }
98
99 force_sig_info(info.si_signo, &info, current);
100} /* end illegal_instruction() */
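
For illustration only (not part of this patch), a userspace sketch showing how the si_code values chosen above can be distinguished in a SA_SIGINFO handler; the codes are standard POSIX ones, so nothing here is FR-V specific.

#include <signal.h>
#include <stdio.h>
#include <string.h>

static void fault_handler(int sig, siginfo_t *info, void *ctx)
{
	(void) ctx;
	/* printf is not async-signal-safe; acceptable only in this demo */
	if (sig == SIGILL && info->si_code == ILL_PRVOPC)
		printf("privileged instruction at %p\n", info->si_addr);
	else if (sig == SIGTRAP && info->si_code == TRAP_BRKPT)
		printf("breakpoint at %p\n", info->si_addr);
	else
		printf("sig %d, si_code %d, addr %p\n", sig, info->si_code, info->si_addr);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = fault_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, NULL);
	sigaction(SIGTRAP, &sa, NULL);

	/* raise() delivers SI_USER rather than TRAP_BRKPT; it just exercises the handler */
	raise(SIGTRAP);
	return 0;
}
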
101
102/*****************************************************************************/
103/*
104 * media unit exception handler
105 */
106asmlinkage void media_exception(unsigned long msr0, unsigned long msr1)
107{
108 siginfo_t info;
109
110 die_if_kernel("-- Media Exception --\n"
111 "MSR0 : %08lx\n"
112 "MSR1 : %08lx\n",
113 msr0, msr1);
114
115 info.si_signo = SIGFPE;
116 info.si_code = FPE_MDAOVF;
117 info.si_errno = 0;
118 info.si_addr = (void *) __frame->pc;
119
120 force_sig_info(info.si_signo, &info, current);
121} /* end media_exception() */
122
123/*****************************************************************************/
124/*
125 * instruction or data access exception
126 */
127asmlinkage void memory_access_exception(unsigned long esr0,
128 unsigned long ear0,
129 unsigned long epcr0)
130{
131 siginfo_t info;
132
133#ifdef CONFIG_MMU
134 unsigned long fixup;
135
136 if ((esr0 & ESRx_EC) == ESRx_EC_DATA_ACCESS)
137 if (handle_misalignment(esr0, ear0, epcr0) == 0)
138 return;
139
140 if ((fixup = search_exception_table(__frame->pc)) != 0) {
141 __frame->pc = fixup;
142 return;
143 }
144#endif
145
146 die_if_kernel("-- Memory Access Exception --\n"
147 "ESR0 : %08lx\n"
148 "EAR0 : %08lx\n"
149 "EPCR0 : %08lx\n",
150 esr0, ear0, epcr0);
151
152 info.si_signo = SIGSEGV;
153 info.si_code = SEGV_ACCERR;
154 info.si_errno = 0;
155 info.si_addr = NULL;
156
157 if ((esr0 & (ESRx_VALID | ESR0_EAV)) == (ESRx_VALID | ESR0_EAV))
158 info.si_addr = (void *) ear0;
159
160 force_sig_info(info.si_signo, &info, current);
161
162} /* end memory_access_exception() */
163
164/*****************************************************************************/
165/*
166 * data access error
167 * - double-word data load from CPU control area (0xFExxxxxx)
168 * - read performed on inactive or self-refreshing SDRAM
169 * - error notification from slave device
170 * - misaligned address
171 * - access to out of bounds memory region
172 * - user mode accessing privileged memory region
173 * - write to R/O memory region
174 */
175asmlinkage void data_access_error(unsigned long esfr1, unsigned long esr15, unsigned long ear15)
176{
177 siginfo_t info;
178
179 die_if_kernel("-- Data Access Error --\n"
180 "ESR15 : %08lx\n"
181 "EAR15 : %08lx\n",
182 esr15, ear15);
183
184 info.si_signo = SIGSEGV;
185 info.si_code = SEGV_ACCERR;
186 info.si_errno = 0;
187 info.si_addr = (void *)
188 (((esr15 & (ESRx_VALID|ESR15_EAV)) == (ESRx_VALID|ESR15_EAV)) ? ear15 : 0);
189
190 force_sig_info(info.si_signo, &info, current);
191} /* end data_access_error() */
192
193/*****************************************************************************/
194/*
195 * data store error - should only happen if accessing inactive or self-refreshing SDRAM
196 */
197asmlinkage void data_store_error(unsigned long esfr1, unsigned long esr15)
198{
199 die_if_kernel("-- Data Store Error --\n"
200 "ESR15 : %08lx\n",
201 esr15);
202 BUG();
203} /* end data_store_error() */
204
205/*****************************************************************************/
206/*
207 * integer division exception handler
208 */
209asmlinkage void division_exception(unsigned long esfr1, unsigned long esr0, unsigned long isr)
210{
211 siginfo_t info;
212
213 die_if_kernel("-- Division Exception --\n"
214 "ESR0 : %08lx\n"
215 "ISR : %08lx\n",
216 esr0, isr);
217
218 info.si_signo = SIGFPE;
219 info.si_code = FPE_INTDIV;
220 info.si_errno = 0;
221 info.si_addr = (void *) __frame->pc;
222
223 force_sig_info(info.si_signo, &info, current);
224} /* end division_exception() */
225
226/*****************************************************************************/
227/*
228 * compound exception handler (multiple exceptions raised simultaneously)
229 */
230asmlinkage void compound_exception(unsigned long esfr1,
231 unsigned long esr0, unsigned long esr14, unsigned long esr15,
232 unsigned long msr0, unsigned long msr1)
233{
234 die_if_kernel("-- Compound Exception --\n"
235 "ESR0 : %08lx\n"
236 "ESR15 : %08lx\n"
237 "ESR15 : %08lx\n"
238 "MSR0 : %08lx\n"
239 "MSR1 : %08lx\n",
240 esr0, esr14, esr15, msr0, msr1);
241 BUG();
242} /* end compound_exception() */
243
244/*****************************************************************************/
245/*
246 * The architecture-independent backtrace generator
247 */
248void dump_stack(void)
249{
250 show_stack(NULL, NULL);
251}
252
253void show_stack(struct task_struct *task, unsigned long *sp)
254{
255}
256
257void show_trace_task(struct task_struct *tsk)
258{
259 printk("CONTEXT: stack=0x%lx frame=0x%p LR=0x%lx RET=0x%lx\n",
260 tsk->thread.sp, tsk->thread.frame, tsk->thread.lr, tsk->thread.sched_lr);
261}
262
263static const char *regnames[] = {
264 "PSR ", "ISR ", "CCR ", "CCCR",
265 "LR ", "LCR ", "PC ", "_stt",
266 "sys ", "GR8*", "GNE0", "GNE1",
267 "IACH", "IACL",
268 "TBR ", "SP ", "FP ", "GR3 ",
269 "GR4 ", "GR5 ", "GR6 ", "GR7 ",
270 "GR8 ", "GR9 ", "GR10", "GR11",
271 "GR12", "GR13", "GR14", "GR15",
272 "GR16", "GR17", "GR18", "GR19",
273 "GR20", "GR21", "GR22", "GR23",
274 "GR24", "GR25", "GR26", "GR27",
275 "EFRM", "CURR", "GR30", "BFRM"
276};
277
278void show_regs(struct pt_regs *regs)
279{
280 uint32_t *reg;
281 int loop;
282
283 printk("\n");
284
285 printk("Frame: @%08x [%s]\n",
286 (uint32_t) regs,
287 regs->psr & PSR_S ? "kernel" : "user");
288
289 reg = (uint32_t *) regs;
290 for (loop = 0; loop < REG__END; loop++) {
291 printk("%s %08x", regnames[loop + 0], reg[loop + 0]);
292
293 if (loop == REG__END - 1 || loop % 5 == 4)
294 printk("\n");
295 else
296 printk(" | ");
297 }
298
299 printk("Process %s (pid: %d)\n", current->comm, current->pid);
300}
301
302void die_if_kernel(const char *str, ...)
303{
304 char buffer[256];
305 va_list va;
306
307 if (user_mode(__frame))
308 return;
309
310 va_start(va, str);
311 vsnprintf(buffer, sizeof(buffer), str, va);
312 va_end(va);
313
314 console_verbose();
315 printk("\n===================================\n");
316 printk("%s\n", buffer);
317 show_backtrace(__frame, 0);
318
319 __break_hijack_kernel_event();
320 do_exit(SIGSEGV);
321}
322
323/*****************************************************************************/
324/*
325 * dump the contents of an exception frame
326 */
327static void show_backtrace_regs(struct pt_regs *frame)
328{
329 uint32_t *reg;
330 int loop;
331
332 /* print the registers for this frame */
333 printk("<-- %s Frame: @%p -->\n",
334 frame->psr & PSR_S ? "Kernel Mode" : "User Mode",
335 frame);
336
337 reg = (uint32_t *) frame;
338 for (loop = 0; loop < REG__END; loop++) {
339 printk("%s %08x", regnames[loop + 0], reg[loop + 0]);
340
341 if (loop == REG__END - 1 || loop % 5 == 4)
342 printk("\n");
343 else
344 printk(" | ");
345 }
346
347 printk("--------\n");
348} /* end show_backtrace_regs() */
349
350/*****************************************************************************/
351/*
352 * generate a backtrace of the kernel stack
353 */
354void show_backtrace(struct pt_regs *frame, unsigned long sp)
355{
356 struct pt_regs *frame0;
357 unsigned long tos = 0, stop = 0, base;
358 int format;
359
360 base = ((((unsigned long) frame) + 8191) & ~8191) - sizeof(struct user_context);
361 frame0 = (struct pt_regs *) base;
362
363 if (sp) {
364 tos = sp;
365 stop = (unsigned long) frame;
366 }
367
368 printk("\nProcess %s (pid: %d)\n\n", current->comm, current->pid);
369
370 for (;;) {
371 /* dump stack segment between frames */
372 //printk("%08lx -> %08lx\n", tos, stop);
373 format = 0;
374 while (tos < stop) {
375 if (format == 0)
376 printk(" %04lx :", tos & 0xffff);
377
378 printk(" %08lx", *(unsigned long *) tos);
379
380 tos += 4;
381 format++;
382 if (format == 8) {
383 printk("\n");
384 format = 0;
385 }
386 }
387
388 if (format > 0)
389 printk("\n");
390
391 /* dump frame 0 outside of the loop */
392 if (frame == frame0)
393 break;
394
395 tos = frame->sp;
396 if (((unsigned long) frame) + sizeof(*frame) != tos) {
397 printk("-- TOS %08lx does not follow frame %p --\n",
398 tos, frame);
399 break;
400 }
401
402 show_backtrace_regs(frame);
403
404 /* dump the stack between this frame and the next */
405 stop = (unsigned long) frame->next_frame;
406 if (stop != base &&
407 (stop < tos ||
408 stop > base ||
409 (stop < base && stop + sizeof(*frame) > base) ||
410 stop & 3)) {
411 printk("-- next_frame %08lx is invalid (range %08lx-%08lx) --\n",
412 stop, tos, base);
413 break;
414 }
415
416 /* move to next frame */
417 frame = frame->next_frame;
418 }
419
420 /* we can always dump frame 0, even if the rest of the stack is corrupt */
421 show_backtrace_regs(frame0);
422
423} /* end show_backtrace() */
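
Illustrative only, not from this patch: a standalone sketch of how the walk above locates frame0. The kernel stack is 8192-byte aligned, so rounding the current frame address up to the next 8192-byte boundary and stepping back one struct user_context gives the initial exception frame. The frame address and structure size below are hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned long frame = 0xc0312e84;	/* hypothetical &pt_regs on the stack */
	unsigned long ucontext_size = 640;	/* hypothetical sizeof(struct user_context) */
	unsigned long top = (frame + 8191) & ~8191UL;
	unsigned long base = top - ucontext_size;

	printf("stack top %#lx, frame0 at %#lx\n", top, base);
	return 0;
}
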
424
425/*****************************************************************************/
426/*
427 * initialise traps
428 */
429void __init trap_init (void)
430{
431} /* end trap_init() */
diff --git a/arch/frv/kernel/uaccess.c b/arch/frv/kernel/uaccess.c
new file mode 100644
index 000000000000..f3fd58a5bc4a
--- /dev/null
+++ b/arch/frv/kernel/uaccess.c
@@ -0,0 +1,95 @@
1/* uaccess.c: userspace access functions
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/mm.h>
13#include <asm/uaccess.h>
14
15/*****************************************************************************/
16/*
17 * copy a null terminated string from userspace
18 */
19long strncpy_from_user(char *dst, const char *src, long count)
20{
21 unsigned long max;
22 char *p, ch;
23 long err = -EFAULT;
24
25 if (count < 0)
26 BUG();
27
28 p = dst;
29
30#ifndef CONFIG_MMU
31 if ((unsigned long) src < memory_start)
32 goto error;
33#endif
34
35 if ((unsigned long) src >= get_addr_limit())
36 goto error;
37
38 max = get_addr_limit() - (unsigned long) src;
39 if ((unsigned long) count > max) {
40 memset(dst + max, 0, count - max);
41 count = max;
42 }
43
44 err = 0;
45 for (; count > 0; count--, p++, src++) {
46 __get_user_asm(err, ch, src, "ub", "=r");
47 if (err < 0)
48 goto error;
49 if (!ch)
50 break;
51 *p = ch;
52 }
53
54 err = p - dst; /* return length excluding NUL */
55
56 error:
57 if (count > 0)
58 memset(p, 0, count); /* clear remainder of buffer [security] */
59
60 return err;
61} /* end strncpy_from_user() */
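
For illustration only (not part of this patch), a kernel-side usage sketch of the return-value semantics above: the result is the string length excluding the NUL, -EFAULT on a faulting access, and a value equal to count when the buffer was filled (and therefore possibly left unterminated). my_copy_name() and MY_NAME_MAX are hypothetical, and the snippet only compiles in-tree.

#include <linux/errno.h>
#include <asm/uaccess.h>

#define MY_NAME_MAX 32			/* hypothetical buffer size */

static long my_copy_name(char *kbuf, const char *uname)
{
	long len = strncpy_from_user(kbuf, uname, MY_NAME_MAX);

	if (len < 0)
		return len;		/* -EFAULT from a faulting access */
	if (len == MY_NAME_MAX)
		return -ENAMETOOLONG;	/* buffer filled: truncated, not NUL-terminated */
	return len;			/* kbuf is NUL-terminated, tail zero-filled */
}
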
62
63/*****************************************************************************/
64/*
65 * Return the size of a string (including the ending 0)
66 *
67 * Return 0 on exception, a value greater than count if too long
68 */
69long strnlen_user(const char *src, long count)
70{
71 const char *p;
72 long err = 0;
73 char ch;
74
75 if (count < 0)
76 BUG();
77
78#ifndef CONFIG_MMU
79 if ((unsigned long) src < memory_start)
80 return 0;
81#endif
82
83 if ((unsigned long) src >= get_addr_limit())
84 return 0;
85
86 for (p = src; count > 0; count--, p++) {
87 __get_user_asm(err, ch, p, "ub", "=r");
88 if (err < 0)
89 return 0;
90 if (!ch)
91 break;
92 }
93
94 return p - src + 1; /* return length including NUL */
95} /* end strnlen_user() */
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..fceafd2cc202
--- /dev/null
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -0,0 +1,187 @@
1/* ld script to make FRV Linux kernel
2 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
3 */
4OUTPUT_FORMAT("elf32-frv", "elf32-frv", "elf32-frv")
5OUTPUT_ARCH(frv)
6ENTRY(_start)
7
8#include <asm-generic/vmlinux.lds.h>
9#include <asm/processor.h>
10#include <asm/page.h>
11#include <asm/cache.h>
12#include <asm/thread_info.h>
13
14jiffies = jiffies_64 + 4;	/* FR-V is big-endian: the low 32 bits of jiffies_64 live at byte offset 4 */
15
16__page_offset = 0xc0000000; /* start of area covered by struct pages */
17__kernel_image_start = __page_offset; /* address at which kernel image resides */
18
19SECTIONS
20{
21 . = __kernel_image_start;
22
23 /* discardable initialisation code and data */
24 . = ALIGN(PAGE_SIZE); /* Init code and data */
25 __init_begin = .;
26
27 _sinittext = .;
28 .init.text : {
29 *(.text.head)
30#ifndef CONFIG_DEBUG_INFO
31 *(.init.text)
32 *(.exit.text)
33 *(.exit.data)
34 *(.exitcall.exit)
35#endif
36 }
37 _einittext = .;
38 .init.data : { *(.init.data) }
39
40 . = ALIGN(8);
41 __setup_start = .;
42 .setup.init : { KEEP(*(.init.setup)) }
43 __setup_end = .;
44
45 __initcall_start = .;
46 .initcall.init : {
47 *(.initcall1.init)
48 *(.initcall2.init)
49 *(.initcall3.init)
50 *(.initcall4.init)
51 *(.initcall5.init)
52 *(.initcall6.init)
53 *(.initcall7.init)
54 }
55 __initcall_end = .;
56 __con_initcall_start = .;
57 .con_initcall.init : { *(.con_initcall.init) }
58 __con_initcall_end = .;
59 SECURITY_INIT
60 . = ALIGN(4);
61 __alt_instructions = .;
62 .altinstructions : { *(.altinstructions) }
63 __alt_instructions_end = .;
64 .altinstr_replacement : { *(.altinstr_replacement) }
65
66 __per_cpu_start = .;
67 .data.percpu : { *(.data.percpu) }
68 __per_cpu_end = .;
69
70 . = ALIGN(4096);
71 __initramfs_start = .;
72 .init.ramfs : { *(.init.ramfs) }
73 __initramfs_end = .;
74
75 . = ALIGN(THREAD_SIZE);
76 __init_end = .;
77
78 /* put sections together that have massive alignment issues */
79 . = ALIGN(THREAD_SIZE);
80 .data.init_task : {
81 /* init task record & stack */
82 *(.data.init_task)
83 }
84
85 .trap : {
86 /* trap table management - read entry-table.S before modifying */
87 . = ALIGN(8192);
88 __trap_tables = .;
89 *(.trap.user)
90 *(.trap.kernel)
91 . = ALIGN(4096);
92 *(.trap.break)
93 }
94
95 . = ALIGN(4096);
96 .data.page_aligned : { *(.data.idt) }
97
98 . = ALIGN(L1_CACHE_BYTES);
99 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
100
101 /* Text and read-only data */
102 . = ALIGN(4);
103 _text = .;
104 _stext = .;
105 .text : {
106 *(
107 .text.start .text .text.*
108#ifdef CONFIG_DEBUG_INFO
109 .init.text
110 .exit.text
111 .exitcall.exit
112#endif
113 )
114 SCHED_TEXT
115 *(.fixup)
116 *(.gnu.warning)
117 *(.exitcall.exit)
118 } = 0x9090
119
120 _etext = .; /* End of text section */
121
122 RODATA
123
124 .rodata : {
125 *(.trap.vector)
126
127 /* this clause must not be modified - the ordering and adjacency are imperative */
128 __trap_fixup_tables = .;
129 *(.trap.fixup.user .trap.fixup.kernel)
130
131 }
132
133 . = ALIGN(8); /* Exception table */
134 __start___ex_table = .;
135 __ex_table : { KEEP(*(__ex_table)) }
136 __stop___ex_table = .;
137
138 _sdata = .;
139 .data : { /* Data */
140 *(.data .data.*)
141 *(.exit.data)
142 CONSTRUCTORS
143 }
144
145 _edata = .; /* End of data section */
146
147 /* GP section */
148 . = ALIGN(L1_CACHE_BYTES);
149 _gp = . + 2048;
150 PROVIDE (gp = _gp);
151
152 .sdata : { *(.sdata .sdata.*) }
153
154 /* BSS */
155 . = ALIGN(L1_CACHE_BYTES);
156 __bss_start = .;
157
158 .sbss : { *(.sbss .sbss.*) }
159 .bss : { *(.bss .bss.*) }
160 .bss.stack : { *(.bss) }
161
162 __bss_stop = .;
163 _end = . ;
164 . = ALIGN(PAGE_SIZE);
165 __kernel_image_end = .;
166
167 /* Stabs debugging sections. */
168 .stab 0 : { *(.stab) }
169 .stabstr 0 : { *(.stabstr) }
170 .stab.excl 0 : { *(.stab.excl) }
171 .stab.exclstr 0 : { *(.stab.exclstr) }
172 .stab.index 0 : { *(.stab.index) }
173 .stab.indexstr 0 : { *(.stab.indexstr) }
174
175 .debug_line 0 : { *(.debug_line) }
176 .debug_info 0 : { *(.debug_info) }
177 .debug_abbrev 0 : { *(.debug_abbrev) }
178 .debug_aranges 0 : { *(.debug_aranges) }
179 .debug_frame 0 : { *(.debug_frame) }
180 .debug_pubnames 0 : { *(.debug_pubnames) }
181 .debug_str 0 : { *(.debug_str) }
182 .debug_ranges 0 : { *(.debug_ranges) }
183
184 .comment 0 : { *(.comment) }
185}
186
187__kernel_image_size_no_bss = __bss_start - __kernel_image_start;