author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
commit		bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree		216fdc1cbef450ca688135c5b8969169482d9a48 /arch/microblaze/kernel
parent		3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent		657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
	crypto/async_tx/async_xor.c
	drivers/dma/ioat/dma_v2.h
	drivers/dma/ioat/pci.c
	drivers/md/raid5.c
Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--	arch/microblaze/kernel/Makefile			|    1
-rw-r--r--	arch/microblaze/kernel/asm-offsets.c		|   21
-rw-r--r--	arch/microblaze/kernel/early_printk.c		|    3
-rw-r--r--	arch/microblaze/kernel/entry-nommu.S		|    2
-rw-r--r--	arch/microblaze/kernel/entry.S			| 1116
-rw-r--r--	arch/microblaze/kernel/exceptions.c		|   45
-rw-r--r--	arch/microblaze/kernel/head.S			|  190
-rw-r--r--	arch/microblaze/kernel/hw_exception_handler.S	|  746
-rw-r--r--	arch/microblaze/kernel/init_task.c		|    2
-rw-r--r--	arch/microblaze/kernel/microblaze_ksyms.c	|    2
-rw-r--r--	arch/microblaze/kernel/misc.S			|  120
-rw-r--r--	arch/microblaze/kernel/process.c		|   59
-rw-r--r--	arch/microblaze/kernel/prom.c			|    7
-rw-r--r--	arch/microblaze/kernel/setup.c			|   62
-rw-r--r--	arch/microblaze/kernel/signal.c			|  109
-rw-r--r--	arch/microblaze/kernel/syscall_table.S		|    6
-rw-r--r--	arch/microblaze/kernel/traps.c			|   42
-rw-r--r--	arch/microblaze/kernel/vmlinux.lds.S		|   16
18 files changed, 2391 insertions, 158 deletions
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index da94bec4ecba..f4a5e19a20eb 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_SELFMOD) += selfmod.o
 obj-$(CONFIG_HEART_BEAT) += heartbeat.o
 obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
+obj-$(CONFIG_MMU) += misc.o
 
 obj-y += entry$(MMUEXT).o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index aabd9e9423a6..7bc7b68f97db 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
@@ -68,16 +69,26 @@ int main(int argc, char *argv[])
 
 	/* struct task_struct */
 	DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
+#ifdef CONFIG_MMU
+	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+	DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+	BLANK();
+
+	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
+	BLANK();
+#endif
 
 	/* struct thread_info */
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
-	DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
-	DEFINE(TI_STATUS, offsetof(struct thread_info, status));
-	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
-	DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block));
 	DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
 	BLANK();
 
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 4b0f0fdb9ca0..7de84923ba07 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -87,6 +87,9 @@ int __init setup_early_printk(char *opt)
 	base_addr = early_uartlite_console();
 	if (base_addr) {
 		early_console_initialized = 1;
+#ifdef CONFIG_MMU
+		early_console_reg_tlb_alloc(base_addr);
+#endif
 		early_printk("early_printk_console is enabled at 0x%08x\n",
 							base_addr);
 
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index f24b1268baaf..1fce6b803f54 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -10,7 +10,7 @@
 
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
-#include <asm/errno.h>
+#include <linux/errno.h>
 #include <asm/entry.h>
 #include <asm/asm-offsets.h>
 #include <asm/registers.h>
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
new file mode 100644
index 000000000000..91a0e7b185dd
--- /dev/null
+++ b/arch/microblaze/kernel/entry.S
@@ -0,0 +1,1116 @@
+/*
+ * Low-level system-call handling, trap handlers and context-switching
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
+ * Copyright (C) 2001,2002	NEC Corporation
+ * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * Written by Miles Bader <miles@gnu.org>
+ * Heavily modified by John Williams for Microblaze
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+#include <asm/entry.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/exceptions.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#include <asm/page.h>
+#include <asm/unistd.h>
+
+#include <linux/errno.h>
+#include <asm/signal.h>
+
+/* The size of a state save frame. */
+#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)
+
+/* The offset of the struct pt_regs in a `state save frame' on the stack. */
+#define PTO	STATE_SAVE_ARG_SPACE /* 24 - the space for args */
+
+#define C_ENTRY(name)	.globl name; .align 4; name
+
+/*
+ * Various ways of setting and clearing BIP in flags reg.
+ * This is mucky, but necessary on MicroBlaze versions that
+ * allow MSR ops to write to BIP.
+ */
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+	.macro	clear_bip
+	msrclr	r11, MSR_BIP
+	nop
+	.endm
+
+	.macro	set_bip
+	msrset	r11, MSR_BIP
+	nop
+	.endm
+
+	.macro	clear_eip
+	msrclr	r11, MSR_EIP
+	nop
+	.endm
+
+	.macro	set_ee
+	msrset	r11, MSR_EE
+	nop
+	.endm
+
+	.macro	disable_irq
+	msrclr	r11, MSR_IE
+	nop
+	.endm
+
+	.macro	enable_irq
+	msrset	r11, MSR_IE
+	nop
+	.endm
+
+	.macro	set_ums
+	msrset	r11, MSR_UMS
+	nop
+	msrclr	r11, MSR_VMS
+	nop
+	.endm
+
+	.macro	set_vms
+	msrclr	r11, MSR_UMS
+	nop
+	msrset	r11, MSR_VMS
+	nop
+	.endm
+
+	.macro	clear_vms_ums
+	msrclr	r11, MSR_VMS
+	nop
+	msrclr	r11, MSR_UMS
+	nop
+	.endm
+#else
+	.macro	clear_bip
+	mfs	r11, rmsr
+	nop
+	andi	r11, r11, ~MSR_BIP
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	set_bip
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_BIP
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	clear_eip
+	mfs	r11, rmsr
+	nop
+	andi	r11, r11, ~MSR_EIP
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	set_ee
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_EE
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	disable_irq
+	mfs	r11, rmsr
+	nop
+	andi	r11, r11, ~MSR_IE
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	enable_irq
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_IE
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	set_ums
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_VMS
+	andni	r11, r11, MSR_UMS
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	set_vms
+	mfs	r11, rmsr
+	nop
+	ori	r11, r11, MSR_VMS
+	andni	r11, r11, MSR_UMS
+	mts	rmsr, r11
+	nop
+	.endm
+
+	.macro	clear_vms_ums
+	mfs	r11, rmsr
+	nop
+	andni	r11, r11, (MSR_VMS|MSR_UMS)
+	mts	rmsr, r11
+	nop
+	.endm
+#endif
+
+/* Define how to call high-level functions. With MMU, virtual mode must be
+ * enabled when calling the high-level function. Clobbers R11.
+ * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
+ */
+
+/* turn on virtual protected mode save */
+#define VM_ON		\
+	set_ums;	\
+	rted	r0, 2f;	\
+2:	nop;
+
+/* turn off virtual protected mode save and user mode save */
+#define VM_OFF			\
+	clear_vms_ums;		\
+	rted	r0, TOPHYS(1f);	\
+1:	nop;
+
+#define SAVE_REGS \
+	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
+	swi	r5, r1, PTO+PT_R5;					\
+	swi	r6, r1, PTO+PT_R6;					\
+	swi	r7, r1, PTO+PT_R7;					\
+	swi	r8, r1, PTO+PT_R8;					\
+	swi	r9, r1, PTO+PT_R9;					\
+	swi	r10, r1, PTO+PT_R10;					\
+	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
+	swi	r12, r1, PTO+PT_R12;					\
+	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
+	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
+	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
+	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
+	swi	r19, r1, PTO+PT_R19;					\
+	swi	r20, r1, PTO+PT_R20;					\
+	swi	r21, r1, PTO+PT_R21;					\
+	swi	r22, r1, PTO+PT_R22;					\
+	swi	r23, r1, PTO+PT_R23;					\
+	swi	r24, r1, PTO+PT_R24;					\
+	swi	r25, r1, PTO+PT_R25;					\
+	swi	r26, r1, PTO+PT_R26;					\
+	swi	r27, r1, PTO+PT_R27;					\
+	swi	r28, r1, PTO+PT_R28;					\
+	swi	r29, r1, PTO+PT_R29;					\
+	swi	r30, r1, PTO+PT_R30;					\
+	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
+	mfs	r11, rmsr;		/* save MSR */			\
+	nop;								\
+	swi	r11, r1, PTO+PT_MSR;
+
+#define RESTORE_REGS \
+	lwi	r11, r1, PTO+PT_MSR;					\
+	mts	rmsr, r11;						\
+	nop;								\
+	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
+	lwi	r5, r1, PTO+PT_R5;					\
+	lwi	r6, r1, PTO+PT_R6;					\
+	lwi	r7, r1, PTO+PT_R7;					\
+	lwi	r8, r1, PTO+PT_R8;					\
+	lwi	r9, r1, PTO+PT_R9;					\
+	lwi	r10, r1, PTO+PT_R10;					\
+	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
+	lwi	r12, r1, PTO+PT_R12;					\
+	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
+	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
+	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
+	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
+	lwi	r19, r1, PTO+PT_R19;					\
+	lwi	r20, r1, PTO+PT_R20;					\
+	lwi	r21, r1, PTO+PT_R21;					\
+	lwi	r22, r1, PTO+PT_R22;					\
+	lwi	r23, r1, PTO+PT_R23;					\
+	lwi	r24, r1, PTO+PT_R24;					\
+	lwi	r25, r1, PTO+PT_R25;					\
+	lwi	r26, r1, PTO+PT_R26;					\
+	lwi	r27, r1, PTO+PT_R27;					\
+	lwi	r28, r1, PTO+PT_R28;					\
+	lwi	r29, r1, PTO+PT_R29;					\
+	lwi	r30, r1, PTO+PT_R30;					\
+	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
+
+.text
+
+/*
+ * User trap.
+ *
+ * System calls are handled here.
+ *
+ * Syscall protocol:
+ * Syscall number in r12, args in r5-r10
+ * Return value in r3
+ *
+ * Trap entered via brki instruction, so BIP bit is set, and interrupts
+ * are masked. This is nice, means we don't have to CLI before state save
+ */
+C_ENTRY(_user_exception):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	addi	r14, r14, 4	/* return address is 4 bytes after call */
+	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */
+
+	lwi	r11, r0, TOPHYS(PER_CPU(KM)); /* See if already in kernel mode.*/
+	beqi	r11, 1f;		/* Jump ahead if coming from user */
+/* Kernel-mode state save. */
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+	tophys(r1,r11);
+	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+	SAVE_REGS
+
+	addi	r11, r0, 1;		/* Was in kernel-mode. */
+	swi	r11, r1, PTO+PT_MODE;	/* pt_regs -> kernel mode */
+	brid	2f;
+	nop;				/* Fill delay slot */
+
+/* User-mode state save. */
+1:
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+	tophys(r1,r1);
+	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
+/* calculate kernel stack pointer from task struct 8k */
+	addik	r1, r1, THREAD_SIZE;
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+	SAVE_REGS
+
+	swi	r0, r1, PTO+PT_MODE;		/* Was in user-mode. */
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */
+	addi	r11, r0, 1;
+	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode. */
+2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+	/* Save away the syscall number. */
+	swi	r12, r1, PTO+PT_R0;
+	tovirt(r1,r1)
+
+	la	r15, r0, ret_from_trap-8
+/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
+/* Jump to the appropriate function for the system call number in r12
+ * (r12 is not preserved), or return an error if r12 is not valid. The LP
+ * register should point to the location where
+ * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
+	/* See if the system call number is valid. */
+	addi	r11, r12, -__NR_syscalls;
+	bgei	r11, 1f;
+	/* Figure out which function to use for this system call. */
+	/* Note: the MicroBlaze barrel shift is optional, so don't rely on it */
+	add	r12, r12, r12;	/* convert num -> ptr */
+	add	r12, r12, r12;
+
+	/* Trace syscalls and store the counts to r0_ram */
+	lwi	r3, r12, 0x400 + TOPHYS(r0_ram)
+	addi	r3, r3, 1
+	swi	r3, r12, 0x400 + TOPHYS(r0_ram)
+
+	lwi	r12, r12, TOPHYS(sys_call_table); /* Function ptr */
+	/* Make the system call (through r12). */
+	set_vms;
+	rtid	r12, 0;
+	nop;
+	/* The syscall number is invalid, return an error. */
+1:	VM_ON;		/* RETURN() expects virtual mode */
+	addi	r3, r0, -ENOSYS;
+	rtsd	r15, 8;	/* looks like a normal subroutine return */
+	or	r0, r0, r0
+
+
+/* Entry point used to return from a syscall/trap. */
+/* We re-enable BIP bit before state restore */
+C_ENTRY(ret_from_trap):
+	set_bip;			/* Ints masked for state restore */
+	lwi	r11, r1, PTO+PT_MODE;
+/* See if returning to kernel mode, if so, skip resched &c. */
+	bnei	r11, 2f;
+
+	/* We're returning to user mode, so check for various conditions that
+	 * trigger rescheduling. */
+	/* Get current task ptr into r11 */
+	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
+	andi	r11, r11, _TIF_NEED_RESCHED;
+	beqi	r11, 5f;
+
+	swi	r3, r1, PTO + PT_R3;	/* store syscall result */
+	swi	r4, r1, PTO + PT_R4;
+	bralid	r15, schedule;		/* Call scheduler */
+	nop;				/* delay slot */
+	lwi	r3, r1, PTO + PT_R3;	/* restore syscall result */
+	lwi	r4, r1, PTO + PT_R4;
+
+	/* Maybe handle a signal */
+5:	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
+	andi	r11, r11, _TIF_SIGPENDING;
+	beqi	r11, 1f;		/* Signals to handle, handle them */
+
+	swi	r3, r1, PTO + PT_R3;	/* store syscall result */
+	swi	r4, r1, PTO + PT_R4;
+	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
+	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
+	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
+	bralid	r15, do_signal;		/* Handle any signals */
+	nop;
+	lwi	r3, r1, PTO + PT_R3;	/* restore syscall result */
+	lwi	r4, r1, PTO + PT_R4;
+
+/* Finally, return to user state. */
+1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
+	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+	VM_OFF;
+	tophys(r1,r1);
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
+	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
+	bri	6f;
+
+/* Return to kernel state. */
+2:	VM_OFF;
+	tophys(r1,r1);
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
+	tovirt(r1,r1);
+6:
+TRAP_return:		/* Make global symbol for debugging */
+	rtbd	r14, 0;	/* Instructions to return from an IRQ */
+	nop;
+
405
406/* These syscalls need access to the struct pt_regs on the stack, so we
407 implement them in assembly (they're basically all wrappers anyway). */
408
409C_ENTRY(sys_fork_wrapper):
410 addi r5, r0, SIGCHLD /* Arg 0: flags */
411 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
412 la r7, r1, PTO /* Arg 2: parent context */
413 add r8. r0, r0 /* Arg 3: (unused) */
414 add r9, r0, r0; /* Arg 4: (unused) */
415 add r10, r0, r0; /* Arg 5: (unused) */
416 brid do_fork /* Do real work (tail-call) */
417 nop;
418
419/* This the initial entry point for a new child thread, with an appropriate
420 stack in place that makes it look the the child is in the middle of an
421 syscall. This function is actually `returned to' from switch_thread
422 (copy_thread makes ret_from_fork the return address in each new thread's
423 saved context). */
424C_ENTRY(ret_from_fork):
425 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
426 add r3, r5, r0; /* switch_thread returns the prev task */
427 /* ( in the delay slot ) */
428 add r3, r0, r0; /* Child's fork call should return 0. */
429 brid ret_from_trap; /* Do normal trap return */
430 nop;
431
432C_ENTRY(sys_vfork_wrapper):
433 la r5, r1, PTO
434 brid sys_vfork /* Do real work (tail-call) */
435 nop
436
437C_ENTRY(sys_clone_wrapper):
438 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
439 lwi r6, r1, PTO+PT_R1; /* If so, use paret's stack ptr */
4401: la r7, r1, PTO; /* Arg 2: parent context */
441 add r8, r0, r0; /* Arg 3: (unused) */
442 add r9, r0, r0; /* Arg 4: (unused) */
443 add r10, r0, r0; /* Arg 5: (unused) */
444 brid do_fork /* Do real work (tail-call) */
445 nop;
446
447C_ENTRY(sys_execve_wrapper):
448 la r8, r1, PTO; /* add user context as 4th arg */
449 brid sys_execve; /* Do real work (tail-call).*/
450 nop;
451
452C_ENTRY(sys_sigsuspend_wrapper):
453 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
454 swi r4, r1, PTO+PT_R4;
455 la r6, r1, PTO; /* add user context as 2nd arg */
456 bralid r15, sys_sigsuspend; /* Do real work.*/
457 nop;
458 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
459 lwi r4, r1, PTO+PT_R4;
460 bri ret_from_trap /* fall through will not work here due to align */
461 nop;
462
463C_ENTRY(sys_rt_sigsuspend_wrapper):
464 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
465 swi r4, r1, PTO+PT_R4;
466 la r7, r1, PTO; /* add user context as 3rd arg */
467 brlid r15, sys_rt_sigsuspend; /* Do real work.*/
468 nop;
469 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
470 lwi r4, r1, PTO+PT_R4;
471 bri ret_from_trap /* fall through will not work here due to align */
472 nop;
473
474
475C_ENTRY(sys_sigreturn_wrapper):
476 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
477 swi r4, r1, PTO+PT_R4;
478 la r5, r1, PTO; /* add user context as 1st arg */
479 brlid r15, sys_sigreturn; /* Do real work.*/
480 nop;
481 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
482 lwi r4, r1, PTO+PT_R4;
483 bri ret_from_trap /* fall through will not work here due to align */
484 nop;
485
486C_ENTRY(sys_rt_sigreturn_wrapper):
487 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
488 swi r4, r1, PTO+PT_R4;
489 la r5, r1, PTO; /* add user context as 1st arg */
490 brlid r15, sys_rt_sigreturn /* Do real work */
491 nop;
492 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
493 lwi r4, r1, PTO+PT_R4;
494 bri ret_from_trap /* fall through will not work here due to align */
495 nop;
496
+/*
+ * HW EXCEPTION routine start
+ */
+
+#define SAVE_STATE	\
+	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
+	set_bip; /* equalize initial state for all possible entries */	\
+	clear_eip;							\
+	enable_irq;							\
+	set_ee;								\
+	/* See if already in kernel mode.*/				\
+	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
+	beqi	r11, 1f;	/* Jump ahead if coming from user */	\
+	/* Kernel-mode state save. */					\
+	/* Reload kernel stack-ptr. */					\
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
+	tophys(r1,r11);							\
+	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
+	/* store return registers separately because			\
+	 * this macro is used for other exceptions */			\
+	swi	r3, r1, PTO + PT_R3;					\
+	swi	r4, r1, PTO + PT_R4;					\
+	SAVE_REGS							\
+	/* PC, before IRQ/trap - this is one instruction above */	\
+	swi	r17, r1, PTO+PT_PC;					\
+									\
+	addi	r11, r0, 1;		/* Was in kernel-mode. */	\
+	swi	r11, r1, PTO+PT_MODE;					\
+	brid	2f;							\
+	nop;				/* Fill delay slot */		\
+1:	/* User-mode state save. */					\
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
+	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+	tophys(r1,r1);							\
+	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
+	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
+	tophys(r1,r1);							\
+									\
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
+	/* store return registers separately because this macro	\
+	 * is used for other exceptions */				\
+	swi	r3, r1, PTO + PT_R3;					\
+	swi	r4, r1, PTO + PT_R4;					\
+	SAVE_REGS							\
+	/* PC, before IRQ/trap - this is one instruction above FIXME */\
+	swi	r17, r1, PTO+PT_PC;					\
+									\
+	swi	r0, r1, PTO+PT_MODE;		/* Was in user-mode. */	\
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
+	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */	\
+	addi	r11, r0, 1;						\
+	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
+2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+	/* Save away the syscall number. */				\
+	swi	r0, r1, PTO+PT_R0;					\
+	tovirt(r1,r1)
+
+C_ENTRY(full_exception_trap):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	/* adjust exception address for privileged instruction
+	 * to find out where it is */
+	addik	r17, r17, -4
+	SAVE_STATE /* Save registers */
+	/* FIXME this could be stored directly in the PT_ESR reg.
+	 * I tested it but there is a fault */
+	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
+	la	r15, r0, ret_from_exc - 8
+	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
+	mfs	r6, resr
+	nop
+	mfs	r7, rfsr;		/* save FSR */
+	nop
+	la	r12, r0, full_exception
+	set_vms;
+	rtbd	r12, 0;
+	nop;
+
+/*
+ * Unaligned data trap.
+ *
+ * An unaligned data trap, even on the last word of a 4k page, is handled here.
+ *
+ * Trap entered via exception, so EE bit is set, and interrupts
+ * are masked. This is nice, means we don't have to CLI before state save
+ *
+ * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
+ */
+C_ENTRY(unaligned_data_trap):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	SAVE_STATE		/* Save registers.*/
+	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
+	la	r15, r0, ret_from_exc-8
+	mfs	r3, resr		/* ESR */
+	nop
+	mfs	r4, rear		/* EAR */
+	nop
+	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
+	la	r12, r0, _unaligned_data_exception
+	set_vms;
+	rtbd	r12, 0;	/* interrupts enabled */
+	nop;
+
+/*
+ * Page fault traps.
+ *
+ * If the real exception handler (from hw_exception_handler.S) didn't find
+ * the mapping for the process, then we're thrown here to handle such a
+ * situation.
+ *
+ * Trap entered via exceptions, so EE bit is set, and interrupts
+ * are masked. This is nice, means we don't have to CLI before state save
+ *
+ * Build a standard exception frame for TLB Access errors. All TLB exceptions
+ * will bail out to this point if they can't resolve the lightweight TLB fault.
+ *
+ * The C function called is in "arch/microblaze/mm/fault.c", declared as:
+ * void do_page_fault(struct pt_regs *regs,
+ *				unsigned long address,
+ *				unsigned long error_code)
+ */
+/* data and instruction trap - which one it is gets resolved in fault.c */
+C_ENTRY(page_fault_data_trap):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	SAVE_STATE		/* Save registers.*/
+	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
+	la	r15, r0, ret_from_exc-8
+	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
+	mfs	r6, rear		/* parameter unsigned long address */
+	nop
+	mfs	r7, resr		/* parameter unsigned long error_code */
+	nop
+	la	r12, r0, do_page_fault
+	set_vms;
+	rtbd	r12, 0;	/* interrupts enabled */
+	nop;
+
+C_ENTRY(page_fault_instr_trap):
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	SAVE_STATE		/* Save registers.*/
+	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
+	la	r15, r0, ret_from_exc-8
+	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
+	mfs	r6, rear		/* parameter unsigned long address */
+	nop
+	ori	r7, r0, 0		/* parameter unsigned long error_code */
+	la	r12, r0, do_page_fault
+	set_vms;
+	rtbd	r12, 0;	/* interrupts enabled */
+	nop;
+
+/* Entry point used to return from an exception. */
+C_ENTRY(ret_from_exc):
+	set_bip;			/* Ints masked for state restore */
+	lwi	r11, r1, PTO+PT_MODE;
+	bnei	r11, 2f;		/* See if returning to kernel mode, */
+					/* ... if so, skip resched &c. */
+
+	/* We're returning to user mode, so check for various conditions that
+	   trigger rescheduling. */
+	/* Get current task ptr into r11 */
+	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_NEED_RESCHED;
+	beqi	r11, 5f;
+
+/* Call the scheduler before returning from a syscall/trap. */
+	bralid	r15, schedule;	/* Call scheduler */
+	nop;				/* delay slot */
+
+	/* Maybe handle a signal */
+5:	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_SIGPENDING;
+	beqi	r11, 1f;		/* Signals to handle, handle them */
+
+	/*
+	 * Handle a signal return; Pending signals should be in r18.
+	 *
+	 * Not all registers are saved by the normal trap/interrupt entry
+	 * points (for instance, call-saved registers (because the normal
+	 * C-compiler calling sequence in the kernel makes sure they're
+	 * preserved), and call-clobbered registers in the case of
+	 * traps), but signal handlers may want to examine or change the
+	 * complete register state. Here we save anything not saved by
+	 * the normal entry sequence, so that it may be safely restored
+	 * (in a possibly modified form) after do_signal returns.
+	 * store return registers separately because this macro is used
+	 * for other exceptions */
+	swi	r3, r1, PTO + PT_R3;
+	swi	r4, r1, PTO + PT_R4;
+	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
+	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
+	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
+	bralid	r15, do_signal;	/* Handle any signals */
+	nop;
+	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+
+/* Finally, return to user state. */
+1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
+	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+	VM_OFF;
+	tophys(r1,r1);
+
+	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
+
+	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
+	bri	6f;
+/* Return to kernel state. */
+2:	VM_OFF;
+	tophys(r1,r1);
+	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
+
+	tovirt(r1,r1);
+6:
+EXC_return:		/* Make global symbol for debugging */
+	rtbd	r14, 0;	/* Instructions to return from an IRQ */
+	nop;
+
+/*
+ * HW EXCEPTION routine end
+ */
+
+/*
+ * Hardware maskable interrupts.
+ *
+ * The stack-pointer (r1) should have already been saved to the memory
+ * location PER_CPU(ENTRY_SP).
+ */
+C_ENTRY(_interrupt):
+/* MS: we are in physical address */
+/* Save registers, switch to proper stack, convert SP to virtual.*/
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+	/* MS: See if already in kernel mode. */
+	lwi	r11, r0, TOPHYS(PER_CPU(KM));
+	beqi	r11, 1f;	/* MS: Jump ahead if coming from user */
+
+/* Kernel-mode state save. */
+	or	r11, r1, r0
+	tophys(r1,r11); /* MS: I have the stack's physical address in r1 */
+/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152 */
+	swi	r11, r1, (PT_R1 - PT_SIZE);
+/* MS: restore r11 because of saving in SAVE_REGS */
+	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+	/* save registers */
+/* MS: Make room on the stack -> activation record */
+	addik	r1, r1, -STATE_SAVE_SIZE;
+/* MS: store return registers separately because
+ * this macro is used for other exceptions */
+	swi	r3, r1, PTO + PT_R3;
+	swi	r4, r1, PTO + PT_R4;
+	SAVE_REGS
+	/* MS: store mode */
+	addi	r11, r0, 1;		/* MS: Was in kernel-mode. */
+	swi	r11, r1, PTO + PT_MODE;	/* MS: and save it */
+	brid	2f;
+	nop;				/* MS: Fill delay slot */
+
+1:
+/* User-mode state save. */
+/* MS: restore r11 -> FIXME move before SAVE_REG */
+	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
+	/* MS: get the saved current */
+	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+	tophys(r1,r1);
+	lwi	r1, r1, TS_THREAD_INFO;
+	addik	r1, r1, THREAD_SIZE;
+	tophys(r1,r1);
+	/* save registers */
+	addik	r1, r1, -STATE_SAVE_SIZE;
+	swi	r3, r1, PTO+PT_R3;
+	swi	r4, r1, PTO+PT_R4;
+	SAVE_REGS
+	/* calculate mode */
+	swi	r0, r1, PTO + PT_MODE;
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+	swi	r11, r1, PTO+PT_R1;
+	/* setup kernel mode to KM */
+	addi	r11, r0, 1;
+	swi	r11, r0, TOPHYS(PER_CPU(KM));
+
+2:
+	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+	swi	r0, r1, PTO + PT_R0;
+	tovirt(r1,r1)
+	la	r5, r1, PTO;
+	set_vms;
+	la	r11, r0, do_IRQ;
+	la	r15, r0, irq_call;
+irq_call:rtbd	r11, 0;
+	nop;
+
+/* MS: we are in virtual mode */
+ret_from_irq:
+	lwi	r11, r1, PTO + PT_MODE;
+	bnei	r11, 2f;
+
+	add	r11, r0, CURRENT_TASK;
+	lwi	r11, r11, TS_THREAD_INFO;
+	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
+	andi	r11, r11, _TIF_NEED_RESCHED;
+	beqi	r11, 5f
+	bralid	r15, schedule;
+	nop; /* delay slot */
+
+	/* Maybe handle a signal */
+5:	add	r11, r0, CURRENT_TASK;
+	lwi	r11, r11, TS_THREAD_INFO; /* MS: get thread info */
+	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
+	andi	r11, r11, _TIF_SIGPENDING;
+	beqid	r11, no_intr_resched
+/* Handle a signal return; Pending signals should be in r18. */
+	addi	r7, r0, 0; /* Arg 3: int in_syscall */
+	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
+	bralid	r15, do_signal;	/* Handle any signals */
+	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */
+
+/* Finally, return to user state. */
+no_intr_resched:
+	/* Disable interrupts, we are now committed to the state restore */
+	disable_irq
+	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
+	add	r11, r0, CURRENT_TASK;
+	swi	r11, r0, PER_CPU(CURRENT_SAVE);
+	VM_OFF;
+	tophys(r1,r1);
+	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
+	lwi	r4, r1, PTO + PT_R4;
+	RESTORE_REGS
+	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
+	lwi	r1, r1, PT_R1 - PT_SIZE;
+	bri	6f;
+/* MS: Return to kernel state. */
+2:	VM_OFF /* MS: turn off MMU */
+	tophys(r1,r1)
+	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
+	lwi	r4, r1, PTO + PT_R4;
+	RESTORE_REGS
+	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
+	tovirt(r1,r1);
+6:
+IRQ_return: /* MS: Make global symbol for debugging */
+	rtid	r14, 0
+	nop
+
+/*
+ * `Debug' trap
+ *  We enter dbtrap in "BIP" (breakpoint) mode.
+ *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
+ *  original dbtrap.
+ *  However, we wait to save state first.
+ */
+C_ENTRY(_debug_exception):
+	/* BIP bit is set on entry, no interrupts can occur */
+	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+
+	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
+	set_bip; /* equalize initial state for all possible entries */
+	clear_eip;
+	enable_irq;
+	lwi	r11, r0, TOPHYS(PER_CPU(KM)); /* See if already in kernel mode.*/
+	beqi	r11, 1f;	/* Jump ahead if coming from user */
+	/* Kernel-mode state save. */
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+	tophys(r1,r11);
+	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+	swi	r3, r1, PTO + PT_R3;
+	swi	r4, r1, PTO + PT_R4;
+	SAVE_REGS;
+
+	addi	r11, r0, 1;		/* Was in kernel-mode. */
+	swi	r11, r1, PTO + PT_MODE;
+	brid	2f;
+	nop;				/* Fill delay slot */
+1:	/* User-mode state save. */
+	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
+	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+	tophys(r1,r1);
+	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
+	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+	swi	r3, r1, PTO + PT_R3;
+	swi	r4, r1, PTO + PT_R4;
+	SAVE_REGS;
+
+	swi	r0, r1, PTO+PT_MODE;		/* Was in user-mode. */
+	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */
+	addi	r11, r0, 1;
+	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode. */
+2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+	/* Save away the syscall number. */
+	swi	r0, r1, PTO+PT_R0;
+	tovirt(r1,r1)
+
+	addi	r5, r0, SIGTRAP		/* Arg 1: send the trap signal */
+	add	r6, r0, CURRENT_TASK	/* Arg 2: current task */
+	addk	r7, r0, r0		/* Arg 3: zero */
+
+	set_vms;
+	la	r11, r0, send_sig;
+	la	r15, r0, dbtrap_call;
+dbtrap_call:	rtbd	r11, 0;
+	nop;
+
+	set_bip;			/* Ints masked for state restore */
+	lwi	r11, r1, PTO+PT_MODE;
+	bnei	r11, 2f;
+
+	/* Get current task ptr into r11 */
+	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_NEED_RESCHED;
+	beqi	r11, 5f;
+
+/* Call the scheduler before returning from a syscall/trap. */
+
+	bralid	r15, schedule;	/* Call scheduler */
+	nop;				/* delay slot */
+	/* XXX Is PT_DTRACE handling needed here? */
+	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
+
+	/* Maybe handle a signal */
+5:	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
+	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
+	andi	r11, r11, _TIF_SIGPENDING;
+	beqi	r11, 1f;		/* Signals to handle, handle them */
+
+/* Handle a signal return; Pending signals should be in r18. */
+	/* Not all registers are saved by the normal trap/interrupt entry
+	   points (for instance, call-saved registers (because the normal
+	   C-compiler calling sequence in the kernel makes sure they're
+	   preserved), and call-clobbered registers in the case of
+	   traps), but signal handlers may want to examine or change the
+	   complete register state. Here we save anything not saved by
+	   the normal entry sequence, so that it may be safely restored
+	   (in a possibly modified form) after do_signal returns. */
+
+	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
+	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
+	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
+	bralid	r15, do_signal;	/* Handle any signals */
+	nop;
+
+
+/* Finally, return to user state. */
+1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
+	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
+	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+	VM_OFF;
+	tophys(r1,r1);
+
+	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	RESTORE_REGS
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
+
+
+	lwi	r1, r1, PT_R1 - PT_SIZE;
+	/* Restore user stack pointer. */
+	bri	6f;
+
+/* Return to kernel state. */
+2:	VM_OFF;
+	tophys(r1,r1);
+	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
+	lwi	r4, r1, PTO+PT_R4;
+	RESTORE_REGS
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space. */
+
+	tovirt(r1,r1);
+6:
+DBTRAP_return:		/* Make global symbol for debugging */
+	rtbd	r14, 0;	/* Instructions to return from an IRQ */
+	nop;
+
+
+
+ENTRY(_switch_to)
+	/* prepare return value */
+	addk	r3, r0, r31
+
+	/* save registers in cpu_context */
+	/* use r11 and r12, volatile registers, as temp registers */
+	/* get the start of cpu_context for the previous process */
+	addik	r11, r5, TI_CPU_CONTEXT
+	swi	r1, r11, CC_R1
+	swi	r2, r11, CC_R2
+	/* skip volatile registers.
+	 * they were saved on the stack when we jumped to _switch_to() */
+	/* dedicated registers */
+	swi	r13, r11, CC_R13
+	swi	r14, r11, CC_R14
+	swi	r15, r11, CC_R15
+	swi	r16, r11, CC_R16
+	swi	r17, r11, CC_R17
+	swi	r18, r11, CC_R18
+	/* save non-volatile registers */
+	swi	r19, r11, CC_R19
+	swi	r20, r11, CC_R20
+	swi	r21, r11, CC_R21
+	swi	r22, r11, CC_R22
+	swi	r23, r11, CC_R23
+	swi	r24, r11, CC_R24
+	swi	r25, r11, CC_R25
+	swi	r26, r11, CC_R26
+	swi	r27, r11, CC_R27
+	swi	r28, r11, CC_R28
+	swi	r29, r11, CC_R29
+	swi	r30, r11, CC_R30
+	/* special purpose registers */
+	mfs	r12, rmsr
+	nop
+	swi	r12, r11, CC_MSR
+	mfs	r12, rear
+	nop
+	swi	r12, r11, CC_EAR
+	mfs	r12, resr
+	nop
+	swi	r12, r11, CC_ESR
+	mfs	r12, rfsr
+	nop
+	swi	r12, r11, CC_FSR
+
+	/* update r31, the current task */
+	lwi	r31, r6, TI_TASK /* get pointer to the task which will be next */
+	/* store it to current_save too */
+	swi	r31, r0, PER_CPU(CURRENT_SAVE)
+
+	/* get new process' cpu context and restore */
+	/* get the start of the next task's context */
+	addik	r11, r6, TI_CPU_CONTEXT
+
+	/* non-volatile registers */
+	lwi	r30, r11, CC_R30
+	lwi	r29, r11, CC_R29
+	lwi	r28, r11, CC_R28
+	lwi	r27, r11, CC_R27
+	lwi	r26, r11, CC_R26
+	lwi	r25, r11, CC_R25
+	lwi	r24, r11, CC_R24
+	lwi	r23, r11, CC_R23
+	lwi	r22, r11, CC_R22
+	lwi	r21, r11, CC_R21
+	lwi	r20, r11, CC_R20
+	lwi	r19, r11, CC_R19
+	/* dedicated registers */
+	lwi	r18, r11, CC_R18
+	lwi	r17, r11, CC_R17
+	lwi	r16, r11, CC_R16
+	lwi	r15, r11, CC_R15
+	lwi	r14, r11, CC_R14
+	lwi	r13, r11, CC_R13
+	/* skip volatile registers */
+	lwi	r2, r11, CC_R2
+	lwi	r1, r11, CC_R1
+
+	/* special purpose registers */
+	lwi	r12, r11, CC_FSR
+	mts	rfsr, r12
+	nop
+	lwi	r12, r11, CC_MSR
+	mts	rmsr, r12
+	nop
+
+	rtsd	r15, 8
+	nop
+
+ENTRY(_reset)
+	brai	0x70; /* Jump back to FS-boot */
+
+ENTRY(_break)
+	mfs	r5, rmsr
+	nop
+	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
+	mfs	r5, resr
+	nop
+	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
+	bri	0
+
+	/* These vectors are compiled and loaded into high memory, then
+	 * copied into place in mach_early_setup */
+	.section	.init.ivt, "ax"
+	.org	0x0
+	/* this is very important - here is the reset vector */
+	/* in the current MMU branch you don't care what is here - it is
+	 * used from the bootloader side - but this is correct for FS-BOOT */
+	brai	0x70
+	nop
+	brai	TOPHYS(_user_exception); /* syscall handler */
+	brai	TOPHYS(_interrupt);	/* Interrupt handler */
+	brai	TOPHYS(_break);		/* nmi trap handler */
+	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
+
+	.org	0x60
+	brai	TOPHYS(_debug_exception);	/* debug trap handler */
+
+.section .rodata,"a"
+#include "syscall_table.S"
+
+syscall_table_size=(.-sys_call_table)
+
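[Annotation - not part of the patch] The syscall protocol the new entry.S documents (number in r12, arguments in r5-r10, result in r3, trap through the _user_exception vector installed at offset 0x08 of the vector table above) can be illustrated from the user side. A minimal sketch, assuming a MicroBlaze GCC toolchain; mb_syscall1 is a hypothetical helper name:

	/* Illustrative one-argument syscall stub matching entry.S:
	 * number in r12, first arg in r5, return value in r3,
	 * entered via "brki r14, 0x08" (entry.S adds 4 to r14 so
	 * execution resumes after the delay-slot nop). */
	static inline long mb_syscall1(long nr, long arg0)
	{
		register long sysno __asm__("r12") = nr;   /* syscall number */
		register long a0    __asm__("r5")  = arg0; /* first argument */
		register long ret   __asm__("r3");         /* kernel's result */

		__asm__ volatile ("brki r14, 0x08\n\tnop"  /* trap to 0x08 */
				  : "=r" (ret)
				  : "r" (sysno), "r" (a0)
				  : "r14", "memory");
		return ret;
	}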
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 4a8a4064c7ee..0cb64a31e89a 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -21,9 +21,9 @@
 
 #include <asm/exceptions.h>
 #include <asm/entry.h>		/* For KM CPU var */
-#include <asm/uaccess.h>
-#include <asm/errno.h>
-#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
 #include <asm/current.h>
 
 #define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
@@ -31,7 +31,7 @@
 #define MICROBLAZE_DBUS_EXCEPTION 0x04
 #define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
 #define MICROBLAZE_FPU_EXCEPTION 0x06
-#define MICROBLAZE_PRIVILEG_EXCEPTION 0x07
+#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07
 
 static DEFINE_SPINLOCK(die_lock);
 
@@ -66,6 +66,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 							int fsr, int addr)
 {
+#ifdef CONFIG_MMU
+	int code;
+	addr = regs->pc;
+#endif
+
 #if 0
 	printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
 			type, user_mode(regs) ? "user" : "kernel", fsr,
@@ -74,7 +79,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 
 	switch (type & 0x1F) {
 	case MICROBLAZE_ILL_OPCODE_EXCEPTION:
-		_exception(SIGILL, regs, ILL_ILLOPC, addr);
+		if (user_mode(regs)) {
+			printk(KERN_WARNING "Illegal opcode exception in user mode.\n");
+			_exception(SIGILL, regs, ILL_ILLOPC, addr);
+			return;
+		}
+		printk(KERN_WARNING "Illegal opcode exception in kernel mode.\n");
+		die("opcode exception", regs, SIGBUS);
 		break;
 	case MICROBLAZE_IBUS_EXCEPTION:
 		if (user_mode(regs)) {
@@ -95,11 +106,16 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 		die("bus exception", regs, SIGBUS);
 		break;
 	case MICROBLAZE_DIV_ZERO_EXCEPTION:
-		printk(KERN_WARNING "Divide by zero exception\n");
-		_exception(SIGILL, regs, ILL_ILLOPC, addr);
+		if (user_mode(regs)) {
+			printk(KERN_WARNING "Divide by zero exception in user mode\n");
+			_exception(SIGILL, regs, ILL_ILLOPC, addr);
+			return;
+		}
+		printk(KERN_WARNING "Divide by zero exception in kernel mode.\n");
+		die("Divide by exception", regs, SIGBUS);
 		break;
-
 	case MICROBLAZE_FPU_EXCEPTION:
+		printk(KERN_WARNING "FPU exception\n");
 		/* IEEE FP exception */
 		/* I removed fsr variable and use code var for storing fsr */
 		if (fsr & FSR_IO)
@@ -115,7 +131,20 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 		_exception(SIGFPE, regs, fsr, addr);
 		break;
 
+#ifdef CONFIG_MMU
+	case MICROBLAZE_PRIVILEGED_EXCEPTION:
+		printk(KERN_WARNING "Privileged exception\n");
+		/* "brk r0,r0" - used as debug breakpoint */
+		if (get_user(code, (unsigned long *)regs->pc) == 0
+					&& code == 0x980c0000) {
+			_exception(SIGTRAP, regs, TRAP_BRKPT, addr);
+		} else {
+			_exception(SIGILL, regs, ILL_PRVOPC, addr);
+		}
+		break;
+#endif
 	default:
+	/* FIXME what to do in unexpected exception */
 		printk(KERN_WARNING "Unexpected exception %02x "
 			"PC=%08x in %s mode\n", type, (unsigned int) addr,
 			kernel_mode(regs) ? "kernel" : "user");
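[Annotation - not part of the patch] The new privileged-exception arm keys off a single opcode: get_user() fetches the instruction word at regs->pc, and 0x980c0000 ("brk r0,r0") selects a breakpoint trap instead of an illegal-privileged-opcode signal. A standalone C sketch of that decode (the constant comes from the hunk above; everything else is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define MB_BRK_R0_R0	0x980c0000u	/* "brk r0,r0" - debug breakpoint */

	/* Mirrors the switch arm above: the breakpoint opcode yields
	 * SIGTRAP/TRAP_BRKPT, any other privileged opcode SIGILL/ILL_PRVOPC. */
	static const char *classify_privileged(uint32_t insn)
	{
		return insn == MB_BRK_R0_R0 ? "SIGTRAP (TRAP_BRKPT)"
					    : "SIGILL (ILL_PRVOPC)";
	}

	int main(void)
	{
		printf("%s\n", classify_privileged(MB_BRK_R0_R0)); /* breakpoint */
		printf("%s\n", classify_privileged(0u));           /* anything else */
		return 0;
	}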
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 319dc35fc922..e568d6ec621b 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -3,6 +3,26 @@
  * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
+ * MMU code derived from arch/ppc/kernel/head_4xx.S:
+ *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *      Initial PowerPC version.
+ *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *      Rewritten for PReP
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Low-level exception handlers, MMU support, and rewrite.
+ *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *      PowerPC 8xx modifications.
+ *    Copyright (c) 1998-1999 TiVo, Inc.
+ *      PowerPC 403GCX modifications.
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *      PowerPC 403GCX/405GP modifications.
+ *    Copyright 2000 MontaVista Software Inc.
+ *      PPC405 modifications
+ *      PowerPC 403GCX/405GP modifications.
+ *      Author: MontaVista Software, Inc.
+ *              frank_rowand@mvista.com or source@mvista.com
+ *              debbie_chu@mvista.com
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -12,6 +32,22 @@
 #include <asm/thread_info.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_MMU
+#include <asm/setup.h> /* COMMAND_LINE_SIZE */
+#include <asm/mmu.h>
+#include <asm/processor.h>
+
+.data
+.global empty_zero_page
+.align 12
+empty_zero_page:
+	.space 4096
+.global swapper_pg_dir
+swapper_pg_dir:
+	.space 4096
+
+#endif /* CONFIG_MMU */
+
 	.text
 ENTRY(_start)
 	mfs	r1, rmsr
@@ -32,6 +68,123 @@ _copy_fdt:
 	addik	r3, r3, -4 /* decrement loop */
 no_fdt_arg:
 
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_CMDLINE_BOOL
+/*
+ * handling command line
+ * copy command line to __init_end. There is space for storing command line.
+ */
+	or	r6, r0, r0		/* increment */
+	ori	r4, r0, __init_end	/* load address of command line */
+	tophys(r4,r4)			/* convert to phys address */
+	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
+_copy_command_line:
+	lbu	r7, r5, r6 /* r7=r5+r6 - r5 contains pointer to command line */
+	sb	r7, r4, r6		/* addr[r4+r6] = r7 */
+	addik	r6, r6, 1		/* increment count */
+	bgtid	r3, _copy_command_line	/* loop for all entries */
+	addik	r3, r3, -1		/* decrement loop */
+	addik	r5, r4, 0		/* add new space for command line */
+	tovirt(r5,r5)
+#endif /* CONFIG_CMDLINE_BOOL */
+
+#ifdef NOT_COMPILE
+/* save bram context */
+	or	r6, r0, r0		/* increment */
+	ori	r4, r0, TOPHYS(_bram_load_start) /* save bram context */
+	ori	r3, r0, (LMB_SIZE - 4)
+_copy_bram:
+	lw	r7, r0, r6		/* r7 = r0 + r6 */
+	sw	r7, r4, r6		/* addr[r4 + r6] = r7 */
+	addik	r6, r6, 4		/* increment count */
+	bgtid	r3, _copy_bram		/* loop for all entries */
+	addik	r3, r3, -4		/* decrement loop */
+#endif
+	/* We have to turn on the MMU right away. */
+
+	/*
+	 * Set up the initial MMU state so we can do the first level of
+	 * kernel initialization. This maps the first 16 MBytes of memory 1:1
+	 * virtual to physical.
+	 */
+	nop
+	addik	r3, r0, 63		/* Invalidate all TLB entries */
+_invalidate:
+	mts	rtlbx, r3
+	mts	rtlbhi, r0		/* flush: ensure V is clear */
+	bgtid	r3, _invalidate		/* loop for all entries */
+	addik	r3, r3, -1
+	/* sync */
+
+	/*
+	 * We should still be executing code at physical address area
+	 * RAM_BASEADDR at this point. However, kernel code is at
+	 * a virtual address. So, set up a TLB mapping to cover this once
+	 * translation is enabled.
+	 */
+
+	addik	r3, r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
+	tophys(r4,r3)			/* Load the kernel physical address */
+
+	mts	rpid, r0		/* Load the kernel PID */
+	nop
+	bri	4
+
+	/*
+	 * Configure and load two entries into TLB slots 0 and 1.
+	 * In case we are pinning TLBs, these are reserved by the
+	 * other TLB functions. If not reserving, then it doesn't
+	 * matter where they are loaded.
+	 */
+	andi	r4, r4, 0xfffffc00	/* Mask off the real page number */
+	ori	r4, r4, (TLB_WR | TLB_EX) /* Set the write and execute bits */
+
+	andi	r3, r3, 0xfffffc00	/* Mask off the effective page number */
+	ori	r3, r3, (TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
+
+	mts	rtlbx, r0		/* TLB slot 0 */
+
+	mts	rtlblo, r4		/* Load the data portion of the entry */
+	mts	rtlbhi, r3		/* Load the tag portion of the entry */
+
+	addik	r4, r4, 0x01000000	/* Map next 16 M entries */
+	addik	r3, r3, 0x01000000
+
+	ori	r6, r0, 1		/* TLB slot 1 */
+	mts	rtlbx, r6
+
+	mts	rtlblo, r4		/* Load the data portion of the entry */
+	mts	rtlbhi, r3		/* Load the tag portion of the entry */
+
+	/*
+	 * Load a TLB entry for LMB, since we need access to
+	 * the exception vectors, using a 4k real==virtual mapping.
+	 */
+	ori	r6, r0, 3		/* TLB slot 3 */
+	mts	rtlbx, r6
+
+	ori	r4, r0, (TLB_WR | TLB_EX)
+	ori	r3, r0, (TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+	mts	rtlblo, r4		/* Load the data portion of the entry */
+	mts	rtlbhi, r3		/* Load the tag portion of the entry */
+
+	/*
+	 * We now have the lower 16 Meg of RAM mapped into TLB entries, and the
+	 * caches ready to work.
+	 */
+turn_on_mmu:
+	ori	r15, r0, start_here
+	ori	r4, r0, MSR_KERNEL_VMS
+	mts	rmsr, r4
+	nop
+	rted	r15, 0			/* enables MMU */
+	nop
+
+start_here:
+#endif /* CONFIG_MMU */
+
 	/* Initialize small data anchors */
 	la	r13, r0, _KERNEL_SDA_BASE_
 	la	r2, r0, _KERNEL_SDA2_BASE_
@@ -51,6 +204,43 @@ no_fdt_arg:
 	brald	r15, r8
 	nop
 
+#ifndef CONFIG_MMU
 	la	r15, r0, machine_halt
 	braid	start_kernel
 	nop
+#else
+	/*
+	 * Initialize the MMU.
+	 */
+	bralid	r15, mmu_init
+	nop
+
+	/* Go back to running unmapped so we can load up new values
+	 * and change to using our exception vectors.
+	 * On the MicroBlaze, we just invalidate the used TLB entries to clear
+	 * the old 16M byte TLB mappings.
+	 */
+	ori	r15, r0, TOPHYS(kernel_load_context)
+	ori	r4, r0, MSR_KERNEL
+	mts	rmsr, r4
+	nop
+	bri	4
+	rted	r15, 0
+	nop
+
+	/* Load up the kernel context */
+kernel_load_context:
+	# Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
+	ori	r5, r0, 3
+	mts	rtlbx, r5
+	nop
+	mts	rtlbhi, r0
+	nop
+	addi	r15, r0, machine_halt
+	ori	r17, r0, start_kernel
+	ori	r4, r0, MSR_KERNEL_VMS
+	mts	rmsr, r4
+	nop
+	rted	r17, 0		/* enable MMU and jump to start_kernel */
+	nop
+#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index cf9486d99838..9d591cd74fc2 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -53,6 +53,12 @@
  *  - Illegal instruction opcode
  *  - Divide-by-zero
  *
+ *  - Privileged instruction exception (MMU)
+ *  - Data storage exception (MMU)
+ *  - Instruction storage exception (MMU)
+ *  - Data TLB miss exception (MMU)
+ *  - Instruction TLB miss exception (MMU)
+ *
  * Note we disable interrupts during exception handling, otherwise we will
  * possibly get multiple re-entrancy if interrupt handlers themselves cause
  * exceptions. JW
@@ -71,9 +77,24 @@
 #include <asm/asm-offsets.h>
 
 /* Helpful Macros */
+#ifndef CONFIG_MMU
 #define EX_HANDLER_STACK_SIZ	(4*19)
+#endif
 #define NUM_TO_REG(num)		r ## num
 
+#ifdef CONFIG_MMU
+/* FIXME you can't change the first load of MSR because there is
+ * a hardcoded jump bri 4 */
+	#define RESTORE_STATE			\
+		lwi	r3, r1, PT_R3;		\
+		lwi	r4, r1, PT_R4;		\
+		lwi	r5, r1, PT_R5;		\
+		lwi	r6, r1, PT_R6;		\
+		lwi	r11, r1, PT_R11;	\
+		lwi	r31, r1, PT_R31;	\
+		lwi	r1, r0, TOPHYS(r0_ram + 0);
+#endif /* CONFIG_MMU */
+
 #define LWREG_NOP			\
 	bri	ex_handler_unhandled;	\
 	nop;
@@ -106,6 +127,54 @@
106 or r3, r0, NUM_TO_REG (regnum); \ 127 or r3, r0, NUM_TO_REG (regnum); \
107 bri ex_sw_tail; 128 bri ex_sw_tail;
108 129
130#ifdef CONFIG_MMU
131 #define R3_TO_LWREG_VM_V(regnum) \
132 brid ex_lw_end_vm; \
133 swi r3, r7, 4 * regnum;
134
135 #define R3_TO_LWREG_VM(regnum) \
136 brid ex_lw_end_vm; \
137 or NUM_TO_REG (regnum), r0, r3;
138
139 #define SWREG_TO_R3_VM_V(regnum) \
140 brid ex_sw_tail_vm; \
141 lwi r3, r7, 4 * regnum;
142
143 #define SWREG_TO_R3_VM(regnum) \
144 brid ex_sw_tail_vm; \
145 or r3, r0, NUM_TO_REG (regnum);
146
147 /* Shift right instruction depending on available configuration */
148 #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
149 #define BSRLI(rD, rA, imm) \
150 bsrli rD, rA, imm
151 #elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
152 #define BSRLI(rD, rA, imm) \
153 ori rD, r0, (1 << imm); \
154 idivu rD, rD, rA
155 #else
156 #define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
157 /* Only the used shift constants defined here - add more if needed */
158 #define BSRLI2(rD, rA) \
159 srl rD, rA; /* << 1 */ \
160 srl rD, rD; /* << 2 */
161 #define BSRLI10(rD, rA) \
162 srl rD, rA; /* << 1 */ \
163 srl rD, rD; /* << 2 */ \
164 srl rD, rD; /* << 3 */ \
165 srl rD, rD; /* << 4 */ \
166 srl rD, rD; /* << 5 */ \
167 srl rD, rD; /* << 6 */ \
168 srl rD, rD; /* << 7 */ \
169 srl rD, rD; /* << 8 */ \
170 srl rD, rD; /* << 9 */ \
171 srl rD, rD /* << 10 */
172 #define BSRLI20(rD, rA) \
173 BSRLI10(rD, rA); \
174 BSRLI10(rD, rD)
175 #endif
176#endif /* CONFIG_MMU */
177
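
The three BSRLI strategies above should produce the same result: a barrel shifter shifts right directly, the divider variant leans on idivu dividing its second operand by its first (so the quotient is rA / (1 << imm), i.e. rA >> imm), and the fallback chains single-bit srl steps. A minimal C sketch of that equivalence, illustrative only:

#include <assert.h>
#include <stdint.h>

static uint32_t bsrli_barrel(uint32_t a, unsigned imm) { return a >> imm; }

static uint32_t bsrli_idivu(uint32_t a, unsigned imm)
{
	return a / (UINT32_C(1) << imm);	/* the "ori; idivu" path */
}

static uint32_t bsrli_srl(uint32_t a, unsigned imm)
{
	while (imm--)
		a >>= 1;			/* one srl per step */
	return a;
}

int main(void)
{
	uint32_t ea = 0xc1234567;

	assert(bsrli_idivu(ea, 20) == bsrli_barrel(ea, 20));
	assert(bsrli_srl(ea, 10) == bsrli_barrel(ea, 10));
	return 0;
}
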
109.extern other_exception_handler /* Defined in exception.c */ 178.extern other_exception_handler /* Defined in exception.c */
110 179
111/* 180/*
@@ -163,34 +232,119 @@
163 232
164/* wrappers to restore state before coming to entry.S */ 233/* wrappers to restore state before coming to entry.S */
165 234
235#ifdef CONFIG_MMU
236.section .rodata
237.align 4
238_MB_HW_ExceptionVectorTable:
239/* 0 - Undefined */
240 .long TOPHYS(ex_handler_unhandled)
241/* 1 - Unaligned data access exception */
242 .long TOPHYS(handle_unaligned_ex)
243/* 2 - Illegal op-code exception */
244 .long TOPHYS(full_exception_trapw)
245/* 3 - Instruction bus error exception */
246 .long TOPHYS(full_exception_trapw)
247/* 4 - Data bus error exception */
248 .long TOPHYS(full_exception_trapw)
249/* 5 - Divide by zero exception */
250 .long TOPHYS(full_exception_trapw)
251/* 6 - Floating point unit exception */
252 .long TOPHYS(full_exception_trapw)
253/* 7 - Privileged instruction exception */
254 .long TOPHYS(full_exception_trapw)
255/* 8 - 15 - Undefined */
256 .long TOPHYS(ex_handler_unhandled)
257 .long TOPHYS(ex_handler_unhandled)
258 .long TOPHYS(ex_handler_unhandled)
259 .long TOPHYS(ex_handler_unhandled)
260 .long TOPHYS(ex_handler_unhandled)
261 .long TOPHYS(ex_handler_unhandled)
262 .long TOPHYS(ex_handler_unhandled)
263 .long TOPHYS(ex_handler_unhandled)
264/* 16 - Data storage exception */
265 .long TOPHYS(handle_data_storage_exception)
266/* 17 - Instruction storage exception */
267 .long TOPHYS(handle_instruction_storage_exception)
268/* 18 - Data TLB miss exception */
269 .long TOPHYS(handle_data_tlb_miss_exception)
270/* 19 - Instruction TLB miss exception */
271 .long TOPHYS(handle_instruction_tlb_miss_exception)
272/* 20 - 31 - Undefined */
273 .long TOPHYS(ex_handler_unhandled)
274 .long TOPHYS(ex_handler_unhandled)
275 .long TOPHYS(ex_handler_unhandled)
276 .long TOPHYS(ex_handler_unhandled)
277 .long TOPHYS(ex_handler_unhandled)
278 .long TOPHYS(ex_handler_unhandled)
279 .long TOPHYS(ex_handler_unhandled)
280 .long TOPHYS(ex_handler_unhandled)
281 .long TOPHYS(ex_handler_unhandled)
282 .long TOPHYS(ex_handler_unhandled)
283 .long TOPHYS(ex_handler_unhandled)
284 .long TOPHYS(ex_handler_unhandled)
285#endif
286
166.global _hw_exception_handler 287.global _hw_exception_handler
167.section .text 288.section .text
168.align 4 289.align 4
169.ent _hw_exception_handler 290.ent _hw_exception_handler
170_hw_exception_handler: 291_hw_exception_handler:
292#ifndef CONFIG_MMU
171 addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ 293 addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
294#else
295 swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */
 296 /* Save data to kernel memory. This is a problem
 297 * when we come from user space */
298 ori r1, r0, TOPHYS(r0_ram + 28);
299#endif
172 swi r3, r1, PT_R3 300 swi r3, r1, PT_R3
173 swi r4, r1, PT_R4 301 swi r4, r1, PT_R4
174 swi r5, r1, PT_R5 302 swi r5, r1, PT_R5
175 swi r6, r1, PT_R6 303 swi r6, r1, PT_R6
176 304
177 mfs r5, rmsr; 305#ifdef CONFIG_MMU
178 nop 306 swi r11, r1, PT_R11
179 swi r5, r1, 0; 307 swi r31, r1, PT_R31
180 mfs r4, rbtr /* Save BTR before jumping to handler */ 308 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
181 nop 309#endif
310
182 mfs r3, resr 311 mfs r3, resr
183 nop 312 nop
313 mfs r4, rear;
314 nop
184 315
316#ifndef CONFIG_MMU
185 andi r5, r3, 0x1000; /* Check ESR[DS] */ 317 andi r5, r3, 0x1000; /* Check ESR[DS] */
186 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ 318 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
187 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 319 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
188 nop 320 nop
189not_in_delay_slot: 321not_in_delay_slot:
190 swi r17, r1, PT_R17 322 swi r17, r1, PT_R17
323#endif
191 324
192 andi r5, r3, 0x1F; /* Extract ESR[EXC] */ 325 andi r5, r3, 0x1F; /* Extract ESR[EXC] */
193 326
327#ifdef CONFIG_MMU
328 /* Calculate exception vector offset = r5 << 2 */
329 addk r6, r5, r5; /* << 1 */
330 addk r6, r6, r6; /* << 2 */
331
332/* count which exceptions happen */
333 lwi r5, r0, 0x200 + TOPHYS(r0_ram)
334 addi r5, r5, 1
335 swi r5, r0, 0x200 + TOPHYS(r0_ram)
336 lwi r5, r6, 0x200 + TOPHYS(r0_ram)
337 addi r5, r5, 1
338 swi r5, r6, 0x200 + TOPHYS(r0_ram)
339/* end */
340 /* Load the HW Exception vector */
341 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
342 bra r6
343
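
Read as C, the MMU dispatch above masks ESR[EXC] down to a cause number, bumps a running total plus a per-cause counter in the r0_ram scratch area, and branches through the 32-entry vector table. A rough model with invented names (note that cause 0 would share its counter with the total, exactly as the offsets above do):

#include <stdint.h>

typedef void (*ex_handler_t)(void);

static void ex_unhandled(void) { }

/* stand-ins for _MB_HW_ExceptionVectorTable and r0_ram + 0x200 */
static ex_handler_t vector_table[32] = { [0 ... 31] = ex_unhandled };
static uint32_t ex_counters[32];

static void dispatch_exception(uint32_t esr)
{
	uint32_t cause = esr & 0x1f;	/* "andi r5, r3, 0x1F" */

	ex_counters[0]++;		/* total, offset 0 */
	ex_counters[cause]++;		/* per cause, offset cause * 4 */
	vector_table[cause]();		/* "lwi r6, r6, ...Table; bra r6" */
}
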
344full_exception_trapw:
345 RESTORE_STATE
346 bri full_exception_trap
347#else
194 /* Exceptions enabled here. This will allow nested exceptions */ 348 /* Exceptions enabled here. This will allow nested exceptions */
195 mfs r6, rmsr; 349 mfs r6, rmsr;
196 nop 350 nop
@@ -254,6 +408,7 @@ handle_other_ex: /* Handle Other exceptions here */
254 lwi r18, r1, PT_R18 408 lwi r18, r1, PT_R18
255 409
256 bri ex_handler_done; /* Complete exception handling */ 410 bri ex_handler_done; /* Complete exception handling */
411#endif
257 412
258/* 0x01 - Unaligned data access exception 413/* 0x01 - Unaligned data access exception
259 * This occurs when a word access is not aligned on a word boundary, 414 * This occurs when a word access is not aligned on a word boundary,
@@ -265,11 +420,28 @@ handle_other_ex: /* Handle Other exceptions here */
265handle_unaligned_ex: 420handle_unaligned_ex:
266 /* Working registers already saved: R3, R4, R5, R6 421 /* Working registers already saved: R3, R4, R5, R6
267 * R3 = ESR 422 * R3 = ESR
268 * R4 = BTR 423 * R4 = EAR
269 */ 424 */
270 mfs r4, rear; 425#ifdef CONFIG_MMU
426 andi r6, r3, 0x1000 /* Check ESR[DS] */
427 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
428 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
271 nop 429 nop
430_no_delayslot:
431#endif
432
433#ifdef CONFIG_MMU
434 /* Check if unaligned address is last on a 4k page */
435 andi r5, r4, 0xffc
436 xori r5, r5, 0xffc
437 bnei r5, _unaligned_ex2
438 _unaligned_ex1:
439 RESTORE_STATE;
440/* Another page must be accessed or physical address not in page table */
441 bri unaligned_data_trap
272 442
443 _unaligned_ex2:
444#endif
273 andi r6, r3, 0x3E0; /* Mask and extract the register operand */ 445 andi r6, r3, 0x3E0; /* Mask and extract the register operand */
274 srl r6, r6; /* r6 >> 5 */ 446 srl r6, r6; /* r6 >> 5 */
275 srl r6, r6; 447 srl r6, r6;
@@ -278,6 +450,45 @@ handle_unaligned_ex:
278 srl r6, r6; 450 srl r6, r6;
279 /* Store the register operand in a temporary location */ 451 /* Store the register operand in a temporary location */
280 sbi r6, r0, TOPHYS(ex_reg_op); 452 sbi r6, r0, TOPHYS(ex_reg_op);
453#ifdef CONFIG_MMU
454 /* Get physical address */
455 /* If we are faulting a kernel address, we have to use the
456 * kernel page tables.
457 */
458 ori r5, r0, CONFIG_KERNEL_START
459 cmpu r5, r4, r5
460 bgti r5, _unaligned_ex3
461 ori r5, r0, swapper_pg_dir
462 bri _unaligned_ex4
463
464 /* Get the PGD for the current thread. */
465_unaligned_ex3: /* user thread */
 466 addi r5, CURRENT_TASK, TOPHYS(0); /* get current task address */
467 lwi r5, r5, TASK_THREAD + PGDIR
468_unaligned_ex4:
469 tophys(r5,r5)
470 BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */
471 andi r6, r6, 0xffc
472/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
473 or r5, r5, r6
474 lwi r6, r5, 0 /* Get L1 entry */
475 andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */
476 beqi r5, _unaligned_ex1 /* Bail if no table */
477
478 tophys(r5,r5)
479 BSRLI(r6,r4,10) /* Compute PTE address */
480 andi r6, r6, 0xffc
481 andi r5, r5, 0xfffff003
482 or r5, r5, r6
483 lwi r5, r5, 0 /* Get Linux PTE */
484
485 andi r6, r5, _PAGE_PRESENT
486 beqi r6, _unaligned_ex1 /* Bail if no page */
487
488 andi r5, r5, 0xfffff000 /* Extract RPN */
489 andi r4, r4, 0x00000fff /* Extract offset */
490 or r4, r4, r5 /* Create physical address */
491#endif /* CONFIG_MMU */
281 492
282 andi r6, r3, 0x400; /* Extract ESR[S] */ 493 andi r6, r3, 0x400; /* Extract ESR[S] */
283 bnei r6, ex_sw; 494 bnei r6, ex_sw;
@@ -355,6 +566,7 @@ ex_shw:
355ex_sw_end: /* Exception handling of store word, ends. */ 566ex_sw_end: /* Exception handling of store word, ends. */
356 567
357ex_handler_done: 568ex_handler_done:
569#ifndef CONFIG_MMU
358 lwi r5, r1, 0 /* RMSR */ 570 lwi r5, r1, 0 /* RMSR */
359 mts rmsr, r5 571 mts rmsr, r5
360 nop 572 nop
@@ -366,13 +578,455 @@ ex_handler_done:
366 578
367 rted r17, 0 579 rted r17, 0
368 addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */ 580 addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
581#else
582 RESTORE_STATE;
583 rted r17, 0
584 nop
585#endif
586
587#ifdef CONFIG_MMU
588 /* Exception vector entry code. This code runs with address translation
589 * turned off (i.e. using physical addresses). */
590
591 /* Exception vectors. */
592
593 /* 0x10 - Data Storage Exception
594 * This happens for just a few reasons. U0 set (but we don't do that),
595 * or zone protection fault (user violation, write to protected page).
596 * If this is just an update of modified status, we do that quickly
597 * and exit. Otherwise, we call heavyweight functions to do the work.
598 */
599 handle_data_storage_exception:
600 /* Working registers already saved: R3, R4, R5, R6
601 * R3 = ESR
602 */
603 mfs r11, rpid
604 nop
605 bri 4
606 mfs r3, rear /* Get faulting address */
607 nop
608 /* If we are faulting a kernel address, we have to use the
609 * kernel page tables.
610 */
611 ori r4, r0, CONFIG_KERNEL_START
612 cmpu r4, r3, r4
613 bgti r4, ex3
614 /* First, check if it was a zone fault (which means a user
615 * tried to access a kernel or read-protected page - always
616 * a SEGV). All other faults here must be stores, so no
617 * need to check ESR_S as well. */
618 mfs r4, resr
619 nop
620 andi r4, r4, 0x800 /* ESR_Z - zone protection */
621 bnei r4, ex2
622
623 ori r4, r0, swapper_pg_dir
624 mts rpid, r0 /* TLB will have 0 TID */
625 nop
626 bri ex4
627
628 /* Get the PGD for the current thread. */
629 ex3:
630 /* First, check if it was a zone fault (which means a user
631 * tried to access a kernel or read-protected page - always
632 * a SEGV). All other faults here must be stores, so no
633 * need to check ESR_S as well. */
634 mfs r4, resr
635 nop
636 andi r4, r4, 0x800 /* ESR_Z */
637 bnei r4, ex2
638 /* get current task address */
 639 addi r4, CURRENT_TASK, TOPHYS(0);
640 lwi r4, r4, TASK_THREAD+PGDIR
641 ex4:
642 tophys(r4,r4)
643 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
644 andi r5, r5, 0xffc
645/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
646 or r4, r4, r5
647 lwi r4, r4, 0 /* Get L1 entry */
648 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
649 beqi r5, ex2 /* Bail if no table */
650
651 tophys(r5,r5)
652 BSRLI(r6,r3,10) /* Compute PTE address */
653 andi r6, r6, 0xffc
654 andi r5, r5, 0xfffff003
655 or r5, r5, r6
656 lwi r4, r5, 0 /* Get Linux PTE */
657
658 andi r6, r4, _PAGE_RW /* Is it writeable? */
659 beqi r6, ex2 /* Bail if not */
660
661 /* Update 'changed' */
662 ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
663 swi r4, r5, 0 /* Update Linux page table */
664
665 /* Most of the Linux PTE is ready to load into the TLB LO.
666 * We set ZSEL, where only the LS-bit determines user access.
667 * We set execute, because we don't have the granularity to
668 * properly set this at the page level (Linux problem).
669 * If shared is set, we cause a zero PID->TID load.
670 * Many of these bits are software only. Bits we don't set
671 * here we (properly should) assume have the appropriate value.
672 */
673 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
674 ori r4, r4, _PAGE_HWEXEC /* make it executable */
675
 676 /* Find the TLB index that caused the fault. It has to be here */
677 mts rtlbsx, r3
678 nop
679 mfs r5, rtlbx /* DEBUG: TBD */
680 nop
681 mts rtlblo, r4 /* Load TLB LO */
682 nop
683 /* Will sync shadow TLBs */
684
685 /* Done...restore registers and get out of here. */
686 mts rpid, r11
687 nop
688 bri 4
689
690 RESTORE_STATE;
691 rted r17, 0
692 nop
693 ex2:
694 /* The bailout. Restore registers to pre-exception conditions
695 * and call the heavyweights to help us out. */
696 mts rpid, r11
697 nop
698 bri 4
699 RESTORE_STATE;
700 bri page_fault_data_trap
701
702
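
All four MMU handlers in this file repeat the same two-level walk: choose swapper_pg_dir or the thread's pgdir, index the L1 table by byte offset (ea >> 20) & 0xffc (entry ea >> 22), then the PTE table by (ea >> 10) & 0xffc (entry (ea >> 12) & 0x3ff), bailing to the C fault path if either level is empty. A simplified sketch, with _PAGE_PRESENT stubbed in:

#include <stdint.h>

#define PTE_PRESENT 0x001u	/* stand-in for _PAGE_PRESENT */

static uint32_t walk_page_table(const uint32_t *pgdir, uint32_t ea, int *ok)
{
	uint32_t l1 = pgdir[ea >> 22];			/* L1 (pgd/pmd) entry */
	const uint32_t *pte_base =
		(const uint32_t *)(uintptr_t)(l1 & 0xfffff000u);
	uint32_t pte;

	*ok = 0;
	if (!pte_base)
		return 0;		/* no L2 table: bail to C handler */

	pte = pte_base[(ea >> 12) & 0x3ff];		/* L2 (PTE) entry */
	if (!(pte & PTE_PRESENT))
		return 0;		/* page not present: bail */

	*ok = 1;
	return (pte & 0xfffff000u) | (ea & 0xfffu);	/* RPN | page offset */
}
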
703 /* 0x11 - Instruction Storage Exception
704 * This is caused by a fetch from non-execute or guarded pages. */
705 handle_instruction_storage_exception:
706 /* Working registers already saved: R3, R4, R5, R6
707 * R3 = ESR
708 */
709
710 mfs r3, rear /* Get faulting address */
711 nop
712 RESTORE_STATE;
713 bri page_fault_instr_trap
714
715 /* 0x12 - Data TLB Miss Exception
716 * As the name implies, translation is not in the MMU, so search the
717 * page tables and fix it. The only purpose of this function is to
718 * load TLB entries from the page table if they exist.
719 */
720 handle_data_tlb_miss_exception:
721 /* Working registers already saved: R3, R4, R5, R6
722 * R3 = ESR
723 */
724 mfs r11, rpid
725 nop
726 bri 4
727 mfs r3, rear /* Get faulting address */
728 nop
729
730 /* If we are faulting a kernel address, we have to use the
731 * kernel page tables. */
732 ori r4, r0, CONFIG_KERNEL_START
733 cmpu r4, r3, r4
734 bgti r4, ex5
735 ori r4, r0, swapper_pg_dir
736 mts rpid, r0 /* TLB will have 0 TID */
737 nop
738 bri ex6
369 739
740 /* Get the PGD for the current thread. */
741 ex5:
742 /* get current task address */
 743 addi r4, CURRENT_TASK, TOPHYS(0);
744 lwi r4, r4, TASK_THREAD+PGDIR
745 ex6:
746 tophys(r4,r4)
747 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
748 andi r5, r5, 0xffc
749/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
750 or r4, r4, r5
751 lwi r4, r4, 0 /* Get L1 entry */
752 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
753 beqi r5, ex7 /* Bail if no table */
754
755 tophys(r5,r5)
756 BSRLI(r6,r3,10) /* Compute PTE address */
757 andi r6, r6, 0xffc
758 andi r5, r5, 0xfffff003
759 or r5, r5, r6
760 lwi r4, r5, 0 /* Get Linux PTE */
761
762 andi r6, r4, _PAGE_PRESENT
763 beqi r6, ex7
764
765 ori r4, r4, _PAGE_ACCESSED
766 swi r4, r5, 0
767
768 /* Most of the Linux PTE is ready to load into the TLB LO.
769 * We set ZSEL, where only the LS-bit determines user access.
770 * We set execute, because we don't have the granularity to
771 * properly set this at the page level (Linux problem).
772 * If shared is set, we cause a zero PID->TID load.
773 * Many of these bits are software only. Bits we don't set
774 * here we (properly should) assume have the appropriate value.
775 */
776 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
777
778 bri finish_tlb_load
779 ex7:
780 /* The bailout. Restore registers to pre-exception conditions
781 * and call the heavyweights to help us out.
782 */
783 mts rpid, r11
784 nop
785 bri 4
786 RESTORE_STATE;
787 bri page_fault_data_trap
788
789 /* 0x13 - Instruction TLB Miss Exception
790 * Nearly the same as above, except we get our information from
791 * different registers and bailout to a different point.
792 */
793 handle_instruction_tlb_miss_exception:
794 /* Working registers already saved: R3, R4, R5, R6
795 * R3 = ESR
796 */
797 mfs r11, rpid
798 nop
799 bri 4
800 mfs r3, rear /* Get faulting address */
801 nop
802
803 /* If we are faulting a kernel address, we have to use the
804 * kernel page tables.
805 */
806 ori r4, r0, CONFIG_KERNEL_START
807 cmpu r4, r3, r4
808 bgti r4, ex8
809 ori r4, r0, swapper_pg_dir
810 mts rpid, r0 /* TLB will have 0 TID */
811 nop
812 bri ex9
813
814 /* Get the PGD for the current thread. */
815 ex8:
816 /* get current task address */
 817 addi r4, CURRENT_TASK, TOPHYS(0);
818 lwi r4, r4, TASK_THREAD+PGDIR
819 ex9:
820 tophys(r4,r4)
821 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
822 andi r5, r5, 0xffc
823/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
824 or r4, r4, r5
825 lwi r4, r4, 0 /* Get L1 entry */
826 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
827 beqi r5, ex10 /* Bail if no table */
828
829 tophys(r5,r5)
830 BSRLI(r6,r3,10) /* Compute PTE address */
831 andi r6, r6, 0xffc
832 andi r5, r5, 0xfffff003
833 or r5, r5, r6
834 lwi r4, r5, 0 /* Get Linux PTE */
835
836 andi r6, r4, _PAGE_PRESENT
 837 beqi r6, ex10
838
839 ori r4, r4, _PAGE_ACCESSED
840 swi r4, r5, 0
841
842 /* Most of the Linux PTE is ready to load into the TLB LO.
843 * We set ZSEL, where only the LS-bit determines user access.
844 * We set execute, because we don't have the granularity to
845 * properly set this at the page level (Linux problem).
846 * If shared is set, we cause a zero PID->TID load.
847 * Many of these bits are software only. Bits we don't set
848 * here we (properly should) assume have the appropriate value.
849 */
850 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
851
852 bri finish_tlb_load
853 ex10:
854 /* The bailout. Restore registers to pre-exception conditions
855 * and call the heavyweights to help us out.
856 */
857 mts rpid, r11
858 nop
859 bri 4
860 RESTORE_STATE;
861 bri page_fault_instr_trap
862
863/* Both the instruction and data TLB miss get to this point to load the TLB.
864 * r3 - EA of fault
865 * r4 - TLB LO (info from Linux PTE)
866 * r5, r6 - available to use
867 * PID - loaded with proper value when we get here
868 * Upon exit, we reload everything and RFI.
869 * A common place to load the TLB.
870 */
871 tlb_index:
872 .long 1 /* MS: storing last used tlb index */
873 finish_tlb_load:
874 /* MS: load the last used TLB index. */
875 lwi r5, r0, TOPHYS(tlb_index)
876 addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
877
878/* MS: FIXME this is a potential fault, because this is a mask, not a count */
879 andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
880 ori r6, r0, 1
881 cmp r31, r5, r6
882 blti r31, sem
883 addik r5, r6, 1
884 sem:
885 /* MS: save back current TLB index */
886 swi r5, r0, TOPHYS(tlb_index)
887
888 ori r4, r4, _PAGE_HWEXEC /* make it executable */
889 mts rtlbx, r5 /* MS: save current TLB */
890 nop
891 mts rtlblo, r4 /* MS: save to TLB LO */
892 nop
893
894 /* Create EPN. This is the faulting address plus a static
895 * set of bits. These are size, valid, E, U0, and ensure
896 * bits 20 and 21 are zero.
897 */
898 andi r3, r3, 0xfffff000
899 ori r3, r3, 0x0c0
900 mts rtlbhi, r3 /* Load TLB HI */
901 nop
902
903 /* Done...restore registers and get out of here. */
904 ex12:
905 mts rpid, r11
906 nop
907 bri 4
908 RESTORE_STATE;
909 rted r17, 0
910 nop
911
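
finish_tlb_load's slot allocation reduces to: increment the last-used index, wrap it with a power-of-two mask (the FIXME above flags that assumption), and push anything that lands on the pinned slots 0 or 1 up to slot 2; the TLB HI word is then just the faulting page OR'd with the valid/size bits (the 0x0c0 constant). The slot logic as a C sketch:

#include <stdint.h>

#define MICROBLAZE_TLB_SIZE 64	/* assumed power of two, per the FIXME */

static uint32_t tlb_index = 1;	/* mirrors the tlb_index word above */

static uint32_t next_tlb_slot(void)
{
	uint32_t slot = (tlb_index + 1) & (MICROBLAZE_TLB_SIZE - 1);

	if (slot < 2)		/* slots 0 and 1 hold pinned mappings */
		slot = 2;
	tlb_index = slot;
	return slot;
}
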
912 /* extern void giveup_fpu(struct task_struct *prev)
913 *
914 * The MicroBlaze processor may have an FPU, so this should not just
915 * return: TBD.
916 */
917 .globl giveup_fpu;
918 .align 4;
919 giveup_fpu:
920 bralid r15,0 /* TBD */
921 nop
922
923 /* At present, this routine just hangs. - extern void abort(void) */
924 .globl abort;
925 .align 4;
926 abort:
927 br r0
928
929 .globl set_context;
930 .align 4;
931 set_context:
932 mts rpid, r5 /* Shadow TLBs are automatically */
933 nop
934 bri 4 /* flushed by changing PID */
935 rtsd r15,8
936 nop
937
938#endif
370.end _hw_exception_handler 939.end _hw_exception_handler
371 940
941#ifdef CONFIG_MMU
942/* Unaligned data access exception last on a 4k page for MMU.
943 * When this is called, we are in virtual mode with exceptions enabled
944 * and registers 1-13,15,17,18 saved.
945 *
946 * R3 = ESR
947 * R4 = EAR
948 * R7 = pointer to saved registers (struct pt_regs *regs)
949 *
 950 * This handler performs the access and returns via ret_from_exc.
951 */
952.global _unaligned_data_exception
953.ent _unaligned_data_exception
954_unaligned_data_exception:
955 andi r8, r3, 0x3E0; /* Mask and extract the register operand */
956 BSRLI(r8,r8,2); /* r8 >> 2 = register operand * 8 */
957 andi r6, r3, 0x400; /* Extract ESR[S] */
958 bneid r6, ex_sw_vm;
959 andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
960ex_lw_vm:
961 beqid r6, ex_lhw_vm;
962 lbui r5, r4, 0; /* Exception address in r4 - delay slot */
963/* Load a word, byte-by-byte from destination address and save it in tmp space*/
964 la r6, r0, ex_tmp_data_loc_0;
965 sbi r5, r6, 0;
966 lbui r5, r4, 1;
967 sbi r5, r6, 1;
968 lbui r5, r4, 2;
969 sbi r5, r6, 2;
970 lbui r5, r4, 3;
971 sbi r5, r6, 3;
972 brid ex_lw_tail_vm;
973/* Get the destination register value into r3 - delay slot */
974 lwi r3, r6, 0;
975ex_lhw_vm:
976 /* Load a half-word, byte-by-byte from destination address and
977 * save it in tmp space */
978 la r6, r0, ex_tmp_data_loc_0;
979 sbi r5, r6, 0;
980 lbui r5, r4, 1;
981 sbi r5, r6, 1;
982 lhui r3, r6, 0; /* Get the destination register value into r3 */
983ex_lw_tail_vm:
984 /* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
985 addik r5, r8, lw_table_vm;
986 bra r5;
987ex_lw_end_vm: /* Exception handling of load word, ends */
988 brai ret_from_exc;
989ex_sw_vm:
990/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
991 addik r5, r8, sw_table_vm;
992 bra r5;
993ex_sw_tail_vm:
994 la r5, r0, ex_tmp_data_loc_0;
995 beqid r6, ex_shw_vm;
996 swi r3, r5, 0; /* Get the word - delay slot */
997 /* Store the word, byte-by-byte into destination address */
998 lbui r3, r5, 0;
999 sbi r3, r4, 0;
1000 lbui r3, r5, 1;
1001 sbi r3, r4, 1;
1002 lbui r3, r5, 2;
1003 sbi r3, r4, 2;
1004 lbui r3, r5, 3;
1005 brid ret_from_exc;
1006 sbi r3, r4, 3; /* Delay slot */
1007ex_shw_vm:
1008 /* Store the lower half-word, byte-by-byte into destination address */
1009 lbui r3, r5, 2;
1010 sbi r3, r4, 0;
1011 lbui r3, r5, 3;
1012 brid ret_from_exc;
1013 sbi r3, r4, 1; /* Delay slot */
1014ex_sw_end_vm: /* Exception handling of store word, ends. */
1015.end _unaligned_data_exception
1016#endif /* CONFIG_MMU */
1017
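
The VM-mode fixup never issues a misaligned access itself: it assembles the word from single byte loads into the aligned scratch slot ex_tmp_data_loc_0, or scatters it back byte by byte, following MicroBlaze's big-endian layout. The equivalent C, for illustration:

#include <stdint.h>

static uint32_t load_unaligned_be32(const uint8_t *ea)
{
	return ((uint32_t)ea[0] << 24) | ((uint32_t)ea[1] << 16) |
	       ((uint32_t)ea[2] << 8)  |  (uint32_t)ea[3];
}

static void store_unaligned_be32(uint8_t *ea, uint32_t val)
{
	ea[0] = val >> 24;
	ea[1] = val >> 16;
	ea[2] = val >> 8;
	ea[3] = val;
}
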
372ex_handler_unhandled: 1018ex_handler_unhandled:
373/* FIXME add handle function for unhandled exception - dump register */ 1019/* FIXME add handle function for unhandled exception - dump register */
374 bri 0 1020 bri 0
375 1021
1022/*
1023 * hw_exception_handler Jump Table
1024 * - Contains a code snippet for each register that can cause the unaligned exception
1025 * - Hence the exception handler is NOT self-modifying
1026 * - Separate table for load exceptions and store exceptions.
1027 * - Each table is of size: (8 * 32) = 256 bytes
1028 */
1029
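
Each table entry is exactly two 4-byte instructions (8 bytes), so the branch target is table base + 8 * regnum; that is why _unaligned_data_exception turns the ESR register field (regnum << 5) into regnum * 8 with a single right shift by 2. In C terms:

#include <stdint.h>

static uintptr_t snippet_addr(uintptr_t table_base, unsigned regnum)
{
	return table_base + 8u * regnum;	/* e.g. lw_table + reg * 8 */
}
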
376.section .text 1030.section .text
377.align 4 1031.align 4
378lw_table: 1032lw_table:
@@ -407,7 +1061,11 @@ lw_r27: R3_TO_LWREG (27);
407lw_r28: R3_TO_LWREG (28); 1061lw_r28: R3_TO_LWREG (28);
408lw_r29: R3_TO_LWREG (29); 1062lw_r29: R3_TO_LWREG (29);
409lw_r30: R3_TO_LWREG (30); 1063lw_r30: R3_TO_LWREG (30);
1064#ifdef CONFIG_MMU
1065lw_r31: R3_TO_LWREG_V (31);
1066#else
410lw_r31: R3_TO_LWREG (31); 1067lw_r31: R3_TO_LWREG (31);
1068#endif
411 1069
412sw_table: 1070sw_table:
413sw_r0: SWREG_TO_R3 (0); 1071sw_r0: SWREG_TO_R3 (0);
@@ -441,7 +1099,81 @@ sw_r27: SWREG_TO_R3 (27);
441sw_r28: SWREG_TO_R3 (28); 1099sw_r28: SWREG_TO_R3 (28);
442sw_r29: SWREG_TO_R3 (29); 1100sw_r29: SWREG_TO_R3 (29);
443sw_r30: SWREG_TO_R3 (30); 1101sw_r30: SWREG_TO_R3 (30);
1102#ifdef CONFIG_MMU
1103sw_r31: SWREG_TO_R3_V (31);
1104#else
444sw_r31: SWREG_TO_R3 (31); 1105sw_r31: SWREG_TO_R3 (31);
1106#endif
1107
1108#ifdef CONFIG_MMU
1109lw_table_vm:
1110lw_r0_vm: R3_TO_LWREG_VM (0);
1111lw_r1_vm: R3_TO_LWREG_VM_V (1);
1112lw_r2_vm: R3_TO_LWREG_VM_V (2);
1113lw_r3_vm: R3_TO_LWREG_VM_V (3);
1114lw_r4_vm: R3_TO_LWREG_VM_V (4);
1115lw_r5_vm: R3_TO_LWREG_VM_V (5);
1116lw_r6_vm: R3_TO_LWREG_VM_V (6);
1117lw_r7_vm: R3_TO_LWREG_VM_V (7);
1118lw_r8_vm: R3_TO_LWREG_VM_V (8);
1119lw_r9_vm: R3_TO_LWREG_VM_V (9);
1120lw_r10_vm: R3_TO_LWREG_VM_V (10);
1121lw_r11_vm: R3_TO_LWREG_VM_V (11);
1122lw_r12_vm: R3_TO_LWREG_VM_V (12);
1123lw_r13_vm: R3_TO_LWREG_VM_V (13);
1124lw_r14_vm: R3_TO_LWREG_VM (14);
1125lw_r15_vm: R3_TO_LWREG_VM_V (15);
1126lw_r16_vm: R3_TO_LWREG_VM (16);
1127lw_r17_vm: R3_TO_LWREG_VM_V (17);
1128lw_r18_vm: R3_TO_LWREG_VM_V (18);
1129lw_r19_vm: R3_TO_LWREG_VM (19);
1130lw_r20_vm: R3_TO_LWREG_VM (20);
1131lw_r21_vm: R3_TO_LWREG_VM (21);
1132lw_r22_vm: R3_TO_LWREG_VM (22);
1133lw_r23_vm: R3_TO_LWREG_VM (23);
1134lw_r24_vm: R3_TO_LWREG_VM (24);
1135lw_r25_vm: R3_TO_LWREG_VM (25);
1136lw_r26_vm: R3_TO_LWREG_VM (26);
1137lw_r27_vm: R3_TO_LWREG_VM (27);
1138lw_r28_vm: R3_TO_LWREG_VM (28);
1139lw_r29_vm: R3_TO_LWREG_VM (29);
1140lw_r30_vm: R3_TO_LWREG_VM (30);
1141lw_r31_vm: R3_TO_LWREG_VM_V (31);
1142
1143sw_table_vm:
1144sw_r0_vm: SWREG_TO_R3_VM (0);
1145sw_r1_vm: SWREG_TO_R3_VM_V (1);
1146sw_r2_vm: SWREG_TO_R3_VM_V (2);
1147sw_r3_vm: SWREG_TO_R3_VM_V (3);
1148sw_r4_vm: SWREG_TO_R3_VM_V (4);
1149sw_r5_vm: SWREG_TO_R3_VM_V (5);
1150sw_r6_vm: SWREG_TO_R3_VM_V (6);
1151sw_r7_vm: SWREG_TO_R3_VM_V (7);
1152sw_r8_vm: SWREG_TO_R3_VM_V (8);
1153sw_r9_vm: SWREG_TO_R3_VM_V (9);
1154sw_r10_vm: SWREG_TO_R3_VM_V (10);
1155sw_r11_vm: SWREG_TO_R3_VM_V (11);
1156sw_r12_vm: SWREG_TO_R3_VM_V (12);
1157sw_r13_vm: SWREG_TO_R3_VM_V (13);
1158sw_r14_vm: SWREG_TO_R3_VM (14);
1159sw_r15_vm: SWREG_TO_R3_VM_V (15);
1160sw_r16_vm: SWREG_TO_R3_VM (16);
1161sw_r17_vm: SWREG_TO_R3_VM_V (17);
1162sw_r18_vm: SWREG_TO_R3_VM_V (18);
1163sw_r19_vm: SWREG_TO_R3_VM (19);
1164sw_r20_vm: SWREG_TO_R3_VM (20);
1165sw_r21_vm: SWREG_TO_R3_VM (21);
1166sw_r22_vm: SWREG_TO_R3_VM (22);
1167sw_r23_vm: SWREG_TO_R3_VM (23);
1168sw_r24_vm: SWREG_TO_R3_VM (24);
1169sw_r25_vm: SWREG_TO_R3_VM (25);
1170sw_r26_vm: SWREG_TO_R3_VM (26);
1171sw_r27_vm: SWREG_TO_R3_VM (27);
1172sw_r28_vm: SWREG_TO_R3_VM (28);
1173sw_r29_vm: SWREG_TO_R3_VM (29);
1174sw_r30_vm: SWREG_TO_R3_VM (30);
1175sw_r31_vm: SWREG_TO_R3_VM_V (31);
1176#endif /* CONFIG_MMU */
445 1177
446/* Temporary data structures used in the handler */ 1178/* Temporary data structures used in the handler */
447.section .data 1179.section .data
diff --git a/arch/microblaze/kernel/init_task.c b/arch/microblaze/kernel/init_task.c
index 48eb9fb255fa..67da22579b62 100644
--- a/arch/microblaze/kernel/init_task.c
+++ b/arch/microblaze/kernel/init_task.c
@@ -18,8 +18,6 @@
18 18
19static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 19static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
20static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 20static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
21struct mm_struct init_mm = INIT_MM(init_mm);
22EXPORT_SYMBOL(init_mm);
23 21
24union thread_union init_thread_union 22union thread_union init_thread_union
25 __attribute__((__section__(".data.init_task"))) = 23 __attribute__((__section__(".data.init_task"))) =
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 5f71790e3c3c..59ff20e33e0c 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -45,3 +45,5 @@ extern void __udivsi3(void);
45EXPORT_SYMBOL(__udivsi3); 45EXPORT_SYMBOL(__udivsi3);
46extern void __umodsi3(void); 46extern void __umodsi3(void);
47EXPORT_SYMBOL(__umodsi3); 47EXPORT_SYMBOL(__umodsi3);
48extern char *_ebss;
49EXPORT_SYMBOL_GPL(_ebss);
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
new file mode 100644
index 000000000000..df16c6287a8e
--- /dev/null
+++ b/arch/microblaze/kernel/misc.S
@@ -0,0 +1,120 @@
1/*
2 * Miscellaneous low-level MMU functions.
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
7 *
8 * Derived from arch/ppc/kernel/misc.S
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 */
14
15#include <linux/linkage.h>
16#include <linux/sys.h>
17#include <asm/unistd.h>
18#include <linux/errno.h>
19#include <asm/mmu.h>
20#include <asm/page.h>
21
22 .text
23/*
24 * Flush MMU TLB
25 *
26 * We avoid flushing the pinned entries 0, 1 and possibly 2.
27 */
28.globl _tlbia;
29.align 4;
30_tlbia:
31 addik r12, r0, 63 /* flush all entries (63 - 3) */
32 /* isync */
33_tlbia_1:
34 mts rtlbx, r12
35 nop
36 mts rtlbhi, r0 /* flush: ensure V is clear */
37 nop
38 addik r11, r12, -2
39 bneid r11, _tlbia_1 /* loop for all entries */
40 addik r12, r12, -1
41 /* sync */
42 rtsd r15, 8
43 nop
44
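
_tlbia counts r12 down from 63, clearing each entry's tag word, and stops once entry 2 has been cleared, so the pinned entries 0 and 1 survive. The same loop in C, with the privileged register writes stubbed out:

static void mts_rtlbx(int slot) { (void)slot; }	/* select entry (stub) */
static void mts_rtlbhi(int tag) { (void)tag; }	/* write tag word (stub) */

static void tlbia_model(void)
{
	int slot;

	for (slot = 63; slot >= 2; slot--) {
		mts_rtlbx(slot);
		mts_rtlbhi(0);	/* V bit clear: entry invalid */
	}
}
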
45/*
46 * Flush MMU TLB for a particular address (in r5)
47 */
48.globl _tlbie;
49.align 4;
50_tlbie:
51 mts rtlbsx, r5 /* look up the address in TLB */
52 nop
53 mfs r12, rtlbx /* Retrieve index */
54 nop
55 blti r12, _tlbie_1 /* Check if found */
56 mts rtlbhi, r0 /* flush: ensure V is clear */
57 nop
58_tlbie_1:
59 rtsd r15, 8
60 nop
61
62/*
63 * Allocate TLB entry for early console
64 */
65.globl early_console_reg_tlb_alloc;
66.align 4;
67early_console_reg_tlb_alloc:
68 /*
69 * Load a TLB entry for the UART, so that microblaze_progress() can use
70 * the UARTs nice and early. We use a 4k real==virtual mapping.
71 */
72 ori r4, r0, 63
 73 mts rtlbx, r4 /* TLB slot 63 */
74
75 or r4,r5,r0
76 andi r4,r4,0xfffff000
77 ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
78
79 andi r5,r5,0xfffff000
80 ori r5,r5,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
81
82 mts rtlblo,r4 /* Load the data portion of the entry */
83 nop
84 mts rtlbhi,r5 /* Load the tag portion of the entry */
85 nop
86 rtsd r15, 8
87 nop
88
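
The early-console entry is a 4k identity mapping: the data (LO) word is the page-aligned UART address plus the write/inhibit/coherency/guard flags, and the tag (HI) word is the same address plus the valid and page-size bits. A C sketch with stand-in flag values (the real masks live in asm/mmu.h):

#include <stdint.h>

/* stand-in values so the sketch compiles; see asm/mmu.h for the real ones */
#define TLB_VALID	0x040u
#define TLB_PAGESZ_4K	0x000u
#define TLB_WR		0x100u
#define TLB_I		0x400u
#define TLB_M		0x200u
#define TLB_G		0x800u

static void uart_identity_tlb(uint32_t uart_addr,
			      uint32_t *tlb_lo, uint32_t *tlb_hi)
{
	uint32_t page = uart_addr & 0xfffff000u;	/* 4k aligned */

	*tlb_lo = page | TLB_WR | TLB_I | TLB_M | TLB_G; /* data + prot */
	*tlb_hi = page | TLB_VALID | TLB_PAGESZ_4K;	 /* virt == phys tag */
}
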
89/*
90 * Copy a whole page (4096 bytes).
91 */
92#define COPY_16_BYTES \
93 lwi r7, r6, 0; \
94 lwi r8, r6, 4; \
95 lwi r9, r6, 8; \
96 lwi r10, r6, 12; \
97 swi r7, r5, 0; \
98 swi r8, r5, 4; \
99 swi r9, r5, 8; \
100 swi r10, r5, 12
101
102
103/* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/
104#define DCACHE_LINE_BYTES (4 * 4)
105
106.globl copy_page;
107.align 4;
108copy_page:
109 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
110_copy_page_loop:
111 COPY_16_BYTES
112#if DCACHE_LINE_BYTES >= 32
113 COPY_16_BYTES
114#endif
115 addik r6, r6, DCACHE_LINE_BYTES
116 addik r5, r5, DCACHE_LINE_BYTES
117 bneid r11, _copy_page_loop
118 addik r11, r11, -1
119 rtsd r15, 8
120 nop
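
copy_page moves a 4k page one dcache line per iteration, 16 bytes at a time through COPY_16_BYTES (a second COPY_16_BYTES is compiled in for 32-byte lines). Restated in C under the same 16-byte-line assumption the FIXME notes:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE_4K	4096
#define DCACHE_LINE	16	/* assumed, per the FIXME above */

static void copy_page_model(uint32_t *dst, const uint32_t *src)
{
	size_t line;

	for (line = 0; line < PAGE_SIZE_4K / DCACHE_LINE; line++) {
		int w;

		for (w = 0; w < DCACHE_LINE / 4; w++)
			dst[w] = src[w];	/* COPY_16_BYTES */
		dst += DCACHE_LINE / 4;
		src += DCACHE_LINE / 4;
	}
}
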
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 07d4fa339eda..00b12c6d5326 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -126,9 +126,54 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
126 else 126 else
127 childregs->r1 = ((unsigned long) ti) + THREAD_SIZE; 127 childregs->r1 = ((unsigned long) ti) + THREAD_SIZE;
128 128
129#ifndef CONFIG_MMU
129 memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); 130 memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
130 ti->cpu_context.r1 = (unsigned long)childregs; 131 ti->cpu_context.r1 = (unsigned long)childregs;
131 ti->cpu_context.msr = (unsigned long)childregs->msr; 132 ti->cpu_context.msr = (unsigned long)childregs->msr;
133#else
134
135 /* if creating a kernel thread then update the current reg (we don't
136 * want to use the parent's value when restoring by POP_STATE) */
137 if (kernel_mode(regs))
138 /* save new current on stack to use POP_STATE */
139 childregs->CURRENT_TASK = (unsigned long)p;
140 /* if returning to user then use the parent's value of this register */
141
 142 /* if we're creating a new kernel thread then just zero all
 143 * the registers. That's OK for a brand new thread. */
 144 /* Please note that some of them will be restored in POP_STATE */
145 if (kernel_mode(regs))
146 memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
147 /* if this thread is created for fork/vfork/clone, then we want to
148 * restore all the parent's context */
149 /* in addition to the registers which will be restored by POP_STATE */
150 else {
151 ti->cpu_context = *(struct cpu_context *)regs;
152 childregs->msr |= MSR_UMS;
153 }
154
155 /* FIXME STATE_SAVE_PT_OFFSET; */
156 ti->cpu_context.r1 = (unsigned long)childregs - STATE_SAVE_ARG_SPACE;
 157 /* Note that childregs is a copy of the parent's regs, which were
 158 * saved immediately after entering kernel state and before
 159 * enabling VM. This MSR will be restored in switch_to and
 160 * RETURN(), and we want the right machine state there:
 161 * specifically, interrupts must be disabled before and enabled
 162 * after performing rtbd.
 163 * Compose the right MSR for RETURN(); it also works for
 164 * switch_to, except for VM and UMS.
 165 * Don't touch UMS, CARRY or the cache bits.
 166 * Right now MSR is a copy of the parent's. */
167 childregs->msr |= MSR_BIP;
168 childregs->msr &= ~MSR_EIP;
169 childregs->msr |= MSR_IE;
170 childregs->msr &= ~MSR_VM;
171 childregs->msr |= MSR_VMS;
172 childregs->msr |= MSR_EE; /* exceptions will be enabled*/
173
174 ti->cpu_context.msr = (childregs->msr|MSR_VM);
175 ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
176#endif
132 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; 177 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
133 178
134 if (clone_flags & CLONE_SETTLS) 179 if (clone_flags & CLONE_SETTLS)
@@ -137,6 +182,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
137 return 0; 182 return 0;
138} 183}
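
The MSR juggling in the MMU branch of copy_thread amounts to one recipe: the child runs with interrupts masked (BIP set, EIP clear) until the final rtbd, which flips on IE and, via VMS, the MMU, while EE keeps hardware exceptions live. Restated as a C helper; the bit positions are what asm/registers.h defines to the best of my knowledge, so treat them as an assumption:

/* MSR bit masks, believed to match asm/registers.h */
#define MSR_IE	(1ul << 1)	/* interrupt enable */
#define MSR_BIP	(1ul << 3)	/* break in progress */
#define MSR_EE	(1ul << 8)	/* exception enable */
#define MSR_EIP	(1ul << 9)	/* exception in progress */
#define MSR_VM	(1ul << 13)	/* virtual mode */
#define MSR_VMS	(1ul << 14)	/* virtual mode save */

static unsigned long compose_child_msr(unsigned long parent_msr)
{
	unsigned long msr = parent_msr;

	msr |= MSR_BIP;		/* INTs stay off until the final rtbd... */
	msr &= ~MSR_EIP;
	msr |= MSR_IE;		/* ...and come on after it */
	msr &= ~MSR_VM;
	msr |= MSR_VMS;		/* VM is switched on by rtbd */
	msr |= MSR_EE;		/* exceptions enabled */
	return msr;
}
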
139 184
185#ifndef CONFIG_MMU
140/* 186/*
141 * Return saved PC of a blocked thread. 187 * Return saved PC of a blocked thread.
142 */ 188 */
@@ -151,6 +197,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
151 else 197 else
152 return ctx->r14; 198 return ctx->r14;
153} 199}
200#endif
154 201
155static void kernel_thread_helper(int (*fn)(void *), void *arg) 202static void kernel_thread_helper(int (*fn)(void *), void *arg)
156{ 203{
@@ -173,6 +220,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
173 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, 220 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
174 &regs, 0, NULL, NULL); 221 &regs, 0, NULL, NULL);
175} 222}
223EXPORT_SYMBOL_GPL(kernel_thread);
176 224
177unsigned long get_wchan(struct task_struct *p) 225unsigned long get_wchan(struct task_struct *p)
178{ 226{
@@ -188,3 +236,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
188 regs->r1 = usp; 236 regs->r1 = usp;
189 regs->pt_mode = 0; 237 regs->pt_mode = 0;
190} 238}
239
240#ifdef CONFIG_MMU
241#include <linux/elfcore.h>
242/*
243 * Set up a thread for executing a new program
244 */
245int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
246{
247 return 0; /* MicroBlaze has no separate FPU registers */
248}
249#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 34c48718061a..c005cc6f1aaf 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -509,12 +509,13 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
509 509
510 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); 510 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
511 if (prop) { 511 if (prop) {
512 initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4)); 512 initrd_start = (unsigned long)
513 __va((u32)of_read_ulong(prop, l/4));
513 514
514 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); 515 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
515 if (prop) { 516 if (prop) {
516 initrd_end = (unsigned long) 517 initrd_end = (unsigned long)
 517 __va(of_read_ulong(prop, l/4)); 518 __va((u32)of_read_ulong(prop, l/4));
518 initrd_below_start_ok = 1; 519 initrd_below_start_ok = 1;
519 } else { 520 } else {
520 initrd_start = 0; 521 initrd_start = 0;
@@ -563,7 +564,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
563 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 564 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
564 565
565#ifdef CONFIG_CMDLINE 566#ifdef CONFIG_CMDLINE
567#ifndef CONFIG_CMDLINE_FORCE
566 if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) 568 if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
569#endif
567 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 570 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
568#endif /* CONFIG_CMDLINE */ 571#endif /* CONFIG_CMDLINE */
569 572
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index eb6b41758e23..8709bea09604 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -42,10 +42,6 @@ char cmd_line[COMMAND_LINE_SIZE];
42 42
43void __init setup_arch(char **cmdline_p) 43void __init setup_arch(char **cmdline_p)
44{ 44{
45#ifdef CONFIG_CMDLINE_FORCE
46 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
47 strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
48#endif
49 *cmdline_p = cmd_line; 45 *cmdline_p = cmd_line;
50 46
51 console_verbose(); 47 console_verbose();
@@ -102,14 +98,34 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
102{ 98{
103 unsigned long *src, *dst = (unsigned long *)0x0; 99 unsigned long *src, *dst = (unsigned long *)0x0;
104 100
101 /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the
 102 * end of the kernel. There are two positions we want to check:
 103 * the first is __init_end and the second __bss_start.
104 */
105#ifdef CONFIG_MTD_UCLINUX
106 int romfs_size;
107 unsigned int romfs_base;
108 char *old_klimit = klimit;
109
110 romfs_base = (ram ? ram : (unsigned int)&__init_end);
111 romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
112 if (!romfs_size) {
113 romfs_base = (unsigned int)&__bss_start;
114 romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
115 }
116
117 /* Move ROMFS out of BSS before clearing it */
118 if (romfs_size > 0) {
119 memmove(&_ebss, (int *)romfs_base, romfs_size);
120 klimit += romfs_size;
121 }
122#endif
123
105/* clearing bss section */ 124/* clearing bss section */
106 memset(__bss_start, 0, __bss_stop-__bss_start); 125 memset(__bss_start, 0, __bss_stop-__bss_start);
107 memset(_ssbss, 0, _esbss-_ssbss); 126 memset(_ssbss, 0, _esbss-_ssbss);
108 127
109 /* 128 /* Copy command line passed from bootloader */
110 * Copy command line passed from bootloader, or use default
111 * if none provided, or forced
112 */
113#ifndef CONFIG_CMDLINE_BOOL 129#ifndef CONFIG_CMDLINE_BOOL
114 if (cmdline && cmdline[0] != '\0') 130 if (cmdline && cmdline[0] != '\0')
115 strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE); 131 strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
@@ -126,27 +142,15 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
126 printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt); 142 printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt);
127 143
128#ifdef CONFIG_MTD_UCLINUX 144#ifdef CONFIG_MTD_UCLINUX
129 { 145 early_printk("Found romfs @ 0x%08x (0x%08x)\n",
130 int size; 146 romfs_base, romfs_size);
131 unsigned int romfs_base; 147 early_printk("#### klimit %p ####\n", old_klimit);
132 romfs_base = (ram ? ram : (unsigned int)&__init_end); 148 BUG_ON(romfs_size < 0); /* What else can we do? */
133 /* if CONFIG_MTD_UCLINUX_EBSS is defined, assume ROMFS is at the 149
134 * end of kernel, which is ROMFS_LOCATION defined above. */ 150 early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
135 size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); 151 romfs_size, romfs_base, (unsigned)&_ebss);
136 early_printk("Found romfs @ 0x%08x (0x%08x)\n", 152
137 romfs_base, size); 153 early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
138 early_printk("#### klimit %p ####\n", klimit);
139 BUG_ON(size < 0); /* What else can we do? */
140
141 /* Use memmove to handle likely case of memory overlap */
142 early_printk("Moving 0x%08x bytes from 0x%08x to 0x%08x\n",
143 size, romfs_base, (unsigned)&_ebss);
144 memmove(&_ebss, (int *)romfs_base, size);
145
146 /* update klimit */
147 klimit += PAGE_ALIGN(size);
148 early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
149 }
150#endif 154#endif
151 155
152 for (src = __ivt_start; src < __ivt_end; src++, dst++) 156 for (src = __ivt_start; src < __ivt_end; src++, dst++)
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 40d36931e363..4c0e6521b114 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -152,8 +152,8 @@ struct rt_sigframe {
152 unsigned long tramp[2]; /* signal trampoline */ 152 unsigned long tramp[2]; /* signal trampoline */
153}; 153};
154 154
155static int 155static int restore_sigcontext(struct pt_regs *regs,
156restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *rval_p) 156 struct sigcontext __user *sc, int *rval_p)
157{ 157{
158 unsigned int err = 0; 158 unsigned int err = 0;
159 159
@@ -211,11 +211,10 @@ badframe:
211 211
212asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) 212asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
213{ 213{
214 struct rt_sigframe *frame = 214 struct rt_sigframe __user *frame =
215 (struct rt_sigframe *)(regs->r1 + STATE_SAVE_ARG_SPACE); 215 (struct rt_sigframe __user *)(regs->r1 + STATE_SAVE_ARG_SPACE);
216 216
217 sigset_t set; 217 sigset_t set;
218 stack_t st;
219 int rval; 218 int rval;
220 219
221 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 220 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -233,11 +232,10 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
233 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) 232 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
234 goto badframe; 233 goto badframe;
235 234
236 if (__copy_from_user((void *)&st, &frame->uc.uc_stack, sizeof(st)))
237 goto badframe;
238 /* It is more difficult to avoid calling this function than to 235 /* It is more difficult to avoid calling this function than to
239 call it and ignore errors. */ 236 call it and ignore errors. */
240 do_sigaltstack(&st, NULL, regs->r1); 237 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1))
238 goto badframe;
241 239
242 return rval; 240 return rval;
243 241
@@ -251,7 +249,7 @@ badframe:
251 */ 249 */
252 250
253static int 251static int
254setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, 252setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
255 unsigned long mask) 253 unsigned long mask)
256{ 254{
257 int err = 0; 255 int err = 0;
@@ -278,7 +276,7 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
278/* 276/*
279 * Determine which stack to use.. 277 * Determine which stack to use..
280 */ 278 */
281static inline void * 279static inline void __user *
282get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) 280get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
283{ 281{
284 /* Default to using normal stack */ 282 /* Default to using normal stack */
@@ -287,87 +285,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
287 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp)) 285 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp))
288 sp = current->sas_ss_sp + current->sas_ss_size; 286 sp = current->sas_ss_sp + current->sas_ss_size;
289 287
290 return (void *)((sp - frame_size) & -8UL); 288 return (void __user *)((sp - frame_size) & -8UL);
291}
292
293static void setup_frame(int sig, struct k_sigaction *ka,
294 sigset_t *set, struct pt_regs *regs)
295{
296 struct sigframe *frame;
297 int err = 0;
298 int signal;
299
300 frame = get_sigframe(ka, regs, sizeof(*frame));
301
302 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
303 goto give_sigsegv;
304
305 signal = current_thread_info()->exec_domain
306 && current_thread_info()->exec_domain->signal_invmap
307 && sig < 32
308 ? current_thread_info()->exec_domain->signal_invmap[sig]
309 : sig;
310
311 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
312
313 if (_NSIG_WORDS > 1) {
314 err |= __copy_to_user(frame->extramask, &set->sig[1],
315 sizeof(frame->extramask));
316 }
317
318 /* Set up to return from userspace. If provided, use a stub
319 already in userspace. */
320 /* minus 8 is offset to cater for "rtsd r15,8" offset */
321 if (ka->sa.sa_flags & SA_RESTORER) {
322 regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8;
323 } else {
324 /* Note, these encodings are _big endian_! */
325
326 /* addi r12, r0, __NR_sigreturn */
327 err |= __put_user(0x31800000 | __NR_sigreturn ,
328 frame->tramp + 0);
329 /* brki r14, 0x8 */
330 err |= __put_user(0xb9cc0008, frame->tramp + 1);
331
332 /* Return from sighandler will jump to the tramp.
333 Negative 8 offset because return is rtsd r15, 8 */
334 regs->r15 = ((unsigned long)frame->tramp)-8;
335
336 __invalidate_cache_sigtramp((unsigned long)frame->tramp);
337 }
338
339 if (err)
340 goto give_sigsegv;
341
342 /* Set up registers for signal handler */
343 regs->r1 = (unsigned long) frame - STATE_SAVE_ARG_SPACE;
344
345 /* Signal handler args: */
346 regs->r5 = signal; /* Arg 0: signum */
347 regs->r6 = (unsigned long) &frame->sc; /* arg 1: sigcontext */
348
349 /* Offset of 4 to handle microblaze rtid r14, 0 */
350 regs->pc = (unsigned long)ka->sa.sa_handler;
351
352 set_fs(USER_DS);
353
354#ifdef DEBUG_SIG
355 printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
356 current->comm, current->pid, frame, regs->pc);
357#endif
358
359 return;
360
361give_sigsegv:
362 if (sig == SIGSEGV)
363 ka->sa.sa_handler = SIG_DFL;
364 force_sig(SIGSEGV, current);
365} 289}
366 290
367static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 291static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
368 sigset_t *set, struct pt_regs *regs) 292 sigset_t *set, struct pt_regs *regs)
369{ 293{
370 struct rt_sigframe *frame; 294 struct rt_sigframe __user *frame;
371 int err = 0; 295 int err = 0;
372 int signal; 296 int signal;
373 297
@@ -382,7 +306,8 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
382 ? current_thread_info()->exec_domain->signal_invmap[sig] 306 ? current_thread_info()->exec_domain->signal_invmap[sig]
383 : sig; 307 : sig;
384 308
385 err |= copy_siginfo_to_user(&frame->info, info); 309 if (info)
310 err |= copy_siginfo_to_user(&frame->info, info);
386 311
387 /* Create the ucontext. */ 312 /* Create the ucontext. */
388 err |= __put_user(0, &frame->uc.uc_flags); 313 err |= __put_user(0, &frame->uc.uc_flags);
@@ -463,7 +388,15 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
463 case -ERESTARTNOINTR: 388 case -ERESTARTNOINTR:
464do_restart: 389do_restart:
465 /* offset of 4 bytes to re-execute trap (brki) instruction */ 390 /* offset of 4 bytes to re-execute trap (brki) instruction */
391#ifndef CONFIG_MMU
466 regs->pc -= 4; 392 regs->pc -= 4;
393#else
394 /* offset of 8 bytes required = 4 for rtbd
395 offset, plus 4 for size of
396 "brki r14,8"
397 instruction. */
398 regs->pc -= 8;
399#endif
467 break; 400 break;
468 } 401 }
469} 402}
@@ -480,7 +413,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
480 if (ka->sa.sa_flags & SA_SIGINFO) 413 if (ka->sa.sa_flags & SA_SIGINFO)
481 setup_rt_frame(sig, ka, info, oldset, regs); 414 setup_rt_frame(sig, ka, info, oldset, regs);
482 else 415 else
483 setup_frame(sig, ka, oldset, regs); 416 setup_rt_frame(sig, ka, NULL, oldset, regs);
484 417
485 if (ka->sa.sa_flags & SA_ONESHOT) 418 if (ka->sa.sa_flags & SA_ONESHOT)
486 ka->sa.sa_handler = SIG_DFL; 419 ka->sa.sa_handler = SIG_DFL;
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 3bb42ec924c2..376d1789f7c0 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -2,7 +2,11 @@ ENTRY(sys_call_table)
2 .long sys_restart_syscall /* 0 - old "setup()" system call, 2 .long sys_restart_syscall /* 0 - old "setup()" system call,
3 * used for restarting */ 3 * used for restarting */
4 .long sys_exit 4 .long sys_exit
5 .long sys_ni_syscall /* was fork */ 5#ifdef CONFIG_MMU
6 .long sys_fork_wrapper
7#else
8 .long sys_ni_syscall
9#endif
6 .long sys_read 10 .long sys_read
7 .long sys_write 11 .long sys_write
8 .long sys_open /* 5 */ 12 .long sys_open /* 5 */
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 293ef486013a..eaaaf805f31b 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,14 +22,6 @@ void trap_init(void)
22 __enable_hw_exceptions(); 22 __enable_hw_exceptions();
23} 23}
24 24
25void __bad_xchg(volatile void *ptr, int size)
26{
27 printk(KERN_INFO "xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
28 __builtin_return_address(0), ptr, size);
29 BUG();
30}
31EXPORT_SYMBOL(__bad_xchg);
32
33static int kstack_depth_to_print = 24; 25static int kstack_depth_to_print = 24;
34 26
35static int __init kstack_setup(char *s) 27static int __init kstack_setup(char *s)
@@ -105,3 +97,37 @@ void dump_stack(void)
105 show_stack(NULL, NULL); 97 show_stack(NULL, NULL);
106} 98}
107EXPORT_SYMBOL(dump_stack); 99EXPORT_SYMBOL(dump_stack);
100
101#ifdef CONFIG_MMU
102void __bug(const char *file, int line, void *data)
103{
104 if (data)
105 printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n",
106 file, line, data);
107 else
108 printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
109
110 machine_halt();
111}
112
113int bad_trap(int trap_num, struct pt_regs *regs)
114{
115 printk(KERN_CRIT
116 "unimplemented trap %d called at 0x%08lx, pid %d!\n",
117 trap_num, regs->pc, current->pid);
118 return -ENOSYS;
119}
120
121int debug_trap(struct pt_regs *regs)
122{
123 int i;
124 printk(KERN_CRIT "debug trap\n");
125 for (i = 0; i < 32; i++) {
126 /* printk("r%i:%08X\t",i,regs->gpr[i]); */
127 if ((i % 4) == 3)
128 printk(KERN_CRIT "\n");
129 }
130 printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr);
131 return -ENOSYS;
132}
133#endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 840385e51291..d34d38dcd12c 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -17,8 +17,7 @@ ENTRY(_start)
17jiffies = jiffies_64 + 4; 17jiffies = jiffies_64 + 4;
18 18
19SECTIONS { 19SECTIONS {
20 . = CONFIG_KERNEL_BASE_ADDR; 20 . = CONFIG_KERNEL_START;
21
22 .text : { 21 .text : {
23 _text = . ; 22 _text = . ;
24 _stext = . ; 23 _stext = . ;
@@ -63,7 +62,8 @@ SECTIONS {
63 62
64 _sdata = . ; 63 _sdata = . ;
65 .data ALIGN (4096) : { /* page aligned when MMU used - origin 0x4 */ 64 .data ALIGN (4096) : { /* page aligned when MMU used - origin 0x4 */
66 *(.data) 65 DATA_DATA
66 CONSTRUCTORS
67 } 67 }
68 . = ALIGN(32); 68 . = ALIGN(32);
69 .data.cacheline_aligned : { *(.data.cacheline_aligned) } 69 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
@@ -99,13 +99,13 @@ SECTIONS {
99 . = ALIGN(4096); 99 . = ALIGN(4096);
100 .init.text : { 100 .init.text : {
101 _sinittext = . ; 101 _sinittext = . ;
102 *(.init.text) 102 INIT_TEXT
103 *(.exit.text)
104 *(.exit.data)
105 _einittext = .; 103 _einittext = .;
106 } 104 }
107 105
108 .init.data : { *(.init.data) } 106 .init.data : {
107 INIT_DATA
108 }
109 109
110 . = ALIGN(4); 110 . = ALIGN(4);
111 .init.ivt : { 111 .init.ivt : {
@@ -132,6 +132,8 @@ SECTIONS {
132 __con_initcall_end = .; 132 __con_initcall_end = .;
133 } 133 }
134 134
135 SECURITY_INIT
136
135 __init_end_before_initramfs = .; 137 __init_end_before_initramfs = .;
136 138
137 .init.ramfs ALIGN(4096) : { 139 .init.ramfs ALIGN(4096) : {