path: root/arch/powerpc/kernel
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile    |    6
-rw-r--r--  arch/powerpc/kernel/entry_32.S  | 1002
-rw-r--r--  arch/powerpc/kernel/entry_64.S  |  842
-rw-r--r--  arch/powerpc/kernel/misc_32.S   | 1039
-rw-r--r--  arch/powerpc/kernel/misc_64.S   |  898
-rw-r--r--  arch/powerpc/kernel/systbl.S    |  323
6 files changed, 4108 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 344cab678c6a..0625470a6235 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -17,12 +17,14 @@ extra-$(CONFIG_44x) := head_44x.o
 extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-$(CONFIG_6xx)		+= idle_6xx.o
+extra-$(CONFIG_PPC64)		+= entry_64.o
 extra-$(CONFIG_PPC_FPU)	+= fpu.o
 extra-y			+= vmlinux.lds
 
-obj-y				:= traps.o prom.o semaphore.o
-obj-$(CONFIG_PPC32)		+= setup_32.o process.o
+obj-y				+= traps.o prom.o semaphore.o
+obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o misc_32.o
 obj-$(CONFIG_PPC64)		+= idle_power4.o
+obj-$(CONFIG_PPC64)		+= misc_64.o
 ifeq ($(CONFIG_PPC32),y)
 obj-$(CONFIG_PPC_OF)		+= prom_init.o of_device.o
 obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
new file mode 100644
index 000000000000..094eea6fbd69
--- /dev/null
+++ b/arch/powerpc/kernel/entry_32.S
@@ -0,0 +1,1002 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/errno.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x) li r,(x)
45#endif
46
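
For reference (not part of this file): the macro above needs the lis/ori form because li only takes a 16-bit signed immediate, so a constant with bits above 0xffff (MSR_CE on 4xx/Book-E) has to be assembled from two halves. A minimal userspace sketch of that split, using a made-up value rather than the real MSR_KERNEL:

    #include <stdio.h>

    int main(void)
    {
        unsigned int msr_kernel = 0x00021000u;    /* illustrative value only */
        unsigned int hi = msr_kernel >> 16;       /* (x)@h, loaded by lis    */
        unsigned int lo = msr_kernel & 0xffffu;   /* (x)@l, or'ed in by ori  */
        unsigned int rebuilt = (hi << 16) | lo;
        printf("%#x -> hi=%#x lo=%#x rebuilt=%#x\n", msr_kernel, hi, lo, rebuilt);
        return 0;
    }
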
47#ifdef CONFIG_BOOKE
48#include "head_booke.h"
49#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
50 mtspr exc_level##_SPRG,r8; \
51 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
52 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
53 stw r0,GPR10(r11); \
54 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
55 stw r0,GPR11(r11); \
56 mfspr r8,exc_level##_SPRG
57
58 .globl mcheck_transfer_to_handler
59mcheck_transfer_to_handler:
60 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61 b transfer_to_handler_full
62
63 .globl debug_transfer_to_handler
64debug_transfer_to_handler:
65 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66 b transfer_to_handler_full
67
68 .globl crit_transfer_to_handler
69crit_transfer_to_handler:
70 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71 /* fall through */
72#endif
73
74#ifdef CONFIG_40x
75 .globl crit_transfer_to_handler
76crit_transfer_to_handler:
77 lwz r0,crit_r10@l(0)
78 stw r0,GPR10(r11)
79 lwz r0,crit_r11@l(0)
80 stw r0,GPR11(r11)
81 /* fall through */
82#endif
83
84/*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
91 .globl transfer_to_handler_full
92transfer_to_handler_full:
93 SAVE_NVGPRS(r11)
94 /* fall through */
95
96 .globl transfer_to_handler
97transfer_to_handler:
98 stw r2,GPR2(r11)
99 stw r12,_NIP(r11)
100 stw r9,_MSR(r11)
101 andi. r2,r9,MSR_PR
102 mfctr r12
103 mfspr r2,SPRN_XER
104 stw r12,_CTR(r11)
105 stw r2,_XER(r11)
106 mfspr r12,SPRN_SPRG3
107 addi r2,r12,-THREAD
108 tovirt(r2,r2) /* set r2 to current */
109 beq 2f /* if from user, fix up THREAD.regs */
110 addi r11,r1,STACK_FRAME_OVERHEAD
111 stw r11,PT_REGS(r12)
112#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113 /* Check to see if the dbcr0 register is set up to debug. Use the
114 single-step bit to do this. */
115 lwz r12,THREAD_DBCR0(r12)
116 andis. r12,r12,DBCR0_IC@h
117 beq+ 3f
118 /* From user and task is ptraced - load up global dbcr0 */
119 li r12,-1 /* clear all pending debug events */
120 mtspr SPRN_DBSR,r12
121 lis r11,global_dbcr0@ha
122 tophys(r11,r11)
123 addi r11,r11,global_dbcr0@l
124 lwz r12,0(r11)
125 mtspr SPRN_DBCR0,r12
126 lwz r12,4(r11)
127 addi r12,r12,-1
128 stw r12,4(r11)
129#endif
130 b 3f
1312: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow
133 */
134#ifdef CONFIG_6xx
135 mfspr r11,SPRN_HID0
136 mtcr r11
137BEGIN_FTR_SECTION
138 bt- 8,power_save_6xx_restore /* Check DOZE */
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141 bt- 9,power_save_6xx_restore /* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont
145transfer_to_handler_cont:
146 lwz r11,THREAD_INFO-THREAD(r12)
147 cmplw r1,r11 /* if r1 <= current->thread_info */
148 ble- stack_ovf /* then the kernel stack overflowed */
1493:
150 mflr r9
151 lwz r11,0(r9) /* virtual address of handler */
152 lwz r9,4(r9) /* where to go when done */
153 FIX_SRR1(r10,r12)
154 mtspr SPRN_SRR0,r11
155 mtspr SPRN_SRR1,r10
156 mtlr r9
157 SYNC
158 RFI /* jump to handler, enable MMU */
159
160/*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164stack_ovf:
165 /* sometimes we use a statically-allocated stack, which is OK. */
166 lis r11,_end@h
167 ori r11,r11,_end@l
168 cmplw r1,r11
169 ble 3b /* r1 <= &_end is OK */
170 SAVE_NVGPRS(r11)
171 addi r3,r1,STACK_FRAME_OVERHEAD
172 lis r1,init_thread_union@ha
173 addi r1,r1,init_thread_union@l
174 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175 lis r9,StackOverflow@ha
176 addi r9,r9,StackOverflow@l
177 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178 FIX_SRR1(r10,r12)
179 mtspr SPRN_SRR0,r9
180 mtspr SPRN_SRR1,r10
181 SYNC
182 RFI
183
184/*
185 * Handle a system call.
186 */
187 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
188 .stabs "entry_32.S",N_SO,0,0,0f
1890:
190
191_GLOBAL(DoSyscall)
192 stw r0,THREAD+LAST_SYSCALL(r2)
193 stw r3,ORIG_GPR3(r1)
194 li r12,0
195 stw r12,RESULT(r1)
196 lwz r11,_CCR(r1) /* Clear SO bit in CR */
197 rlwinm r11,r11,0,4,2
198 stw r11,_CCR(r1)
199#ifdef SHOW_SYSCALLS
200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,18 /* current_thread_info() */
203 lwz r11,TI_LOCAL_FLAGS(r10)
204 rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR
205 stw r11,TI_LOCAL_FLAGS(r10)
206 lwz r11,TI_FLAGS(r10)
207 andi. r11,r11,_TIF_SYSCALL_T_OR_A
208 bne- syscall_dotrace
209syscall_dotrace_cont:
210 cmplwi 0,r0,NR_syscalls
211 lis r10,sys_call_table@h
212 ori r10,r10,sys_call_table@l
213 slwi r0,r0,2
214 bge- 66f
215 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
216 mtlr r10
217 addi r9,r1,STACK_FRAME_OVERHEAD
218 PPC440EP_ERR42
219 blrl /* Call handler */
220 .globl ret_from_syscall
221ret_from_syscall:
222#ifdef SHOW_SYSCALLS
223 bl do_show_syscall_exit
224#endif
225 mr r6,r3
226 li r11,-_LAST_ERRNO
227 cmplw 0,r3,r11
228 rlwinm r12,r1,0,0,18 /* current_thread_info() */
229 blt+ 30f
230 lwz r11,TI_LOCAL_FLAGS(r12)
231 andi. r11,r11,_TIFL_FORCE_NOERROR
232 bne 30f
233 neg r3,r3
234 lwz r10,_CCR(r1) /* Set SO bit in CR */
235 oris r10,r10,0x1000
236 stw r10,_CCR(r1)
237
238 /* disable interrupts so current_thread_info()->flags can't change */
23930: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
240 SYNC
241 MTMSRD(r10)
242 lwz r9,TI_FLAGS(r12)
243 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
244 bne- syscall_exit_work
245syscall_exit_cont:
246#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
247 /* If the process has its own DBCR0 value, load it up. The single
248 step bit tells us that dbcr0 should be loaded. */
249 lwz r0,THREAD+THREAD_DBCR0(r2)
250 andis. r10,r0,DBCR0_IC@h
251 bnel- load_dbcr0
252#endif
253 stwcx. r0,0,r1 /* to clear the reservation */
254 lwz r4,_LINK(r1)
255 lwz r5,_CCR(r1)
256 mtlr r4
257 mtcr r5
258 lwz r7,_NIP(r1)
259 lwz r8,_MSR(r1)
260 FIX_SRR1(r8, r0)
261 lwz r2,GPR2(r1)
262 lwz r1,GPR1(r1)
263 mtspr SPRN_SRR0,r7
264 mtspr SPRN_SRR1,r8
265 SYNC
266 RFI
267
26866: li r3,-ENOSYS
269 b ret_from_syscall
270
271 .globl ret_from_fork
272ret_from_fork:
273 REST_NVGPRS(r1)
274 bl schedule_tail
275 li r3,0
276 b ret_from_syscall
277
278/* Traced system call support */
279syscall_dotrace:
280 SAVE_NVGPRS(r1)
281 li r0,0xc00
282 stw r0,TRAP(r1)
283 addi r3,r1,STACK_FRAME_OVERHEAD
284 bl do_syscall_trace_enter
285 lwz r0,GPR0(r1) /* Restore original registers */
286 lwz r3,GPR3(r1)
287 lwz r4,GPR4(r1)
288 lwz r5,GPR5(r1)
289 lwz r6,GPR6(r1)
290 lwz r7,GPR7(r1)
291 lwz r8,GPR8(r1)
292 REST_NVGPRS(r1)
293 b syscall_dotrace_cont
294
295syscall_exit_work:
296 stw r6,RESULT(r1) /* Save result */
297 stw r3,GPR3(r1) /* Update return value */
298 andi. r0,r9,_TIF_SYSCALL_T_OR_A
299 beq 5f
300 ori r10,r10,MSR_EE
301 SYNC
302 MTMSRD(r10) /* re-enable interrupts */
303 lwz r4,TRAP(r1)
304 andi. r4,r4,1
305 beq 4f
306 SAVE_NVGPRS(r1)
307 li r4,0xc00
308 stw r4,TRAP(r1)
3094:
310 addi r3,r1,STACK_FRAME_OVERHEAD
311 bl do_syscall_trace_leave
312 REST_NVGPRS(r1)
3132:
314 lwz r3,GPR3(r1)
315 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
316 SYNC
317 MTMSRD(r10) /* disable interrupts again */
318 rlwinm r12,r1,0,0,18 /* current_thread_info() */
319 lwz r9,TI_FLAGS(r12)
3205:
321 andi. r0,r9,_TIF_NEED_RESCHED
322 bne 1f
323 lwz r5,_MSR(r1)
324 andi. r5,r5,MSR_PR
325 beq syscall_exit_cont
326 andi. r0,r9,_TIF_SIGPENDING
327 beq syscall_exit_cont
328 b do_user_signal
3291:
330 ori r10,r10,MSR_EE
331 SYNC
332 MTMSRD(r10) /* re-enable interrupts */
333 bl schedule
334 b 2b
335
336#ifdef SHOW_SYSCALLS
337do_show_syscall:
338#ifdef SHOW_SYSCALLS_TASK
339 lis r11,show_syscalls_task@ha
340 lwz r11,show_syscalls_task@l(r11)
341 cmp 0,r2,r11
342 bnelr
343#endif
344 stw r31,GPR31(r1)
345 mflr r31
346 lis r3,7f@ha
347 addi r3,r3,7f@l
348 lwz r4,GPR0(r1)
349 lwz r5,GPR3(r1)
350 lwz r6,GPR4(r1)
351 lwz r7,GPR5(r1)
352 lwz r8,GPR6(r1)
353 lwz r9,GPR7(r1)
354 bl printk
355 lis r3,77f@ha
356 addi r3,r3,77f@l
357 lwz r4,GPR8(r1)
358 mr r5,r2
359 bl printk
360 lwz r0,GPR0(r1)
361 lwz r3,GPR3(r1)
362 lwz r4,GPR4(r1)
363 lwz r5,GPR5(r1)
364 lwz r6,GPR6(r1)
365 lwz r7,GPR7(r1)
366 lwz r8,GPR8(r1)
367 mtlr r31
368 lwz r31,GPR31(r1)
369 blr
370
371do_show_syscall_exit:
372#ifdef SHOW_SYSCALLS_TASK
373 lis r11,show_syscalls_task@ha
374 lwz r11,show_syscalls_task@l(r11)
375 cmp 0,r2,r11
376 bnelr
377#endif
378 stw r31,GPR31(r1)
379 mflr r31
380 stw r3,RESULT(r1) /* Save result */
381 mr r4,r3
382 lis r3,79f@ha
383 addi r3,r3,79f@l
384 bl printk
385 lwz r3,RESULT(r1)
386 mtlr r31
387 lwz r31,GPR31(r1)
388 blr
389
3907: .string "syscall %d(%x, %x, %x, %x, %x, "
39177: .string "%x), current=%p\n"
39279: .string " -> %x\n"
393 .align 2,0
394
395#ifdef SHOW_SYSCALLS_TASK
396 .data
397 .globl show_syscalls_task
398show_syscalls_task:
399 .long -1
400 .text
401#endif
402#endif /* SHOW_SYSCALLS */
403
404/*
405 * The sigsuspend and rt_sigsuspend system calls can call do_signal
406 * and thus put the process into the stopped state where we might
407 * want to examine its user state with ptrace. Therefore we need
408 * to save all the nonvolatile registers (r13 - r31) before calling
409 * the C code.
410 */
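
The "clear LSB" comments below rely on a simple convention: bit 0 of the saved trap number doubles as a "non-volatile GPRs not yet saved" flag, and clearing it after SAVE_NVGPRS lets later code skip a second save. A small standalone sketch of that convention (0xc01 is the syscall trap value used elsewhere in these files; this is illustrative C, not kernel code):

    #include <stdio.h>

    static unsigned int mark_full_regs(unsigned int trap)
    {
        return trap & ~1u;               /* rlwinm r0,r0,0,0,30 */
    }

    static int nvgprs_already_saved(unsigned int trap)
    {
        return (trap & 1u) == 0;         /* andi. r0,r11,1 ; beqlr- */
    }

    int main(void)
    {
        unsigned int trap = 0xc01;       /* partial frame: LSB set */
        printf("saved before: %d\n", nvgprs_already_saved(trap));
        trap = mark_full_regs(trap);
        printf("saved after:  %d\n", nvgprs_already_saved(trap));
        return 0;
    }
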
411 .globl ppc_sigsuspend
412ppc_sigsuspend:
413 SAVE_NVGPRS(r1)
414 lwz r0,TRAP(r1)
415 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
416 stw r0,TRAP(r1) /* register set saved */
417 b sys_sigsuspend
418
419 .globl ppc_rt_sigsuspend
420ppc_rt_sigsuspend:
421 SAVE_NVGPRS(r1)
422 lwz r0,TRAP(r1)
423 rlwinm r0,r0,0,0,30
424 stw r0,TRAP(r1)
425 b sys_rt_sigsuspend
426
427 .globl ppc_fork
428ppc_fork:
429 SAVE_NVGPRS(r1)
430 lwz r0,TRAP(r1)
431 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
432 stw r0,TRAP(r1) /* register set saved */
433 b sys_fork
434
435 .globl ppc_vfork
436ppc_vfork:
437 SAVE_NVGPRS(r1)
438 lwz r0,TRAP(r1)
439 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
440 stw r0,TRAP(r1) /* register set saved */
441 b sys_vfork
442
443 .globl ppc_clone
444ppc_clone:
445 SAVE_NVGPRS(r1)
446 lwz r0,TRAP(r1)
447 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
448 stw r0,TRAP(r1) /* register set saved */
449 b sys_clone
450
451 .globl ppc_swapcontext
452ppc_swapcontext:
453 SAVE_NVGPRS(r1)
454 lwz r0,TRAP(r1)
455 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
456 stw r0,TRAP(r1) /* register set saved */
457 b sys_swapcontext
458
459/*
460 * Top-level page fault handling.
461 * This is in assembler because if do_page_fault tells us that
462 * it is a bad kernel page fault, we want to save the non-volatile
463 * registers before calling bad_page_fault.
464 */
465 .globl handle_page_fault
466handle_page_fault:
467 stw r4,_DAR(r1)
468 addi r3,r1,STACK_FRAME_OVERHEAD
469 bl do_page_fault
470 cmpwi r3,0
471 beq+ ret_from_except
472 SAVE_NVGPRS(r1)
473 lwz r0,TRAP(r1)
474 clrrwi r0,r0,1
475 stw r0,TRAP(r1)
476 mr r5,r3
477 addi r3,r1,STACK_FRAME_OVERHEAD
478 lwz r4,_DAR(r1)
479 bl bad_page_fault
480 b ret_from_except_full
481
482/*
483 * This routine switches between two different tasks. The process
484 * state of one is saved on its kernel stack. Then the state
485 * of the other is restored from its kernel stack. The memory
486 * management hardware is updated to the second process's state.
487 * Finally, we can return to the second process.
488 * On entry, r3 points to the THREAD for the current task, r4
489 * points to the THREAD for the new task.
490 *
491 * This routine is always called with interrupts disabled.
492 *
493 * Note: there are two ways to get to the "going out" portion
494 * of this code; either by coming in via the entry (_switch)
495 * or via "fork" which must set up an environment equivalent
496 * to the "_switch" path. If you change this, you'll have to
497 * change the fork code also.
498 *
499 * The code which creates the new task context is in 'copy_thread'
500 * in arch/ppc/kernel/process.c
501 */
502_GLOBAL(_switch)
503 stwu r1,-INT_FRAME_SIZE(r1)
504 mflr r0
505 stw r0,INT_FRAME_SIZE+4(r1)
506 /* r3-r12 are caller saved -- Cort */
507 SAVE_NVGPRS(r1)
508 stw r0,_NIP(r1) /* Return to switch caller */
509 mfmsr r11
510 li r0,MSR_FP /* Disable floating-point */
511#ifdef CONFIG_ALTIVEC
512BEGIN_FTR_SECTION
513 oris r0,r0,MSR_VEC@h /* Disable altivec */
514 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
515 stw r12,THREAD+THREAD_VRSAVE(r2)
516END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
517#endif /* CONFIG_ALTIVEC */
518#ifdef CONFIG_SPE
519 oris r0,r0,MSR_SPE@h /* Disable SPE */
520 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
521 stw r12,THREAD+THREAD_SPEFSCR(r2)
522#endif /* CONFIG_SPE */
523 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
524 beq+ 1f
525 andc r11,r11,r0
526 MTMSRD(r11)
527 isync
5281: stw r11,_MSR(r1)
529 mfcr r10
530 stw r10,_CCR(r1)
531 stw r1,KSP(r3) /* Set old stack pointer */
532
533#ifdef CONFIG_SMP
534 /* We need a sync somewhere here to make sure that if the
535 * previous task gets rescheduled on another CPU, it sees all
536 * stores it has performed on this one.
537 */
538 sync
539#endif /* CONFIG_SMP */
540
541 tophys(r0,r4)
542 CLR_TOP32(r0)
543 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
544 lwz r1,KSP(r4) /* Load new stack pointer */
545
546 /* save the old current 'last' for return value */
547 mr r3,r2
548 addi r2,r4,-THREAD /* Update current */
549
550#ifdef CONFIG_ALTIVEC
551BEGIN_FTR_SECTION
552 lwz r0,THREAD+THREAD_VRSAVE(r2)
553 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
554END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
555#endif /* CONFIG_ALTIVEC */
556#ifdef CONFIG_SPE
557 lwz r0,THREAD+THREAD_SPEFSCR(r2)
558 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
559#endif /* CONFIG_SPE */
560
561 lwz r0,_CCR(r1)
562 mtcrf 0xFF,r0
563 /* r3-r12 are destroyed -- Cort */
564 REST_NVGPRS(r1)
565
566 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
567 mtlr r4
568 addi r1,r1,INT_FRAME_SIZE
569 blr
570
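
As a rough userspace analogy for what _switch above does (save the outgoing context's registers and stack pointer, load the incoming one's, and resume there), the POSIX ucontext API performs the same save/restore dance; this is only an illustration, not kernel code:

    #include <stdio.h>
    #include <ucontext.h>

    static ucontext_t main_ctx, task_ctx;
    static char task_stack[64 * 1024];

    static void task_fn(void)
    {
        puts("running on the other stack");
        swapcontext(&task_ctx, &main_ctx);   /* save ours, resume the original */
    }

    int main(void)
    {
        getcontext(&task_ctx);
        task_ctx.uc_stack.ss_sp   = task_stack;
        task_ctx.uc_stack.ss_size = sizeof(task_stack);
        task_ctx.uc_link          = &main_ctx;
        makecontext(&task_ctx, task_fn, 0);

        puts("before the switch");
        swapcontext(&main_ctx, &task_ctx);   /* like _switch: save old, load new */
        puts("back in the original context");
        return 0;
    }
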
571 .globl fast_exception_return
572fast_exception_return:
573#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
574 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
575 beq 1f /* if not, we've got problems */
576#endif
577
5782: REST_4GPRS(3, r11)
579 lwz r10,_CCR(r11)
580 REST_GPR(1, r11)
581 mtcr r10
582 lwz r10,_LINK(r11)
583 mtlr r10
584 REST_GPR(10, r11)
585 mtspr SPRN_SRR1,r9
586 mtspr SPRN_SRR0,r12
587 REST_GPR(9, r11)
588 REST_GPR(12, r11)
589 lwz r11,GPR11(r11)
590 SYNC
591 RFI
592
593#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
594/* check if the exception happened in a restartable section */
5951: lis r3,exc_exit_restart_end@ha
596 addi r3,r3,exc_exit_restart_end@l
597 cmplw r12,r3
598 bge 3f
599 lis r4,exc_exit_restart@ha
600 addi r4,r4,exc_exit_restart@l
601 cmplw r12,r4
602 blt 3f
603 lis r3,fee_restarts@ha
604 tophys(r3,r3)
605 lwz r5,fee_restarts@l(r3)
606 addi r5,r5,1
607 stw r5,fee_restarts@l(r3)
608 mr r12,r4 /* restart at exc_exit_restart */
609 b 2b
610
611 .comm fee_restarts,4
612
613/* aargh, a nonrecoverable interrupt, panic */
614/* aargh, we don't know which trap this is */
615/* but the 601 doesn't implement the RI bit, so assume it's OK */
6163:
617BEGIN_FTR_SECTION
618 b 2b
619END_FTR_SECTION_IFSET(CPU_FTR_601)
620 li r10,-1
621 stw r10,TRAP(r11)
622 addi r3,r1,STACK_FRAME_OVERHEAD
623 lis r10,MSR_KERNEL@h
624 ori r10,r10,MSR_KERNEL@l
625 bl transfer_to_handler_full
626 .long nonrecoverable_exception
627 .long ret_from_except
628#endif
629
630 .globl sigreturn_exit
631sigreturn_exit:
632 subi r1,r3,STACK_FRAME_OVERHEAD
633 rlwinm r12,r1,0,0,18 /* current_thread_info() */
634 lwz r9,TI_FLAGS(r12)
635 andi. r0,r9,_TIF_SYSCALL_T_OR_A
636 bnel- do_syscall_trace_leave
637 /* fall through */
638
639 .globl ret_from_except_full
640ret_from_except_full:
641 REST_NVGPRS(r1)
642 /* fall through */
643
644 .globl ret_from_except
645ret_from_except:
646 /* Hard-disable interrupts so that current_thread_info()->flags
647 * can't change between when we test it and when we return
648 * from the interrupt. */
649 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
650 SYNC /* Some chip revs have problems here... */
651 MTMSRD(r10) /* disable interrupts */
652
653 lwz r3,_MSR(r1) /* Returning to user mode? */
654 andi. r0,r3,MSR_PR
655 beq resume_kernel
656
657user_exc_return: /* r10 contains MSR_KERNEL here */
658 /* Check current_thread_info()->flags */
659 rlwinm r9,r1,0,0,18
660 lwz r9,TI_FLAGS(r9)
661 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
662 bne do_work
663
664restore_user:
665#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
666 /* Check whether this process has its own DBCR0 value. The single
667 step bit tells us that dbcr0 should be loaded. */
668 lwz r0,THREAD+THREAD_DBCR0(r2)
669 andis. r10,r0,DBCR0_IC@h
670 bnel- load_dbcr0
671#endif
672
673#ifdef CONFIG_PREEMPT
674 b restore
675
676/* N.B. the only way to get here is from the beq following ret_from_except. */
677resume_kernel:
678 /* check current_thread_info->preempt_count */
679 rlwinm r9,r1,0,0,18
680 lwz r0,TI_PREEMPT(r9)
681 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
682 bne restore
683 lwz r0,TI_FLAGS(r9)
684 andi. r0,r0,_TIF_NEED_RESCHED
685 beq+ restore
686 andi. r0,r3,MSR_EE /* interrupts off? */
687 beq restore /* don't schedule if so */
6881: bl preempt_schedule_irq
689 rlwinm r9,r1,0,0,18
690 lwz r3,TI_FLAGS(r9)
691 andi. r0,r3,_TIF_NEED_RESCHED
692 bne- 1b
693#else
694resume_kernel:
695#endif /* CONFIG_PREEMPT */
696
697 /* interrupts are hard-disabled at this point */
698restore:
699 lwz r0,GPR0(r1)
700 lwz r2,GPR2(r1)
701 REST_4GPRS(3, r1)
702 REST_2GPRS(7, r1)
703
704 lwz r10,_XER(r1)
705 lwz r11,_CTR(r1)
706 mtspr SPRN_XER,r10
707 mtctr r11
708
709 PPC405_ERR77(0,r1)
710 stwcx. r0,0,r1 /* to clear the reservation */
711
712#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
713 lwz r9,_MSR(r1)
714 andi. r10,r9,MSR_RI /* check if this exception occurred */
715 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
716
717 lwz r10,_CCR(r1)
718 lwz r11,_LINK(r1)
719 mtcrf 0xFF,r10
720 mtlr r11
721
722 /*
723 * Once we put values in SRR0 and SRR1, we are in a state
724 * where exceptions are not recoverable, since taking an
725 * exception will trash SRR0 and SRR1. Therefore we clear the
726 * MSR:RI bit to indicate this. If we do take an exception,
727 * we can't return to the point of the exception but we
728 * can restart the exception exit path at the label
729 * exc_exit_restart below. -- paulus
730 */
731 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
732 SYNC
733 MTMSRD(r10) /* clear the RI bit */
734 .globl exc_exit_restart
735exc_exit_restart:
736 lwz r9,_MSR(r1)
737 lwz r12,_NIP(r1)
738 FIX_SRR1(r9,r10)
739 mtspr SPRN_SRR0,r12
740 mtspr SPRN_SRR1,r9
741 REST_4GPRS(9, r1)
742 lwz r1,GPR1(r1)
743 .globl exc_exit_restart_end
744exc_exit_restart_end:
745 SYNC
746 RFI
747
748#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
749 /*
750 * This is a bit different on 4xx/Book-E because it doesn't have
751 * the RI bit in the MSR.
752 * The TLB miss handler checks if we have interrupted
753 * the exception exit path and restarts it if so
754 * (well maybe one day it will... :).
755 */
756 lwz r11,_LINK(r1)
757 mtlr r11
758 lwz r10,_CCR(r1)
759 mtcrf 0xff,r10
760 REST_2GPRS(9, r1)
761 .globl exc_exit_restart
762exc_exit_restart:
763 lwz r11,_NIP(r1)
764 lwz r12,_MSR(r1)
765exc_exit_start:
766 mtspr SPRN_SRR0,r11
767 mtspr SPRN_SRR1,r12
768 REST_2GPRS(11, r1)
769 lwz r1,GPR1(r1)
770 .globl exc_exit_restart_end
771exc_exit_restart_end:
772 PPC405_ERR77_SYNC
773 rfi
774 b . /* prevent prefetch past rfi */
775
776/*
777 * Returning from a critical interrupt in user mode doesn't need
778 * to be any different from a normal exception. For a critical
779 * interrupt in the kernel, we just return (without checking for
780 * preemption) since the interrupt may have happened at some crucial
781 * place (e.g. inside the TLB miss handler), and because we will be
782 * running with r1 pointing into critical_stack, not the current
783 * process's kernel stack (and therefore current_thread_info() will
784 * give the wrong answer).
785 * We have to restore various SPRs that may have been in use at the
786 * time of the critical interrupt.
787 *
788 */
789#ifdef CONFIG_40x
790#define PPC_40x_TURN_OFF_MSR_DR \
791 /* avoid any possible TLB misses here by turning off MSR.DR, we \
792 * assume the instructions here are mapped by a pinned TLB entry */ \
793 li r10,MSR_IR; \
794 mtmsr r10; \
795 isync; \
796 tophys(r1, r1);
797#else
798#define PPC_40x_TURN_OFF_MSR_DR
799#endif
800
801#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
802 REST_NVGPRS(r1); \
803 lwz r3,_MSR(r1); \
804 andi. r3,r3,MSR_PR; \
805 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
806 bne user_exc_return; \
807 lwz r0,GPR0(r1); \
808 lwz r2,GPR2(r1); \
809 REST_4GPRS(3, r1); \
810 REST_2GPRS(7, r1); \
811 lwz r10,_XER(r1); \
812 lwz r11,_CTR(r1); \
813 mtspr SPRN_XER,r10; \
814 mtctr r11; \
815 PPC405_ERR77(0,r1); \
816 stwcx. r0,0,r1; /* to clear the reservation */ \
817 lwz r11,_LINK(r1); \
818 mtlr r11; \
819 lwz r10,_CCR(r1); \
820 mtcrf 0xff,r10; \
821 PPC_40x_TURN_OFF_MSR_DR; \
822 lwz r9,_DEAR(r1); \
823 lwz r10,_ESR(r1); \
824 mtspr SPRN_DEAR,r9; \
825 mtspr SPRN_ESR,r10; \
826 lwz r11,_NIP(r1); \
827 lwz r12,_MSR(r1); \
828 mtspr exc_lvl_srr0,r11; \
829 mtspr exc_lvl_srr1,r12; \
830 lwz r9,GPR9(r1); \
831 lwz r12,GPR12(r1); \
832 lwz r10,GPR10(r1); \
833 lwz r11,GPR11(r1); \
834 lwz r1,GPR1(r1); \
835 PPC405_ERR77_SYNC; \
836 exc_lvl_rfi; \
837 b .; /* prevent prefetch past exc_lvl_rfi */
838
839 .globl ret_from_crit_exc
840ret_from_crit_exc:
841 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
842
843#ifdef CONFIG_BOOKE
844 .globl ret_from_debug_exc
845ret_from_debug_exc:
846 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
847
848 .globl ret_from_mcheck_exc
849ret_from_mcheck_exc:
850 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
851#endif /* CONFIG_BOOKE */
852
853/*
854 * Load the DBCR0 value for a task that is being ptraced,
855 * having first saved away the global DBCR0. Note that r0
856 * has the dbcr0 value to set upon entry to this.
857 */
858load_dbcr0:
859 mfmsr r10 /* first disable debug exceptions */
860 rlwinm r10,r10,0,~MSR_DE
861 mtmsr r10
862 isync
863 mfspr r10,SPRN_DBCR0
864 lis r11,global_dbcr0@ha
865 addi r11,r11,global_dbcr0@l
866 stw r10,0(r11)
867 mtspr SPRN_DBCR0,r0
868 lwz r10,4(r11)
869 addi r10,r10,1
870 stw r10,4(r11)
871 li r11,-1
872 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
873 blr
874
875 .comm global_dbcr0,8
876#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
877
878do_work: /* r10 contains MSR_KERNEL here */
879 andi. r0,r9,_TIF_NEED_RESCHED
880 beq do_user_signal
881
882do_resched: /* r10 contains MSR_KERNEL here */
883 ori r10,r10,MSR_EE
884 SYNC
885 MTMSRD(r10) /* hard-enable interrupts */
886 bl schedule
887recheck:
888 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
889 SYNC
890 MTMSRD(r10) /* disable interrupts */
891 rlwinm r9,r1,0,0,18
892 lwz r9,TI_FLAGS(r9)
893 andi. r0,r9,_TIF_NEED_RESCHED
894 bne- do_resched
895 andi. r0,r9,_TIF_SIGPENDING
896 beq restore_user
897do_user_signal: /* r10 contains MSR_KERNEL here */
898 ori r10,r10,MSR_EE
899 SYNC
900 MTMSRD(r10) /* hard-enable interrupts */
901 /* save r13-r31 in the exception frame, if not already done */
902 lwz r3,TRAP(r1)
903 andi. r0,r3,1
904 beq 2f
905 SAVE_NVGPRS(r1)
906 rlwinm r3,r3,0,0,30
907 stw r3,TRAP(r1)
9082: li r3,0
909 addi r4,r1,STACK_FRAME_OVERHEAD
910 bl do_signal
911 REST_NVGPRS(r1)
912 b recheck
913
914/*
915 * We come here when we are at the end of handling an exception
916 * that occurred at a place where taking an exception will lose
917 * state information, such as the contents of SRR0 and SRR1.
918 */
919nonrecoverable:
920 lis r10,exc_exit_restart_end@ha
921 addi r10,r10,exc_exit_restart_end@l
922 cmplw r12,r10
923 bge 3f
924 lis r11,exc_exit_restart@ha
925 addi r11,r11,exc_exit_restart@l
926 cmplw r12,r11
927 blt 3f
928 lis r10,ee_restarts@ha
929 lwz r12,ee_restarts@l(r10)
930 addi r12,r12,1
931 stw r12,ee_restarts@l(r10)
932 mr r12,r11 /* restart at exc_exit_restart */
933 blr
9343: /* OK, we can't recover, kill this process */
935 /* but the 601 doesn't implement the RI bit, so assume it's OK */
936BEGIN_FTR_SECTION
937 blr
938END_FTR_SECTION_IFSET(CPU_FTR_601)
939 lwz r3,TRAP(r1)
940 andi. r0,r3,1
941 beq 4f
942 SAVE_NVGPRS(r1)
943 rlwinm r3,r3,0,0,30
944 stw r3,TRAP(r1)
9454: addi r3,r1,STACK_FRAME_OVERHEAD
946 bl nonrecoverable_exception
947 /* shouldn't return */
948 b 4b
949
950 .comm ee_restarts,4
951
952/*
953 * PROM code for specific machines follows. Put it
954 * here so it's easy to add arch-specific sections later.
955 * -- Cort
956 */
957#ifdef CONFIG_PPC_OF
958/*
959 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
960 * called with the MMU off.
961 */
962_GLOBAL(enter_rtas)
963 stwu r1,-INT_FRAME_SIZE(r1)
964 mflr r0
965 stw r0,INT_FRAME_SIZE+4(r1)
966 lis r4,rtas_data@ha
967 lwz r4,rtas_data@l(r4)
968 lis r6,1f@ha /* physical return address for rtas */
969 addi r6,r6,1f@l
970 tophys(r6,r6)
971 tophys(r7,r1)
972 lis r8,rtas_entry@ha
973 lwz r8,rtas_entry@l(r8)
974 mfmsr r9
975 stw r9,8(r1)
976 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
977 SYNC /* disable interrupts so SRR0/1 */
978 MTMSRD(r0) /* don't get trashed */
979 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
980 mtlr r6
981 CLR_TOP32(r7)
982 mtspr SPRN_SPRG2,r7
983 mtspr SPRN_SRR0,r8
984 mtspr SPRN_SRR1,r9
985 RFI
9861: tophys(r9,r1)
987 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
988 lwz r9,8(r9) /* original msr value */
989 FIX_SRR1(r9,r0)
990 addi r1,r1,INT_FRAME_SIZE
991 li r0,0
992 mtspr SPRN_SPRG2,r0
993 mtspr SPRN_SRR0,r8
994 mtspr SPRN_SRR1,r9
995 RFI /* return to caller */
996
997 .globl machine_check_in_rtas
998machine_check_in_rtas:
999 twi 31,0,0
1000 /* XXX load up BATs and panic */
1001
1002#endif /* CONFIG_PPC_OF */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
new file mode 100644
index 000000000000..22796e28881a
--- /dev/null
+++ b/arch/powerpc/kernel/entry_64.S
@@ -0,0 +1,842 @@
1/*
2 * arch/ppc64/kernel/entry.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains the system call entry code, context switch
15 * code, and exception/interrupt return code for PowerPC.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 */
22
23#include <linux/config.h>
24#include <linux/errno.h>
25#include <asm/unistd.h>
26#include <asm/processor.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/cputable.h>
33
34#ifdef CONFIG_PPC_ISERIES
35#define DO_SOFT_DISABLE
36#endif
37
38/*
39 * System calls.
40 */
41 .section ".toc","aw"
42.SYS_CALL_TABLE:
43 .tc .sys_call_table[TC],.sys_call_table
44
45/* This value is used to mark exception frames on the stack. */
46exception_marker:
47 .tc ID_72656773_68657265[TC],0x7265677368657265
48
49 .section ".text"
50 .align 7
51
52#undef SHOW_SYSCALLS
53
54 .globl system_call_common
55system_call_common:
56 andi. r10,r12,MSR_PR
57 mr r10,r1
58 addi r1,r1,-INT_FRAME_SIZE
59 beq- 1f
60 ld r1,PACAKSAVE(r13)
611: std r10,0(r1)
62 std r11,_NIP(r1)
63 std r12,_MSR(r1)
64 std r0,GPR0(r1)
65 std r10,GPR1(r1)
66 std r2,GPR2(r1)
67 std r3,GPR3(r1)
68 std r4,GPR4(r1)
69 std r5,GPR5(r1)
70 std r6,GPR6(r1)
71 std r7,GPR7(r1)
72 std r8,GPR8(r1)
73 li r11,0
74 std r11,GPR9(r1)
75 std r11,GPR10(r1)
76 std r11,GPR11(r1)
77 std r11,GPR12(r1)
78 std r9,GPR13(r1)
79 crclr so
80 mfcr r9
81 mflr r10
82 li r11,0xc01
83 std r9,_CCR(r1)
84 std r10,_LINK(r1)
85 std r11,_TRAP(r1)
86 mfxer r9
87 mfctr r10
88 std r9,_XER(r1)
89 std r10,_CTR(r1)
90 std r3,ORIG_GPR3(r1)
91 ld r2,PACATOC(r13)
92 addi r9,r1,STACK_FRAME_OVERHEAD
93 ld r11,exception_marker@toc(r2)
94 std r11,-16(r9) /* "regshere" marker */
95#ifdef CONFIG_PPC_ISERIES
96 /* Hack for handling interrupts when soft-enabling on iSeries */
97 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
98 andi. r10,r12,MSR_PR /* from kernel */
99 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
100 beq hardware_interrupt_entry
101 lbz r10,PACAPROCENABLED(r13)
102 std r10,SOFTE(r1)
103#endif
104 mfmsr r11
105 ori r11,r11,MSR_EE
106 mtmsrd r11,1
107
108#ifdef SHOW_SYSCALLS
109 bl .do_show_syscall
110 REST_GPR(0,r1)
111 REST_4GPRS(3,r1)
112 REST_2GPRS(7,r1)
113 addi r9,r1,STACK_FRAME_OVERHEAD
114#endif
115 clrrdi r11,r1,THREAD_SHIFT
116 li r12,0
117 ld r10,TI_FLAGS(r11)
118 stb r12,TI_SC_NOERR(r11)
119 andi. r11,r10,_TIF_SYSCALL_T_OR_A
120 bne- syscall_dotrace
121syscall_dotrace_cont:
122 cmpldi 0,r0,NR_syscalls
123 bge- syscall_enosys
124
125system_call: /* label this so stack traces look sane */
126/*
127 * Need to vector to 32 Bit or default sys_call_table here,
128 * based on caller's run-mode / personality.
129 */
130 ld r11,.SYS_CALL_TABLE@toc(2)
131 andi. r10,r10,_TIF_32BIT
132 beq 15f
133 addi r11,r11,8 /* use 32-bit syscall entries */
134 clrldi r3,r3,32
135 clrldi r4,r4,32
136 clrldi r5,r5,32
137 clrldi r6,r6,32
138 clrldi r7,r7,32
139 clrldi r8,r8,32
14015:
141 slwi r0,r0,4
142 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
143 mtctr r10
144 bctrl /* Call handler */
145
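
The dispatch above assumes a table layout in which every syscall number owns a 16-byte slot, a 64-bit entry followed by its 32-bit counterpart, hence the shift by 4 and the extra 8 for _TIF_32BIT tasks, with 32-bit callers' arguments truncated by clrldi. A hedged C sketch of the same idea (the types and names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    typedef long (*syscall_fn)(uint64_t, uint64_t);

    struct syscall_slot { syscall_fn native, compat; };

    static long sys_add(uint64_t a, uint64_t b) { return (long)(a + b); }

    static long dispatch(const struct syscall_slot *table, unsigned nr,
                         int is_32bit, uint64_t a, uint64_t b)
    {
        if (is_32bit) {
            a = (uint32_t)a;                /* clrldi r3,r3,32 */
            b = (uint32_t)b;
        }
        return (is_32bit ? table[nr].compat : table[nr].native)(a, b);
    }

    int main(void)
    {
        struct syscall_slot table[] = { { sys_add, sys_add } };
        printf("%ld\n", dispatch(table, 0, 1, 0xffffffff00000005ULL, 2));
        return 0;
    }
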
146syscall_exit:
147#ifdef SHOW_SYSCALLS
148 std r3,GPR3(r1)
149 bl .do_show_syscall_exit
150 ld r3,GPR3(r1)
151#endif
152 std r3,RESULT(r1)
153 ld r5,_CCR(r1)
154 li r10,-_LAST_ERRNO
155 cmpld r3,r10
156 clrrdi r12,r1,THREAD_SHIFT
157 bge- syscall_error
158syscall_error_cont:
159
160 /* check for syscall tracing or audit */
161 ld r9,TI_FLAGS(r12)
162 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
163 bne- syscall_exit_trace
164syscall_exit_trace_cont:
165
166 /* disable interrupts so current_thread_info()->flags can't change,
167 and so that we don't get interrupted after loading SRR0/1. */
168 ld r8,_MSR(r1)
169 andi. r10,r8,MSR_RI
170 beq- unrecov_restore
171 mfmsr r10
172 rldicl r10,r10,48,1
173 rotldi r10,r10,16
174 mtmsrd r10,1
175 ld r9,TI_FLAGS(r12)
176 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
177 bne- syscall_exit_work
178 ld r7,_NIP(r1)
179 stdcx. r0,0,r1 /* to clear the reservation */
180 andi. r6,r8,MSR_PR
181 ld r4,_LINK(r1)
182 beq- 1f /* only restore r13 if */
183 ld r13,GPR13(r1) /* returning to usermode */
1841: ld r2,GPR2(r1)
185 li r12,MSR_RI
186 andc r10,r10,r12
187 mtmsrd r10,1 /* clear MSR.RI */
188 ld r1,GPR1(r1)
189 mtlr r4
190 mtcr r5
191 mtspr SPRN_SRR0,r7
192 mtspr SPRN_SRR1,r8
193 rfid
194 b . /* prevent speculative execution */
195
196syscall_enosys:
197 li r3,-ENOSYS
198 std r3,RESULT(r1)
199 clrrdi r12,r1,THREAD_SHIFT
200 ld r5,_CCR(r1)
201
202syscall_error:
203 lbz r11,TI_SC_NOERR(r12)
204 cmpwi 0,r11,0
205 bne- syscall_error_cont
206 neg r3,r3
207 oris r5,r5,0x1000 /* Set SO bit in CR */
208 std r5,_CCR(r1)
209 b syscall_error_cont
210
211/* Traced system call support */
212syscall_dotrace:
213 bl .save_nvgprs
214 addi r3,r1,STACK_FRAME_OVERHEAD
215 bl .do_syscall_trace_enter
216 ld r0,GPR0(r1) /* Restore original registers */
217 ld r3,GPR3(r1)
218 ld r4,GPR4(r1)
219 ld r5,GPR5(r1)
220 ld r6,GPR6(r1)
221 ld r7,GPR7(r1)
222 ld r8,GPR8(r1)
223 addi r9,r1,STACK_FRAME_OVERHEAD
224 clrrdi r10,r1,THREAD_SHIFT
225 ld r10,TI_FLAGS(r10)
226 b syscall_dotrace_cont
227
228syscall_exit_trace:
229 std r3,GPR3(r1)
230 bl .save_nvgprs
231 addi r3,r1,STACK_FRAME_OVERHEAD
232 bl .do_syscall_trace_leave
233 REST_NVGPRS(r1)
234 ld r3,GPR3(r1)
235 ld r5,_CCR(r1)
236 clrrdi r12,r1,THREAD_SHIFT
237 b syscall_exit_trace_cont
238
239/* Stuff to do on exit from a system call. */
240syscall_exit_work:
241 std r3,GPR3(r1)
242 std r5,_CCR(r1)
243 b .ret_from_except_lite
244
245/* Save non-volatile GPRs, if not already saved. */
246_GLOBAL(save_nvgprs)
247 ld r11,_TRAP(r1)
248 andi. r0,r11,1
249 beqlr-
250 SAVE_NVGPRS(r1)
251 clrrdi r0,r11,1
252 std r0,_TRAP(r1)
253 blr
254
255/*
256 * The sigsuspend and rt_sigsuspend system calls can call do_signal
257 * and thus put the process into the stopped state where we might
258 * want to examine its user state with ptrace. Therefore we need
259 * to save all the nonvolatile registers (r14 - r31) before calling
260 * the C code. Similarly, fork, vfork and clone need the full
261 * register state on the stack so that it can be copied to the child.
262 */
263_GLOBAL(ppc32_sigsuspend)
264 bl .save_nvgprs
265 bl .sys32_sigsuspend
266 b 70f
267
268_GLOBAL(ppc64_rt_sigsuspend)
269 bl .save_nvgprs
270 bl .sys_rt_sigsuspend
271 b 70f
272
273_GLOBAL(ppc32_rt_sigsuspend)
274 bl .save_nvgprs
275 bl .sys32_rt_sigsuspend
27670: cmpdi 0,r3,0
277 /* If it returned an error, we need to return via syscall_exit to set
278 the SO bit in cr0 and potentially stop for ptrace. */
279 bne syscall_exit
280 /* If sigsuspend() returns zero, we are going into a signal handler. We
281 may need to call audit_syscall_exit() to mark the exit from sigsuspend() */
282#ifdef CONFIG_AUDIT
283 ld r3,PACACURRENT(r13)
284 ld r4,AUDITCONTEXT(r3)
285 cmpdi 0,r4,0
286 beq .ret_from_except /* No audit_context: Leave immediately. */
287 li r4, 2 /* AUDITSC_FAILURE */
288 li r5,-4 /* It's always -EINTR */
289 bl .audit_syscall_exit
290#endif
291 b .ret_from_except
292
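
The "set the SO bit in cr0" convention mentioned above is how errors reach userspace: a return value in [-_LAST_ERRNO, -1] is negated and the summary-overflow bit is set in the saved CR image (oris r5,r5,0x1000), so the C library can tell an errno apart from a legitimate negative result. A small sketch of that test; the real _LAST_ERRNO comes from asm-offsets, 4096 below is only a placeholder:

    #include <stdio.h>

    #define LAST_ERRNO_SKETCH 4096u       /* placeholder for _LAST_ERRNO */
    #define CR0_SO            0x10000000u /* bit set by oris r5,r5,0x1000 */

    static long syscall_exit_fixup(long r3, unsigned int *ccr)
    {
        if ((unsigned long)r3 >= -(unsigned long)LAST_ERRNO_SKETCH) {
            *ccr |= CR0_SO;               /* tell userspace this is an errno */
            return -r3;
        }
        return r3;
    }

    int main(void)
    {
        unsigned int ccr = 0;
        long r = syscall_exit_fixup(-14 /* would be -EFAULT */, &ccr);
        printf("r3=%ld SO=%d\n", r, !!(ccr & CR0_SO));
        return 0;
    }
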
293_GLOBAL(ppc_fork)
294 bl .save_nvgprs
295 bl .sys_fork
296 b syscall_exit
297
298_GLOBAL(ppc_vfork)
299 bl .save_nvgprs
300 bl .sys_vfork
301 b syscall_exit
302
303_GLOBAL(ppc_clone)
304 bl .save_nvgprs
305 bl .sys_clone
306 b syscall_exit
307
308_GLOBAL(ppc32_swapcontext)
309 bl .save_nvgprs
310 bl .sys32_swapcontext
311 b 80f
312
313_GLOBAL(ppc64_swapcontext)
314 bl .save_nvgprs
315 bl .sys_swapcontext
316 b 80f
317
318_GLOBAL(ppc32_sigreturn)
319 bl .sys32_sigreturn
320 b 80f
321
322_GLOBAL(ppc32_rt_sigreturn)
323 bl .sys32_rt_sigreturn
324 b 80f
325
326_GLOBAL(ppc64_rt_sigreturn)
327 bl .sys_rt_sigreturn
328
32980: cmpdi 0,r3,0
330 blt syscall_exit
331 clrrdi r4,r1,THREAD_SHIFT
332 ld r4,TI_FLAGS(r4)
333 andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
334 beq+ 81f
335 addi r3,r1,STACK_FRAME_OVERHEAD
336 bl .do_syscall_trace_leave
33781: b .ret_from_except
338
339_GLOBAL(ret_from_fork)
340 bl .schedule_tail
341 REST_NVGPRS(r1)
342 li r3,0
343 b syscall_exit
344
345/*
346 * This routine switches between two different tasks. The process
347 * state of one is saved on its kernel stack. Then the state
348 * of the other is restored from its kernel stack. The memory
349 * management hardware is updated to the second process's state.
350 * Finally, we can return to the second process, via ret_from_except.
351 * On entry, r3 points to the THREAD for the current task, r4
352 * points to the THREAD for the new task.
353 *
354 * Note: there are two ways to get to the "going out" portion
355 * of this code; either by coming in via the entry (_switch)
356 * or via "fork" which must set up an environment equivalent
357 * to the "_switch" path. If you change this you'll have to change
358 * the fork code also.
359 *
360 * The code which creates the new task context is in 'copy_thread'
361 * in arch/ppc64/kernel/process.c
362 */
363 .align 7
364_GLOBAL(_switch)
365 mflr r0
366 std r0,16(r1)
367 stdu r1,-SWITCH_FRAME_SIZE(r1)
368 /* r3-r13 are caller saved -- Cort */
369 SAVE_8GPRS(14, r1)
370 SAVE_10GPRS(22, r1)
371 mflr r20 /* Return to switch caller */
372 mfmsr r22
373 li r0, MSR_FP
374#ifdef CONFIG_ALTIVEC
375BEGIN_FTR_SECTION
376 oris r0,r0,MSR_VEC@h /* Disable altivec */
377 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
378 std r24,THREAD_VRSAVE(r3)
379END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
380#endif /* CONFIG_ALTIVEC */
381 and. r0,r0,r22
382 beq+ 1f
383 andc r22,r22,r0
384 mtmsrd r22
385 isync
3861: std r20,_NIP(r1)
387 mfcr r23
388 std r23,_CCR(r1)
389 std r1,KSP(r3) /* Set old stack pointer */
390
391#ifdef CONFIG_SMP
392 /* We need a sync somewhere here to make sure that if the
393 * previous task gets rescheduled on another CPU, it sees all
394 * stores it has performed on this one.
395 */
396 sync
397#endif /* CONFIG_SMP */
398
399 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
400 std r6,PACACURRENT(r13) /* Set new 'current' */
401
402 ld r8,KSP(r4) /* new stack pointer */
403BEGIN_FTR_SECTION
404 clrrdi r6,r8,28 /* get its ESID */
405 clrrdi r9,r1,28 /* get current sp ESID */
406 clrldi. r0,r6,2 /* is new ESID c00000000? */
407 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
408 cror eq,4*cr1+eq,eq
409 beq 2f /* if yes, don't slbie it */
410
411 /* Bolt in the new stack SLB entry */
412 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
413 oris r0,r6,(SLB_ESID_V)@h
414 ori r0,r0,(SLB_NUM_BOLTED-1)@l
415 slbie r6
416 slbie r6 /* Workaround POWER5 < DD2.1 issue */
417 slbmte r7,r0
418 isync
419
4202:
421END_FTR_SECTION_IFSET(CPU_FTR_SLB)
422 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
423 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
424 because we don't need to leave the 288-byte ABI gap at the
425 top of the kernel stack. */
426 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
427
428 mr r1,r8 /* start using new stack pointer */
429 std r7,PACAKSAVE(r13)
430
431 ld r6,_CCR(r1)
432 mtcrf 0xFF,r6
433
434#ifdef CONFIG_ALTIVEC
435BEGIN_FTR_SECTION
436 ld r0,THREAD_VRSAVE(r4)
437 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
438END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
439#endif /* CONFIG_ALTIVEC */
440
441 /* r3-r13 are destroyed -- Cort */
442 REST_8GPRS(14, r1)
443 REST_10GPRS(22, r1)
444
445 /* convert old thread to its task_struct for return value */
446 addi r3,r3,-THREAD
447 ld r7,_NIP(r1) /* Return to _switch caller in new task */
448 mtlr r7
449 addi r1,r1,SWITCH_FRAME_SIZE
450 blr
451
452 .align 7
453_GLOBAL(ret_from_except)
454 ld r11,_TRAP(r1)
455 andi. r0,r11,1
456 bne .ret_from_except_lite
457 REST_NVGPRS(r1)
458
459_GLOBAL(ret_from_except_lite)
460 /*
461 * Disable interrupts so that current_thread_info()->flags
462 * can't change between when we test it and when we return
463 * from the interrupt.
464 */
465 mfmsr r10 /* Get current interrupt state */
466 rldicl r9,r10,48,1 /* clear MSR_EE */
467 rotldi r9,r9,16
468 mtmsrd r9,1 /* Update machine state */
469
470#ifdef CONFIG_PREEMPT
471 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
472 li r0,_TIF_NEED_RESCHED /* bits to check */
473 ld r3,_MSR(r1)
474 ld r4,TI_FLAGS(r9)
475 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
476 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
477 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
478 bne do_work
479
480#else /* !CONFIG_PREEMPT */
481 ld r3,_MSR(r1) /* Returning to user mode? */
482 andi. r3,r3,MSR_PR
483 beq restore /* if not, just restore regs and return */
484
485 /* Check current_thread_info()->flags */
486 clrrdi r9,r1,THREAD_SHIFT
487 ld r4,TI_FLAGS(r9)
488 andi. r0,r4,_TIF_USER_WORK_MASK
489 bne do_work
490#endif
491
492restore:
493#ifdef CONFIG_PPC_ISERIES
494 ld r5,SOFTE(r1)
495 cmpdi 0,r5,0
496 beq 4f
497 /* Check for pending interrupts (iSeries) */
498 ld r3,PACALPPACA+LPPACAANYINT(r13)
499 cmpdi r3,0
500 beq+ 4f /* skip do_IRQ if no interrupts */
501
502 li r3,0
503 stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
504 ori r10,r10,MSR_EE
505 mtmsrd r10 /* hard-enable again */
506 addi r3,r1,STACK_FRAME_OVERHEAD
507 bl .do_IRQ
508 b .ret_from_except_lite /* loop back and handle more */
509
5104: stb r5,PACAPROCENABLED(r13)
511#endif
512
513 ld r3,_MSR(r1)
514 andi. r0,r3,MSR_RI
515 beq- unrecov_restore
516
517 andi. r0,r3,MSR_PR
518
519 /*
520 * r13 is our per cpu area, only restore it if we are returning to
521 * userspace
522 */
523 beq 1f
524 REST_GPR(13, r1)
5251:
526 ld r3,_CTR(r1)
527 ld r0,_LINK(r1)
528 mtctr r3
529 mtlr r0
530 ld r3,_XER(r1)
531 mtspr SPRN_XER,r3
532
533 REST_8GPRS(5, r1)
534
535 stdcx. r0,0,r1 /* to clear the reservation */
536
537 mfmsr r0
538 li r2, MSR_RI
539 andc r0,r0,r2
540 mtmsrd r0,1
541
542 ld r0,_MSR(r1)
543 mtspr SPRN_SRR1,r0
544
545 ld r2,_CCR(r1)
546 mtcrf 0xFF,r2
547 ld r2,_NIP(r1)
548 mtspr SPRN_SRR0,r2
549
550 ld r0,GPR0(r1)
551 ld r2,GPR2(r1)
552 ld r3,GPR3(r1)
553 ld r4,GPR4(r1)
554 ld r1,GPR1(r1)
555
556 rfid
557 b . /* prevent speculative execution */
558
559/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
560do_work:
561#ifdef CONFIG_PREEMPT
562 andi. r0,r3,MSR_PR /* Returning to user mode? */
563 bne user_work
564 /* Check that preempt_count() == 0 and interrupts are enabled */
565 lwz r8,TI_PREEMPT(r9)
566 cmpwi cr1,r8,0
567#ifdef CONFIG_PPC_ISERIES
568 ld r0,SOFTE(r1)
569 cmpdi r0,0
570#else
571 andi. r0,r3,MSR_EE
572#endif
573 crandc eq,cr1*4+eq,eq
574 bne restore
575 /* here we are preempting the current task */
5761:
577#ifdef CONFIG_PPC_ISERIES
578 li r0,1
579 stb r0,PACAPROCENABLED(r13)
580#endif
581 ori r10,r10,MSR_EE
582 mtmsrd r10,1 /* reenable interrupts */
583 bl .preempt_schedule
584 mfmsr r10
585 clrrdi r9,r1,THREAD_SHIFT
586 rldicl r10,r10,48,1 /* disable interrupts again */
587 rotldi r10,r10,16
588 mtmsrd r10,1
589 ld r4,TI_FLAGS(r9)
590 andi. r0,r4,_TIF_NEED_RESCHED
591 bne 1b
592 b restore
593
594user_work:
595#endif
596 /* Enable interrupts */
597 ori r10,r10,MSR_EE
598 mtmsrd r10,1
599
600 andi. r0,r4,_TIF_NEED_RESCHED
601 beq 1f
602 bl .schedule
603 b .ret_from_except_lite
604
6051: bl .save_nvgprs
606 li r3,0
607 addi r4,r1,STACK_FRAME_OVERHEAD
608 bl .do_signal
609 b .ret_from_except
610
611unrecov_restore:
612 addi r3,r1,STACK_FRAME_OVERHEAD
613 bl .unrecoverable_exception
614 b unrecov_restore
615
616#ifdef CONFIG_PPC_RTAS
617/*
618 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
619 * called with the MMU off.
620 *
621 * In addition, we need to be in 32b mode, at least for now.
622 *
623 * Note: r3 is an input parameter to rtas, so don't trash it...
624 */
625_GLOBAL(enter_rtas)
626 mflr r0
627 std r0,16(r1)
628 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
629
630 /* Because RTAS is running in 32b mode, it clobbers the high order half
631 * of all registers that it saves. We therefore save those registers
632 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
633 */
634 SAVE_GPR(2, r1) /* Save the TOC */
635 SAVE_GPR(13, r1) /* Save paca */
636 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
637 SAVE_10GPRS(22, r1) /* ditto */
638
639 mfcr r4
640 std r4,_CCR(r1)
641 mfctr r5
642 std r5,_CTR(r1)
643 mfspr r6,SPRN_XER
644 std r6,_XER(r1)
645 mfdar r7
646 std r7,_DAR(r1)
647 mfdsisr r8
648 std r8,_DSISR(r1)
649 mfsrr0 r9
650 std r9,_SRR0(r1)
651 mfsrr1 r10
652 std r10,_SRR1(r1)
653
654 /* There is no way it is acceptable to get here with interrupts enabled,
655 * check it with the asm equivalent of WARN_ON
656 */
657 mfmsr r6
658 andi. r0,r6,MSR_EE
6591: tdnei r0,0
660.section __bug_table,"a"
661 .llong 1b,__LINE__ + 0x1000000, 1f, 2f
662.previous
663.section .rodata,"a"
6641: .asciz __FILE__
6652: .asciz "enter_rtas"
666.previous
667
668 /* Unfortunately, the stack pointer and the MSR are also clobbered,
669 * so they are saved in the PACA which allows us to restore
670 * our original state after RTAS returns.
671 */
672 std r1,PACAR1(r13)
673 std r6,PACASAVEDMSR(r13)
674
675 /* Setup our real return addr */
676 SET_REG_TO_LABEL(r4,.rtas_return_loc)
677 SET_REG_TO_CONST(r9,KERNELBASE)
678 sub r4,r4,r9
679 mtlr r4
680
681 li r0,0
682 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
683 andc r0,r6,r0
684
685 li r9,1
686 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
687 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
688 andc r6,r0,r9
689 ori r6,r6,MSR_RI
690 sync /* disable interrupts so SRR0/1 */
691 mtmsrd r0 /* don't get trashed */
692
693 SET_REG_TO_LABEL(r4,rtas)
694 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
695 ld r4,RTASBASE(r4) /* get the rtas->base value */
696
697 mtspr SPRN_SRR0,r5
698 mtspr SPRN_SRR1,r6
699 rfid
700 b . /* prevent speculative execution */
701
702_STATIC(rtas_return_loc)
703 /* relocation is off at this point */
704 mfspr r4,SPRN_SPRG3 /* Get PACA */
705 SET_REG_TO_CONST(r5, KERNELBASE)
706 sub r4,r4,r5 /* RELOC the PACA base pointer */
707
708 mfmsr r6
709 li r0,MSR_RI
710 andc r6,r6,r0
711 sync
712 mtmsrd r6
713
714 ld r1,PACAR1(r4) /* Restore our SP */
715 LOADADDR(r3,.rtas_restore_regs)
716 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
717
718 mtspr SPRN_SRR0,r3
719 mtspr SPRN_SRR1,r4
720 rfid
721 b . /* prevent speculative execution */
722
723_STATIC(rtas_restore_regs)
724 /* relocation is on at this point */
725 REST_GPR(2, r1) /* Restore the TOC */
726 REST_GPR(13, r1) /* Restore paca */
727 REST_8GPRS(14, r1) /* Restore the non-volatiles */
728 REST_10GPRS(22, r1) /* ditto */
729
730 mfspr r13,SPRN_SPRG3
731
732 ld r4,_CCR(r1)
733 mtcr r4
734 ld r5,_CTR(r1)
735 mtctr r5
736 ld r6,_XER(r1)
737 mtspr SPRN_XER,r6
738 ld r7,_DAR(r1)
739 mtdar r7
740 ld r8,_DSISR(r1)
741 mtdsisr r8
742 ld r9,_SRR0(r1)
743 mtsrr0 r9
744 ld r10,_SRR1(r1)
745 mtsrr1 r10
746
747 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
748 ld r0,16(r1) /* get return address */
749
750 mtlr r0
751 blr /* return to caller */
752
753#endif /* CONFIG_PPC_RTAS */
754
755#ifdef CONFIG_PPC_MULTIPLATFORM
756
757_GLOBAL(enter_prom)
758 mflr r0
759 std r0,16(r1)
760 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
761
762 /* Because PROM is running in 32b mode, it clobbers the high order half
763 * of all registers that it saves. We therefore save those registers
764 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
765 */
766 SAVE_8GPRS(2, r1)
767 SAVE_GPR(13, r1)
768 SAVE_8GPRS(14, r1)
769 SAVE_10GPRS(22, r1)
770 mfcr r4
771 std r4,_CCR(r1)
772 mfctr r5
773 std r5,_CTR(r1)
774 mfspr r6,SPRN_XER
775 std r6,_XER(r1)
776 mfdar r7
777 std r7,_DAR(r1)
778 mfdsisr r8
779 std r8,_DSISR(r1)
780 mfsrr0 r9
781 std r9,_SRR0(r1)
782 mfsrr1 r10
783 std r10,_SRR1(r1)
784 mfmsr r11
785 std r11,_MSR(r1)
786
787 /* Get the PROM entrypoint */
788 ld r0,GPR4(r1)
789 mtlr r0
790
791 /* Switch MSR to 32 bits mode
792 */
793 mfmsr r11
794 li r12,1
795 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
796 andc r11,r11,r12
797 li r12,1
798 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
799 andc r11,r11,r12
800 mtmsrd r11
801 isync
802
803 /* Restore arguments & enter PROM here... */
804 ld r3,GPR3(r1)
805 blrl
806
807 /* Just make sure that r1's top 32 bits didn't get
808 * corrupted by OF
809 */
810 rldicl r1,r1,0,32
811
812 /* Restore the MSR (back to 64 bits) */
813 ld r0,_MSR(r1)
814 mtmsrd r0
815 isync
816
817 /* Restore other registers */
818 REST_GPR(2, r1)
819 REST_GPR(13, r1)
820 REST_8GPRS(14, r1)
821 REST_10GPRS(22, r1)
822 ld r4,_CCR(r1)
823 mtcr r4
824 ld r5,_CTR(r1)
825 mtctr r5
826 ld r6,_XER(r1)
827 mtspr SPRN_XER,r6
828 ld r7,_DAR(r1)
829 mtdar r7
830 ld r8,_DSISR(r1)
831 mtdsisr r8
832 ld r9,_SRR0(r1)
833 mtsrr0 r9
834 ld r10,_SRR1(r1)
835 mtsrr1 r10
836
837 addi r1,r1,PROM_FRAME_SIZE
838 ld r0,16(r1)
839 mtlr r0
840 blr
841
842#endif /* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
new file mode 100644
index 000000000000..fa8c20ffec78
--- /dev/null
+++ b/arch/powerpc/kernel/misc_32.S
@@ -0,0 +1,1039 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 */
14
15#include <linux/config.h>
16#include <linux/sys.h>
17#include <asm/unistd.h>
18#include <asm/errno.h>
19#include <asm/reg.h>
20#include <asm/page.h>
21#include <asm/cache.h>
22#include <asm/cputable.h>
23#include <asm/mmu.h>
24#include <asm/ppc_asm.h>
25#include <asm/thread_info.h>
26#include <asm/asm-offsets.h>
27
28 .text
29
30 .align 5
31_GLOBAL(__delay)
32 cmpwi 0,r3,0
33 mtctr r3
34 beqlr
351: bdnz 1b
36 blr
37
38/*
39 * Returns (address we're running at) - (address we were linked at)
40 * for use before the text and data are mapped to KERNELBASE.
41 */
42_GLOBAL(reloc_offset)
43 mflr r0
44 bl 1f
451: mflr r3
46 lis r4,1b@ha
47 addi r4,r4,1b@l
48 subf r3,r4,r3
49 mtlr r0
50 blr
51
52/*
53 * add_reloc_offset(x) returns x + reloc_offset().
54 */
55_GLOBAL(add_reloc_offset)
56 mflr r0
57 bl 1f
581: mflr r5
59 lis r4,1b@ha
60 addi r4,r4,1b@l
61 subf r5,r4,r5
62 add r3,r3,r5
63 mtlr r0
64 blr
65
66/*
67 * sub_reloc_offset(x) returns x - reloc_offset().
68 */
69_GLOBAL(sub_reloc_offset)
70 mflr r0
71 bl 1f
721: mflr r5
73 lis r4,1b@ha
74 addi r4,r4,1b@l
75 subf r5,r4,r5
76 subf r3,r5,r3
77 mtlr r0
78 blr
79
80/*
81 * reloc_got2 runs through the .got2 section adding an offset
82 * to each entry.
83 */
84_GLOBAL(reloc_got2)
85 mflr r11
86 lis r7,__got2_start@ha
87 addi r7,r7,__got2_start@l
88 lis r8,__got2_end@ha
89 addi r8,r8,__got2_end@l
90 subf r8,r7,r8
91 srwi. r8,r8,2
92 beqlr
93 mtctr r8
94 bl 1f
951: mflr r0
96 lis r4,1b@ha
97 addi r4,r4,1b@l
98 subf r0,r4,r0
99 add r7,r0,r7
1002: lwz r0,0(r7)
101 add r0,r0,r3
102 stw r0,0(r7)
103 addi r7,r7,4
104 bdnz 2b
105 mtlr r11
106 blr
107
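
In C terms, the loop above simply adds the relocation offset to every 32-bit entry between __got2_start and __got2_end; a standalone sketch with illustrative names:

    #include <stdio.h>

    static void reloc_got2_sketch(unsigned int *got2, unsigned int n,
                                  unsigned int offset)
    {
        for (unsigned int i = 0; i < n; i++)
            got2[i] += offset;        /* lwz / add / stw, repeated CTR times */
    }

    int main(void)
    {
        unsigned int got2[3] = { 0x1000, 0x2000, 0x3000 };
        reloc_got2_sketch(got2, 3, 0x00400000);
        printf("%#x %#x %#x\n", got2[0], got2[1], got2[2]);
        return 0;
    }
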
108/*
109 * identify_cpu,
110 * called with r3 = data offset and r4 = CPU number
111 * doesn't change r3
112 */
113_GLOBAL(identify_cpu)
114 addis r8,r3,cpu_specs@ha
115 addi r8,r8,cpu_specs@l
116 mfpvr r7
1171:
118 lwz r5,CPU_SPEC_PVR_MASK(r8)
119 and r5,r5,r7
120 lwz r6,CPU_SPEC_PVR_VALUE(r8)
121 cmplw 0,r6,r5
122 beq 1f
123 addi r8,r8,CPU_SPEC_ENTRY_SIZE
124 b 1b
1251:
126 addis r6,r3,cur_cpu_spec@ha
127 addi r6,r6,cur_cpu_spec@l
128 sub r8,r8,r3
129 stw r8,0(r6)
130 blr
131
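
The routine above walks cpu_specs until (PVR & pvr_mask) == pvr_value, relying on a catch-all entry with a zero mask to terminate the scan. A hedged sketch of the same lookup (field names are illustrative, not the real struct cpu_spec):

    #include <stdint.h>
    #include <stdio.h>

    struct cpu_spec_sketch {
        uint32_t pvr_mask;
        uint32_t pvr_value;
        const char *name;
    };

    static const struct cpu_spec_sketch *identify(const struct cpu_spec_sketch *t,
                                                  size_t n, uint32_t pvr)
    {
        for (size_t i = 0; i < n; i++)
            if ((pvr & t[i].pvr_mask) == t[i].pvr_value)
                return &t[i];
        return NULL;
    }

    int main(void)
    {
        const struct cpu_spec_sketch specs[] = {
            { 0xffff0000, 0x00080000, "740/750" },
            { 0x00000000, 0x00000000, "generic" },  /* catch-all, like the real table */
        };
        printf("%s\n", identify(specs, 2, 0x00080202)->name);
        return 0;
    }
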
132/*
133 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
134 * and writes nop's over sections of code that don't apply for this cpu.
135 * r3 = data offset (not changed)
136 */
137_GLOBAL(do_cpu_ftr_fixups)
138 /* Get CPU 0 features */
139 addis r6,r3,cur_cpu_spec@ha
140 addi r6,r6,cur_cpu_spec@l
141 lwz r4,0(r6)
142 add r4,r4,r3
143 lwz r4,CPU_SPEC_FEATURES(r4)
144
145 /* Get the fixup table */
146 addis r6,r3,__start___ftr_fixup@ha
147 addi r6,r6,__start___ftr_fixup@l
148 addis r7,r3,__stop___ftr_fixup@ha
149 addi r7,r7,__stop___ftr_fixup@l
150
151 /* Do the fixup */
1521: cmplw 0,r6,r7
153 bgelr
154 addi r6,r6,16
155 lwz r8,-16(r6) /* mask */
156 and r8,r8,r4
157 lwz r9,-12(r6) /* value */
158 cmplw 0,r8,r9
159 beq 1b
160 lwz r8,-8(r6) /* section begin */
161 lwz r9,-4(r6) /* section end */
162 subf. r9,r8,r9
163 beq 1b
164 /* write nops over the section of code */
165 /* todo: if large section, add a branch at the start of it */
166 srwi r9,r9,2
167 mtctr r9
168 add r8,r8,r3
169 lis r0,0x60000000@h /* nop */
1703: stw r0,0(r8)
171 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
172 beq 2f
173 dcbst 0,r8 /* suboptimal, but simpler */
174 sync
175 icbi 0,r8
1762: addi r8,r8,4
177 bdnz 3b
178 sync /* additional sync needed on g4 */
179 isync
180 b 1b
181
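
Expressed in C, the fixup walk above checks each 16-byte record and, when the CPU's feature word does not match, overwrites the named code section with nops. The real code also relocates the addresses by the data offset in r3 and flushes the caches with dcbst/icbi, which this sketch omits; the struct layout is only inferred from the -16/-12/-8/-4 offsets:

    #include <stdint.h>
    #include <stdio.h>

    struct ftr_fixup_sketch {
        uint32_t  mask;       /* feature bits this section depends on      */
        uint32_t  value;      /* required value of (features & mask)       */
        uint32_t *start;      /* first instruction of the optional section */
        uint32_t *end;        /* one past the last instruction             */
    };

    #define PPC_NOP 0x60000000u

    static void ftr_fixup(uint32_t features, const struct ftr_fixup_sketch *fix,
                          const struct ftr_fixup_sketch *stop)
    {
        for (; fix < stop; fix++) {
            if ((features & fix->mask) == fix->value)
                continue;                 /* feature matches: keep the code   */
            for (uint32_t *p = fix->start; p < fix->end; p++)
                *p = PPC_NOP;             /* otherwise overwrite it with nops */
        }
    }

    int main(void)
    {
        uint32_t code[4] = { 1, 2, 3, 4 };           /* stand-in "instructions" */
        struct ftr_fixup_sketch table[] = {
            { 0x1, 0x1, &code[0], &code[2] },        /* kept: feature bit set     */
            { 0x2, 0x2, &code[2], &code[4] },        /* nopped: feature bit clear */
        };
        ftr_fixup(0x1, table, table + 2);
        for (int i = 0; i < 4; i++)
            printf("%#x ", code[i]);
        printf("\n");
        return 0;
    }
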
182/*
183 * call_setup_cpu - call the setup_cpu function for this cpu
184 * r3 = data offset, r24 = cpu number
185 *
186 * Setup function is called with:
187 * r3 = data offset
188 * r4 = ptr to CPU spec (relocated)
189 */
190_GLOBAL(call_setup_cpu)
191 addis r4,r3,cur_cpu_spec@ha
192 addi r4,r4,cur_cpu_spec@l
193 lwz r4,0(r4)
194 add r4,r4,r3
195 lwz r5,CPU_SPEC_SETUP(r4)
196 cmpi 0,r5,0
197 add r5,r5,r3
198 beqlr
199 mtctr r5
200 bctr
201
202#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
203
204/* This gets called by via-pmu.c to switch the PLL selection
205 * on 750fx CPU. This function should really be moved to some
206 * other place (as most of the cpufreq code in via-pmu
207 */
208_GLOBAL(low_choose_750fx_pll)
209 /* Clear MSR:EE */
210 mfmsr r7
211 rlwinm r0,r7,0,17,15
212 mtmsr r0
213
214 /* If switching to PLL1, disable HID0:BTIC */
215 cmplwi cr0,r3,0
216 beq 1f
217 mfspr r5,SPRN_HID0
218 rlwinm r5,r5,0,27,25
219 sync
220 mtspr SPRN_HID0,r5
221 isync
222 sync
223
2241:
225 /* Calc new HID1 value */
226 mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */
227 rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */
228 rlwinm r4,r4,0,16,14 /* Could I have used rlwimi here? */
229 or r4,r4,r5
230 mtspr SPRN_HID1,r4
231
232 /* Store new HID1 image */
233 rlwinm r6,r1,0,0,18
234 lwz r6,TI_CPU(r6)
235 slwi r6,r6,2
236 addis r6,r6,nap_save_hid1@ha
237 stw r4,nap_save_hid1@l(r6)
238
239 /* If switching to PLL0, enable HID0:BTIC */
240 cmplwi cr0,r3,0
241 bne 1f
242 mfspr r5,SPRN_HID0
243 ori r5,r5,HID0_BTIC
244 sync
245 mtspr SPRN_HID0,r5
246 isync
247 sync
248
2491:
250 /* Return */
251 mtmsr r7
252 blr
253
254_GLOBAL(low_choose_7447a_dfs)
255 /* Clear MSR:EE */
256 mfmsr r7
257 rlwinm r0,r7,0,17,15
258 mtmsr r0
259
260 /* Calc new HID1 value */
261 mfspr r4,SPRN_HID1
262 insrwi r4,r3,1,9 /* insert parameter into bit 9 */
263 sync
264 mtspr SPRN_HID1,r4
265 sync
266 isync
267
268 /* Return */
269 mtmsr r7
270 blr
271
272#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
273
274/*
275 * complement mask on the msr then "or" some values on.
276 * _nmask_and_or_msr(nmask, value_to_or)
277 */
278_GLOBAL(_nmask_and_or_msr)
279 mfmsr r0 /* Get current msr */
280 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
281 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
282 SYNC /* Some chip revs have problems here... */
283 mtmsr r0 /* Update machine state */
284 isync
285 blr /* Done */
286
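The effect on the MSR value reduces to a single C expression; the helper below is only an illustrative model (the real routine must also issue SYNC/isync around mtmsr, which no C code can express).

    /* Illustrative C model of _nmask_and_or_msr (not kernel code): clear the
     * bits given in nmask, then set the bits given in value_to_or. */
    static unsigned long nmask_and_or_msr_model(unsigned long msr,
                                                unsigned long nmask,
                                                unsigned long value_to_or)
    {
            return (msr & ~nmask) | value_to_or;
    }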
287
288/*
289 * Flush MMU TLB
290 */
291_GLOBAL(_tlbia)
292#if defined(CONFIG_40x)
293 sync /* Flush to memory before changing mapping */
294 tlbia
295 isync /* Flush shadow TLB */
296#elif defined(CONFIG_44x)
297 li r3,0
298 sync
299
300 /* Load high watermark */
301 lis r4,tlb_44x_hwater@ha
302 lwz r5,tlb_44x_hwater@l(r4)
303
3041: tlbwe r3,r3,PPC44x_TLB_PAGEID
305 addi r3,r3,1
306 cmpw 0,r3,r5
307 ble 1b
308
309 isync
310#elif defined(CONFIG_FSL_BOOKE)
311 /* Invalidate all entries in TLB0 */
312 li r3, 0x04
313 tlbivax 0,3
314 /* Invalidate all entries in TLB1 */
315 li r3, 0x0c
316 tlbivax 0,3
317 /* Invalidate all entries in TLB2 */
318 li r3, 0x14
319 tlbivax 0,3
320 /* Invalidate all entries in TLB3 */
321 li r3, 0x1c
322 tlbivax 0,3
323 msync
324#ifdef CONFIG_SMP
325 tlbsync
326#endif /* CONFIG_SMP */
327#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
328#if defined(CONFIG_SMP)
329 rlwinm r8,r1,0,0,18
330 lwz r8,TI_CPU(r8)
331 oris r8,r8,10
332 mfmsr r10
333 SYNC
334 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
335 rlwinm r0,r0,0,28,26 /* clear DR */
336 mtmsr r0
337 SYNC_601
338 isync
339 lis r9,mmu_hash_lock@h
340 ori r9,r9,mmu_hash_lock@l
341 tophys(r9,r9)
34210: lwarx r7,0,r9
343 cmpwi 0,r7,0
344 bne- 10b
345 stwcx. r8,0,r9
346 bne- 10b
347 sync
348 tlbia
349 sync
350 TLBSYNC
351 li r0,0
352 stw r0,0(r9) /* clear mmu_hash_lock */
353 mtmsr r10
354 SYNC_601
355 isync
356#else /* CONFIG_SMP */
357 sync
358 tlbia
359 sync
360#endif /* CONFIG_SMP */
361#endif /* ! defined(CONFIG_40x) */
362 blr
363
364/*
365 * Flush MMU TLB for a particular address
366 */
367_GLOBAL(_tlbie)
368#if defined(CONFIG_40x)
369 tlbsx. r3, 0, r3
370 bne 10f
371 sync
372 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
373 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
374 * the TLB entry. */
375 tlbwe r3, r3, TLB_TAG
376 isync
37710:
378#elif defined(CONFIG_44x)
379 mfspr r4,SPRN_MMUCR
380 mfspr r5,SPRN_PID /* Get PID */
381 rlwimi r4,r5,0,24,31 /* Set TID */
382 mtspr SPRN_MMUCR,r4
383
384 tlbsx. r3, 0, r3
385 bne 10f
386 sync
387 /* There are only 64 TLB entries, so r3 < 64,
388	 * which means bit 22 is clear. Since 22 is
389 * the V bit in the TLB_PAGEID, loading this
390 * value will invalidate the TLB entry.
391 */
392 tlbwe r3, r3, PPC44x_TLB_PAGEID
393 isync
39410:
395#elif defined(CONFIG_FSL_BOOKE)
396 rlwinm r4, r3, 0, 0, 19
397 ori r5, r4, 0x08 /* TLBSEL = 1 */
398 ori r6, r4, 0x10 /* TLBSEL = 2 */
399 ori r7, r4, 0x18 /* TLBSEL = 3 */
400 tlbivax 0, r4
401 tlbivax 0, r5
402 tlbivax 0, r6
403 tlbivax 0, r7
404 msync
405#if defined(CONFIG_SMP)
406 tlbsync
407#endif /* CONFIG_SMP */
408#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
409#if defined(CONFIG_SMP)
410 rlwinm r8,r1,0,0,18
411 lwz r8,TI_CPU(r8)
412 oris r8,r8,11
413 mfmsr r10
414 SYNC
415 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
416 rlwinm r0,r0,0,28,26 /* clear DR */
417 mtmsr r0
418 SYNC_601
419 isync
420 lis r9,mmu_hash_lock@h
421 ori r9,r9,mmu_hash_lock@l
422 tophys(r9,r9)
42310: lwarx r7,0,r9
424 cmpwi 0,r7,0
425 bne- 10b
426 stwcx. r8,0,r9
427 bne- 10b
428 eieio
429 tlbie r3
430 sync
431 TLBSYNC
432 li r0,0
433 stw r0,0(r9) /* clear mmu_hash_lock */
434 mtmsr r10
435 SYNC_601
436 isync
437#else /* CONFIG_SMP */
438 tlbie r3
439 sync
440#endif /* CONFIG_SMP */
441#endif /* ! CONFIG_40x */
442 blr
443
444/*
445 * Flush instruction cache.
446 * This is a no-op on the 601.
447 */
448_GLOBAL(flush_instruction_cache)
449#if defined(CONFIG_8xx)
450 isync
451 lis r5, IDC_INVALL@h
452 mtspr SPRN_IC_CST, r5
453#elif defined(CONFIG_4xx)
454#ifdef CONFIG_403GCX
455 li r3, 512
456 mtctr r3
457 lis r4, KERNELBASE@h
4581: iccci 0, r4
459 addi r4, r4, 16
460 bdnz 1b
461#else
462 lis r3, KERNELBASE@h
463 iccci 0,r3
464#endif
465#elif defined(CONFIG_FSL_BOOKE)
466BEGIN_FTR_SECTION
467 mfspr r3,SPRN_L1CSR0
468 ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
469 /* msync; isync recommended here */
470 mtspr SPRN_L1CSR0,r3
471 isync
472 blr
473END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
474 mfspr r3,SPRN_L1CSR1
475 ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
476 mtspr SPRN_L1CSR1,r3
477#else
478 mfspr r3,SPRN_PVR
479 rlwinm r3,r3,16,16,31
480 cmpwi 0,r3,1
481 beqlr /* for 601, do nothing */
482 /* 603/604 processor - use invalidate-all bit in HID0 */
483 mfspr r3,SPRN_HID0
484 ori r3,r3,HID0_ICFI
485 mtspr SPRN_HID0,r3
486#endif /* CONFIG_8xx/4xx */
487 isync
488 blr
489
490/*
491 * Write any modified data cache blocks out to memory
492 * and invalidate the corresponding instruction cache blocks.
493 * This is a no-op on the 601.
494 *
495 * flush_icache_range(unsigned long start, unsigned long stop)
496 */
497_GLOBAL(flush_icache_range)
498BEGIN_FTR_SECTION
499 blr /* for 601, do nothing */
500END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
501 li r5,L1_CACHE_LINE_SIZE-1
502 andc r3,r3,r5
503 subf r4,r3,r4
504 add r4,r4,r5
505 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
506 beqlr
507 mtctr r4
508 mr r6,r3
5091: dcbst 0,r3
510 addi r3,r3,L1_CACHE_LINE_SIZE
511 bdnz 1b
512 sync /* wait for dcbst's to get to ram */
513 mtctr r4
5142: icbi 0,r6
515 addi r6,r6,L1_CACHE_LINE_SIZE
516 bdnz 2b
517 sync /* additional sync needed on g4 */
518 isync
519 blr
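A typical caller looks like the hypothetical sketch below: after storing a new instruction, start and stop (exclusive) are passed so the modified bytes are written back from the d-cache and invalidated in the i-cache before they can be fetched. Only the flush_icache_range() prototype comes from this file; patch_insn_model() is made up for illustration.

    /* Hypothetical usage sketch; not kernel code. */
    void flush_icache_range(unsigned long start, unsigned long stop);

    static void patch_insn_model(unsigned int *site, unsigned int new_insn)
    {
            *site = new_insn;
            flush_icache_range((unsigned long)site,
                               (unsigned long)(site + 1));
    }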
520/*
521 * Write any modified data cache blocks out to memory.
522 * Does not invalidate the corresponding cache lines (especially for
523 * any corresponding instruction cache).
524 *
525 * clean_dcache_range(unsigned long start, unsigned long stop)
526 */
527_GLOBAL(clean_dcache_range)
528 li r5,L1_CACHE_LINE_SIZE-1
529 andc r3,r3,r5
530 subf r4,r3,r4
531 add r4,r4,r5
532 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
533 beqlr
534 mtctr r4
535
5361: dcbst 0,r3
537 addi r3,r3,L1_CACHE_LINE_SIZE
538 bdnz 1b
539 sync /* wait for dcbst's to get to ram */
540 blr
541
542/*
543 * Write any modified data cache blocks out to memory and invalidate them.
544 * Does not invalidate the corresponding instruction cache blocks.
545 *
546 * flush_dcache_range(unsigned long start, unsigned long stop)
547 */
548_GLOBAL(flush_dcache_range)
549 li r5,L1_CACHE_LINE_SIZE-1
550 andc r3,r3,r5
551 subf r4,r3,r4
552 add r4,r4,r5
553 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
554 beqlr
555 mtctr r4
556
5571: dcbf 0,r3
558 addi r3,r3,L1_CACHE_LINE_SIZE
559 bdnz 1b
560	sync				/* wait for dcbf's to get to ram */
561 blr
562
563/*
564 * Like above, but invalidate the D-cache. This is used by the 8xx
565 * to invalidate the cache so the PPC core doesn't get stale data
566 * from the CPM (no cache snooping here :-).
567 *
568 * invalidate_dcache_range(unsigned long start, unsigned long stop)
569 */
570_GLOBAL(invalidate_dcache_range)
571 li r5,L1_CACHE_LINE_SIZE-1
572 andc r3,r3,r5
573 subf r4,r3,r4
574 add r4,r4,r5
575 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
576 beqlr
577 mtctr r4
578
5791: dcbi 0,r3
580 addi r3,r3,L1_CACHE_LINE_SIZE
581 bdnz 1b
582 sync /* wait for dcbi's to get to ram */
583 blr
584
585#ifdef CONFIG_NOT_COHERENT_CACHE
586/*
587 * 40x cores have 8K or 16K dcache and 32 byte line size.
588 * 44x has a 32K dcache and 32 byte line size.
589 * 8xx has 1, 2, 4, 8K variants.
590 * For now, cover the worst case of the 44x.
591 * Must be called with external interrupts disabled.
592 */
593#define CACHE_NWAYS 64
594#define CACHE_NLINES 16
595
596_GLOBAL(flush_dcache_all)
597 li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
598 mtctr r4
599 lis r5, KERNELBASE@h
6001: lwz r3, 0(r5) /* Load one word from every line */
601 addi r5, r5, L1_CACHE_LINE_SIZE
602 bdnz 1b
603 blr
604#endif /* CONFIG_NOT_COHERENT_CACHE */
605
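The routine above flushes by displacement: reading twice the cache's worth of sequential lines starting at KERNELBASE forces every dirty line to be written back. A C model of that loop, with the constants copied from the worst case noted in the comment; names are illustrative and the real routine relies on KERNELBASE being cacheable and interrupts being off.

    /* Displacement-flush model; illustrative only. */
    #define MODEL_CACHE_NWAYS   64
    #define MODEL_CACHE_NLINES  16
    #define MODEL_LINE_SIZE     32

    static void flush_dcache_all_model(const volatile unsigned char *base)
    {
            long n = 2L * MODEL_CACHE_NWAYS * MODEL_CACHE_NLINES;

            while (n-- > 0) {
                    (void)*base;                 /* lwz r3,0(r5): displace one line */
                    base += MODEL_LINE_SIZE;
            }
    }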
606/*
607 * Flush a particular page from the data cache to RAM.
608 * Note: this is necessary because the instruction cache does *not*
609 * snoop from the data cache.
610 * This is a no-op on the 601 which has a unified cache.
611 *
612 * void __flush_dcache_icache(void *page)
613 */
614_GLOBAL(__flush_dcache_icache)
615BEGIN_FTR_SECTION
616 blr /* for 601, do nothing */
617END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
618 rlwinm r3,r3,0,0,19 /* Get page base address */
619 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
620 mtctr r4
621 mr r6,r3
6220: dcbst 0,r3 /* Write line to ram */
623 addi r3,r3,L1_CACHE_LINE_SIZE
624 bdnz 0b
625 sync
626 mtctr r4
6271: icbi 0,r6
628 addi r6,r6,L1_CACHE_LINE_SIZE
629 bdnz 1b
630 sync
631 isync
632 blr
633
634/*
635 * Flush a particular page from the data cache to RAM, identified
636 * by its physical address. We turn off the MMU so we can just use
637 * the physical address (this may be a highmem page without a kernel
638 * mapping).
639 *
640 * void __flush_dcache_icache_phys(unsigned long physaddr)
641 */
642_GLOBAL(__flush_dcache_icache_phys)
643BEGIN_FTR_SECTION
644 blr /* for 601, do nothing */
645END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
646 mfmsr r10
647 rlwinm r0,r10,0,28,26 /* clear DR */
648 mtmsr r0
649 isync
650 rlwinm r3,r3,0,0,19 /* Get page base address */
651 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
652 mtctr r4
653 mr r6,r3
6540: dcbst 0,r3 /* Write line to ram */
655 addi r3,r3,L1_CACHE_LINE_SIZE
656 bdnz 0b
657 sync
658 mtctr r4
6591: icbi 0,r6
660 addi r6,r6,L1_CACHE_LINE_SIZE
661 bdnz 1b
662 sync
663 mtmsr r10 /* restore DR */
664 isync
665 blr
666
667/*
668 * Clear pages using the dcbz instruction, which doesn't cause any
669 * memory traffic (except to write out any cache lines which get
670 * displaced). This only works on cacheable memory.
671 *
672 * void clear_pages(void *page, int order) ;
673 */
674_GLOBAL(clear_pages)
675 li r0,4096/L1_CACHE_LINE_SIZE
676 slw r0,r0,r4
677 mtctr r0
678#ifdef CONFIG_8xx
679 li r4, 0
6801: stw r4, 0(r3)
681 stw r4, 4(r3)
682 stw r4, 8(r3)
683 stw r4, 12(r3)
684#else
6851: dcbz 0,r3
686#endif
687 addi r3,r3,L1_CACHE_LINE_SIZE
688 bdnz 1b
689 blr
690
691/*
692 * Copy a whole page. We use the dcbz instruction on the destination
693 * to reduce memory traffic (it eliminates the unnecessary reads of
694 * the destination into cache). This requires that the destination
695 * is cacheable.
696 */
697#define COPY_16_BYTES \
698 lwz r6,4(r4); \
699 lwz r7,8(r4); \
700 lwz r8,12(r4); \
701 lwzu r9,16(r4); \
702 stw r6,4(r3); \
703 stw r7,8(r3); \
704 stw r8,12(r3); \
705 stwu r9,16(r3)
706
707_GLOBAL(copy_page)
708 addi r3,r3,-4
709 addi r4,r4,-4
710
711#ifdef CONFIG_8xx
712 /* don't use prefetch on 8xx */
713 li r0,4096/L1_CACHE_LINE_SIZE
714 mtctr r0
7151: COPY_16_BYTES
716 bdnz 1b
717 blr
718
719#else /* not 8xx, we can prefetch */
720 li r5,4
721
722#if MAX_COPY_PREFETCH > 1
723 li r0,MAX_COPY_PREFETCH
724 li r11,4
725 mtctr r0
72611: dcbt r11,r4
727 addi r11,r11,L1_CACHE_LINE_SIZE
728 bdnz 11b
729#else /* MAX_COPY_PREFETCH == 1 */
730 dcbt r5,r4
731 li r11,L1_CACHE_LINE_SIZE+4
732#endif /* MAX_COPY_PREFETCH */
733 li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH
734 crclr 4*cr0+eq
7352:
736 mtctr r0
7371:
738 dcbt r11,r4
739 dcbz r5,r3
740 COPY_16_BYTES
741#if L1_CACHE_LINE_SIZE >= 32
742 COPY_16_BYTES
743#if L1_CACHE_LINE_SIZE >= 64
744 COPY_16_BYTES
745 COPY_16_BYTES
746#if L1_CACHE_LINE_SIZE >= 128
747 COPY_16_BYTES
748 COPY_16_BYTES
749 COPY_16_BYTES
750 COPY_16_BYTES
751#endif
752#endif
753#endif
754 bdnz 1b
755 beqlr
756 crnot 4*cr0+eq,4*cr0+eq
757 li r0,MAX_COPY_PREFETCH
758 li r11,4
759 b 2b
760#endif /* CONFIG_8xx */
761
762/*
763 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
764 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
765 */
766_GLOBAL(atomic_clear_mask)
76710: lwarx r5,0,r4
768 andc r5,r5,r3
769 PPC405_ERR77(0,r4)
770 stwcx. r5,0,r4
771 bne- 10b
772 blr
773_GLOBAL(atomic_set_mask)
77410: lwarx r5,0,r4
775 or r5,r5,r3
776 PPC405_ERR77(0,r4)
777 stwcx. r5,0,r4
778 bne- 10b
779 blr
780
781/*
782 * I/O string operations
783 *
784 * insb(port, buf, len)
785 * outsb(port, buf, len)
786 * insw(port, buf, len)
787 * outsw(port, buf, len)
788 * insl(port, buf, len)
789 * outsl(port, buf, len)
790 * insw_ns(port, buf, len)
791 * outsw_ns(port, buf, len)
792 * insl_ns(port, buf, len)
793 * outsl_ns(port, buf, len)
794 *
795 * The *_ns versions don't do byte-swapping.
796 */
797_GLOBAL(_insb)
798 cmpwi 0,r5,0
799 mtctr r5
800 subi r4,r4,1
801 blelr-
80200: lbz r5,0(r3)
803 eieio
804 stbu r5,1(r4)
805 bdnz 00b
806 blr
807
808_GLOBAL(_outsb)
809 cmpwi 0,r5,0
810 mtctr r5
811 subi r4,r4,1
812 blelr-
81300: lbzu r5,1(r4)
814 stb r5,0(r3)
815 eieio
816 bdnz 00b
817 blr
818
819_GLOBAL(_insw)
820 cmpwi 0,r5,0
821 mtctr r5
822 subi r4,r4,2
823 blelr-
82400: lhbrx r5,0,r3
825 eieio
826 sthu r5,2(r4)
827 bdnz 00b
828 blr
829
830_GLOBAL(_outsw)
831 cmpwi 0,r5,0
832 mtctr r5
833 subi r4,r4,2
834 blelr-
83500: lhzu r5,2(r4)
836 eieio
837 sthbrx r5,0,r3
838 bdnz 00b
839 blr
840
841_GLOBAL(_insl)
842 cmpwi 0,r5,0
843 mtctr r5
844 subi r4,r4,4
845 blelr-
84600: lwbrx r5,0,r3
847 eieio
848 stwu r5,4(r4)
849 bdnz 00b
850 blr
851
852_GLOBAL(_outsl)
853 cmpwi 0,r5,0
854 mtctr r5
855 subi r4,r4,4
856 blelr-
85700: lwzu r5,4(r4)
858 stwbrx r5,0,r3
859 eieio
860 bdnz 00b
861 blr
862
863_GLOBAL(__ide_mm_insw)
864_GLOBAL(_insw_ns)
865 cmpwi 0,r5,0
866 mtctr r5
867 subi r4,r4,2
868 blelr-
86900: lhz r5,0(r3)
870 eieio
871 sthu r5,2(r4)
872 bdnz 00b
873 blr
874
875_GLOBAL(__ide_mm_outsw)
876_GLOBAL(_outsw_ns)
877 cmpwi 0,r5,0
878 mtctr r5
879 subi r4,r4,2
880 blelr-
88100: lhzu r5,2(r4)
882 sth r5,0(r3)
883 eieio
884 bdnz 00b
885 blr
886
887_GLOBAL(__ide_mm_insl)
888_GLOBAL(_insl_ns)
889 cmpwi 0,r5,0
890 mtctr r5
891 subi r4,r4,4
892 blelr-
89300: lwz r5,0(r3)
894 eieio
895 stwu r5,4(r4)
896 bdnz 00b
897 blr
898
899_GLOBAL(__ide_mm_outsl)
900_GLOBAL(_outsl_ns)
901 cmpwi 0,r5,0
902 mtctr r5
903 subi r4,r4,4
904 blelr-
90500: lwzu r5,4(r4)
906 stw r5,0(r3)
907 eieio
908 bdnz 00b
909 blr
910
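Each of these routines is the same count-controlled loop; the sketch below models _insw in C to make the pattern explicit. byterev16_model() stands in for what lhbrx does in a single instruction, and the comment marks where the assembly issues eieio; the names are illustrative, not kernel APIs.

    /* Illustrative C model of _insw(port, buf, len); not kernel code. */
    #include <stdint.h>

    static inline uint16_t byterev16_model(uint16_t v)    /* models lhbrx */
    {
            return (uint16_t)((v >> 8) | (v << 8));
    }

    static void insw_model(volatile uint16_t *port, uint16_t *buf, int count)
    {
            while (count-- > 0) {
                    uint16_t v = byterev16_model(*port);   /* lhbrx r5,0,r3 */
                    /* eieio here keeps successive device reads ordered */
                    *buf++ = v;                            /* sthu r5,2(r4) */
            }
    }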
911/*
912 * Extended precision shifts.
913 *
914 * Updated to be valid for shift counts from 0 to 63 inclusive.
915 * -- Gabriel
916 *
917 * R3/R4 has 64 bit value
918 * R5 has shift count
919 * result in R3/R4
920 *
921 * ashrdi3: arithmetic right shift (sign propagation)
922 * lshrdi3: logical right shift
923 * ashldi3: left shift
924 */
925_GLOBAL(__ashrdi3)
926 subfic r6,r5,32
927 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
928 addi r7,r5,32 # could be xori, or addi with -32
929 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
930 rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
931 sraw r7,r3,r7 # t2 = MSW >> (count-32)
932 or r4,r4,r6 # LSW |= t1
933 slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
934 sraw r3,r3,r5 # MSW = MSW >> count
935 or r4,r4,r7 # LSW |= t2
936 blr
937
938_GLOBAL(__ashldi3)
939 subfic r6,r5,32
940 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
941 addi r7,r5,32 # could be xori, or addi with -32
942 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
943 slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
944 or r3,r3,r6 # MSW |= t1
945 slw r4,r4,r5 # LSW = LSW << count
946 or r3,r3,r7 # MSW |= t2
947 blr
948
949_GLOBAL(__lshrdi3)
950 subfic r6,r5,32
951 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
952 addi r7,r5,32 # could be xori, or addi with -32
953 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
954 srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
955 or r4,r4,r6 # LSW |= t1
956 srw r3,r3,r5 # MSW = MSW >> count
957 or r4,r4,r7 # LSW |= t2
958 blr
959
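The three routines combine two 32-bit register operations exactly as the per-line comments describe. As a worked model, the C function below reproduces __lshrdi3 for shift counts 0-63; the extra guards mirror the PowerPC behaviour that srw/slw produce zero for shift amounts of 32-63, which plain C shifts would not give.

    /* Model of __lshrdi3: 64-bit logical right shift built from 32-bit halves.
     * msw:lsw correspond to r3:r4, count to r5.  Illustrative only. */
    static unsigned long long lshrdi3_model(unsigned int msw, unsigned int lsw,
                                            unsigned int count)
    {
            unsigned int t1, t2, new_msw, new_lsw;

            new_lsw = (count > 31) ? 0 : lsw >> count;                      /* srw r4,r4,r5 */
            t1      = (count == 0 || count > 31) ? 0 : msw << (32 - count); /* slw r6,r3,r6 */
            t2      = (count < 32) ? 0 : msw >> (count - 32);               /* srw r7,r3,r7 */
            new_msw = (count > 31) ? 0 : msw >> count;                      /* srw r3,r3,r5 */

            new_lsw |= t1 | t2;
            return ((unsigned long long)new_msw << 32) | new_lsw;
    }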
960_GLOBAL(abs)
961 srawi r4,r3,31
962 xor r3,r3,r4
963 sub r3,r3,r4
964 blr
965
966_GLOBAL(_get_SP)
967 mr r3,r1 /* Close enough */
968 blr
969
970/*
971 * These are used in the alignment trap handler when emulating
972 * single-precision loads and stores.
973 * We restore and save the fpscr so the task gets the same result
974 * and exceptions as if the cpu had performed the load or store.
975 */
976
977#ifdef CONFIG_PPC_FPU
978_GLOBAL(cvt_fd)
979 lfd 0,-4(r5) /* load up fpscr value */
980 mtfsf 0xff,0
981 lfs 0,0(r3)
982 stfd 0,0(r4)
983 mffs 0 /* save new fpscr value */
984 stfd 0,-4(r5)
985 blr
986
987_GLOBAL(cvt_df)
988 lfd 0,-4(r5) /* load up fpscr value */
989 mtfsf 0xff,0
990 lfd 0,0(r3)
991 stfs 0,0(r4)
992 mffs 0 /* save new fpscr value */
993 stfd 0,-4(r5)
994 blr
995#endif
996
997/*
998 * Create a kernel thread
999 * kernel_thread(fn, arg, flags)
1000 */
1001_GLOBAL(kernel_thread)
1002 stwu r1,-16(r1)
1003 stw r30,8(r1)
1004 stw r31,12(r1)
1005 mr r30,r3 /* function */
1006 mr r31,r4 /* argument */
1007 ori r3,r5,CLONE_VM /* flags */
1008 oris r3,r3,CLONE_UNTRACED>>16
1009 li r4,0 /* new sp (unused) */
1010 li r0,__NR_clone
1011 sc
1012 cmpwi 0,r3,0 /* parent or child? */
1013 bne 1f /* return if parent */
1014 li r0,0 /* make top-level stack frame */
1015 stwu r0,-16(r1)
1016 mtlr r30 /* fn addr in lr */
1017 mr r3,r31 /* load arg and call fn */
1018 PPC440EP_ERR42
1019 blrl
1020 li r0,__NR_exit /* exit if function returns */
1021 li r3,0
1022 sc
10231: lwz r30,8(r1)
1024 lwz r31,12(r1)
1025 addi r1,r1,16
1026 blr
1027
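At the C level the routine amounts to the sketch below: issue clone with CLONE_VM|CLONE_UNTRACED or'ed into the caller's flags, return the pid in the parent, and in the child call fn(arg) and then exit. The do_clone/do_exit parameters are stand-ins for the raw sc instructions; the real code must stay in assembly so the child starts on a clean, known stack frame.

    /* Illustrative model only; do_clone()/do_exit() stand in for the raw
     * __NR_clone and __NR_exit system calls issued with 'sc' above. */
    #define MODEL_CLONE_VM       0x00000100UL   /* ori  r3,r5,CLONE_VM           */
    #define MODEL_CLONE_UNTRACED 0x00800000UL   /* oris r3,r3,CLONE_UNTRACED>>16 */

    static long kernel_thread_model(int (*fn)(void *), void *arg, unsigned long flags,
                                    long (*do_clone)(unsigned long flags, void *new_sp),
                                    void (*do_exit)(int code))
    {
            long pid = do_clone(flags | MODEL_CLONE_VM | MODEL_CLONE_UNTRACED, 0);

            if (pid != 0)
                    return pid;     /* parent: pid of the new thread (or -errno) */

            fn(arg);                /* child: run the payload ...   */
            do_exit(0);             /* ... and exit when it returns */
            return 0;               /* not reached */
    }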
1028_GLOBAL(execve)
1029 li r0,__NR_execve
1030 sc
1031 bnslr
1032 neg r3,r3
1033 blr
1034
1035/*
1036 * This routine is just here to keep GCC happy - sigh...
1037 */
1038_GLOBAL(__main)
1039 blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
new file mode 100644
index 000000000000..8fe295693c09
--- /dev/null
+++ b/arch/powerpc/kernel/misc_64.S
@@ -0,0 +1,898 @@
1/*
2 * arch/powerpc/kernel/misc_64.S
3 *
4 * This file contains miscellaneous low-level functions.
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
8 * and Paul Mackerras.
9 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
10 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/sys.h>
21#include <asm/unistd.h>
22#include <asm/errno.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/cache.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/cputable.h>
29
30 .text
31
32/*
33 * Returns (address we are running at) - (address we were linked at)
34 * for use before the text and data are mapped to KERNELBASE.
35 */
36
37_GLOBAL(reloc_offset)
38 mflr r0
39 bl 1f
401: mflr r3
41 LOADADDR(r4,1b)
42 subf r3,r4,r3
43 mtlr r0
44 blr
45
46/*
47 * add_reloc_offset(x) returns x + reloc_offset().
48 */
49_GLOBAL(add_reloc_offset)
50 mflr r0
51 bl 1f
521: mflr r5
53 LOADADDR(r4,1b)
54 subf r5,r4,r5
55 add r3,r3,r5
56 mtlr r0
57 blr
58
59_GLOBAL(get_msr)
60 mfmsr r3
61 blr
62
63_GLOBAL(get_dar)
64 mfdar r3
65 blr
66
67_GLOBAL(get_srr0)
68 mfsrr0 r3
69 blr
70
71_GLOBAL(get_srr1)
72 mfsrr1 r3
73 blr
74
75_GLOBAL(get_sp)
76 mr r3,r1
77 blr
78
79#ifdef CONFIG_IRQSTACKS
80_GLOBAL(call_do_softirq)
81 mflr r0
82 std r0,16(r1)
83 stdu r1,THREAD_SIZE-112(r3)
84 mr r1,r3
85 bl .__do_softirq
86 ld r1,0(r1)
87 ld r0,16(r1)
88 mtlr r0
89 blr
90
91_GLOBAL(call_handle_IRQ_event)
92 mflr r0
93 std r0,16(r1)
94 stdu r1,THREAD_SIZE-112(r6)
95 mr r1,r6
96 bl .handle_IRQ_event
97 ld r1,0(r1)
98 ld r0,16(r1)
99 mtlr r0
100 blr
101#endif /* CONFIG_IRQSTACKS */
102
103 /*
104 * To be called by C code which needs to do some operations with MMU
105 * disabled. Note that interrupts have to be disabled by the caller
106 * prior to calling us. The code called _MUST_ be in the RMO of course
107 * and part of the linear mapping as we don't attempt to translate the
108 * stack pointer at all. The function is called with the stack switched
 109 * to this CPU's emergency stack.
110 *
111 * prototype is void *call_with_mmu_off(void *func, void *data);
112 *
113 * the called function is expected to be of the form
114 *
115 * void *called(void *data);
116 */
117_GLOBAL(call_with_mmu_off)
118 mflr r0 /* get link, save it on stackframe */
119 std r0,16(r1)
120	mr	r5,r1			/* save old stack ptr */
121 ld r1,PACAEMERGSP(r13) /* get emerg. stack */
122 subi r1,r1,STACK_FRAME_OVERHEAD
123 std r0,16(r1) /* save link on emerg. stack */
124 std r5,0(r1) /* save old stack ptr in backchain */
125 ld r3,0(r3) /* get to real function ptr (assume same TOC) */
126 bl 2f /* we need LR to return, continue at label 2 */
127
128 ld r0,16(r1) /* we return here from the call, get LR and */
129 ld r1,0(r1) /* .. old stack ptr */
130 mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
131 mfmsr r4
132 ori r4,r4,MSR_IR|MSR_DR
133 mtspr SPRN_SRR1,r4
134 rfid
135
1362: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
137 mr r3,r4 /* get parameter */
138 mfmsr r0
139 ori r0,r0,MSR_IR|MSR_DR
140 xori r0,r0,MSR_IR|MSR_DR
141 mtspr SPRN_SRR1,r0
142 rfid
143
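A hypothetical caller is sketched below; everything except the call_with_mmu_off() prototype is an assumption made for illustration. Interrupts must already be off, and the callee and its data must be reachable in the real-mode/linear mapping.

    /* Hypothetical usage sketch; realmode_probe() and its contents are made up. */
    void *call_with_mmu_off(void *func, void *data);    /* this file */

    static void *realmode_probe(void *data)
    {
            /* runs with MSR_IR/MSR_DR clear: only real-mode-safe accesses here */
            return data;
    }

    static void *run_probe(void *arg)
    {
            /* the routine dereferences 'func' once, so it expects the address of
             * a function descriptor (the natural function-pointer value here) */
            return call_with_mmu_off((void *)realmode_probe, arg);
    }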
144
145 .section ".toc","aw"
146PPC64_CACHES:
147 .tc ppc64_caches[TC],ppc64_caches
148 .section ".text"
149
150/*
151 * Write any modified data cache blocks out to memory
152 * and invalidate the corresponding instruction cache blocks.
153 *
154 * flush_icache_range(unsigned long start, unsigned long stop)
155 *
156 * flush all bytes from start through stop-1 inclusive
157 */
158
159_KPROBE(__flush_icache_range)
160
161/*
162 * Flush the data cache to memory
163 *
164 * Different systems have different cache line sizes
165 * and in some cases i-cache and d-cache line sizes differ from
166 * each other.
167 */
168 ld r10,PPC64_CACHES@toc(r2)
169 lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
170 addi r5,r7,-1
171 andc r6,r3,r5 /* round low to line bdy */
172 subf r8,r6,r4 /* compute length */
173 add r8,r8,r5 /* ensure we get enough */
174 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
175 srw. r8,r8,r9 /* compute line count */
176 beqlr /* nothing to do? */
177 mtctr r8
1781: dcbst 0,r6
179 add r6,r6,r7
180 bdnz 1b
181 sync
182
183/* Now invalidate the instruction cache */
184
185 lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
186 addi r5,r7,-1
187 andc r6,r3,r5 /* round low to line bdy */
188 subf r8,r6,r4 /* compute length */
189 add r8,r8,r5
190 lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
191 srw. r8,r8,r9 /* compute line count */
192 beqlr /* nothing to do? */
193 mtctr r8
1942: icbi 0,r6
195 add r6,r6,r7
196 bdnz 2b
197 isync
198 blr
199 .previous .text
200/*
201 * Like above, but only do the D-cache.
202 *
203 * flush_dcache_range(unsigned long start, unsigned long stop)
204 *
205 * flush all bytes from start to stop-1 inclusive
206 */
207_GLOBAL(flush_dcache_range)
208
209/*
210 * Flush the data cache to memory
211 *
212 * Different systems have different cache line sizes
213 */
214 ld r10,PPC64_CACHES@toc(r2)
215 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
216 addi r5,r7,-1
217 andc r6,r3,r5 /* round low to line bdy */
218 subf r8,r6,r4 /* compute length */
219 add r8,r8,r5 /* ensure we get enough */
220 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
221 srw. r8,r8,r9 /* compute line count */
222 beqlr /* nothing to do? */
223 mtctr r8
2240: dcbst 0,r6
225 add r6,r6,r7
226 bdnz 0b
227 sync
228 blr
229
230/*
231 * Like above, but works on non-mapped physical addresses.
232 * Use only for non-LPAR setups! It also assumes real mode
233 * is cacheable. Used for flushing out the DART before using
234 * it as uncacheable memory
235 *
236 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
237 *
238 * flush all bytes from start to stop-1 inclusive
239 */
240_GLOBAL(flush_dcache_phys_range)
241 ld r10,PPC64_CACHES@toc(r2)
242 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
243 addi r5,r7,-1
244 andc r6,r3,r5 /* round low to line bdy */
245 subf r8,r6,r4 /* compute length */
246 add r8,r8,r5 /* ensure we get enough */
247 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
248 srw. r8,r8,r9 /* compute line count */
249 beqlr /* nothing to do? */
250 mfmsr r5 /* Disable MMU Data Relocation */
251 ori r0,r5,MSR_DR
252 xori r0,r0,MSR_DR
253 sync
254 mtmsr r0
255 sync
256 isync
257 mtctr r8
2580: dcbst 0,r6
259 add r6,r6,r7
260 bdnz 0b
261 sync
262 isync
263 mtmsr r5 /* Re-enable MMU Data Relocation */
264 sync
265 isync
266 blr
267
268_GLOBAL(flush_inval_dcache_range)
269 ld r10,PPC64_CACHES@toc(r2)
270 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
271 addi r5,r7,-1
272 andc r6,r3,r5 /* round low to line bdy */
273 subf r8,r6,r4 /* compute length */
274 add r8,r8,r5 /* ensure we get enough */
275 lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
276 srw. r8,r8,r9 /* compute line count */
277 beqlr /* nothing to do? */
278 sync
279 isync
280 mtctr r8
2810: dcbf 0,r6
282 add r6,r6,r7
283 bdnz 0b
284 sync
285 isync
286 blr
287
288
289/*
290 * Flush a particular page from the data cache to RAM.
291 * Note: this is necessary because the instruction cache does *not*
292 * snoop from the data cache.
293 *
294 * void __flush_dcache_icache(void *page)
295 */
296_GLOBAL(__flush_dcache_icache)
297/*
298 * Flush the data cache to memory
299 *
300 * Different systems have different cache line sizes
301 */
302
303/* Flush the dcache */
304 ld r7,PPC64_CACHES@toc(r2)
305 clrrdi r3,r3,PAGE_SHIFT /* Page align */
306 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
307 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
308 mr r6,r3
309 mtctr r4
3100: dcbst 0,r6
311 add r6,r6,r5
312 bdnz 0b
313 sync
314
315/* Now invalidate the icache */
316
317 lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
318 lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
319 mtctr r4
3201: icbi 0,r3
321 add r3,r3,r5
322 bdnz 1b
323 isync
324 blr
325
326/*
327 * I/O string operations
328 *
329 * insb(port, buf, len)
330 * outsb(port, buf, len)
331 * insw(port, buf, len)
332 * outsw(port, buf, len)
333 * insl(port, buf, len)
334 * outsl(port, buf, len)
335 * insw_ns(port, buf, len)
336 * outsw_ns(port, buf, len)
337 * insl_ns(port, buf, len)
338 * outsl_ns(port, buf, len)
339 *
340 * The *_ns versions don't do byte-swapping.
341 */
342_GLOBAL(_insb)
343 cmpwi 0,r5,0
344 mtctr r5
345 subi r4,r4,1
346 blelr-
34700: lbz r5,0(r3)
348 eieio
349 stbu r5,1(r4)
350 bdnz 00b
351 twi 0,r5,0
352 isync
353 blr
354
355_GLOBAL(_outsb)
356 cmpwi 0,r5,0
357 mtctr r5
358 subi r4,r4,1
359 blelr-
36000: lbzu r5,1(r4)
361 stb r5,0(r3)
362 bdnz 00b
363 sync
364 blr
365
366_GLOBAL(_insw)
367 cmpwi 0,r5,0
368 mtctr r5
369 subi r4,r4,2
370 blelr-
37100: lhbrx r5,0,r3
372 eieio
373 sthu r5,2(r4)
374 bdnz 00b
375 twi 0,r5,0
376 isync
377 blr
378
379_GLOBAL(_outsw)
380 cmpwi 0,r5,0
381 mtctr r5
382 subi r4,r4,2
383 blelr-
38400: lhzu r5,2(r4)
385 sthbrx r5,0,r3
386 bdnz 00b
387 sync
388 blr
389
390_GLOBAL(_insl)
391 cmpwi 0,r5,0
392 mtctr r5
393 subi r4,r4,4
394 blelr-
39500: lwbrx r5,0,r3
396 eieio
397 stwu r5,4(r4)
398 bdnz 00b
399 twi 0,r5,0
400 isync
401 blr
402
403_GLOBAL(_outsl)
404 cmpwi 0,r5,0
405 mtctr r5
406 subi r4,r4,4
407 blelr-
40800: lwzu r5,4(r4)
409 stwbrx r5,0,r3
410 bdnz 00b
411 sync
412 blr
413
414/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
415_GLOBAL(_insw_ns)
416 cmpwi 0,r5,0
417 mtctr r5
418 subi r4,r4,2
419 blelr-
42000: lhz r5,0(r3)
421 eieio
422 sthu r5,2(r4)
423 bdnz 00b
424 twi 0,r5,0
425 isync
426 blr
427
428/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
429_GLOBAL(_outsw_ns)
430 cmpwi 0,r5,0
431 mtctr r5
432 subi r4,r4,2
433 blelr-
43400: lhzu r5,2(r4)
435 sth r5,0(r3)
436 bdnz 00b
437 sync
438 blr
439
440_GLOBAL(_insl_ns)
441 cmpwi 0,r5,0
442 mtctr r5
443 subi r4,r4,4
444 blelr-
44500: lwz r5,0(r3)
446 eieio
447 stwu r5,4(r4)
448 bdnz 00b
449 twi 0,r5,0
450 isync
451 blr
452
453_GLOBAL(_outsl_ns)
454 cmpwi 0,r5,0
455 mtctr r5
456 subi r4,r4,4
457 blelr-
45800: lwzu r5,4(r4)
459 stw r5,0(r3)
460 bdnz 00b
461 sync
462 blr
463
464
465_GLOBAL(cvt_fd)
466 lfd 0,0(r5) /* load up fpscr value */
467 mtfsf 0xff,0
468 lfs 0,0(r3)
469 stfd 0,0(r4)
470 mffs 0 /* save new fpscr value */
471 stfd 0,0(r5)
472 blr
473
474_GLOBAL(cvt_df)
475 lfd 0,0(r5) /* load up fpscr value */
476 mtfsf 0xff,0
477 lfd 0,0(r3)
478 stfs 0,0(r4)
479 mffs 0 /* save new fpscr value */
480 stfd 0,0(r5)
481 blr
482
483/*
484 * identify_cpu: identifies the CPU and calls its setup_cpu function
485 * In: r3 = base of the cpu_specs array
486 * r4 = address of cur_cpu_spec
487 * r5 = relocation offset
488 */
489_GLOBAL(identify_cpu)
490 mfpvr r7
4911:
492 lwz r8,CPU_SPEC_PVR_MASK(r3)
493 and r8,r8,r7
494 lwz r9,CPU_SPEC_PVR_VALUE(r3)
495 cmplw 0,r9,r8
496 beq 1f
497 addi r3,r3,CPU_SPEC_ENTRY_SIZE
498 b 1b
4991:
500 sub r0,r3,r5
501 std r0,0(r4)
502 ld r4,CPU_SPEC_SETUP(r3)
503 add r4,r4,r5
504 ld r4,0(r4)
505 add r4,r4,r5
506 mtctr r4
507 /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
508 mr r4,r3
509 mr r3,r5
510 bctr
511
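The scan loop is the only part with any subtlety; the C model below mirrors it, with struct and field names chosen to echo the CPU_SPEC_* asm-offsets used above rather than the kernel's actual definitions.

    /* Illustrative model of the PVR scan in identify_cpu; not kernel code. */
    struct cpu_spec_model {
            unsigned int pvr_mask;                       /* CPU_SPEC_PVR_MASK  */
            unsigned int pvr_value;                      /* CPU_SPEC_PVR_VALUE */
            void (*cpu_setup)(unsigned long offset, struct cpu_spec_model *spec);
    };

    static struct cpu_spec_model *
    identify_cpu_model(struct cpu_spec_model *specs, unsigned int pvr)
    {
            /* the real table ends with a catch-all entry, so this terminates */
            while ((pvr & specs->pvr_mask) != specs->pvr_value)
                    specs++;
            return specs;           /* caller records it and calls ->cpu_setup() */
    }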
512/*
513 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
514 * and writes nops over sections of code that don't apply to this cpu.
515 * r3 = data offset (not changed)
516 */
517_GLOBAL(do_cpu_ftr_fixups)
518 /* Get CPU 0 features */
519 LOADADDR(r6,cur_cpu_spec)
520 sub r6,r6,r3
521 ld r4,0(r6)
522 sub r4,r4,r3
523 ld r4,CPU_SPEC_FEATURES(r4)
524 /* Get the fixup table */
525 LOADADDR(r6,__start___ftr_fixup)
526 sub r6,r6,r3
527 LOADADDR(r7,__stop___ftr_fixup)
528 sub r7,r7,r3
529 /* Do the fixup */
5301: cmpld r6,r7
531 bgelr
532 addi r6,r6,32
533 ld r8,-32(r6) /* mask */
534 and r8,r8,r4
535 ld r9,-24(r6) /* value */
536 cmpld r8,r9
537 beq 1b
538 ld r8,-16(r6) /* section begin */
539 ld r9,-8(r6) /* section end */
540 subf. r9,r8,r9
541 beq 1b
542 /* write nops over the section of code */
543 /* todo: if large section, add a branch at the start of it */
544 srwi r9,r9,2
545 mtctr r9
546 sub r8,r8,r3
547 lis r0,0x60000000@h /* nop */
5483: stw r0,0(r8)
549 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
550 beq 2f
551 dcbst 0,r8 /* suboptimal, but simpler */
552 sync
553 icbi 0,r8
5542: addi r8,r8,4
555 bdnz 3b
556 sync /* additional sync needed on g4 */
557 isync
558 b 1b
559
560#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
561/*
562 * Do an IO access in real mode
563 */
564_GLOBAL(real_readb)
565 mfmsr r7
566 ori r0,r7,MSR_DR
567 xori r0,r0,MSR_DR
568 sync
569 mtmsrd r0
570 sync
571 isync
572 mfspr r6,SPRN_HID4
573 rldicl r5,r6,32,0
574 ori r5,r5,0x100
575 rldicl r5,r5,32,0
576 sync
577 mtspr SPRN_HID4,r5
578 isync
579 slbia
580 isync
581 lbz r3,0(r3)
582 sync
583 mtspr SPRN_HID4,r6
584 isync
585 slbia
586 isync
587 mtmsrd r7
588 sync
589 isync
590 blr
591
592 /*
593 * Do an IO access in real mode
594 */
595_GLOBAL(real_writeb)
596 mfmsr r7
597 ori r0,r7,MSR_DR
598 xori r0,r0,MSR_DR
599 sync
600 mtmsrd r0
601 sync
602 isync
603 mfspr r6,SPRN_HID4
604 rldicl r5,r6,32,0
605 ori r5,r5,0x100
606 rldicl r5,r5,32,0
607 sync
608 mtspr SPRN_HID4,r5
609 isync
610 slbia
611 isync
612 stb r3,0(r4)
613 sync
614 mtspr SPRN_HID4,r6
615 isync
616 slbia
617 isync
618 mtmsrd r7
619 sync
620 isync
621 blr
622#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
623
624/*
625 * Create a kernel thread
626 * kernel_thread(fn, arg, flags)
627 */
628_GLOBAL(kernel_thread)
629 std r29,-24(r1)
630 std r30,-16(r1)
631 stdu r1,-STACK_FRAME_OVERHEAD(r1)
632 mr r29,r3
633 mr r30,r4
634 ori r3,r5,CLONE_VM /* flags */
635 oris r3,r3,(CLONE_UNTRACED>>16)
636 li r4,0 /* new sp (unused) */
637 li r0,__NR_clone
638 sc
639 cmpdi 0,r3,0 /* parent or child? */
640 bne 1f /* return if parent */
641 li r0,0
642 stdu r0,-STACK_FRAME_OVERHEAD(r1)
643 ld r2,8(r29)
644 ld r29,0(r29)
645 mtlr r29 /* fn addr in lr */
646 mr r3,r30 /* load arg and call fn */
647 blrl
648 li r0,__NR_exit /* exit after child exits */
649 li r3,0
650 sc
6511: addi r1,r1,STACK_FRAME_OVERHEAD
652 ld r29,-24(r1)
653 ld r30,-16(r1)
654 blr
655
656/*
657 * disable_kernel_fp()
658 * Disable the FPU.
659 */
660_GLOBAL(disable_kernel_fp)
661 mfmsr r3
662 rldicl r0,r3,(63-MSR_FP_LG),1
663 rldicl r3,r0,(MSR_FP_LG+1),0
664 mtmsrd r3 /* disable use of fpu now */
665 isync
666 blr
667
668#ifdef CONFIG_ALTIVEC
669
670#if 0 /* this has no callers for now */
671/*
672 * disable_kernel_altivec()
673 * Disable the VMX.
674 */
675_GLOBAL(disable_kernel_altivec)
676 mfmsr r3
677 rldicl r0,r3,(63-MSR_VEC_LG),1
678 rldicl r3,r0,(MSR_VEC_LG+1),0
679 mtmsrd r3 /* disable use of VMX now */
680 isync
681 blr
682#endif /* 0 */
683
684/*
685 * giveup_altivec(tsk)
686 * Disable VMX for the task given as the argument,
687 * and save the vector registers in its thread_struct.
688 * Enables the VMX for use in the kernel on return.
689 */
690_GLOBAL(giveup_altivec)
691 mfmsr r5
692 oris r5,r5,MSR_VEC@h
693 mtmsrd r5 /* enable use of VMX now */
694 isync
695 cmpdi 0,r3,0
696 beqlr- /* if no previous owner, done */
697 addi r3,r3,THREAD /* want THREAD of task */
698 ld r5,PT_REGS(r3)
699 cmpdi 0,r5,0
700 SAVE_32VRS(0,r4,r3)
701 mfvscr vr0
702 li r4,THREAD_VSCR
703 stvx vr0,r4,r3
704 beq 1f
705 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
706 lis r3,MSR_VEC@h
707	andc	r4,r4,r3		/* disable VMX for previous task */
708 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7091:
710#ifndef CONFIG_SMP
711 li r5,0
712 ld r4,last_task_used_altivec@got(r2)
713 std r5,0(r4)
714#endif /* CONFIG_SMP */
715 blr
716
717#endif /* CONFIG_ALTIVEC */
718
719_GLOBAL(__setup_cpu_power3)
720 blr
721
722_GLOBAL(execve)
723 li r0,__NR_execve
724 sc
725 bnslr
726 neg r3,r3
727 blr
728
729/* kexec_wait(phys_cpu)
730 *
731 * wait for the flag to change, indicating this kernel is going away and
732 * the slave code for the next one is at addresses 0 to 0x100.
733 *
734 * This is used by all slaves.
735 *
736 * Physical (hardware) cpu id should be in r3.
737 */
738_GLOBAL(kexec_wait)
739 bl 1f
7401: mflr r5
741 addi r5,r5,kexec_flag-1b
742
74399: HMT_LOW
744#ifdef CONFIG_KEXEC /* use no memory without kexec */
745 lwz r4,0(r5)
746 cmpwi 0,r4,0
747 bnea 0x60
748#endif
749 b 99b
750
751/* this can be in text because we won't change it until we are
752 * running in real mode anyway
753 */
754kexec_flag:
755 .long 0
756
757
758#ifdef CONFIG_KEXEC
759
760/* kexec_smp_wait(void)
761 *
762 * call with interrupts off
763 * note: this is a terminal routine, it does not save lr
764 *
765 * get phys id from paca
766 * set paca id to -1 to say we got here
767 * switch to real mode
768 * join other cpus in kexec_wait(phys_id)
769 */
770_GLOBAL(kexec_smp_wait)
771 lhz r3,PACAHWCPUID(r13)
772 li r4,-1
773 sth r4,PACAHWCPUID(r13) /* let others know we left */
774 bl real_mode
775 b .kexec_wait
776
777/*
778 * switch to real mode (turn mmu off)
779 * we use the early kernel trick that the hardware ignores bits
780 * 0 and 1 (big endian) of the effective address in real mode
781 *
782 * don't overwrite r3 here, it is live for kexec_wait above.
783 */
784real_mode: /* assume normal blr return */
7851: li r9,MSR_RI
786 li r10,MSR_DR|MSR_IR
787 mflr r11 /* return address to SRR0 */
788 mfmsr r12
789 andc r9,r12,r9
790 andc r10,r12,r10
791
792 mtmsrd r9,1
793 mtspr SPRN_SRR1,r10
794 mtspr SPRN_SRR0,r11
795 rfid
796
797
798/*
799 * kexec_sequence(newstack, start, image, control, clear_all())
800 *
801 * does the grungy work with stack switching and real mode switches
802 * also does simple calls to other code
803 */
804
805_GLOBAL(kexec_sequence)
806 mflr r0
807 std r0,16(r1)
808
809 /* switch stacks to newstack -- &kexec_stack.stack */
810 stdu r1,THREAD_SIZE-112(r3)
811 mr r1,r3
812
813 li r0,0
814 std r0,16(r1)
815
816 /* save regs for local vars on new stack.
817 * yes, we won't go back, but ...
818 */
819 std r31,-8(r1)
820 std r30,-16(r1)
821 std r29,-24(r1)
822 std r28,-32(r1)
823 std r27,-40(r1)
824 std r26,-48(r1)
825 std r25,-56(r1)
826
827 stdu r1,-112-64(r1)
828
829 /* save args into preserved regs */
830 mr r31,r3 /* newstack (both) */
831 mr r30,r4 /* start (real) */
832 mr r29,r5 /* image (virt) */
833 mr r28,r6 /* control, unused */
834 mr r27,r7 /* clear_all() fn desc */
835 mr r26,r8 /* spare */
836 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
837
838 /* disable interrupts, we are overwriting kernel data next */
839 mfmsr r3
840 rlwinm r3,r3,0,17,15
841 mtmsrd r3,1
842
843 /* copy dest pages, flush whole dest image */
844 mr r3,r29
845 bl .kexec_copy_flush /* (image) */
846
847 /* turn off mmu */
848 bl real_mode
849
850 /* clear out hardware hash page table and tlb */
851 ld r5,0(r27) /* deref function descriptor */
852 mtctr r5
853 bctrl /* ppc_md.hash_clear_all(void); */
854
855/*
856 * kexec image calling is:
857 * the first 0x100 bytes of the entry point are copied to 0
858 *
859 * all slaves branch to slave = 0x60 (absolute)
860 * slave(phys_cpu_id);
861 *
862 * master goes to start = entry point
863 * start(phys_cpu_id, start, 0);
864 *
865 *
866 * a wrapper is needed to call existing kernels; here is an approximate
867 * description of one method:
868 *
869 * v2: (2.6.10)
870 * start will be near the boot_block (maybe 0x100 bytes before it?)
871 * it will have a 0x60, which will b to boot_block, where it will wait
872 * and 0 will store phys into struct boot-block and load r3 from there,
873 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
874 *
875 * v1: (2.6.9)
876 * boot block will have all cpus scanning device tree to see if they
877 * are the boot cpu ?????
878 * other device tree differences (prop sizes, va vs pa, etc)...
879 */
880
881 /* copy 0x100 bytes starting at start to 0 */
882 li r3,0
883 mr r4,r30
884 li r5,0x100
885 li r6,0
886 bl .copy_and_flush /* (dest, src, copy limit, start offset) */
8871: /* assume normal blr return */
888
889 /* release other cpus to the new kernel secondary start at 0x60 */
890 mflr r5
891 li r6,1
892 stw r6,kexec_flag-1b(5)
893 mr r3,r25 # my phys cpu
894 mr r4,r30 # start, aka phys mem offset
895 mtlr 4
896 li r5,0
897 blr /* image->start(physid, image->start, 0); */
898#endif /* CONFIG_KEXEC */
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
new file mode 100644
index 000000000000..82d1fedb441c
--- /dev/null
+++ b/arch/powerpc/kernel/systbl.S
@@ -0,0 +1,323 @@
1/*
2 * This file contains the table of syscall-handling functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <asm/ppc_asm.h>
19
20#ifdef CONFIG_PPC64
21#define SYSCALL(func) .llong .sys_##func,.sys_##func
22#define SYSCALL32(func) .llong .sys_##func,.sys32_##func
23#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func
24#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func
25#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall
26#define SYS32ONLY(func) .llong .sys_ni_syscall,.sys32_##func
27#define SYSX(f, f3264, f32) .llong .f,.f3264
28#else
29#define SYSCALL(func) .long sys_##func
30#define SYSCALL32(func) .long sys_##func
31#define COMPAT_SYS(func) .long sys_##func
32#define PPC_SYS(func) .long ppc_##func
33#define OLDSYS(func) .long sys_##func
34#define SYS32ONLY(func) .long sys_##func
35#define SYSX(f, f3264, f32) .long f32
36#endif
37
38#ifdef CONFIG_PPC64
39#define sys_sigpending sys_ni_syscall
40#define sys_old_getrlimit sys_ni_syscall
41#else
42#define ppc_rtas sys_ni_syscall
43#endif
44
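On PPC64 each line of this table therefore emits a pair of 64-bit entries, the native handler followed by the handler used for 32-bit tasks, while on PPC32 each line emits a single 32-bit entry. The sketch below models the two-column layout only; how entry_64.S actually indexes the table is not reproduced here, and all names are illustrative.

    /* Two-column layout model for the PPC64 case; not kernel code. */
    typedef long (*syscall_fn_model)(void);

    struct syscall_pair_model {
            syscall_fn_model native;    /* first  .llong: .sys_##func            */
            syscall_fn_model compat;    /* second .llong: .compat_sys_/.sys32_## */
    };

    static syscall_fn_model lookup_model(const struct syscall_pair_model *table,
                                         unsigned int nr, int is_32bit_task)
    {
            return is_32bit_task ? table[nr].compat : table[nr].native;
    }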
45_GLOBAL(sys_call_table)
46SYSCALL(restart_syscall)
47SYSCALL(exit)
48PPC_SYS(fork)
49SYSCALL(read)
50SYSCALL(write)
51COMPAT_SYS(open)
52SYSCALL(close)
53SYSCALL32(waitpid)
54SYSCALL32(creat)
55SYSCALL(link)
56SYSCALL(unlink)
57SYSCALL32(execve)
58SYSCALL(chdir)
59SYSX(sys64_time,compat_sys_time,sys_time)
60SYSCALL(mknod)
61SYSCALL(chmod)
62SYSCALL(lchown)
63SYSCALL(ni_syscall)
64OLDSYS(stat)
65SYSX(sys_lseek,ppc32_lseek,sys_lseek)
66SYSCALL(getpid)
67COMPAT_SYS(mount)
68SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
69SYSCALL(setuid)
70SYSCALL(getuid)
71COMPAT_SYS(stime)
72SYSCALL32(ptrace)
73SYSCALL(alarm)
74OLDSYS(fstat)
75SYSCALL32(pause)
76COMPAT_SYS(utime)
77SYSCALL(ni_syscall)
78SYSCALL(ni_syscall)
79SYSCALL32(access)
80SYSCALL32(nice)
81SYSCALL(ni_syscall)
82SYSCALL(sync)
83SYSCALL32(kill)
84SYSCALL(rename)
85SYSCALL32(mkdir)
86SYSCALL(rmdir)
87SYSCALL(dup)
88SYSCALL(pipe)
89COMPAT_SYS(times)
90SYSCALL(ni_syscall)
91SYSCALL(brk)
92SYSCALL(setgid)
93SYSCALL(getgid)
94SYSCALL(signal)
95SYSCALL(geteuid)
96SYSCALL(getegid)
97SYSCALL(acct)
98SYSCALL(umount)
99SYSCALL(ni_syscall)
100COMPAT_SYS(ioctl)
101COMPAT_SYS(fcntl)
102SYSCALL(ni_syscall)
103SYSCALL32(setpgid)
104SYSCALL(ni_syscall)
105SYS32ONLY(olduname)
106SYSCALL32(umask)
107SYSCALL(chroot)
108SYSCALL(ustat)
109SYSCALL(dup2)
110SYSCALL(getppid)
111SYSCALL(getpgrp)
112SYSCALL(setsid)
113SYS32ONLY(sigaction)
114SYSCALL(sgetmask)
115SYSCALL32(ssetmask)
116SYSCALL(setreuid)
117SYSCALL(setregid)
118SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
119COMPAT_SYS(sigpending)
120SYSCALL32(sethostname)
121COMPAT_SYS(setrlimit)
122COMPAT_SYS(old_getrlimit)
123COMPAT_SYS(getrusage)
124SYSCALL32(gettimeofday)
125SYSCALL32(settimeofday)
126SYSCALL32(getgroups)
127SYSCALL32(setgroups)
128SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
129SYSCALL(symlink)
130OLDSYS(lstat)
131SYSCALL32(readlink)
132SYSCALL(uselib)
133SYSCALL(swapon)
134SYSCALL(reboot)
135SYSX(sys_ni_syscall,old32_readdir,old_readdir)
136SYSCALL(mmap)
137SYSCALL(munmap)
138SYSCALL(truncate)
139SYSCALL(ftruncate)
140SYSCALL(fchmod)
141SYSCALL(fchown)
142SYSCALL32(getpriority)
143SYSCALL32(setpriority)
144SYSCALL(ni_syscall)
145COMPAT_SYS(statfs)
146COMPAT_SYS(fstatfs)
147SYSCALL(ni_syscall)
148COMPAT_SYS(socketcall)
149SYSCALL32(syslog)
150COMPAT_SYS(setitimer)
151COMPAT_SYS(getitimer)
152COMPAT_SYS(newstat)
153COMPAT_SYS(newlstat)
154COMPAT_SYS(newfstat)
155SYSX(sys_ni_syscall,sys32_uname,sys_uname)
156SYSCALL(ni_syscall)
157SYSCALL(vhangup)
158SYSCALL(ni_syscall)
159SYSCALL(ni_syscall)
160COMPAT_SYS(wait4)
161SYSCALL(swapoff)
162SYSCALL32(sysinfo)
163SYSCALL32(ipc)
164SYSCALL(fsync)
165SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
166PPC_SYS(clone)
167SYSCALL32(setdomainname)
168SYSX(ppc64_newuname,ppc64_newuname,sys_newuname)
169SYSCALL(ni_syscall)
170SYSCALL32(adjtimex)
171SYSCALL(mprotect)
172SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
173SYSCALL(ni_syscall)
174SYSCALL(init_module)
175SYSCALL(delete_module)
176SYSCALL(ni_syscall)
177SYSCALL(quotactl)
178SYSCALL32(getpgid)
179SYSCALL(fchdir)
180SYSCALL(bdflush)
181SYSCALL32(sysfs)
182SYSX(ppc64_personality,ppc64_personality,sys_personality)
183SYSCALL(ni_syscall)
184SYSCALL(setfsuid)
185SYSCALL(setfsgid)
186SYSCALL(llseek)
187SYSCALL32(getdents)
188SYSX(sys_select,ppc32_select,ppc_select)
189SYSCALL(flock)
190SYSCALL(msync)
191COMPAT_SYS(readv)
192COMPAT_SYS(writev)
193SYSCALL32(getsid)
194SYSCALL(fdatasync)
195SYSCALL32(sysctl)
196SYSCALL(mlock)
197SYSCALL(munlock)
198SYSCALL(mlockall)
199SYSCALL(munlockall)
200SYSCALL32(sched_setparam)
201SYSCALL32(sched_getparam)
202SYSCALL32(sched_setscheduler)
203SYSCALL32(sched_getscheduler)
204SYSCALL(sched_yield)
205SYSCALL32(sched_get_priority_max)
206SYSCALL32(sched_get_priority_min)
207SYSCALL32(sched_rr_get_interval)
208COMPAT_SYS(nanosleep)
209SYSCALL(mremap)
210SYSCALL(setresuid)
211SYSCALL(getresuid)
212SYSCALL(ni_syscall)
213SYSCALL(poll)
214COMPAT_SYS(nfsservctl)
215SYSCALL(setresgid)
216SYSCALL(getresgid)
217SYSCALL32(prctl)
218SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
219SYSCALL32(rt_sigaction)
220SYSCALL32(rt_sigprocmask)
221SYSCALL32(rt_sigpending)
222COMPAT_SYS(rt_sigtimedwait)
223SYSCALL32(rt_sigqueueinfo)
224SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
225SYSCALL32(pread64)
226SYSCALL32(pwrite64)
227SYSCALL(chown)
228SYSCALL(getcwd)
229SYSCALL(capget)
230SYSCALL(capset)
231SYSCALL32(sigaltstack)
232SYSX(sys_sendfile64,sys32_sendfile,sys_sendfile)
233SYSCALL(ni_syscall)
234SYSCALL(ni_syscall)
235PPC_SYS(vfork)
236COMPAT_SYS(getrlimit)
237SYSCALL32(readahead)
238SYS32ONLY(mmap2)
239SYS32ONLY(truncate64)
240SYS32ONLY(ftruncate64)
241SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
242SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
243SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
244SYSCALL32(pciconfig_read)
245SYSCALL32(pciconfig_write)
246SYSCALL32(pciconfig_iobase)
247SYSCALL(ni_syscall)
248SYSCALL(getdents64)
249SYSCALL(pivot_root)
250SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
251SYSCALL(madvise)
252SYSCALL(mincore)
253SYSCALL(gettid)
254SYSCALL(tkill)
255SYSCALL(setxattr)
256SYSCALL(lsetxattr)
257SYSCALL(fsetxattr)
258SYSCALL(getxattr)
259SYSCALL(lgetxattr)
260SYSCALL(fgetxattr)
261SYSCALL(listxattr)
262SYSCALL(llistxattr)
263SYSCALL(flistxattr)
264SYSCALL(removexattr)
265SYSCALL(lremovexattr)
266SYSCALL(fremovexattr)
267COMPAT_SYS(futex)
268COMPAT_SYS(sched_setaffinity)
269COMPAT_SYS(sched_getaffinity)
270SYSCALL(ni_syscall)
271SYSCALL(ni_syscall)
272SYS32ONLY(sendfile64)
273COMPAT_SYS(io_setup)
274SYSCALL(io_destroy)
275COMPAT_SYS(io_getevents)
276COMPAT_SYS(io_submit)
277SYSCALL(io_cancel)
278SYSCALL(set_tid_address)
279SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
280SYSCALL(exit_group)
281SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
282SYSCALL(epoll_create)
283SYSCALL(epoll_ctl)
284SYSCALL(epoll_wait)
285SYSCALL(remap_file_pages)
286SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
287COMPAT_SYS(timer_settime)
288COMPAT_SYS(timer_gettime)
289SYSCALL(timer_getoverrun)
290SYSCALL(timer_delete)
291COMPAT_SYS(clock_settime)
292COMPAT_SYS(clock_gettime)
293COMPAT_SYS(clock_getres)
294COMPAT_SYS(clock_nanosleep)
295SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
296SYSCALL32(tgkill)
297SYSCALL32(utimes)
298COMPAT_SYS(statfs64)
299COMPAT_SYS(fstatfs64)
300SYSX(sys_ni_syscall, ppc32_fadvise64_64, sys_fadvise64_64)
301PPC_SYS(rtas)
302OLDSYS(debug_setcontext)
303SYSCALL(ni_syscall)
304SYSCALL(ni_syscall)
305COMPAT_SYS(mbind)
306COMPAT_SYS(get_mempolicy)
307COMPAT_SYS(set_mempolicy)
308COMPAT_SYS(mq_open)
309SYSCALL(mq_unlink)
310COMPAT_SYS(mq_timedsend)
311COMPAT_SYS(mq_timedreceive)
312COMPAT_SYS(mq_notify)
313COMPAT_SYS(mq_getsetattr)
314COMPAT_SYS(kexec_load)
315SYSCALL32(add_key)
316SYSCALL32(request_key)
317COMPAT_SYS(keyctl)
318COMPAT_SYS(waitid)
319SYSCALL32(ioprio_set)
320SYSCALL32(ioprio_get)
321SYSCALL(inotify_init)
322SYSCALL(inotify_add_watch)
323SYSCALL(inotify_rm_watch)