author      Paul Mackerras <paulus@samba.org>   2005-10-27 23:50:39 -0400
committer   Paul Mackerras <paulus@samba.org>   2005-10-27 23:50:39 -0400
commit      22b280324acbfd1a1f2374055d9bb39e7069e2bf (patch)
tree        135f45548120ffc198ba0b2105b83f7f805d123a /arch
parent      4542437679de448de0f75bc901dab380d6a5bc5b (diff)
parent      299f6ce491aa28515d86f29af2779cbfdc7a4790 (diff)
Merge git://oak/home/sfr/kernels/iseries/work/
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/kernel/Makefile      8
-rw-r--r--   arch/powerpc/kernel/setup_64.c   18
-rw-r--r--   arch/powerpc/lib/Makefile        12
-rw-r--r--   arch/ppc64/Makefile               2
-rw-r--r--   arch/ppc64/kernel/Makefile        2
-rw-r--r--   arch/ppc64/kernel/entry.S       845
-rw-r--r--   arch/ppc64/kernel/misc.S        563
-rw-r--r--   arch/ppc64/lib/Makefile          15
-rw-r--r--   arch/ppc64/lib/checksum.S       229
-rw-r--r--   arch/ppc64/lib/copypage.S       121
-rw-r--r--   arch/ppc64/lib/copyuser.S       576
-rw-r--r--   arch/ppc64/lib/e2a.c            108
-rw-r--r--   arch/ppc64/lib/locks.c           95
-rw-r--r--   arch/ppc64/lib/memcpy.S         172
-rw-r--r--   arch/ppc64/lib/sstep.c          141
-rw-r--r--   arch/ppc64/lib/strcase.c         31
-rw-r--r--   arch/ppc64/lib/string.S         106
-rw-r--r--   arch/ppc64/lib/usercopy.c        41
18 files changed, 21 insertions, 3064 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 8421f1975dad..572d4f5eaacb 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -13,7 +13,7 @@ endif
13 | obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ | 13 | obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ |
14 | signal_32.o pmc.o | 14 | signal_32.o pmc.o |
15 | obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ | 15 | obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ |
16 | ptrace32.o | 16 | ptrace32.o systbl.o |
17 | obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o | 17 | obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o |
18 | obj-$(CONFIG_POWER4) += idle_power4.o | 18 | obj-$(CONFIG_POWER4) += idle_power4.o |
19 | obj-$(CONFIG_PPC_OF) += of_device.o | 19 | obj-$(CONFIG_PPC_OF) += of_device.o |
@@ -28,12 +28,11 @@ extra-$(CONFIG_40x) := head_4xx.o
28 | extra-$(CONFIG_44x) := head_44x.o | 28 | extra-$(CONFIG_44x) := head_44x.o |
29 | extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o | 29 | extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o |
30 | extra-$(CONFIG_8xx) := head_8xx.o | 30 | extra-$(CONFIG_8xx) := head_8xx.o |
31 | extra-$(CONFIG_PPC64) += entry_64.o | ||
32 | extra-y += vmlinux.lds | 31 | extra-y += vmlinux.lds |
33 | 32 | ||
34 | obj-y += process.o init_task.o time.o \ | 33 | obj-y += process.o init_task.o time.o \ |
35 | prom.o systbl.o traps.o setup-common.o | 34 | prom.o traps.o setup-common.o |
36 | obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o | 35 | obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o |
37 | obj-$(CONFIG_PPC64) += misc_64.o | 36 | obj-$(CONFIG_PPC64) += misc_64.o |
38 | obj-$(CONFIG_PPC_OF) += prom_init.o | 37 | obj-$(CONFIG_PPC_OF) += prom_init.o |
39 | obj-$(CONFIG_MODULES) += ppc_ksyms.o | 38 | obj-$(CONFIG_MODULES) += ppc_ksyms.o |
@@ -54,3 +53,4 @@ obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
54 | endif | 53 | endif |
55 | 54 | ||
56 | extra-$(CONFIG_PPC_FPU) += fpu.o | 55 | extra-$(CONFIG_PPC_FPU) += fpu.o |
56 | extra-$(CONFIG_PPC64) += entry_64.o | ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a8f7ff5ab1a4..950e6f0fea98 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -701,17 +701,6 @@ static void __init emergency_stack_init(void)
701 | limit)) + PAGE_SIZE; | 701 | limit)) + PAGE_SIZE; |
702 | } | 702 | } |
703 | 703 | ||
704 | extern unsigned long *sys_call_table; | ||
705 | extern unsigned long sys_ni_syscall; | ||
706 | #ifdef CONFIG_PPC_MERGE | ||
707 | #define SYS_CALL_ENTRY64(i) sys_call_table[(i) * 2] | ||
708 | #define SYS_CALL_ENTRY32(i) sys_call_table[(i) * 2 + 1] | ||
709 | #else | ||
710 | extern unsigned long *sys_call_table32; | ||
711 | #define SYS_CALL_ENTRY64(i) sys_call_table[(i)] | ||
712 | #define SYS_CALL_ENTRY32(i) sys_call_table32[(i)] | ||
713 | #endif | ||
714 | |||
715 | /* | 704 | /* |
716 | * Called from setup_arch to initialize the bitmap of available | 705 | * Called from setup_arch to initialize the bitmap of available |
717 | * syscalls in the systemcfg page | 706 | * syscalls in the systemcfg page |
@@ -719,14 +708,17 @@ extern unsigned long *sys_call_table32;
719 | void __init setup_syscall_map(void) | 708 | void __init setup_syscall_map(void) |
720 | { | 709 | { |
721 | unsigned int i, count64 = 0, count32 = 0; | 710 | unsigned int i, count64 = 0, count32 = 0; |
711 | extern unsigned long *sys_call_table; | ||
712 | extern unsigned long sys_ni_syscall; | ||
713 | |||
722 | 714 | ||
723 | for (i = 0; i < __NR_syscalls; i++) { | 715 | for (i = 0; i < __NR_syscalls; i++) { |
724 | if (SYS_CALL_ENTRY64(i) != sys_ni_syscall) { | 716 | if (sys_call_table[i*2] != sys_ni_syscall) { |
725 | count64++; | 717 | count64++; |
726 | systemcfg->syscall_map_64[i >> 5] |= | 718 | systemcfg->syscall_map_64[i >> 5] |= |
727 | 0x80000000UL >> (i & 0x1f); | 719 | 0x80000000UL >> (i & 0x1f); |
728 | } | 720 | } |
729 | if (SYS_CALL_ENTRY32(i) != sys_ni_syscall) { | 721 | if (sys_call_table[i*2+1] != sys_ni_syscall) { |
730 | count32++; | 722 | count32++; |
731 | systemcfg->syscall_map_32[i >> 5] |= | 723 | systemcfg->syscall_map_32[i >> 5] |= |
732 | 0x80000000UL >> (i & 0x1f); | 724 | 0x80000000UL >> (i & 0x1f); |
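For readers of the hunk above: the SYS_CALL_ENTRY64/SYS_CALL_ENTRY32 macros disappear because, with CONFIG_PPC_MERGE, the 64-bit and 32-bit handlers sit interleaved in a single sys_call_table, so setup_syscall_map() can index slots i*2 and i*2+1 directly. Below is a minimal, self-contained C sketch of that interleaved layout and the bitmap fill; the table size, handler names, and map arrays are assumptions made for the example, not the kernel's actual definitions.

```c
/* Illustrative sketch only -- even slots hold the 64-bit handler, odd slots
 * the 32-bit (compat) handler for the same syscall number. */
#include <stdio.h>
#include <stdint.h>

#define NR_SYSCALLS 4                      /* tiny table just for the demo */

typedef long (*syscall_fn)(void);

static long sys_ni_syscall(void) { return -38; }   /* ENOSYS stub */
static long sys_read64(void)     { return 0; }
static long sys_read32(void)     { return 0; }

static syscall_fn sys_call_table[NR_SYSCALLS * 2] = {
	sys_read64,     sys_read32,         /* syscall 0: both flavours */
	sys_read64,     sys_ni_syscall,     /* syscall 1: 64-bit only   */
	sys_ni_syscall, sys_read32,         /* syscall 2: 32-bit only   */
	sys_ni_syscall, sys_ni_syscall,     /* syscall 3: unimplemented */
};

static uint32_t syscall_map_64[(NR_SYSCALLS + 31) / 32];
static uint32_t syscall_map_32[(NR_SYSCALLS + 31) / 32];

int main(void)
{
	for (unsigned int i = 0; i < NR_SYSCALLS; i++) {
		if (sys_call_table[i * 2] != sys_ni_syscall)      /* 64-bit slot */
			syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f);
		if (sys_call_table[i * 2 + 1] != sys_ni_syscall)  /* 32-bit slot */
			syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f);
	}
	printf("map64=%08x map32=%08x\n",
	       (unsigned)syscall_map_64[0], (unsigned)syscall_map_32[0]);
	return 0;
}
```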
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 30367a0237dd..0115bf96751c 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -2,12 +2,16 @@
2 | # Makefile for ppc-specific library files.. | 2 | # Makefile for ppc-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := strcase.o string.o | 5 | ifeq ($(CONFIG_PPC_MERGE),y) |
6 | obj-y := string.o | ||
7 | endif | ||
8 | |||
9 | obj-y += strcase.o | ||
6 | obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o | 10 | obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o |
7 | obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o memcpy_64.o \ | 11 | obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \ |
8 | usercopy_64.o sstep.o checksum_64.o mem_64.o | 12 | memcpy_64.o usercopy_64.o mem_64.o |
9 | obj-$(CONFIG_PPC_ISERIES) += e2a.o | 13 | obj-$(CONFIG_PPC_ISERIES) += e2a.o |
10 | ifeq ($(CONFIG_PPC64),y) | 14 | ifeq ($(CONFIG_PPC64),y) |
11 | obj-$(CONFIG_SMP) += locks.o | 15 | obj-$(CONFIG_SMP) += locks.o |
16 | obj-$(CONFIG_DEBUG_KERNEL) += sstep.o | ||
12 | endif | 17 | endif |
13 | |||
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index ba59225fd373..2a7af765bfb6 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -81,12 +81,14 @@ CFLAGS += $(call cc-option,-funit-at-a-time)
81 | 81 | ||
82 | head-y := arch/ppc64/kernel/head.o | 82 | head-y := arch/ppc64/kernel/head.o |
83 | head-y += arch/powerpc/kernel/fpu.o | 83 | head-y += arch/powerpc/kernel/fpu.o |
84 | head-y += arch/powerpc/kernel/entry_64.o | ||
84 | 85 | ||
85 | libs-y += arch/ppc64/lib/ | 86 | libs-y += arch/ppc64/lib/ |
86 | core-y += arch/ppc64/kernel/ arch/powerpc/kernel/ | 87 | core-y += arch/ppc64/kernel/ arch/powerpc/kernel/ |
87 | core-y += arch/powerpc/mm/ | 88 | core-y += arch/powerpc/mm/ |
88 | core-y += arch/powerpc/sysdev/ | 89 | core-y += arch/powerpc/sysdev/ |
89 | core-y += arch/powerpc/platforms/ | 90 | core-y += arch/powerpc/platforms/ |
91 | core-y += arch/powerpc/lib/ | ||
90 | core-$(CONFIG_XMON) += arch/ppc64/xmon/ | 92 | core-$(CONFIG_XMON) += arch/ppc64/xmon/ |
91 | drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ | 93 | drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ |
92 | 94 | ||
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 863bd7d746fb..a20a305b825d 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -7,7 +7,7 @@ ifneq ($(CONFIG_PPC_MERGE),y)
7 | EXTRA_CFLAGS += -mno-minimal-toc | 7 | EXTRA_CFLAGS += -mno-minimal-toc |
8 | extra-y := head.o vmlinux.lds | 8 | extra-y := head.o vmlinux.lds |
9 | 9 | ||
10 | obj-y := entry.o misc.o prom.o | 10 | obj-y := misc.o prom.o |
11 | 11 | ||
12 | endif | 12 | endif |
13 | 13 | ||
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
deleted file mode 100644
index 5d2fcbe384c1..000000000000
--- a/arch/ppc64/kernel/entry.S
+++ /dev/null
@@ -1,845 +0,0 @@
1 | /* | ||
2 | * arch/ppc64/kernel/entry.S | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
7 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
8 | * Adapted for Power Macintosh by Paul Mackerras. | ||
9 | * Low-level exception handlers and MMU support | ||
10 | * rewritten by Paul Mackerras. | ||
11 | * Copyright (C) 1996 Paul Mackerras. | ||
12 | * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | ||
13 | * | ||
14 | * This file contains the system call entry code, context switch | ||
15 | * code, and exception/interrupt return code for PowerPC. | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License | ||
19 | * as published by the Free Software Foundation; either version | ||
20 | * 2 of the License, or (at your option) any later version. | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <asm/unistd.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/mmu.h> | ||
29 | #include <asm/thread_info.h> | ||
30 | #include <asm/ppc_asm.h> | ||
31 | #include <asm/asm-offsets.h> | ||
32 | #include <asm/cputable.h> | ||
33 | |||
34 | #ifdef CONFIG_PPC_ISERIES | ||
35 | #define DO_SOFT_DISABLE | ||
36 | #endif | ||
37 | |||
38 | /* | ||
39 | * System calls. | ||
40 | */ | ||
41 | .section ".toc","aw" | ||
42 | .SYS_CALL_TABLE: | ||
43 | .tc .sys_call_table[TC],.sys_call_table | ||
44 | |||
45 | .SYS_CALL_TABLE32: | ||
46 | .tc .sys_call_table32[TC],.sys_call_table32 | ||
47 | |||
48 | /* This value is used to mark exception frames on the stack. */ | ||
49 | exception_marker: | ||
50 | .tc ID_72656773_68657265[TC],0x7265677368657265 | ||
51 | |||
52 | .section ".text" | ||
53 | .align 7 | ||
54 | |||
55 | #undef SHOW_SYSCALLS | ||
56 | |||
57 | .globl system_call_common | ||
58 | system_call_common: | ||
59 | andi. r10,r12,MSR_PR | ||
60 | mr r10,r1 | ||
61 | addi r1,r1,-INT_FRAME_SIZE | ||
62 | beq- 1f | ||
63 | ld r1,PACAKSAVE(r13) | ||
64 | 1: std r10,0(r1) | ||
65 | std r11,_NIP(r1) | ||
66 | std r12,_MSR(r1) | ||
67 | std r0,GPR0(r1) | ||
68 | std r10,GPR1(r1) | ||
69 | std r2,GPR2(r1) | ||
70 | std r3,GPR3(r1) | ||
71 | std r4,GPR4(r1) | ||
72 | std r5,GPR5(r1) | ||
73 | std r6,GPR6(r1) | ||
74 | std r7,GPR7(r1) | ||
75 | std r8,GPR8(r1) | ||
76 | li r11,0 | ||
77 | std r11,GPR9(r1) | ||
78 | std r11,GPR10(r1) | ||
79 | std r11,GPR11(r1) | ||
80 | std r11,GPR12(r1) | ||
81 | std r9,GPR13(r1) | ||
82 | crclr so | ||
83 | mfcr r9 | ||
84 | mflr r10 | ||
85 | li r11,0xc01 | ||
86 | std r9,_CCR(r1) | ||
87 | std r10,_LINK(r1) | ||
88 | std r11,_TRAP(r1) | ||
89 | mfxer r9 | ||
90 | mfctr r10 | ||
91 | std r9,_XER(r1) | ||
92 | std r10,_CTR(r1) | ||
93 | std r3,ORIG_GPR3(r1) | ||
94 | ld r2,PACATOC(r13) | ||
95 | addi r9,r1,STACK_FRAME_OVERHEAD | ||
96 | ld r11,exception_marker@toc(r2) | ||
97 | std r11,-16(r9) /* "regshere" marker */ | ||
98 | #ifdef CONFIG_PPC_ISERIES | ||
99 | /* Hack for handling interrupts when soft-enabling on iSeries */ | ||
100 | cmpdi cr1,r0,0x5555 /* syscall 0x5555 */ | ||
101 | andi. r10,r12,MSR_PR /* from kernel */ | ||
102 | crand 4*cr0+eq,4*cr1+eq,4*cr0+eq | ||
103 | beq hardware_interrupt_entry | ||
104 | lbz r10,PACAPROCENABLED(r13) | ||
105 | std r10,SOFTE(r1) | ||
106 | #endif | ||
107 | mfmsr r11 | ||
108 | ori r11,r11,MSR_EE | ||
109 | mtmsrd r11,1 | ||
110 | |||
111 | #ifdef SHOW_SYSCALLS | ||
112 | bl .do_show_syscall | ||
113 | REST_GPR(0,r1) | ||
114 | REST_4GPRS(3,r1) | ||
115 | REST_2GPRS(7,r1) | ||
116 | addi r9,r1,STACK_FRAME_OVERHEAD | ||
117 | #endif | ||
118 | clrrdi r11,r1,THREAD_SHIFT | ||
119 | li r12,0 | ||
120 | ld r10,TI_FLAGS(r11) | ||
121 | stb r12,TI_SC_NOERR(r11) | ||
122 | andi. r11,r10,_TIF_SYSCALL_T_OR_A | ||
123 | bne- syscall_dotrace | ||
124 | syscall_dotrace_cont: | ||
125 | cmpldi 0,r0,NR_syscalls | ||
126 | bge- syscall_enosys | ||
127 | |||
128 | system_call: /* label this so stack traces look sane */ | ||
129 | /* | ||
130 | * Need to vector to 32 Bit or default sys_call_table here, | ||
131 | * based on caller's run-mode / personality. | ||
132 | */ | ||
133 | ld r11,.SYS_CALL_TABLE@toc(2) | ||
134 | andi. r10,r10,_TIF_32BIT | ||
135 | beq 15f | ||
136 | ld r11,.SYS_CALL_TABLE32@toc(2) | ||
137 | clrldi r3,r3,32 | ||
138 | clrldi r4,r4,32 | ||
139 | clrldi r5,r5,32 | ||
140 | clrldi r6,r6,32 | ||
141 | clrldi r7,r7,32 | ||
142 | clrldi r8,r8,32 | ||
143 | 15: | ||
144 | slwi r0,r0,3 | ||
145 | ldx r10,r11,r0 /* Fetch system call handler [ptr] */ | ||
146 | mtctr r10 | ||
147 | bctrl /* Call handler */ | ||
148 | |||
149 | syscall_exit: | ||
150 | #ifdef SHOW_SYSCALLS | ||
151 | std r3,GPR3(r1) | ||
152 | bl .do_show_syscall_exit | ||
153 | ld r3,GPR3(r1) | ||
154 | #endif | ||
155 | std r3,RESULT(r1) | ||
156 | ld r5,_CCR(r1) | ||
157 | li r10,-_LAST_ERRNO | ||
158 | cmpld r3,r10 | ||
159 | clrrdi r12,r1,THREAD_SHIFT | ||
160 | bge- syscall_error | ||
161 | syscall_error_cont: | ||
162 | |||
163 | /* check for syscall tracing or audit */ | ||
164 | ld r9,TI_FLAGS(r12) | ||
165 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | ||
166 | bne- syscall_exit_trace | ||
167 | syscall_exit_trace_cont: | ||
168 | |||
169 | /* disable interrupts so current_thread_info()->flags can't change, | ||
170 | and so that we don't get interrupted after loading SRR0/1. */ | ||
171 | ld r8,_MSR(r1) | ||
172 | andi. r10,r8,MSR_RI | ||
173 | beq- unrecov_restore | ||
174 | mfmsr r10 | ||
175 | rldicl r10,r10,48,1 | ||
176 | rotldi r10,r10,16 | ||
177 | mtmsrd r10,1 | ||
178 | ld r9,TI_FLAGS(r12) | ||
179 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED) | ||
180 | bne- syscall_exit_work | ||
181 | ld r7,_NIP(r1) | ||
182 | stdcx. r0,0,r1 /* to clear the reservation */ | ||
183 | andi. r6,r8,MSR_PR | ||
184 | ld r4,_LINK(r1) | ||
185 | beq- 1f /* only restore r13 if */ | ||
186 | ld r13,GPR13(r1) /* returning to usermode */ | ||
187 | 1: ld r2,GPR2(r1) | ||
188 | li r12,MSR_RI | ||
189 | andc r10,r10,r12 | ||
190 | mtmsrd r10,1 /* clear MSR.RI */ | ||
191 | ld r1,GPR1(r1) | ||
192 | mtlr r4 | ||
193 | mtcr r5 | ||
194 | mtspr SPRN_SRR0,r7 | ||
195 | mtspr SPRN_SRR1,r8 | ||
196 | rfid | ||
197 | b . /* prevent speculative execution */ | ||
198 | |||
199 | syscall_enosys: | ||
200 | li r3,-ENOSYS | ||
201 | std r3,RESULT(r1) | ||
202 | clrrdi r12,r1,THREAD_SHIFT | ||
203 | ld r5,_CCR(r1) | ||
204 | |||
205 | syscall_error: | ||
206 | lbz r11,TI_SC_NOERR(r12) | ||
207 | cmpwi 0,r11,0 | ||
208 | bne- syscall_error_cont | ||
209 | neg r3,r3 | ||
210 | oris r5,r5,0x1000 /* Set SO bit in CR */ | ||
211 | std r5,_CCR(r1) | ||
212 | b syscall_error_cont | ||
213 | |||
214 | /* Traced system call support */ | ||
215 | syscall_dotrace: | ||
216 | bl .save_nvgprs | ||
217 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
218 | bl .do_syscall_trace_enter | ||
219 | ld r0,GPR0(r1) /* Restore original registers */ | ||
220 | ld r3,GPR3(r1) | ||
221 | ld r4,GPR4(r1) | ||
222 | ld r5,GPR5(r1) | ||
223 | ld r6,GPR6(r1) | ||
224 | ld r7,GPR7(r1) | ||
225 | ld r8,GPR8(r1) | ||
226 | addi r9,r1,STACK_FRAME_OVERHEAD | ||
227 | clrrdi r10,r1,THREAD_SHIFT | ||
228 | ld r10,TI_FLAGS(r10) | ||
229 | b syscall_dotrace_cont | ||
230 | |||
231 | syscall_exit_trace: | ||
232 | std r3,GPR3(r1) | ||
233 | bl .save_nvgprs | ||
234 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
235 | bl .do_syscall_trace_leave | ||
236 | REST_NVGPRS(r1) | ||
237 | ld r3,GPR3(r1) | ||
238 | ld r5,_CCR(r1) | ||
239 | clrrdi r12,r1,THREAD_SHIFT | ||
240 | b syscall_exit_trace_cont | ||
241 | |||
242 | /* Stuff to do on exit from a system call. */ | ||
243 | syscall_exit_work: | ||
244 | std r3,GPR3(r1) | ||
245 | std r5,_CCR(r1) | ||
246 | b .ret_from_except_lite | ||
247 | |||
248 | /* Save non-volatile GPRs, if not already saved. */ | ||
249 | _GLOBAL(save_nvgprs) | ||
250 | ld r11,_TRAP(r1) | ||
251 | andi. r0,r11,1 | ||
252 | beqlr- | ||
253 | SAVE_NVGPRS(r1) | ||
254 | clrrdi r0,r11,1 | ||
255 | std r0,_TRAP(r1) | ||
256 | blr | ||
257 | |||
258 | /* | ||
259 | * The sigsuspend and rt_sigsuspend system calls can call do_signal | ||
260 | * and thus put the process into the stopped state where we might | ||
261 | * want to examine its user state with ptrace. Therefore we need | ||
262 | * to save all the nonvolatile registers (r14 - r31) before calling | ||
263 | * the C code. Similarly, fork, vfork and clone need the full | ||
264 | * register state on the stack so that it can be copied to the child. | ||
265 | */ | ||
266 | _GLOBAL(ppc32_sigsuspend) | ||
267 | bl .save_nvgprs | ||
268 | bl .compat_sys_sigsuspend | ||
269 | b 70f | ||
270 | |||
271 | _GLOBAL(ppc64_rt_sigsuspend) | ||
272 | bl .save_nvgprs | ||
273 | bl .sys_rt_sigsuspend | ||
274 | b 70f | ||
275 | |||
276 | _GLOBAL(ppc32_rt_sigsuspend) | ||
277 | bl .save_nvgprs | ||
278 | bl .compat_sys_rt_sigsuspend | ||
279 | 70: cmpdi 0,r3,0 | ||
280 | /* If it returned an error, we need to return via syscall_exit to set | ||
281 | the SO bit in cr0 and potentially stop for ptrace. */ | ||
282 | bne syscall_exit | ||
283 | /* If sigsuspend() returns zero, we are going into a signal handler. We | ||
284 | may need to call audit_syscall_exit() to mark the exit from sigsuspend() */ | ||
285 | #ifdef CONFIG_AUDIT | ||
286 | ld r3,PACACURRENT(r13) | ||
287 | ld r4,AUDITCONTEXT(r3) | ||
288 | cmpdi 0,r4,0 | ||
289 | beq .ret_from_except /* No audit_context: Leave immediately. */ | ||
290 | li r4, 2 /* AUDITSC_FAILURE */ | ||
291 | li r5,-4 /* It's always -EINTR */ | ||
292 | bl .audit_syscall_exit | ||
293 | #endif | ||
294 | b .ret_from_except | ||
295 | |||
296 | _GLOBAL(ppc_fork) | ||
297 | bl .save_nvgprs | ||
298 | bl .sys_fork | ||
299 | b syscall_exit | ||
300 | |||
301 | _GLOBAL(ppc_vfork) | ||
302 | bl .save_nvgprs | ||
303 | bl .sys_vfork | ||
304 | b syscall_exit | ||
305 | |||
306 | _GLOBAL(ppc_clone) | ||
307 | bl .save_nvgprs | ||
308 | bl .sys_clone | ||
309 | b syscall_exit | ||
310 | |||
311 | _GLOBAL(ppc32_swapcontext) | ||
312 | bl .save_nvgprs | ||
313 | bl .compat_sys_swapcontext | ||
314 | b 80f | ||
315 | |||
316 | _GLOBAL(ppc64_swapcontext) | ||
317 | bl .save_nvgprs | ||
318 | bl .sys_swapcontext | ||
319 | b 80f | ||
320 | |||
321 | _GLOBAL(ppc32_sigreturn) | ||
322 | bl .compat_sys_sigreturn | ||
323 | b 80f | ||
324 | |||
325 | _GLOBAL(ppc32_rt_sigreturn) | ||
326 | bl .compat_sys_rt_sigreturn | ||
327 | b 80f | ||
328 | |||
329 | _GLOBAL(ppc64_rt_sigreturn) | ||
330 | bl .sys_rt_sigreturn | ||
331 | |||
332 | 80: cmpdi 0,r3,0 | ||
333 | blt syscall_exit | ||
334 | clrrdi r4,r1,THREAD_SHIFT | ||
335 | ld r4,TI_FLAGS(r4) | ||
336 | andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | ||
337 | beq+ 81f | ||
338 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
339 | bl .do_syscall_trace_leave | ||
340 | 81: b .ret_from_except | ||
341 | |||
342 | _GLOBAL(ret_from_fork) | ||
343 | bl .schedule_tail | ||
344 | REST_NVGPRS(r1) | ||
345 | li r3,0 | ||
346 | b syscall_exit | ||
347 | |||
348 | /* | ||
349 | * This routine switches between two different tasks. The process | ||
350 | * state of one is saved on its kernel stack. Then the state | ||
351 | * of the other is restored from its kernel stack. The memory | ||
352 | * management hardware is updated to the second process's state. | ||
353 | * Finally, we can return to the second process, via ret_from_except. | ||
354 | * On entry, r3 points to the THREAD for the current task, r4 | ||
355 | * points to the THREAD for the new task. | ||
356 | * | ||
357 | * Note: there are two ways to get to the "going out" portion | ||
358 | * of this code; either by coming in via the entry (_switch) | ||
359 | * or via "fork" which must set up an environment equivalent | ||
360 | * to the "_switch" path. If you change this you'll have to change | ||
361 | * the fork code also. | ||
362 | * | ||
363 | * The code which creates the new task context is in 'copy_thread' | ||
364 | * in arch/ppc64/kernel/process.c | ||
365 | */ | ||
366 | .align 7 | ||
367 | _GLOBAL(_switch) | ||
368 | mflr r0 | ||
369 | std r0,16(r1) | ||
370 | stdu r1,-SWITCH_FRAME_SIZE(r1) | ||
371 | /* r3-r13 are caller saved -- Cort */ | ||
372 | SAVE_8GPRS(14, r1) | ||
373 | SAVE_10GPRS(22, r1) | ||
374 | mflr r20 /* Return to switch caller */ | ||
375 | mfmsr r22 | ||
376 | li r0, MSR_FP | ||
377 | #ifdef CONFIG_ALTIVEC | ||
378 | BEGIN_FTR_SECTION | ||
379 | oris r0,r0,MSR_VEC@h /* Disable altivec */ | ||
380 | mfspr r24,SPRN_VRSAVE /* save vrsave register value */ | ||
381 | std r24,THREAD_VRSAVE(r3) | ||
382 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
383 | #endif /* CONFIG_ALTIVEC */ | ||
384 | and. r0,r0,r22 | ||
385 | beq+ 1f | ||
386 | andc r22,r22,r0 | ||
387 | mtmsrd r22 | ||
388 | isync | ||
389 | 1: std r20,_NIP(r1) | ||
390 | mfcr r23 | ||
391 | std r23,_CCR(r1) | ||
392 | std r1,KSP(r3) /* Set old stack pointer */ | ||
393 | |||
394 | #ifdef CONFIG_SMP | ||
395 | /* We need a sync somewhere here to make sure that if the | ||
396 | * previous task gets rescheduled on another CPU, it sees all | ||
397 | * stores it has performed on this one. | ||
398 | */ | ||
399 | sync | ||
400 | #endif /* CONFIG_SMP */ | ||
401 | |||
402 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ | ||
403 | std r6,PACACURRENT(r13) /* Set new 'current' */ | ||
404 | |||
405 | ld r8,KSP(r4) /* new stack pointer */ | ||
406 | BEGIN_FTR_SECTION | ||
407 | clrrdi r6,r8,28 /* get its ESID */ | ||
408 | clrrdi r9,r1,28 /* get current sp ESID */ | ||
409 | clrldi. r0,r6,2 /* is new ESID c00000000? */ | ||
410 | cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ | ||
411 | cror eq,4*cr1+eq,eq | ||
412 | beq 2f /* if yes, don't slbie it */ | ||
413 | |||
414 | /* Bolt in the new stack SLB entry */ | ||
415 | ld r7,KSP_VSID(r4) /* Get new stack's VSID */ | ||
416 | oris r0,r6,(SLB_ESID_V)@h | ||
417 | ori r0,r0,(SLB_NUM_BOLTED-1)@l | ||
418 | slbie r6 | ||
419 | slbie r6 /* Workaround POWER5 < DD2.1 issue */ | ||
420 | slbmte r7,r0 | ||
421 | isync | ||
422 | |||
423 | 2: | ||
424 | END_FTR_SECTION_IFSET(CPU_FTR_SLB) | ||
425 | clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ | ||
426 | /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE | ||
427 | because we don't need to leave the 288-byte ABI gap at the | ||
428 | top of the kernel stack. */ | ||
429 | addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE | ||
430 | |||
431 | mr r1,r8 /* start using new stack pointer */ | ||
432 | std r7,PACAKSAVE(r13) | ||
433 | |||
434 | ld r6,_CCR(r1) | ||
435 | mtcrf 0xFF,r6 | ||
436 | |||
437 | #ifdef CONFIG_ALTIVEC | ||
438 | BEGIN_FTR_SECTION | ||
439 | ld r0,THREAD_VRSAVE(r4) | ||
440 | mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ | ||
441 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
442 | #endif /* CONFIG_ALTIVEC */ | ||
443 | |||
444 | /* r3-r13 are destroyed -- Cort */ | ||
445 | REST_8GPRS(14, r1) | ||
446 | REST_10GPRS(22, r1) | ||
447 | |||
448 | /* convert old thread to its task_struct for return value */ | ||
449 | addi r3,r3,-THREAD | ||
450 | ld r7,_NIP(r1) /* Return to _switch caller in new task */ | ||
451 | mtlr r7 | ||
452 | addi r1,r1,SWITCH_FRAME_SIZE | ||
453 | blr | ||
454 | |||
455 | .align 7 | ||
456 | _GLOBAL(ret_from_except) | ||
457 | ld r11,_TRAP(r1) | ||
458 | andi. r0,r11,1 | ||
459 | bne .ret_from_except_lite | ||
460 | REST_NVGPRS(r1) | ||
461 | |||
462 | _GLOBAL(ret_from_except_lite) | ||
463 | /* | ||
464 | * Disable interrupts so that current_thread_info()->flags | ||
465 | * can't change between when we test it and when we return | ||
466 | * from the interrupt. | ||
467 | */ | ||
468 | mfmsr r10 /* Get current interrupt state */ | ||
469 | rldicl r9,r10,48,1 /* clear MSR_EE */ | ||
470 | rotldi r9,r9,16 | ||
471 | mtmsrd r9,1 /* Update machine state */ | ||
472 | |||
473 | #ifdef CONFIG_PREEMPT | ||
474 | clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ | ||
475 | li r0,_TIF_NEED_RESCHED /* bits to check */ | ||
476 | ld r3,_MSR(r1) | ||
477 | ld r4,TI_FLAGS(r9) | ||
478 | /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */ | ||
479 | rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING | ||
480 | and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */ | ||
481 | bne do_work | ||
482 | |||
483 | #else /* !CONFIG_PREEMPT */ | ||
484 | ld r3,_MSR(r1) /* Returning to user mode? */ | ||
485 | andi. r3,r3,MSR_PR | ||
486 | beq restore /* if not, just restore regs and return */ | ||
487 | |||
488 | /* Check current_thread_info()->flags */ | ||
489 | clrrdi r9,r1,THREAD_SHIFT | ||
490 | ld r4,TI_FLAGS(r9) | ||
491 | andi. r0,r4,_TIF_USER_WORK_MASK | ||
492 | bne do_work | ||
493 | #endif | ||
494 | |||
495 | restore: | ||
496 | #ifdef CONFIG_PPC_ISERIES | ||
497 | ld r5,SOFTE(r1) | ||
498 | cmpdi 0,r5,0 | ||
499 | beq 4f | ||
500 | /* Check for pending interrupts (iSeries) */ | ||
501 | ld r3,PACALPPACA+LPPACAANYINT(r13) | ||
502 | cmpdi r3,0 | ||
503 | beq+ 4f /* skip do_IRQ if no interrupts */ | ||
504 | |||
505 | li r3,0 | ||
506 | stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */ | ||
507 | ori r10,r10,MSR_EE | ||
508 | mtmsrd r10 /* hard-enable again */ | ||
509 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
510 | bl .do_IRQ | ||
511 | b .ret_from_except_lite /* loop back and handle more */ | ||
512 | |||
513 | 4: stb r5,PACAPROCENABLED(r13) | ||
514 | #endif | ||
515 | |||
516 | ld r3,_MSR(r1) | ||
517 | andi. r0,r3,MSR_RI | ||
518 | beq- unrecov_restore | ||
519 | |||
520 | andi. r0,r3,MSR_PR | ||
521 | |||
522 | /* | ||
523 | * r13 is our per cpu area, only restore it if we are returning to | ||
524 | * userspace | ||
525 | */ | ||
526 | beq 1f | ||
527 | REST_GPR(13, r1) | ||
528 | 1: | ||
529 | ld r3,_CTR(r1) | ||
530 | ld r0,_LINK(r1) | ||
531 | mtctr r3 | ||
532 | mtlr r0 | ||
533 | ld r3,_XER(r1) | ||
534 | mtspr SPRN_XER,r3 | ||
535 | |||
536 | REST_8GPRS(5, r1) | ||
537 | |||
538 | stdcx. r0,0,r1 /* to clear the reservation */ | ||
539 | |||
540 | mfmsr r0 | ||
541 | li r2, MSR_RI | ||
542 | andc r0,r0,r2 | ||
543 | mtmsrd r0,1 | ||
544 | |||
545 | ld r0,_MSR(r1) | ||
546 | mtspr SPRN_SRR1,r0 | ||
547 | |||
548 | ld r2,_CCR(r1) | ||
549 | mtcrf 0xFF,r2 | ||
550 | ld r2,_NIP(r1) | ||
551 | mtspr SPRN_SRR0,r2 | ||
552 | |||
553 | ld r0,GPR0(r1) | ||
554 | ld r2,GPR2(r1) | ||
555 | ld r3,GPR3(r1) | ||
556 | ld r4,GPR4(r1) | ||
557 | ld r1,GPR1(r1) | ||
558 | |||
559 | rfid | ||
560 | b . /* prevent speculative execution */ | ||
561 | |||
562 | /* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */ | ||
563 | do_work: | ||
564 | #ifdef CONFIG_PREEMPT | ||
565 | andi. r0,r3,MSR_PR /* Returning to user mode? */ | ||
566 | bne user_work | ||
567 | /* Check that preempt_count() == 0 and interrupts are enabled */ | ||
568 | lwz r8,TI_PREEMPT(r9) | ||
569 | cmpwi cr1,r8,0 | ||
570 | #ifdef CONFIG_PPC_ISERIES | ||
571 | ld r0,SOFTE(r1) | ||
572 | cmpdi r0,0 | ||
573 | #else | ||
574 | andi. r0,r3,MSR_EE | ||
575 | #endif | ||
576 | crandc eq,cr1*4+eq,eq | ||
577 | bne restore | ||
578 | /* here we are preempting the current task */ | ||
579 | 1: | ||
580 | #ifdef CONFIG_PPC_ISERIES | ||
581 | li r0,1 | ||
582 | stb r0,PACAPROCENABLED(r13) | ||
583 | #endif | ||
584 | ori r10,r10,MSR_EE | ||
585 | mtmsrd r10,1 /* reenable interrupts */ | ||
586 | bl .preempt_schedule | ||
587 | mfmsr r10 | ||
588 | clrrdi r9,r1,THREAD_SHIFT | ||
589 | rldicl r10,r10,48,1 /* disable interrupts again */ | ||
590 | rotldi r10,r10,16 | ||
591 | mtmsrd r10,1 | ||
592 | ld r4,TI_FLAGS(r9) | ||
593 | andi. r0,r4,_TIF_NEED_RESCHED | ||
594 | bne 1b | ||
595 | b restore | ||
596 | |||
597 | user_work: | ||
598 | #endif | ||
599 | /* Enable interrupts */ | ||
600 | ori r10,r10,MSR_EE | ||
601 | mtmsrd r10,1 | ||
602 | |||
603 | andi. r0,r4,_TIF_NEED_RESCHED | ||
604 | beq 1f | ||
605 | bl .schedule | ||
606 | b .ret_from_except_lite | ||
607 | |||
608 | 1: bl .save_nvgprs | ||
609 | li r3,0 | ||
610 | addi r4,r1,STACK_FRAME_OVERHEAD | ||
611 | bl .do_signal | ||
612 | b .ret_from_except | ||
613 | |||
614 | unrecov_restore: | ||
615 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
616 | bl .unrecoverable_exception | ||
617 | b unrecov_restore | ||
618 | |||
619 | #ifdef CONFIG_PPC_RTAS | ||
620 | /* | ||
621 | * On CHRP, the Run-Time Abstraction Services (RTAS) have to be | ||
622 | * called with the MMU off. | ||
623 | * | ||
624 | * In addition, we need to be in 32b mode, at least for now. | ||
625 | * | ||
626 | * Note: r3 is an input parameter to rtas, so don't trash it... | ||
627 | */ | ||
628 | _GLOBAL(enter_rtas) | ||
629 | mflr r0 | ||
630 | std r0,16(r1) | ||
631 | stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */ | ||
632 | |||
633 | /* Because RTAS is running in 32b mode, it clobbers the high order half | ||
634 | * of all registers that it saves. We therefore save those registers | ||
635 | * RTAS might touch to the stack. (r0, r3-r13 are caller saved) | ||
636 | */ | ||
637 | SAVE_GPR(2, r1) /* Save the TOC */ | ||
638 | SAVE_GPR(13, r1) /* Save paca */ | ||
639 | SAVE_8GPRS(14, r1) /* Save the non-volatiles */ | ||
640 | SAVE_10GPRS(22, r1) /* ditto */ | ||
641 | |||
642 | mfcr r4 | ||
643 | std r4,_CCR(r1) | ||
644 | mfctr r5 | ||
645 | std r5,_CTR(r1) | ||
646 | mfspr r6,SPRN_XER | ||
647 | std r6,_XER(r1) | ||
648 | mfdar r7 | ||
649 | std r7,_DAR(r1) | ||
650 | mfdsisr r8 | ||
651 | std r8,_DSISR(r1) | ||
652 | mfsrr0 r9 | ||
653 | std r9,_SRR0(r1) | ||
654 | mfsrr1 r10 | ||
655 | std r10,_SRR1(r1) | ||
656 | |||
657 | /* There is no way it is acceptable to get here with interrupts enabled, | ||
658 | * check it with the asm equivalent of WARN_ON | ||
659 | */ | ||
660 | mfmsr r6 | ||
661 | andi. r0,r6,MSR_EE | ||
662 | 1: tdnei r0,0 | ||
663 | .section __bug_table,"a" | ||
664 | .llong 1b,__LINE__ + 0x1000000, 1f, 2f | ||
665 | .previous | ||
666 | .section .rodata,"a" | ||
667 | 1: .asciz __FILE__ | ||
668 | 2: .asciz "enter_rtas" | ||
669 | .previous | ||
670 | |||
671 | /* Unfortunately, the stack pointer and the MSR are also clobbered, | ||
672 | * so they are saved in the PACA which allows us to restore | ||
673 | * our original state after RTAS returns. | ||
674 | */ | ||
675 | std r1,PACAR1(r13) | ||
676 | std r6,PACASAVEDMSR(r13) | ||
677 | |||
678 | /* Setup our real return addr */ | ||
679 | SET_REG_TO_LABEL(r4,.rtas_return_loc) | ||
680 | SET_REG_TO_CONST(r9,KERNELBASE) | ||
681 | sub r4,r4,r9 | ||
682 | mtlr r4 | ||
683 | |||
684 | li r0,0 | ||
685 | ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI | ||
686 | andc r0,r6,r0 | ||
687 | |||
688 | li r9,1 | ||
689 | rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) | ||
690 | ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP | ||
691 | andc r6,r0,r9 | ||
692 | ori r6,r6,MSR_RI | ||
693 | sync /* disable interrupts so SRR0/1 */ | ||
694 | mtmsrd r0 /* don't get trashed */ | ||
695 | |||
696 | SET_REG_TO_LABEL(r4,rtas) | ||
697 | ld r5,RTASENTRY(r4) /* get the rtas->entry value */ | ||
698 | ld r4,RTASBASE(r4) /* get the rtas->base value */ | ||
699 | |||
700 | mtspr SPRN_SRR0,r5 | ||
701 | mtspr SPRN_SRR1,r6 | ||
702 | rfid | ||
703 | b . /* prevent speculative execution */ | ||
704 | |||
705 | _STATIC(rtas_return_loc) | ||
706 | /* relocation is off at this point */ | ||
707 | mfspr r4,SPRN_SPRG3 /* Get PACA */ | ||
708 | SET_REG_TO_CONST(r5, KERNELBASE) | ||
709 | sub r4,r4,r5 /* RELOC the PACA base pointer */ | ||
710 | |||
711 | mfmsr r6 | ||
712 | li r0,MSR_RI | ||
713 | andc r6,r6,r0 | ||
714 | sync | ||
715 | mtmsrd r6 | ||
716 | |||
717 | ld r1,PACAR1(r4) /* Restore our SP */ | ||
718 | LOADADDR(r3,.rtas_restore_regs) | ||
719 | ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ | ||
720 | |||
721 | mtspr SPRN_SRR0,r3 | ||
722 | mtspr SPRN_SRR1,r4 | ||
723 | rfid | ||
724 | b . /* prevent speculative execution */ | ||
725 | |||
726 | _STATIC(rtas_restore_regs) | ||
727 | /* relocation is on at this point */ | ||
728 | REST_GPR(2, r1) /* Restore the TOC */ | ||
729 | REST_GPR(13, r1) /* Restore paca */ | ||
730 | REST_8GPRS(14, r1) /* Restore the non-volatiles */ | ||
731 | REST_10GPRS(22, r1) /* ditto */ | ||
732 | |||
733 | mfspr r13,SPRN_SPRG3 | ||
734 | |||
735 | ld r4,_CCR(r1) | ||
736 | mtcr r4 | ||
737 | ld r5,_CTR(r1) | ||
738 | mtctr r5 | ||
739 | ld r6,_XER(r1) | ||
740 | mtspr SPRN_XER,r6 | ||
741 | ld r7,_DAR(r1) | ||
742 | mtdar r7 | ||
743 | ld r8,_DSISR(r1) | ||
744 | mtdsisr r8 | ||
745 | ld r9,_SRR0(r1) | ||
746 | mtsrr0 r9 | ||
747 | ld r10,_SRR1(r1) | ||
748 | mtsrr1 r10 | ||
749 | |||
750 | addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */ | ||
751 | ld r0,16(r1) /* get return address */ | ||
752 | |||
753 | mtlr r0 | ||
754 | blr /* return to caller */ | ||
755 | |||
756 | #endif /* CONFIG_PPC_RTAS */ | ||
757 | |||
758 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
759 | |||
760 | _GLOBAL(enter_prom) | ||
761 | mflr r0 | ||
762 | std r0,16(r1) | ||
763 | stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ | ||
764 | |||
765 | /* Because PROM is running in 32b mode, it clobbers the high order half | ||
766 | * of all registers that it saves. We therefore save those registers | ||
767 | * PROM might touch to the stack. (r0, r3-r13 are caller saved) | ||
768 | */ | ||
769 | SAVE_8GPRS(2, r1) | ||
770 | SAVE_GPR(13, r1) | ||
771 | SAVE_8GPRS(14, r1) | ||
772 | SAVE_10GPRS(22, r1) | ||
773 | mfcr r4 | ||
774 | std r4,_CCR(r1) | ||
775 | mfctr r5 | ||
776 | std r5,_CTR(r1) | ||
777 | mfspr r6,SPRN_XER | ||
778 | std r6,_XER(r1) | ||
779 | mfdar r7 | ||
780 | std r7,_DAR(r1) | ||
781 | mfdsisr r8 | ||
782 | std r8,_DSISR(r1) | ||
783 | mfsrr0 r9 | ||
784 | std r9,_SRR0(r1) | ||
785 | mfsrr1 r10 | ||
786 | std r10,_SRR1(r1) | ||
787 | mfmsr r11 | ||
788 | std r11,_MSR(r1) | ||
789 | |||
790 | /* Get the PROM entrypoint */ | ||
791 | ld r0,GPR4(r1) | ||
792 | mtlr r0 | ||
793 | |||
794 | /* Switch MSR to 32 bits mode | ||
795 | */ | ||
796 | mfmsr r11 | ||
797 | li r12,1 | ||
798 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | ||
799 | andc r11,r11,r12 | ||
800 | li r12,1 | ||
801 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | ||
802 | andc r11,r11,r12 | ||
803 | mtmsrd r11 | ||
804 | isync | ||
805 | |||
806 | /* Restore arguments & enter PROM here... */ | ||
807 | ld r3,GPR3(r1) | ||
808 | blrl | ||
809 | |||
810 | /* Just make sure that r1 top 32 bits didn't get | ||
811 | * corrupt by OF | ||
812 | */ | ||
813 | rldicl r1,r1,0,32 | ||
814 | |||
815 | /* Restore the MSR (back to 64 bits) */ | ||
816 | ld r0,_MSR(r1) | ||
817 | mtmsrd r0 | ||
818 | isync | ||
819 | |||
820 | /* Restore other registers */ | ||
821 | REST_GPR(2, r1) | ||
822 | REST_GPR(13, r1) | ||
823 | REST_8GPRS(14, r1) | ||
824 | REST_10GPRS(22, r1) | ||
825 | ld r4,_CCR(r1) | ||
826 | mtcr r4 | ||
827 | ld r5,_CTR(r1) | ||
828 | mtctr r5 | ||
829 | ld r6,_XER(r1) | ||
830 | mtspr SPRN_XER,r6 | ||
831 | ld r7,_DAR(r1) | ||
832 | mtdar r7 | ||
833 | ld r8,_DSISR(r1) | ||
834 | mtdsisr r8 | ||
835 | ld r9,_SRR0(r1) | ||
836 | mtsrr0 r9 | ||
837 | ld r10,_SRR1(r1) | ||
838 | mtsrr1 r10 | ||
839 | |||
840 | addi r1,r1,PROM_FRAME_SIZE | ||
841 | ld r0,16(r1) | ||
842 | mtlr r0 | ||
843 | blr | ||
844 | |||
845 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
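The deleted system_call path above selects the 64-bit or 32-bit TOC-addressed table from the caller's _TIF_32BIT flag, zero-extends the six argument registers for compat callers, and branches through the entry at offset r0*8. As a readability aid, here is an approximate C restatement of that dispatch; every name in it is invented for the sketch, the table size is an assumption, and the CR0.SO error handling of the real syscall_exit path is omitted.

```c
/* Approximate C restatement of the deleted system_call dispatch (sketch). */
#include <stdio.h>

#define NR_SYSCALLS_ASSUMED 278        /* assumed value, see __NR_syscalls */
#define ENOSYS 38

typedef long (*syscall6_fn)(unsigned long, unsigned long, unsigned long,
                            unsigned long, unsigned long, unsigned long);

static long stub(unsigned long a, unsigned long b, unsigned long c,
                 unsigned long d, unsigned long e, unsigned long f)
{
	(void)b; (void)c; (void)d; (void)e; (void)f;
	return (long)a;                     /* echo first argument back */
}

static syscall6_fn sys_call_table[NR_SYSCALLS_ASSUMED]   = { stub };
static syscall6_fn sys_call_table32[NR_SYSCALLS_ASSUMED] = { stub };

static long dispatch_syscall(unsigned long nr, unsigned long a[6],
                             int is_32bit_task)
{
	syscall6_fn *table = sys_call_table;

	if (nr >= NR_SYSCALLS_ASSUMED)      /* cmpldi / bge- syscall_enosys */
		return -ENOSYS;

	if (is_32bit_task) {                /* andi. r10,r10,_TIF_32BIT */
		table = sys_call_table32;
		for (int i = 0; i < 6; i++) /* clrldi rN,rN,32 */
			a[i] &= 0xffffffffUL;
	}
	if (!table[nr])
		return -ENOSYS;

	/* slwi r0,r0,3; ldx r10,r11,r0; mtctr r10; bctrl */
	return table[nr](a[0], a[1], a[2], a[3], a[4], a[5]);
}

int main(void)
{
	/* assumes 64-bit unsigned long, as on ppc64 */
	unsigned long args[6] = { 0x100000001UL, 0, 0, 0, 0, 0 };
	printf("%ld\n", dispatch_syscall(0, args, 1));  /* prints 1: arg truncated */
	return 0;
}
```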
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 9cae3d5c40e6..077507ffbab8 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -867,566 +867,3 @@ _GLOBAL(kexec_sequence)
867 | li r5,0 | 867 | li r5,0 |
868 | blr /* image->start(physid, image->start, 0); */ | 868 | blr /* image->start(physid, image->start, 0); */ |
869 | #endif /* CONFIG_KEXEC */ | 869 | #endif /* CONFIG_KEXEC */ |
870 | |||
871 | /* Why isn't this a) automatic, b) written in 'C'? */ | ||
872 | .balign 8 | ||
873 | _GLOBAL(sys_call_table32) | ||
874 | .llong .sys_restart_syscall /* 0 */ | ||
875 | .llong .sys_exit | ||
876 | .llong .ppc_fork | ||
877 | .llong .sys_read | ||
878 | .llong .sys_write | ||
879 | .llong .compat_sys_open /* 5 */ | ||
880 | .llong .sys_close | ||
881 | .llong .compat_sys_waitpid | ||
882 | .llong .compat_sys_creat | ||
883 | .llong .sys_link | ||
884 | .llong .sys_unlink /* 10 */ | ||
885 | .llong .compat_sys_execve | ||
886 | .llong .sys_chdir | ||
887 | .llong .compat_sys_time | ||
888 | .llong .sys_mknod | ||
889 | .llong .sys_chmod /* 15 */ | ||
890 | .llong .sys_lchown | ||
891 | .llong .sys_ni_syscall /* old break syscall */ | ||
892 | .llong .sys_ni_syscall /* old stat syscall */ | ||
893 | .llong .ppc32_lseek | ||
894 | .llong .sys_getpid /* 20 */ | ||
895 | .llong .compat_sys_mount | ||
896 | .llong .sys_oldumount | ||
897 | .llong .sys_setuid | ||
898 | .llong .sys_getuid | ||
899 | .llong .compat_sys_stime /* 25 */ | ||
900 | .llong .compat_sys_ptrace | ||
901 | .llong .sys_alarm | ||
902 | .llong .sys_ni_syscall /* old fstat syscall */ | ||
903 | .llong .compat_sys_pause | ||
904 | .llong .compat_sys_utime /* 30 */ | ||
905 | .llong .sys_ni_syscall /* old stty syscall */ | ||
906 | .llong .sys_ni_syscall /* old gtty syscall */ | ||
907 | .llong .compat_sys_access | ||
908 | .llong .compat_sys_nice | ||
909 | .llong .sys_ni_syscall /* 35 - old ftime syscall */ | ||
910 | .llong .sys_sync | ||
911 | .llong .compat_sys_kill | ||
912 | .llong .sys_rename | ||
913 | .llong .compat_sys_mkdir | ||
914 | .llong .sys_rmdir /* 40 */ | ||
915 | .llong .sys_dup | ||
916 | .llong .sys_pipe | ||
917 | .llong .compat_sys_times | ||
918 | .llong .sys_ni_syscall /* old prof syscall */ | ||
919 | .llong .sys_brk /* 45 */ | ||
920 | .llong .sys_setgid | ||
921 | .llong .sys_getgid | ||
922 | .llong .sys_signal | ||
923 | .llong .sys_geteuid | ||
924 | .llong .sys_getegid /* 50 */ | ||
925 | .llong .sys_acct | ||
926 | .llong .sys_umount | ||
927 | .llong .sys_ni_syscall /* old lock syscall */ | ||
928 | .llong .compat_sys_ioctl | ||
929 | .llong .compat_sys_fcntl /* 55 */ | ||
930 | .llong .sys_ni_syscall /* old mpx syscall */ | ||
931 | .llong .compat_sys_setpgid | ||
932 | .llong .sys_ni_syscall /* old ulimit syscall */ | ||
933 | .llong .sys_olduname | ||
934 | .llong .compat_sys_umask /* 60 */ | ||
935 | .llong .sys_chroot | ||
936 | .llong .sys_ustat | ||
937 | .llong .sys_dup2 | ||
938 | .llong .sys_getppid | ||
939 | .llong .sys_getpgrp /* 65 */ | ||
940 | .llong .sys_setsid | ||
941 | .llong .compat_sys_sigaction | ||
942 | .llong .sys_sgetmask | ||
943 | .llong .compat_sys_ssetmask | ||
944 | .llong .sys_setreuid /* 70 */ | ||
945 | .llong .sys_setregid | ||
946 | .llong .ppc32_sigsuspend | ||
947 | .llong .compat_sys_sigpending | ||
948 | .llong .compat_sys_sethostname | ||
949 | .llong .compat_sys_setrlimit /* 75 */ | ||
950 | .llong .compat_sys_old_getrlimit | ||
951 | .llong .compat_sys_getrusage | ||
952 | .llong .compat_sys_gettimeofday | ||
953 | .llong .compat_sys_settimeofday | ||
954 | .llong .compat_sys_getgroups /* 80 */ | ||
955 | .llong .compat_sys_setgroups | ||
956 | .llong .sys_ni_syscall /* old select syscall */ | ||
957 | .llong .sys_symlink | ||
958 | .llong .sys_ni_syscall /* old lstat syscall */ | ||
959 | .llong .compat_sys_readlink /* 85 */ | ||
960 | .llong .sys_uselib | ||
961 | .llong .sys_swapon | ||
962 | .llong .sys_reboot | ||
963 | .llong .old32_readdir | ||
964 | .llong .sys_mmap /* 90 */ | ||
965 | .llong .sys_munmap | ||
966 | .llong .sys_truncate | ||
967 | .llong .sys_ftruncate | ||
968 | .llong .sys_fchmod | ||
969 | .llong .sys_fchown /* 95 */ | ||
970 | .llong .compat_sys_getpriority | ||
971 | .llong .compat_sys_setpriority | ||
972 | .llong .sys_ni_syscall /* old profil syscall */ | ||
973 | .llong .compat_sys_statfs | ||
974 | .llong .compat_sys_fstatfs /* 100 */ | ||
975 | .llong .sys_ni_syscall /* old ioperm syscall */ | ||
976 | .llong .compat_sys_socketcall | ||
977 | .llong .compat_sys_syslog | ||
978 | .llong .compat_sys_setitimer | ||
979 | .llong .compat_sys_getitimer /* 105 */ | ||
980 | .llong .compat_sys_newstat | ||
981 | .llong .compat_sys_newlstat | ||
982 | .llong .compat_sys_newfstat | ||
983 | .llong .sys_uname | ||
984 | .llong .sys_ni_syscall /* 110 old iopl syscall */ | ||
985 | .llong .sys_vhangup | ||
986 | .llong .sys_ni_syscall /* old idle syscall */ | ||
987 | .llong .sys_ni_syscall /* old vm86 syscall */ | ||
988 | .llong .compat_sys_wait4 | ||
989 | .llong .sys_swapoff /* 115 */ | ||
990 | .llong .compat_sys_sysinfo | ||
991 | .llong .sys32_ipc | ||
992 | .llong .sys_fsync | ||
993 | .llong .ppc32_sigreturn | ||
994 | .llong .ppc_clone /* 120 */ | ||
995 | .llong .compat_sys_setdomainname | ||
996 | .llong .ppc_newuname | ||
997 | .llong .sys_ni_syscall /* old modify_ldt syscall */ | ||
998 | .llong .compat_sys_adjtimex | ||
999 | .llong .sys_mprotect /* 125 */ | ||
1000 | .llong .compat_sys_sigprocmask | ||
1001 | .llong .sys_ni_syscall /* old create_module syscall */ | ||
1002 | .llong .sys_init_module | ||
1003 | .llong .sys_delete_module | ||
1004 | .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */ | ||
1005 | .llong .sys_quotactl | ||
1006 | .llong .compat_sys_getpgid | ||
1007 | .llong .sys_fchdir | ||
1008 | .llong .sys_bdflush | ||
1009 | .llong .compat_sys_sysfs /* 135 */ | ||
1010 | .llong .ppc64_personality | ||
1011 | .llong .sys_ni_syscall /* for afs_syscall */ | ||
1012 | .llong .sys_setfsuid | ||
1013 | .llong .sys_setfsgid | ||
1014 | .llong .sys_llseek /* 140 */ | ||
1015 | .llong .compat_sys_getdents | ||
1016 | .llong .ppc32_select | ||
1017 | .llong .sys_flock | ||
1018 | .llong .sys_msync | ||
1019 | .llong .compat_sys_readv /* 145 */ | ||
1020 | .llong .compat_sys_writev | ||
1021 | .llong .compat_sys_getsid | ||
1022 | .llong .sys_fdatasync | ||
1023 | .llong .compat_sys_sysctl | ||
1024 | .llong .sys_mlock /* 150 */ | ||
1025 | .llong .sys_munlock | ||
1026 | .llong .sys_mlockall | ||
1027 | .llong .sys_munlockall | ||
1028 | .llong .compat_sys_sched_setparam | ||
1029 | .llong .compat_sys_sched_getparam /* 155 */ | ||
1030 | .llong .compat_sys_sched_setscheduler | ||
1031 | .llong .compat_sys_sched_getscheduler | ||
1032 | .llong .sys_sched_yield | ||
1033 | .llong .compat_sys_sched_get_priority_max | ||
1034 | .llong .compat_sys_sched_get_priority_min /* 160 */ | ||
1035 | .llong .compat_sys_sched_rr_get_interval | ||
1036 | .llong .compat_sys_nanosleep | ||
1037 | .llong .sys_mremap | ||
1038 | .llong .sys_setresuid | ||
1039 | .llong .sys_getresuid /* 165 */ | ||
1040 | .llong .sys_ni_syscall /* old query_module syscall */ | ||
1041 | .llong .sys_poll | ||
1042 | .llong .compat_sys_nfsservctl | ||
1043 | .llong .sys_setresgid | ||
1044 | .llong .sys_getresgid /* 170 */ | ||
1045 | .llong .compat_sys_prctl | ||
1046 | .llong .ppc32_rt_sigreturn | ||
1047 | .llong .compat_sys_rt_sigaction | ||
1048 | .llong .compat_sys_rt_sigprocmask | ||
1049 | .llong .compat_sys_rt_sigpending /* 175 */ | ||
1050 | .llong .compat_sys_rt_sigtimedwait | ||
1051 | .llong .compat_sys_rt_sigqueueinfo | ||
1052 | .llong .ppc32_rt_sigsuspend | ||
1053 | .llong .compat_sys_pread64 | ||
1054 | .llong .compat_sys_pwrite64 /* 180 */ | ||
1055 | .llong .sys_chown | ||
1056 | .llong .sys_getcwd | ||
1057 | .llong .sys_capget | ||
1058 | .llong .sys_capset | ||
1059 | .llong .compat_sys_sigaltstack /* 185 */ | ||
1060 | .llong .compat_sys_sendfile | ||
1061 | .llong .sys_ni_syscall /* reserved for streams1 */ | ||
1062 | .llong .sys_ni_syscall /* reserved for streams2 */ | ||
1063 | .llong .ppc_vfork | ||
1064 | .llong .compat_sys_getrlimit /* 190 */ | ||
1065 | .llong .compat_sys_readahead | ||
1066 | .llong .compat_sys_mmap2 | ||
1067 | .llong .compat_sys_truncate64 | ||
1068 | .llong .compat_sys_ftruncate64 | ||
1069 | .llong .sys_stat64 /* 195 */ | ||
1070 | .llong .sys_lstat64 | ||
1071 | .llong .sys_fstat64 | ||
1072 | .llong .compat_sys_pciconfig_read | ||
1073 | .llong .compat_sys_pciconfig_write | ||
1074 | .llong .compat_sys_pciconfig_iobase /* 200 - pciconfig_iobase */ | ||
1075 | .llong .sys_ni_syscall /* reserved for MacOnLinux */ | ||
1076 | .llong .sys_getdents64 | ||
1077 | .llong .sys_pivot_root | ||
1078 | .llong .compat_sys_fcntl64 | ||
1079 | .llong .sys_madvise /* 205 */ | ||
1080 | .llong .sys_mincore | ||
1081 | .llong .sys_gettid | ||
1082 | .llong .sys_tkill | ||
1083 | .llong .sys_setxattr | ||
1084 | .llong .sys_lsetxattr /* 210 */ | ||
1085 | .llong .sys_fsetxattr | ||
1086 | .llong .sys_getxattr | ||
1087 | .llong .sys_lgetxattr | ||
1088 | .llong .sys_fgetxattr | ||
1089 | .llong .sys_listxattr /* 215 */ | ||
1090 | .llong .sys_llistxattr | ||
1091 | .llong .sys_flistxattr | ||
1092 | .llong .sys_removexattr | ||
1093 | .llong .sys_lremovexattr | ||
1094 | .llong .sys_fremovexattr /* 220 */ | ||
1095 | .llong .compat_sys_futex | ||
1096 | .llong .compat_sys_sched_setaffinity | ||
1097 | .llong .compat_sys_sched_getaffinity | ||
1098 | .llong .sys_ni_syscall | ||
1099 | .llong .sys_ni_syscall /* 225 - reserved for tux */ | ||
1100 | .llong .compat_sys_sendfile64 | ||
1101 | .llong .compat_sys_io_setup | ||
1102 | .llong .sys_io_destroy | ||
1103 | .llong .compat_sys_io_getevents | ||
1104 | .llong .compat_sys_io_submit | ||
1105 | .llong .sys_io_cancel | ||
1106 | .llong .sys_set_tid_address | ||
1107 | .llong .ppc32_fadvise64 | ||
1108 | .llong .sys_exit_group | ||
1109 | .llong .ppc32_lookup_dcookie /* 235 */ | ||
1110 | .llong .sys_epoll_create | ||
1111 | .llong .sys_epoll_ctl | ||
1112 | .llong .sys_epoll_wait | ||
1113 | .llong .sys_remap_file_pages | ||
1114 | .llong .ppc32_timer_create /* 240 */ | ||
1115 | .llong .compat_sys_timer_settime | ||
1116 | .llong .compat_sys_timer_gettime | ||
1117 | .llong .sys_timer_getoverrun | ||
1118 | .llong .sys_timer_delete | ||
1119 | .llong .compat_sys_clock_settime/* 245 */ | ||
1120 | .llong .compat_sys_clock_gettime | ||
1121 | .llong .compat_sys_clock_getres | ||
1122 | .llong .compat_sys_clock_nanosleep | ||
1123 | .llong .ppc32_swapcontext | ||
1124 | .llong .compat_sys_tgkill /* 250 */ | ||
1125 | .llong .compat_sys_utimes | ||
1126 | .llong .compat_sys_statfs64 | ||
1127 | .llong .compat_sys_fstatfs64 | ||
1128 | .llong .ppc_fadvise64_64 /* 32bit only fadvise64_64 */ | ||
1129 | .llong .ppc_rtas /* 255 */ | ||
1130 | .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */ | ||
1131 | .llong .sys_ni_syscall /* 257 reserved for vserver */ | ||
1132 | .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */ | ||
1133 | .llong .compat_sys_mbind | ||
1134 | .llong .compat_sys_get_mempolicy /* 260 */ | ||
1135 | .llong .compat_sys_set_mempolicy | ||
1136 | .llong .compat_sys_mq_open | ||
1137 | .llong .sys_mq_unlink | ||
1138 | .llong .compat_sys_mq_timedsend | ||
1139 | .llong .compat_sys_mq_timedreceive /* 265 */ | ||
1140 | .llong .compat_sys_mq_notify | ||
1141 | .llong .compat_sys_mq_getsetattr | ||
1142 | .llong .compat_sys_kexec_load | ||
1143 | .llong .compat_sys_add_key | ||
1144 | .llong .compat_sys_request_key /* 270 */ | ||
1145 | .llong .compat_sys_keyctl | ||
1146 | .llong .compat_sys_waitid | ||
1147 | .llong .compat_sys_ioprio_set | ||
1148 | .llong .compat_sys_ioprio_get | ||
1149 | .llong .sys_inotify_init /* 275 */ | ||
1150 | .llong .sys_inotify_add_watch | ||
1151 | .llong .sys_inotify_rm_watch | ||
1152 | |||
1153 | .balign 8 | ||
1154 | _GLOBAL(sys_call_table) | ||
1155 | .llong .sys_restart_syscall /* 0 */ | ||
1156 | .llong .sys_exit | ||
1157 | .llong .ppc_fork | ||
1158 | .llong .sys_read | ||
1159 | .llong .sys_write | ||
1160 | .llong .sys_open /* 5 */ | ||
1161 | .llong .sys_close | ||
1162 | .llong .sys_waitpid | ||
1163 | .llong .sys_creat | ||
1164 | .llong .sys_link | ||
1165 | .llong .sys_unlink /* 10 */ | ||
1166 | .llong .sys_execve | ||
1167 | .llong .sys_chdir | ||
1168 | .llong .sys64_time | ||
1169 | .llong .sys_mknod | ||
1170 | .llong .sys_chmod /* 15 */ | ||
1171 | .llong .sys_lchown | ||
1172 | .llong .sys_ni_syscall /* old break syscall */ | ||
1173 | .llong .sys_ni_syscall /* old stat syscall */ | ||
1174 | .llong .sys_lseek | ||
1175 | .llong .sys_getpid /* 20 */ | ||
1176 | .llong .sys_mount | ||
1177 | .llong .sys_ni_syscall /* old umount syscall */ | ||
1178 | .llong .sys_setuid | ||
1179 | .llong .sys_getuid | ||
1180 | .llong .sys_stime /* 25 */ | ||
1181 | .llong .sys_ptrace | ||
1182 | .llong .sys_alarm | ||
1183 | .llong .sys_ni_syscall /* old fstat syscall */ | ||
1184 | .llong .sys_pause | ||
1185 | .llong .sys_utime /* 30 */ | ||
1186 | .llong .sys_ni_syscall /* old stty syscall */ | ||
1187 | .llong .sys_ni_syscall /* old gtty syscall */ | ||
1188 | .llong .sys_access | ||
1189 | .llong .sys_nice | ||
1190 | .llong .sys_ni_syscall /* 35 - old ftime syscall */ | ||
1191 | .llong .sys_sync | ||
1192 | .llong .sys_kill | ||
1193 | .llong .sys_rename | ||
1194 | .llong .sys_mkdir | ||
1195 | .llong .sys_rmdir /* 40 */ | ||
1196 | .llong .sys_dup | ||
1197 | .llong .sys_pipe | ||
1198 | .llong .sys_times | ||
1199 | .llong .sys_ni_syscall /* old prof syscall */ | ||
1200 | .llong .sys_brk /* 45 */ | ||
1201 | .llong .sys_setgid | ||
1202 | .llong .sys_getgid | ||
1203 | .llong .sys_signal | ||
1204 | .llong .sys_geteuid | ||
1205 | .llong .sys_getegid /* 50 */ | ||
1206 | .llong .sys_acct | ||
1207 | .llong .sys_umount | ||
1208 | .llong .sys_ni_syscall /* old lock syscall */ | ||
1209 | .llong .sys_ioctl | ||
1210 | .llong .sys_fcntl /* 55 */ | ||
1211 | .llong .sys_ni_syscall /* old mpx syscall */ | ||
1212 | .llong .sys_setpgid | ||
1213 | .llong .sys_ni_syscall /* old ulimit syscall */ | ||
1214 | .llong .sys_ni_syscall /* old uname syscall */ | ||
1215 | .llong .sys_umask /* 60 */ | ||
1216 | .llong .sys_chroot | ||
1217 | .llong .sys_ustat | ||
1218 | .llong .sys_dup2 | ||
1219 | .llong .sys_getppid | ||
1220 | .llong .sys_getpgrp /* 65 */ | ||
1221 | .llong .sys_setsid | ||
1222 | .llong .sys_ni_syscall | ||
1223 | .llong .sys_sgetmask | ||
1224 | .llong .sys_ssetmask | ||
1225 | .llong .sys_setreuid /* 70 */ | ||
1226 | .llong .sys_setregid | ||
1227 | .llong .sys_ni_syscall | ||
1228 | .llong .sys_ni_syscall | ||
1229 | .llong .sys_sethostname | ||
1230 | .llong .sys_setrlimit /* 75 */ | ||
1231 | .llong .sys_ni_syscall /* old getrlimit syscall */ | ||
1232 | .llong .sys_getrusage | ||
1233 | .llong .sys_gettimeofday | ||
1234 | .llong .sys_settimeofday | ||
1235 | .llong .sys_getgroups /* 80 */ | ||
1236 | .llong .sys_setgroups | ||
1237 | .llong .sys_ni_syscall /* old select syscall */ | ||
1238 | .llong .sys_symlink | ||
1239 | .llong .sys_ni_syscall /* old lstat syscall */ | ||
1240 | .llong .sys_readlink /* 85 */ | ||
1241 | .llong .sys_uselib | ||
1242 | .llong .sys_swapon | ||
1243 | .llong .sys_reboot | ||
1244 | .llong .sys_ni_syscall /* old readdir syscall */ | ||
1245 | .llong .sys_mmap /* 90 */ | ||
1246 | .llong .sys_munmap | ||
1247 | .llong .sys_truncate | ||
1248 | .llong .sys_ftruncate | ||
1249 | .llong .sys_fchmod | ||
1250 | .llong .sys_fchown /* 95 */ | ||
1251 | .llong .sys_getpriority | ||
1252 | .llong .sys_setpriority | ||
1253 | .llong .sys_ni_syscall /* old profil syscall holder */ | ||
1254 | .llong .sys_statfs | ||
1255 | .llong .sys_fstatfs /* 100 */ | ||
1256 | .llong .sys_ni_syscall /* old ioperm syscall */ | ||
1257 | .llong .sys_socketcall | ||
1258 | .llong .sys_syslog | ||
1259 | .llong .sys_setitimer | ||
1260 | .llong .sys_getitimer /* 105 */ | ||
1261 | .llong .sys_newstat | ||
1262 | .llong .sys_newlstat | ||
1263 | .llong .sys_newfstat | ||
1264 | .llong .sys_ni_syscall /* old uname syscall */ | ||
1265 | .llong .sys_ni_syscall /* 110 old iopl syscall */ | ||
1266 | .llong .sys_vhangup | ||
1267 | .llong .sys_ni_syscall /* old idle syscall */ | ||
1268 | .llong .sys_ni_syscall /* old vm86 syscall */ | ||
1269 | .llong .sys_wait4 | ||
1270 | .llong .sys_swapoff /* 115 */ | ||
1271 | .llong .sys_sysinfo | ||
1272 | .llong .sys_ipc | ||
1273 | .llong .sys_fsync | ||
1274 | .llong .sys_ni_syscall | ||
1275 | .llong .ppc_clone /* 120 */ | ||
1276 | .llong .sys_setdomainname | ||
1277 | .llong .ppc_newuname | ||
1278 | .llong .sys_ni_syscall /* old modify_ldt syscall */ | ||
1279 | .llong .sys_adjtimex | ||
1280 | .llong .sys_mprotect /* 125 */ | ||
1281 | .llong .sys_ni_syscall | ||
1282 | .llong .sys_ni_syscall /* old create_module syscall */ | ||
1283 | .llong .sys_init_module | ||
1284 | .llong .sys_delete_module | ||
1285 | .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */ | ||
1286 | .llong .sys_quotactl | ||
1287 | .llong .sys_getpgid | ||
1288 | .llong .sys_fchdir | ||
1289 | .llong .sys_bdflush | ||
1290 | .llong .sys_sysfs /* 135 */ | ||
1291 | .llong .ppc64_personality | ||
1292 | .llong .sys_ni_syscall /* for afs_syscall */ | ||
1293 | .llong .sys_setfsuid | ||
1294 | .llong .sys_setfsgid | ||
1295 | .llong .sys_llseek /* 140 */ | ||
1296 | .llong .sys_getdents | ||
1297 | .llong .sys_select | ||
1298 | .llong .sys_flock | ||
1299 | .llong .sys_msync | ||
1300 | .llong .sys_readv /* 145 */ | ||
1301 | .llong .sys_writev | ||
1302 | .llong .sys_getsid | ||
1303 | .llong .sys_fdatasync | ||
1304 | .llong .sys_sysctl | ||
1305 | .llong .sys_mlock /* 150 */ | ||
1306 | .llong .sys_munlock | ||
1307 | .llong .sys_mlockall | ||
1308 | .llong .sys_munlockall | ||
1309 | .llong .sys_sched_setparam | ||
1310 | .llong .sys_sched_getparam /* 155 */ | ||
1311 | .llong .sys_sched_setscheduler | ||
1312 | .llong .sys_sched_getscheduler | ||
1313 | .llong .sys_sched_yield | ||
1314 | .llong .sys_sched_get_priority_max | ||
1315 | .llong .sys_sched_get_priority_min /* 160 */ | ||
1316 | .llong .sys_sched_rr_get_interval | ||
1317 | .llong .sys_nanosleep | ||
1318 | .llong .sys_mremap | ||
1319 | .llong .sys_setresuid | ||
1320 | .llong .sys_getresuid /* 165 */ | ||
1321 | .llong .sys_ni_syscall /* old query_module syscall */ | ||
1322 | .llong .sys_poll | ||
1323 | .llong .sys_nfsservctl | ||
1324 | .llong .sys_setresgid | ||
1325 | .llong .sys_getresgid /* 170 */ | ||
1326 | .llong .sys_prctl | ||
1327 | .llong .ppc64_rt_sigreturn | ||
1328 | .llong .sys_rt_sigaction | ||
1329 | .llong .sys_rt_sigprocmask | ||
1330 | .llong .sys_rt_sigpending /* 175 */ | ||
1331 | .llong .sys_rt_sigtimedwait | ||
1332 | .llong .sys_rt_sigqueueinfo | ||
1333 | .llong .ppc64_rt_sigsuspend | ||
1334 | .llong .sys_pread64 | ||
1335 | .llong .sys_pwrite64 /* 180 */ | ||
1336 | .llong .sys_chown | ||
1337 | .llong .sys_getcwd | ||
1338 | .llong .sys_capget | ||
1339 | .llong .sys_capset | ||
1340 | .llong .sys_sigaltstack /* 185 */ | ||
1341 | .llong .sys_sendfile64 | ||
1342 | .llong .sys_ni_syscall /* reserved for streams1 */ | ||
1343 | .llong .sys_ni_syscall /* reserved for streams2 */ | ||
1344 | .llong .ppc_vfork | ||
1345 | .llong .sys_getrlimit /* 190 */ | ||
1346 | .llong .sys_readahead | ||
1347 | .llong .sys_ni_syscall /* 32bit only mmap2 */ | ||
1348 | .llong .sys_ni_syscall /* 32bit only truncate64 */ | ||
1349 | .llong .sys_ni_syscall /* 32bit only ftruncate64 */ | ||
1350 | .llong .sys_ni_syscall /* 195 - 32bit only stat64 */ | ||
1351 | .llong .sys_ni_syscall /* 32bit only lstat64 */ | ||
1352 | .llong .sys_ni_syscall /* 32bit only fstat64 */ | ||
1353 | .llong .sys_pciconfig_read | ||
1354 | .llong .sys_pciconfig_write | ||
1355 | .llong .sys_pciconfig_iobase /* 200 - pciconfig_iobase */ | ||
1356 | .llong .sys_ni_syscall /* reserved for MacOnLinux */ | ||
1357 | .llong .sys_getdents64 | ||
1358 | .llong .sys_pivot_root | ||
1359 | .llong .sys_ni_syscall /* 32bit only fcntl64 */ | ||
1360 | .llong .sys_madvise /* 205 */ | ||
1361 | .llong .sys_mincore | ||
1362 | .llong .sys_gettid | ||
1363 | .llong .sys_tkill | ||
1364 | .llong .sys_setxattr | ||
1365 | .llong .sys_lsetxattr /* 210 */ | ||
1366 | .llong .sys_fsetxattr | ||
1367 | .llong .sys_getxattr | ||
1368 | .llong .sys_lgetxattr | ||
1369 | .llong .sys_fgetxattr | ||
1370 | .llong .sys_listxattr /* 215 */ | ||
1371 | .llong .sys_llistxattr | ||
1372 | .llong .sys_flistxattr | ||
1373 | .llong .sys_removexattr | ||
1374 | .llong .sys_lremovexattr | ||
1375 | .llong .sys_fremovexattr /* 220 */ | ||
1376 | .llong .sys_futex | ||
1377 | .llong .sys_sched_setaffinity | ||
1378 | .llong .sys_sched_getaffinity | ||
1379 | .llong .sys_ni_syscall | ||
1380 | .llong .sys_ni_syscall /* 225 - reserved for tux */ | ||
1381 | .llong .sys_ni_syscall /* 32bit only sendfile64 */ | ||
1382 | .llong .sys_io_setup | ||
1383 | .llong .sys_io_destroy | ||
1384 | .llong .sys_io_getevents | ||
1385 | .llong .sys_io_submit /* 230 */ | ||
1386 | .llong .sys_io_cancel | ||
1387 | .llong .sys_set_tid_address | ||
1388 | .llong .sys_fadvise64 | ||
1389 | .llong .sys_exit_group | ||
1390 | .llong .sys_lookup_dcookie /* 235 */ | ||
1391 | .llong .sys_epoll_create | ||
1392 | .llong .sys_epoll_ctl | ||
1393 | .llong .sys_epoll_wait | ||
1394 | .llong .sys_remap_file_pages | ||
1395 | .llong .sys_timer_create /* 240 */ | ||
1396 | .llong .sys_timer_settime | ||
1397 | .llong .sys_timer_gettime | ||
1398 | .llong .sys_timer_getoverrun | ||
1399 | .llong .sys_timer_delete | ||
1400 | .llong .sys_clock_settime /* 245 */ | ||
1401 | .llong .sys_clock_gettime | ||
1402 | .llong .sys_clock_getres | ||
1403 | .llong .sys_clock_nanosleep | ||
1404 | .llong .ppc64_swapcontext | ||
1405 | .llong .sys_tgkill /* 250 */ | ||
1406 | .llong .sys_utimes | ||
1407 | .llong .sys_statfs64 | ||
1408 | .llong .sys_fstatfs64 | ||
1409 | .llong .sys_ni_syscall /* 32bit only fadvise64_64 */ | ||
1410 | .llong .ppc_rtas /* 255 */ | ||
1411 | .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */ | ||
1412 | .llong .sys_ni_syscall /* 257 reserved for vserver */ | ||
1413 | .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */ | ||
1414 | .llong .sys_mbind | ||
1415 | .llong .sys_get_mempolicy /* 260 */ | ||
1416 | .llong .sys_set_mempolicy | ||
1417 | .llong .sys_mq_open | ||
1418 | .llong .sys_mq_unlink | ||
1419 | .llong .sys_mq_timedsend | ||
1420 | .llong .sys_mq_timedreceive /* 265 */ | ||
1421 | .llong .sys_mq_notify | ||
1422 | .llong .sys_mq_getsetattr | ||
1423 | .llong .sys_kexec_load | ||
1424 | .llong .sys_add_key | ||
1425 | .llong .sys_request_key /* 270 */ | ||
1426 | .llong .sys_keyctl | ||
1427 | .llong .sys_waitid | ||
1428 | .llong .sys_ioprio_set | ||
1429 | .llong .sys_ioprio_get | ||
1430 | .llong .sys_inotify_init /* 275 */ | ||
1431 | .llong .sys_inotify_add_watch | ||
1432 | .llong .sys_inotify_rm_watch | ||
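
The .llong entries above form the 64-bit system call table that the common entry code indexes by syscall number. A minimal user-space C sketch of the same table-driven dispatch idea (handler names here are hypothetical and this is not the kernel's actual entry path, which is in assembly):

#include <stdio.h>

/* Hypothetical handler type: real syscalls take up to six register args. */
typedef long (*syscall_fn)(long, long, long, long, long, long);

static long sys_hello(long a, long b, long c, long d, long e, long f)
{
        (void)b; (void)c; (void)d; (void)e; (void)f;
        printf("hello, arg0=%ld\n", a);
        return 0;
}

static long sys_ni(long a, long b, long c, long d, long e, long f)
{
        /* Mirrors sys_ni_syscall: unimplemented slots return -ENOSYS (-38). */
        (void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
        return -38;
}

/* Tiny stand-in for the .llong table above: the index is the syscall number. */
static syscall_fn table[] = { sys_hello, sys_ni };

int main(void)
{
        long nr = 0;                    /* on ppc64 the number arrives in r0 */
        long ret = table[nr](42, 0, 0, 0, 0, 0);
        printf("ret=%ld\n", ret);
        return 0;
}
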
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile index 0b6e967de948..42d5295bf345 100644 --- a/arch/ppc64/lib/Makefile +++ b/arch/ppc64/lib/Makefile | |||
@@ -2,17 +2,4 @@ | |||
2 | # Makefile for ppc64-specific library files.. | 2 | # Makefile for ppc64-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y := checksum.o string.o strcase.o | 5 | lib-y := string.o |
6 | lib-y += copypage.o memcpy.o copyuser.o usercopy.o | ||
7 | |||
8 | # Lock primitives are defined as no-ops in include/linux/spinlock.h | ||
9 | # for non-SMP configs. Don't build the real versions. | ||
10 | |||
11 | lib-$(CONFIG_SMP) += locks.o | ||
12 | |||
13 | # e2a provides EBCDIC to ASCII conversions. | ||
14 | ifdef CONFIG_PPC_ISERIES | ||
15 | obj-y += e2a.o | ||
16 | endif | ||
17 | |||
18 | lib-$(CONFIG_DEBUG_KERNEL) += sstep.o | ||
diff --git a/arch/ppc64/lib/checksum.S b/arch/ppc64/lib/checksum.S deleted file mode 100644 index ef96c6c58efc..000000000000 --- a/arch/ppc64/lib/checksum.S +++ /dev/null | |||
@@ -1,229 +0,0 @@ | |||
1 | /* | ||
2 | * This file contains assembly-language implementations | ||
3 | * of IP-style 1's complement checksum routines. | ||
4 | * | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au). | ||
13 | */ | ||
14 | |||
15 | #include <linux/sys.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/ppc_asm.h> | ||
19 | |||
20 | /* | ||
21 | * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header | ||
22 | * len is in words and is always >= 5. | ||
23 | * | ||
24 | * In practice len == 5, but this is not guaranteed. So this code does not | ||
25 | * attempt to use doubleword instructions. | ||
26 | */ | ||
27 | _GLOBAL(ip_fast_csum) | ||
28 | lwz r0,0(r3) | ||
29 | lwzu r5,4(r3) | ||
30 | addic. r4,r4,-2 | ||
31 | addc r0,r0,r5 | ||
32 | mtctr r4 | ||
33 | blelr- | ||
34 | 1: lwzu r4,4(r3) | ||
35 | adde r0,r0,r4 | ||
36 | bdnz 1b | ||
37 | addze r0,r0 /* add in final carry */ | ||
38 | rldicl r4,r0,32,0 /* fold two 32-bit halves together */ | ||
39 | add r0,r0,r4 | ||
40 | srdi r0,r0,32 | ||
41 | rlwinm r3,r0,16,0,31 /* fold two halves together */ | ||
42 | add r3,r0,r3 | ||
43 | not r3,r3 | ||
44 | srwi r3,r3,16 | ||
45 | blr | ||
46 | |||
47 | /* | ||
48 | * Compute checksum of TCP or UDP pseudo-header: | ||
49 | * csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum) | ||
50 | * No real gain trying to do this specially for 64 bit, but | ||
51 | * the 32 bit addition may spill into the upper bits of | ||
52 | * the doubleword so we still must fold it down from 64. | ||
53 | */ | ||
54 | _GLOBAL(csum_tcpudp_magic) | ||
55 | rlwimi r5,r6,16,0,15 /* put proto in upper half of len */ | ||
56 | addc r0,r3,r4 /* add 4 32-bit words together */ | ||
57 | adde r0,r0,r5 | ||
58 | adde r0,r0,r7 | ||
59 | rldicl r4,r0,32,0 /* fold 64 bit value */ | ||
60 | add r0,r4,r0 | ||
61 | srdi r0,r0,32 | ||
62 | rlwinm r3,r0,16,0,31 /* fold two halves together */ | ||
63 | add r3,r0,r3 | ||
64 | not r3,r3 | ||
65 | srwi r3,r3,16 | ||
66 | blr | ||
67 | |||
68 | /* | ||
69 | * Computes the checksum of a memory block at buff, length len, | ||
70 | * and adds in "sum" (32-bit). | ||
71 | * | ||
72 | * This code assumes at least halfword alignment, though the length | ||
73 | * can be any number of bytes. The sum is accumulated in r5. | ||
74 | * | ||
75 | * csum_partial(r3=buff, r4=len, r5=sum) | ||
76 | */ | ||
77 | _GLOBAL(csum_partial) | ||
78 | subi r3,r3,8 /* we'll offset by 8 for the loads */ | ||
79 | srdi. r6,r4,3 /* divide by 8 for doubleword count */ | ||
80 | addic r5,r5,0 /* clear carry */ | ||
81 | beq 3f /* if we're doing < 8 bytes */ | ||
82 | andi. r0,r3,2 /* aligned on a word boundary already? */ | ||
83 | beq+ 1f | ||
84 | lhz r6,8(r3) /* do 2 bytes to get aligned */ | ||
85 | addi r3,r3,2 | ||
86 | subi r4,r4,2 | ||
87 | addc r5,r5,r6 | ||
88 | srdi. r6,r4,3 /* recompute number of doublewords */ | ||
89 | beq 3f /* any left? */ | ||
90 | 1: mtctr r6 | ||
91 | 2: ldu r6,8(r3) /* main sum loop */ | ||
92 | adde r5,r5,r6 | ||
93 | bdnz 2b | ||
94 | andi. r4,r4,7 /* compute bytes left to sum after doublewords */ | ||
95 | 3: cmpwi 0,r4,4 /* is at least a full word left? */ | ||
96 | blt 4f | ||
97 | lwz r6,8(r3) /* sum this word */ | ||
98 | addi r3,r3,4 | ||
99 | subi r4,r4,4 | ||
100 | adde r5,r5,r6 | ||
101 | 4: cmpwi 0,r4,2 /* is at least a halfword left? */ | ||
102 | blt+ 5f | ||
103 | lhz r6,8(r3) /* sum this halfword */ | ||
104 | addi r3,r3,2 | ||
105 | subi r4,r4,2 | ||
106 | adde r5,r5,r6 | ||
107 | 5: cmpwi 0,r4,1 /* is at least a byte left? */ | ||
108 | bne+ 6f | ||
109 | lbz r6,8(r3) /* sum this byte */ | ||
110 | slwi r6,r6,8 /* this byte is assumed to be the upper byte of a halfword */ | ||
111 | adde r5,r5,r6 | ||
112 | 6: addze r5,r5 /* add in final carry */ | ||
113 | rldicl r4,r5,32,0 /* fold two 32-bit halves together */ | ||
114 | add r3,r4,r5 | ||
115 | srdi r3,r3,32 | ||
116 | blr | ||
117 | |||
118 | /* | ||
119 | * Computes the checksum of a memory block at src, length len, | ||
120 | * and adds in "sum" (32-bit), while copying the block to dst. | ||
121 | * If an access exception occurs on src or dst, it stores -EFAULT | ||
122 | * to *src_err or *dst_err respectively, and (for an error on | ||
123 | * src) zeroes the rest of dst. | ||
124 | * | ||
125 | * This code needs to be reworked to take advantage of 64 bit sum+copy. | ||
126 | * However, due to tokenring halfword alignment problems this will be very | ||
127 | * tricky. For now we'll leave it until we instrument it somehow. | ||
128 | * | ||
129 | * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err) | ||
130 | */ | ||
131 | _GLOBAL(csum_partial_copy_generic) | ||
132 | addic r0,r6,0 | ||
133 | subi r3,r3,4 | ||
134 | subi r4,r4,4 | ||
135 | srwi. r6,r5,2 | ||
136 | beq 3f /* if we're doing < 4 bytes */ | ||
137 | andi. r9,r4,2 /* Align dst to longword boundary */ | ||
138 | beq+ 1f | ||
139 | 81: lhz r6,4(r3) /* do 2 bytes to get aligned */ | ||
140 | addi r3,r3,2 | ||
141 | subi r5,r5,2 | ||
142 | 91: sth r6,4(r4) | ||
143 | addi r4,r4,2 | ||
144 | addc r0,r0,r6 | ||
145 | srwi. r6,r5,2 /* # words to do */ | ||
146 | beq 3f | ||
147 | 1: mtctr r6 | ||
148 | 82: lwzu r6,4(r3) /* the bdnz has zero overhead, so it should */ | ||
149 | 92: stwu r6,4(r4) /* be unnecessary to unroll this loop */ | ||
150 | adde r0,r0,r6 | ||
151 | bdnz 82b | ||
152 | andi. r5,r5,3 | ||
153 | 3: cmpwi 0,r5,2 | ||
154 | blt+ 4f | ||
155 | 83: lhz r6,4(r3) | ||
156 | addi r3,r3,2 | ||
157 | subi r5,r5,2 | ||
158 | 93: sth r6,4(r4) | ||
159 | addi r4,r4,2 | ||
160 | adde r0,r0,r6 | ||
161 | 4: cmpwi 0,r5,1 | ||
162 | bne+ 5f | ||
163 | 84: lbz r6,4(r3) | ||
164 | 94: stb r6,4(r4) | ||
165 | slwi r6,r6,8 /* Upper byte of word */ | ||
166 | adde r0,r0,r6 | ||
167 | 5: addze r3,r0 /* add in final carry (unlikely with 64-bit regs) */ | ||
168 | rldicl r4,r3,32,0 /* fold 64 bit value */ | ||
169 | add r3,r4,r3 | ||
170 | srdi r3,r3,32 | ||
171 | blr | ||
172 | |||
173 | /* These shouldn't go in the fixup section, since that would | ||
174 | cause the ex_table addresses to get out of order. */ | ||
175 | |||
176 | .globl src_error_1 | ||
177 | src_error_1: | ||
178 | li r6,0 | ||
179 | subi r5,r5,2 | ||
180 | 95: sth r6,4(r4) | ||
181 | addi r4,r4,2 | ||
182 | srwi. r6,r5,2 | ||
183 | beq 3f | ||
184 | mtctr r6 | ||
185 | .globl src_error_2 | ||
186 | src_error_2: | ||
187 | li r6,0 | ||
188 | 96: stwu r6,4(r4) | ||
189 | bdnz 96b | ||
190 | 3: andi. r5,r5,3 | ||
191 | beq src_error | ||
192 | .globl src_error_3 | ||
193 | src_error_3: | ||
194 | li r6,0 | ||
195 | mtctr r5 | ||
196 | addi r4,r4,3 | ||
197 | 97: stbu r6,1(r4) | ||
198 | bdnz 97b | ||
199 | .globl src_error | ||
200 | src_error: | ||
201 | cmpdi 0,r7,0 | ||
202 | beq 1f | ||
203 | li r6,-EFAULT | ||
204 | stw r6,0(r7) | ||
205 | 1: addze r3,r0 | ||
206 | blr | ||
207 | |||
208 | .globl dst_error | ||
209 | dst_error: | ||
210 | cmpdi 0,r8,0 | ||
211 | beq 1f | ||
212 | li r6,-EFAULT | ||
213 | stw r6,0(r8) | ||
214 | 1: addze r3,r0 | ||
215 | blr | ||
216 | |||
217 | .section __ex_table,"a" | ||
218 | .align 3 | ||
219 | .llong 81b,src_error_1 | ||
220 | .llong 91b,dst_error | ||
221 | .llong 82b,src_error_2 | ||
222 | .llong 92b,dst_error | ||
223 | .llong 83b,src_error_3 | ||
224 | .llong 93b,dst_error | ||
225 | .llong 84b,src_error_3 | ||
226 | .llong 94b,dst_error | ||
227 | .llong 95b,dst_error | ||
228 | .llong 96b,dst_error | ||
229 | .llong 97b,dst_error | ||
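
The deleted routines above all finish with the same fold: accumulate the one's-complement sum in a doubleword, fold the two 32-bit halves together, fold the two 16-bit halves, then complement. A portable C sketch of that sequence (a reference illustration only, assuming a 32-bit-aligned buffer and a length in words, as ip_fast_csum does):

#include <stdint.h>
#include <stdio.h>

/*
 * IP-style 1's complement checksum over 'len' 32-bit words, mirroring the
 * fold used above: sum in 64 bits, fold 64->32, fold 32->16, complement.
 */
static uint16_t ip_csum_words(const uint32_t *buf, int len)
{
        uint64_t sum = 0;
        int i;

        for (i = 0; i < len; i++)
                sum += buf[i];

        sum = (sum & 0xffffffffULL) + (sum >> 32);   /* fold 64 -> 32 plus carry */
        sum = (sum & 0xffffffffULL) + (sum >> 32);
        sum = (sum & 0xffff) + (sum >> 16);          /* fold 32 -> 16 plus carry */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        /* A 20-byte IPv4 header is 5 words; all-zero data checksums to 0xffff. */
        uint32_t hdr[5] = { 0, 0, 0, 0, 0 };
        printf("csum=0x%04x\n", ip_csum_words(hdr, 5));
        return 0;
}
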
diff --git a/arch/ppc64/lib/copypage.S b/arch/ppc64/lib/copypage.S deleted file mode 100644 index 733d61618bbf..000000000000 --- a/arch/ppc64/lib/copypage.S +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ppc64/lib/copypage.S | ||
3 | * | ||
4 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/ppc_asm.h> | ||
13 | |||
14 | _GLOBAL(copy_page) | ||
15 | std r31,-8(1) | ||
16 | std r30,-16(1) | ||
17 | std r29,-24(1) | ||
18 | std r28,-32(1) | ||
19 | std r27,-40(1) | ||
20 | std r26,-48(1) | ||
21 | std r25,-56(1) | ||
22 | std r24,-64(1) | ||
23 | std r23,-72(1) | ||
24 | std r22,-80(1) | ||
25 | std r21,-88(1) | ||
26 | std r20,-96(1) | ||
27 | li r5,4096/32 - 1 | ||
28 | addi r3,r3,-8 | ||
29 | li r12,5 | ||
30 | 0: addi r5,r5,-24 | ||
31 | mtctr r12 | ||
32 | ld r22,640(4) | ||
33 | ld r21,512(4) | ||
34 | ld r20,384(4) | ||
35 | ld r11,256(4) | ||
36 | ld r9,128(4) | ||
37 | ld r7,0(4) | ||
38 | ld r25,648(4) | ||
39 | ld r24,520(4) | ||
40 | ld r23,392(4) | ||
41 | ld r10,264(4) | ||
42 | ld r8,136(4) | ||
43 | ldu r6,8(4) | ||
44 | cmpwi r5,24 | ||
45 | 1: std r22,648(3) | ||
46 | std r21,520(3) | ||
47 | std r20,392(3) | ||
48 | std r11,264(3) | ||
49 | std r9,136(3) | ||
50 | std r7,8(3) | ||
51 | ld r28,648(4) | ||
52 | ld r27,520(4) | ||
53 | ld r26,392(4) | ||
54 | ld r31,264(4) | ||
55 | ld r30,136(4) | ||
56 | ld r29,8(4) | ||
57 | std r25,656(3) | ||
58 | std r24,528(3) | ||
59 | std r23,400(3) | ||
60 | std r10,272(3) | ||
61 | std r8,144(3) | ||
62 | std r6,16(3) | ||
63 | ld r22,656(4) | ||
64 | ld r21,528(4) | ||
65 | ld r20,400(4) | ||
66 | ld r11,272(4) | ||
67 | ld r9,144(4) | ||
68 | ld r7,16(4) | ||
69 | std r28,664(3) | ||
70 | std r27,536(3) | ||
71 | std r26,408(3) | ||
72 | std r31,280(3) | ||
73 | std r30,152(3) | ||
74 | stdu r29,24(3) | ||
75 | ld r25,664(4) | ||
76 | ld r24,536(4) | ||
77 | ld r23,408(4) | ||
78 | ld r10,280(4) | ||
79 | ld r8,152(4) | ||
80 | ldu r6,24(4) | ||
81 | bdnz 1b | ||
82 | std r22,648(3) | ||
83 | std r21,520(3) | ||
84 | std r20,392(3) | ||
85 | std r11,264(3) | ||
86 | std r9,136(3) | ||
87 | std r7,8(3) | ||
88 | addi r4,r4,640 | ||
89 | addi r3,r3,648 | ||
90 | bge 0b | ||
91 | mtctr r5 | ||
92 | ld r7,0(4) | ||
93 | ld r8,8(4) | ||
94 | ldu r9,16(4) | ||
95 | 3: ld r10,8(4) | ||
96 | std r7,8(3) | ||
97 | ld r7,16(4) | ||
98 | std r8,16(3) | ||
99 | ld r8,24(4) | ||
100 | std r9,24(3) | ||
101 | ldu r9,32(4) | ||
102 | stdu r10,32(3) | ||
103 | bdnz 3b | ||
104 | 4: ld r10,8(4) | ||
105 | std r7,8(3) | ||
106 | std r8,16(3) | ||
107 | std r9,24(3) | ||
108 | std r10,32(3) | ||
109 | 9: ld r20,-96(1) | ||
110 | ld r21,-88(1) | ||
111 | ld r22,-80(1) | ||
112 | ld r23,-72(1) | ||
113 | ld r24,-64(1) | ||
114 | ld r25,-56(1) | ||
115 | ld r26,-48(1) | ||
116 | ld r27,-40(1) | ||
117 | ld r28,-32(1) | ||
118 | ld r29,-24(1) | ||
119 | ld r30,-16(1) | ||
120 | ld r31,-8(1) | ||
121 | blr | ||
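
copy_page above unrolls the copy and staggers loads and stores across several cache lines to keep the POWER pipelines busy; the underlying job is simply a 4 KB block move. A plain C sketch of the semantics only (assuming 4096-byte, 8-byte-aligned pages, as the 4096/32 constant above implies):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 4096  /* assumption: 4 KB pages */

/* Reference page copy, one doubleword at a time; no unrolling or scheduling. */
static void copy_page_ref(void *dst, const void *src)
{
        uint64_t *d = dst;
        const uint64_t *s = src;
        size_t i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++)
                d[i] = s[i];
}

int main(void)
{
        static uint64_t src[PAGE_SIZE / 8], dst[PAGE_SIZE / 8];

        src[0] = 0x1234;
        copy_page_ref(dst, src);
        printf("%s\n", memcmp(dst, src, PAGE_SIZE) == 0 ? "match" : "mismatch");
        return 0;
}
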
diff --git a/arch/ppc64/lib/copyuser.S b/arch/ppc64/lib/copyuser.S deleted file mode 100644 index a0b3fbbd6fb1..000000000000 --- a/arch/ppc64/lib/copyuser.S +++ /dev/null | |||
@@ -1,576 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ppc64/lib/copyuser.S | ||
3 | * | ||
4 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/ppc_asm.h> | ||
13 | |||
14 | .align 7 | ||
15 | _GLOBAL(__copy_tofrom_user) | ||
16 | /* first check for a whole page copy on a page boundary */ | ||
17 | cmpldi cr1,r5,16 | ||
18 | cmpdi cr6,r5,4096 | ||
19 | or r0,r3,r4 | ||
20 | neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */ | ||
21 | andi. r0,r0,4095 | ||
22 | std r3,-24(r1) | ||
23 | crand cr0*4+2,cr0*4+2,cr6*4+2 | ||
24 | std r4,-16(r1) | ||
25 | std r5,-8(r1) | ||
26 | dcbt 0,r4 | ||
27 | beq .Lcopy_page | ||
28 | andi. r6,r6,7 | ||
29 | mtcrf 0x01,r5 | ||
30 | blt cr1,.Lshort_copy | ||
31 | bne .Ldst_unaligned | ||
32 | .Ldst_aligned: | ||
33 | andi. r0,r4,7 | ||
34 | addi r3,r3,-16 | ||
35 | bne .Lsrc_unaligned | ||
36 | srdi r7,r5,4 | ||
37 | 20: ld r9,0(r4) | ||
38 | addi r4,r4,-8 | ||
39 | mtctr r7 | ||
40 | andi. r5,r5,7 | ||
41 | bf cr7*4+0,22f | ||
42 | addi r3,r3,8 | ||
43 | addi r4,r4,8 | ||
44 | mr r8,r9 | ||
45 | blt cr1,72f | ||
46 | 21: ld r9,8(r4) | ||
47 | 70: std r8,8(r3) | ||
48 | 22: ldu r8,16(r4) | ||
49 | 71: stdu r9,16(r3) | ||
50 | bdnz 21b | ||
51 | 72: std r8,8(r3) | ||
52 | beq+ 3f | ||
53 | addi r3,r3,16 | ||
54 | 23: ld r9,8(r4) | ||
55 | .Ldo_tail: | ||
56 | bf cr7*4+1,1f | ||
57 | rotldi r9,r9,32 | ||
58 | 73: stw r9,0(r3) | ||
59 | addi r3,r3,4 | ||
60 | 1: bf cr7*4+2,2f | ||
61 | rotldi r9,r9,16 | ||
62 | 74: sth r9,0(r3) | ||
63 | addi r3,r3,2 | ||
64 | 2: bf cr7*4+3,3f | ||
65 | rotldi r9,r9,8 | ||
66 | 75: stb r9,0(r3) | ||
67 | 3: li r3,0 | ||
68 | blr | ||
69 | |||
70 | .Lsrc_unaligned: | ||
71 | srdi r6,r5,3 | ||
72 | addi r5,r5,-16 | ||
73 | subf r4,r0,r4 | ||
74 | srdi r7,r5,4 | ||
75 | sldi r10,r0,3 | ||
76 | cmpldi cr6,r6,3 | ||
77 | andi. r5,r5,7 | ||
78 | mtctr r7 | ||
79 | subfic r11,r10,64 | ||
80 | add r5,r5,r0 | ||
81 | bt cr7*4+0,28f | ||
82 | |||
83 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ | ||
84 | 25: ld r0,8(r4) | ||
85 | sld r6,r9,r10 | ||
86 | 26: ldu r9,16(r4) | ||
87 | srd r7,r0,r11 | ||
88 | sld r8,r0,r10 | ||
89 | or r7,r7,r6 | ||
90 | blt cr6,79f | ||
91 | 27: ld r0,8(r4) | ||
92 | b 2f | ||
93 | |||
94 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ | ||
95 | 29: ldu r9,8(r4) | ||
96 | sld r8,r0,r10 | ||
97 | addi r3,r3,-8 | ||
98 | blt cr6,5f | ||
99 | 30: ld r0,8(r4) | ||
100 | srd r12,r9,r11 | ||
101 | sld r6,r9,r10 | ||
102 | 31: ldu r9,16(r4) | ||
103 | or r12,r8,r12 | ||
104 | srd r7,r0,r11 | ||
105 | sld r8,r0,r10 | ||
106 | addi r3,r3,16 | ||
107 | beq cr6,78f | ||
108 | |||
109 | 1: or r7,r7,r6 | ||
110 | 32: ld r0,8(r4) | ||
111 | 76: std r12,8(r3) | ||
112 | 2: srd r12,r9,r11 | ||
113 | sld r6,r9,r10 | ||
114 | 33: ldu r9,16(r4) | ||
115 | or r12,r8,r12 | ||
116 | 77: stdu r7,16(r3) | ||
117 | srd r7,r0,r11 | ||
118 | sld r8,r0,r10 | ||
119 | bdnz 1b | ||
120 | |||
121 | 78: std r12,8(r3) | ||
122 | or r7,r7,r6 | ||
123 | 79: std r7,16(r3) | ||
124 | 5: srd r12,r9,r11 | ||
125 | or r12,r8,r12 | ||
126 | 80: std r12,24(r3) | ||
127 | bne 6f | ||
128 | li r3,0 | ||
129 | blr | ||
130 | 6: cmpwi cr1,r5,8 | ||
131 | addi r3,r3,32 | ||
132 | sld r9,r9,r10 | ||
133 | ble cr1,.Ldo_tail | ||
134 | 34: ld r0,8(r4) | ||
135 | srd r7,r0,r11 | ||
136 | or r9,r7,r9 | ||
137 | b .Ldo_tail | ||
138 | |||
139 | .Ldst_unaligned: | ||
140 | mtcrf 0x01,r6 /* put #bytes to 8B bdry into cr7 */ | ||
141 | subf r5,r6,r5 | ||
142 | li r7,0 | ||
143 | cmpldi r1,r5,16 | ||
144 | bf cr7*4+3,1f | ||
145 | 35: lbz r0,0(r4) | ||
146 | 81: stb r0,0(r3) | ||
147 | addi r7,r7,1 | ||
148 | 1: bf cr7*4+2,2f | ||
149 | 36: lhzx r0,r7,r4 | ||
150 | 82: sthx r0,r7,r3 | ||
151 | addi r7,r7,2 | ||
152 | 2: bf cr7*4+1,3f | ||
153 | 37: lwzx r0,r7,r4 | ||
154 | 83: stwx r0,r7,r3 | ||
155 | 3: mtcrf 0x01,r5 | ||
156 | add r4,r6,r4 | ||
157 | add r3,r6,r3 | ||
158 | b .Ldst_aligned | ||
159 | |||
160 | .Lshort_copy: | ||
161 | bf cr7*4+0,1f | ||
162 | 38: lwz r0,0(r4) | ||
163 | 39: lwz r9,4(r4) | ||
164 | addi r4,r4,8 | ||
165 | 84: stw r0,0(r3) | ||
166 | 85: stw r9,4(r3) | ||
167 | addi r3,r3,8 | ||
168 | 1: bf cr7*4+1,2f | ||
169 | 40: lwz r0,0(r4) | ||
170 | addi r4,r4,4 | ||
171 | 86: stw r0,0(r3) | ||
172 | addi r3,r3,4 | ||
173 | 2: bf cr7*4+2,3f | ||
174 | 41: lhz r0,0(r4) | ||
175 | addi r4,r4,2 | ||
176 | 87: sth r0,0(r3) | ||
177 | addi r3,r3,2 | ||
178 | 3: bf cr7*4+3,4f | ||
179 | 42: lbz r0,0(r4) | ||
180 | 88: stb r0,0(r3) | ||
181 | 4: li r3,0 | ||
182 | blr | ||
183 | |||
184 | /* | ||
185 | * exception handlers follow | ||
186 | * we have to return the number of bytes not copied | ||
187 | * for an exception on a load, we set the rest of the destination to 0 | ||
188 | */ | ||
189 | |||
190 | 136: | ||
191 | 137: | ||
192 | add r3,r3,r7 | ||
193 | b 1f | ||
194 | 130: | ||
195 | 131: | ||
196 | addi r3,r3,8 | ||
197 | 120: | ||
198 | 122: | ||
199 | 124: | ||
200 | 125: | ||
201 | 126: | ||
202 | 127: | ||
203 | 128: | ||
204 | 129: | ||
205 | 133: | ||
206 | addi r3,r3,8 | ||
207 | 121: | ||
208 | 132: | ||
209 | addi r3,r3,8 | ||
210 | 123: | ||
211 | 134: | ||
212 | 135: | ||
213 | 138: | ||
214 | 139: | ||
215 | 140: | ||
216 | 141: | ||
217 | 142: | ||
218 | |||
219 | /* | ||
220 | * here we have had a fault on a load and r3 points to the first | ||
221 | * unmodified byte of the destination | ||
222 | */ | ||
223 | 1: ld r6,-24(r1) | ||
224 | ld r4,-16(r1) | ||
225 | ld r5,-8(r1) | ||
226 | subf r6,r6,r3 | ||
227 | add r4,r4,r6 | ||
228 | subf r5,r6,r5 /* #bytes left to go */ | ||
229 | |||
230 | /* | ||
231 | * first see if we can copy any more bytes before hitting another exception | ||
232 | */ | ||
233 | mtctr r5 | ||
234 | 43: lbz r0,0(r4) | ||
235 | addi r4,r4,1 | ||
236 | 89: stb r0,0(r3) | ||
237 | addi r3,r3,1 | ||
238 | bdnz 43b | ||
239 | li r3,0 /* huh? all copied successfully this time? */ | ||
240 | blr | ||
241 | |||
242 | /* | ||
243 | * here we have trapped again, need to clear ctr bytes starting at r3 | ||
244 | */ | ||
245 | 143: mfctr r5 | ||
246 | li r0,0 | ||
247 | mr r4,r3 | ||
248 | mr r3,r5 /* return the number of bytes not copied */ | ||
249 | 1: andi. r9,r4,7 | ||
250 | beq 3f | ||
251 | 90: stb r0,0(r4) | ||
252 | addic. r5,r5,-1 | ||
253 | addi r4,r4,1 | ||
254 | bne 1b | ||
255 | blr | ||
256 | 3: cmpldi cr1,r5,8 | ||
257 | srdi r9,r5,3 | ||
258 | andi. r5,r5,7 | ||
259 | blt cr1,93f | ||
260 | mtctr r9 | ||
261 | 91: std r0,0(r4) | ||
262 | addi r4,r4,8 | ||
263 | bdnz 91b | ||
264 | 93: beqlr | ||
265 | mtctr r5 | ||
266 | 92: stb r0,0(r4) | ||
267 | addi r4,r4,1 | ||
268 | bdnz 92b | ||
269 | blr | ||
270 | |||
271 | /* | ||
272 | * exception handlers for stores: we just need to work | ||
273 | * out how many bytes weren't copied | ||
274 | */ | ||
275 | 182: | ||
276 | 183: | ||
277 | add r3,r3,r7 | ||
278 | b 1f | ||
279 | 180: | ||
280 | addi r3,r3,8 | ||
281 | 171: | ||
282 | 177: | ||
283 | addi r3,r3,8 | ||
284 | 170: | ||
285 | 172: | ||
286 | 176: | ||
287 | 178: | ||
288 | addi r3,r3,4 | ||
289 | 185: | ||
290 | addi r3,r3,4 | ||
291 | 173: | ||
292 | 174: | ||
293 | 175: | ||
294 | 179: | ||
295 | 181: | ||
296 | 184: | ||
297 | 186: | ||
298 | 187: | ||
299 | 188: | ||
300 | 189: | ||
301 | 1: | ||
302 | ld r6,-24(r1) | ||
303 | ld r5,-8(r1) | ||
304 | add r6,r6,r5 | ||
305 | subf r3,r3,r6 /* #bytes not copied */ | ||
306 | 190: | ||
307 | 191: | ||
308 | 192: | ||
309 | blr /* #bytes not copied in r3 */ | ||
310 | |||
311 | .section __ex_table,"a" | ||
312 | .align 3 | ||
313 | .llong 20b,120b | ||
314 | .llong 21b,121b | ||
315 | .llong 70b,170b | ||
316 | .llong 22b,122b | ||
317 | .llong 71b,171b | ||
318 | .llong 72b,172b | ||
319 | .llong 23b,123b | ||
320 | .llong 73b,173b | ||
321 | .llong 74b,174b | ||
322 | .llong 75b,175b | ||
323 | .llong 24b,124b | ||
324 | .llong 25b,125b | ||
325 | .llong 26b,126b | ||
326 | .llong 27b,127b | ||
327 | .llong 28b,128b | ||
328 | .llong 29b,129b | ||
329 | .llong 30b,130b | ||
330 | .llong 31b,131b | ||
331 | .llong 32b,132b | ||
332 | .llong 76b,176b | ||
333 | .llong 33b,133b | ||
334 | .llong 77b,177b | ||
335 | .llong 78b,178b | ||
336 | .llong 79b,179b | ||
337 | .llong 80b,180b | ||
338 | .llong 34b,134b | ||
339 | .llong 35b,135b | ||
340 | .llong 81b,181b | ||
341 | .llong 36b,136b | ||
342 | .llong 82b,182b | ||
343 | .llong 37b,137b | ||
344 | .llong 83b,183b | ||
345 | .llong 38b,138b | ||
346 | .llong 39b,139b | ||
347 | .llong 84b,184b | ||
348 | .llong 85b,185b | ||
349 | .llong 40b,140b | ||
350 | .llong 86b,186b | ||
351 | .llong 41b,141b | ||
352 | .llong 87b,187b | ||
353 | .llong 42b,142b | ||
354 | .llong 88b,188b | ||
355 | .llong 43b,143b | ||
356 | .llong 89b,189b | ||
357 | .llong 90b,190b | ||
358 | .llong 91b,191b | ||
359 | .llong 92b,192b | ||
360 | |||
361 | .text | ||
362 | |||
363 | /* | ||
364 | * Routine to copy a whole page of data, optimized for POWER4. | ||
365 | * On POWER4 it is more than 50% faster than the simple loop | ||
366 | * above (following the .Ldst_aligned label) but it runs slightly | ||
367 | * slower on POWER3. | ||
368 | */ | ||
369 | .Lcopy_page: | ||
370 | std r31,-32(1) | ||
371 | std r30,-40(1) | ||
372 | std r29,-48(1) | ||
373 | std r28,-56(1) | ||
374 | std r27,-64(1) | ||
375 | std r26,-72(1) | ||
376 | std r25,-80(1) | ||
377 | std r24,-88(1) | ||
378 | std r23,-96(1) | ||
379 | std r22,-104(1) | ||
380 | std r21,-112(1) | ||
381 | std r20,-120(1) | ||
382 | li r5,4096/32 - 1 | ||
383 | addi r3,r3,-8 | ||
384 | li r0,5 | ||
385 | 0: addi r5,r5,-24 | ||
386 | mtctr r0 | ||
387 | 20: ld r22,640(4) | ||
388 | 21: ld r21,512(4) | ||
389 | 22: ld r20,384(4) | ||
390 | 23: ld r11,256(4) | ||
391 | 24: ld r9,128(4) | ||
392 | 25: ld r7,0(4) | ||
393 | 26: ld r25,648(4) | ||
394 | 27: ld r24,520(4) | ||
395 | 28: ld r23,392(4) | ||
396 | 29: ld r10,264(4) | ||
397 | 30: ld r8,136(4) | ||
398 | 31: ldu r6,8(4) | ||
399 | cmpwi r5,24 | ||
400 | 1: | ||
401 | 32: std r22,648(3) | ||
402 | 33: std r21,520(3) | ||
403 | 34: std r20,392(3) | ||
404 | 35: std r11,264(3) | ||
405 | 36: std r9,136(3) | ||
406 | 37: std r7,8(3) | ||
407 | 38: ld r28,648(4) | ||
408 | 39: ld r27,520(4) | ||
409 | 40: ld r26,392(4) | ||
410 | 41: ld r31,264(4) | ||
411 | 42: ld r30,136(4) | ||
412 | 43: ld r29,8(4) | ||
413 | 44: std r25,656(3) | ||
414 | 45: std r24,528(3) | ||
415 | 46: std r23,400(3) | ||
416 | 47: std r10,272(3) | ||
417 | 48: std r8,144(3) | ||
418 | 49: std r6,16(3) | ||
419 | 50: ld r22,656(4) | ||
420 | 51: ld r21,528(4) | ||
421 | 52: ld r20,400(4) | ||
422 | 53: ld r11,272(4) | ||
423 | 54: ld r9,144(4) | ||
424 | 55: ld r7,16(4) | ||
425 | 56: std r28,664(3) | ||
426 | 57: std r27,536(3) | ||
427 | 58: std r26,408(3) | ||
428 | 59: std r31,280(3) | ||
429 | 60: std r30,152(3) | ||
430 | 61: stdu r29,24(3) | ||
431 | 62: ld r25,664(4) | ||
432 | 63: ld r24,536(4) | ||
433 | 64: ld r23,408(4) | ||
434 | 65: ld r10,280(4) | ||
435 | 66: ld r8,152(4) | ||
436 | 67: ldu r6,24(4) | ||
437 | bdnz 1b | ||
438 | 68: std r22,648(3) | ||
439 | 69: std r21,520(3) | ||
440 | 70: std r20,392(3) | ||
441 | 71: std r11,264(3) | ||
442 | 72: std r9,136(3) | ||
443 | 73: std r7,8(3) | ||
444 | 74: addi r4,r4,640 | ||
445 | 75: addi r3,r3,648 | ||
446 | bge 0b | ||
447 | mtctr r5 | ||
448 | 76: ld r7,0(4) | ||
449 | 77: ld r8,8(4) | ||
450 | 78: ldu r9,16(4) | ||
451 | 3: | ||
452 | 79: ld r10,8(4) | ||
453 | 80: std r7,8(3) | ||
454 | 81: ld r7,16(4) | ||
455 | 82: std r8,16(3) | ||
456 | 83: ld r8,24(4) | ||
457 | 84: std r9,24(3) | ||
458 | 85: ldu r9,32(4) | ||
459 | 86: stdu r10,32(3) | ||
460 | bdnz 3b | ||
461 | 4: | ||
462 | 87: ld r10,8(4) | ||
463 | 88: std r7,8(3) | ||
464 | 89: std r8,16(3) | ||
465 | 90: std r9,24(3) | ||
466 | 91: std r10,32(3) | ||
467 | 9: ld r20,-120(1) | ||
468 | ld r21,-112(1) | ||
469 | ld r22,-104(1) | ||
470 | ld r23,-96(1) | ||
471 | ld r24,-88(1) | ||
472 | ld r25,-80(1) | ||
473 | ld r26,-72(1) | ||
474 | ld r27,-64(1) | ||
475 | ld r28,-56(1) | ||
476 | ld r29,-48(1) | ||
477 | ld r30,-40(1) | ||
478 | ld r31,-32(1) | ||
479 | li r3,0 | ||
480 | blr | ||
481 | |||
482 | /* | ||
483 | * on an exception, reset to the beginning and jump back into the | ||
484 | * standard __copy_tofrom_user | ||
485 | */ | ||
486 | 100: ld r20,-120(1) | ||
487 | ld r21,-112(1) | ||
488 | ld r22,-104(1) | ||
489 | ld r23,-96(1) | ||
490 | ld r24,-88(1) | ||
491 | ld r25,-80(1) | ||
492 | ld r26,-72(1) | ||
493 | ld r27,-64(1) | ||
494 | ld r28,-56(1) | ||
495 | ld r29,-48(1) | ||
496 | ld r30,-40(1) | ||
497 | ld r31,-32(1) | ||
498 | ld r3,-24(r1) | ||
499 | ld r4,-16(r1) | ||
500 | li r5,4096 | ||
501 | b .Ldst_aligned | ||
502 | |||
503 | .section __ex_table,"a" | ||
504 | .align 3 | ||
505 | .llong 20b,100b | ||
506 | .llong 21b,100b | ||
507 | .llong 22b,100b | ||
508 | .llong 23b,100b | ||
509 | .llong 24b,100b | ||
510 | .llong 25b,100b | ||
511 | .llong 26b,100b | ||
512 | .llong 27b,100b | ||
513 | .llong 28b,100b | ||
514 | .llong 29b,100b | ||
515 | .llong 30b,100b | ||
516 | .llong 31b,100b | ||
517 | .llong 32b,100b | ||
518 | .llong 33b,100b | ||
519 | .llong 34b,100b | ||
520 | .llong 35b,100b | ||
521 | .llong 36b,100b | ||
522 | .llong 37b,100b | ||
523 | .llong 38b,100b | ||
524 | .llong 39b,100b | ||
525 | .llong 40b,100b | ||
526 | .llong 41b,100b | ||
527 | .llong 42b,100b | ||
528 | .llong 43b,100b | ||
529 | .llong 44b,100b | ||
530 | .llong 45b,100b | ||
531 | .llong 46b,100b | ||
532 | .llong 47b,100b | ||
533 | .llong 48b,100b | ||
534 | .llong 49b,100b | ||
535 | .llong 50b,100b | ||
536 | .llong 51b,100b | ||
537 | .llong 52b,100b | ||
538 | .llong 53b,100b | ||
539 | .llong 54b,100b | ||
540 | .llong 55b,100b | ||
541 | .llong 56b,100b | ||
542 | .llong 57b,100b | ||
543 | .llong 58b,100b | ||
544 | .llong 59b,100b | ||
545 | .llong 60b,100b | ||
546 | .llong 61b,100b | ||
547 | .llong 62b,100b | ||
548 | .llong 63b,100b | ||
549 | .llong 64b,100b | ||
550 | .llong 65b,100b | ||
551 | .llong 66b,100b | ||
552 | .llong 67b,100b | ||
553 | .llong 68b,100b | ||
554 | .llong 69b,100b | ||
555 | .llong 70b,100b | ||
556 | .llong 71b,100b | ||
557 | .llong 72b,100b | ||
558 | .llong 73b,100b | ||
559 | .llong 74b,100b | ||
560 | .llong 75b,100b | ||
561 | .llong 76b,100b | ||
562 | .llong 77b,100b | ||
563 | .llong 78b,100b | ||
564 | .llong 79b,100b | ||
565 | .llong 80b,100b | ||
566 | .llong 81b,100b | ||
567 | .llong 82b,100b | ||
568 | .llong 83b,100b | ||
569 | .llong 84b,100b | ||
570 | .llong 85b,100b | ||
571 | .llong 86b,100b | ||
572 | .llong 87b,100b | ||
573 | .llong 88b,100b | ||
574 | .llong 89b,100b | ||
575 | .llong 90b,100b | ||
576 | .llong 91b,100b | ||
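
__copy_tofrom_user returns the number of bytes it could not copy (0 on full success), and on a source fault it zeroes the uncopied tail of the destination. A small C sketch of that calling convention with a simulated fault (copy_with_fault is a hypothetical stand-in, not kernel API):

#include <stdio.h>
#include <string.h>

/*
 * Copy primitive that returns the number of bytes NOT copied, zero meaning
 * complete success; 'fail_at' simulates a fault partway through the source.
 */
static unsigned long copy_with_fault(void *to, const void *from,
                                     unsigned long n, unsigned long fail_at)
{
        unsigned long done = n < fail_at ? n : fail_at;

        memcpy(to, from, done);
        /* On a source fault the real routine zeroes the uncopied tail. */
        memset((char *)to + done, 0, n - done);
        return n - done;
}

int main(void)
{
        char src[16] = "hello, copyusr";
        char dst[16];
        unsigned long left = copy_with_fault(dst, src, sizeof(src), 8);

        if (left)
                printf("short copy: %lu bytes left uncopied\n", left);
        else
                printf("copied everything\n");
        return 0;
}
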
diff --git a/arch/ppc64/lib/e2a.c b/arch/ppc64/lib/e2a.c deleted file mode 100644 index d2b834887920..000000000000 --- a/arch/ppc64/lib/e2a.c +++ /dev/null | |||
@@ -1,108 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ppc64/lib/e2a.c | ||
3 | * | ||
4 | * EBCDIC to ASCII conversion | ||
5 | * | ||
6 | * This function moved here from arch/ppc64/kernel/viopath.c | ||
7 | * | ||
8 | * (C) Copyright 2000-2004 IBM Corporation | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License as | ||
12 | * published by the Free Software Foundation; either version 2 of the | ||
13 | * License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software Foundation, | ||
22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | |||
28 | unsigned char e2a(unsigned char x) | ||
29 | { | ||
30 | switch (x) { | ||
31 | case 0xF0: | ||
32 | return '0'; | ||
33 | case 0xF1: | ||
34 | return '1'; | ||
35 | case 0xF2: | ||
36 | return '2'; | ||
37 | case 0xF3: | ||
38 | return '3'; | ||
39 | case 0xF4: | ||
40 | return '4'; | ||
41 | case 0xF5: | ||
42 | return '5'; | ||
43 | case 0xF6: | ||
44 | return '6'; | ||
45 | case 0xF7: | ||
46 | return '7'; | ||
47 | case 0xF8: | ||
48 | return '8'; | ||
49 | case 0xF9: | ||
50 | return '9'; | ||
51 | case 0xC1: | ||
52 | return 'A'; | ||
53 | case 0xC2: | ||
54 | return 'B'; | ||
55 | case 0xC3: | ||
56 | return 'C'; | ||
57 | case 0xC4: | ||
58 | return 'D'; | ||
59 | case 0xC5: | ||
60 | return 'E'; | ||
61 | case 0xC6: | ||
62 | return 'F'; | ||
63 | case 0xC7: | ||
64 | return 'G'; | ||
65 | case 0xC8: | ||
66 | return 'H'; | ||
67 | case 0xC9: | ||
68 | return 'I'; | ||
69 | case 0xD1: | ||
70 | return 'J'; | ||
71 | case 0xD2: | ||
72 | return 'K'; | ||
73 | case 0xD3: | ||
74 | return 'L'; | ||
75 | case 0xD4: | ||
76 | return 'M'; | ||
77 | case 0xD5: | ||
78 | return 'N'; | ||
79 | case 0xD6: | ||
80 | return 'O'; | ||
81 | case 0xD7: | ||
82 | return 'P'; | ||
83 | case 0xD8: | ||
84 | return 'Q'; | ||
85 | case 0xD9: | ||
86 | return 'R'; | ||
87 | case 0xE2: | ||
88 | return 'S'; | ||
89 | case 0xE3: | ||
90 | return 'T'; | ||
91 | case 0xE4: | ||
92 | return 'U'; | ||
93 | case 0xE5: | ||
94 | return 'V'; | ||
95 | case 0xE6: | ||
96 | return 'W'; | ||
97 | case 0xE7: | ||
98 | return 'X'; | ||
99 | case 0xE8: | ||
100 | return 'Y'; | ||
101 | case 0xE9: | ||
102 | return 'Z'; | ||
103 | } | ||
104 | return ' '; | ||
105 | } | ||
106 | EXPORT_SYMBOL(e2a); | ||
107 | |||
108 | |||
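
The large switch in e2a() maps only the EBCDIC digits and upper-case letters and falls back to a space for everything else. A table-driven C sketch of the same mapping (an alternative illustration, not the code that replaced this file):

#include <stdio.h>

/*
 * Table-driven equivalent of the switch above: digits 0xF0..0xF9, letters in
 * the three EBCDIC ranges 0xC1..0xC9, 0xD1..0xD9, 0xE2..0xE9, space otherwise.
 */
static unsigned char e2a_tab(unsigned char x)
{
        static const char digits[]  = "0123456789";
        static const char letters[] = "ABCDEFGHI"      /* 0xC1..0xC9 */
                                      "JKLMNOPQR"      /* 0xD1..0xD9 */
                                      "STUVWXYZ";      /* 0xE2..0xE9 */

        if (x >= 0xF0 && x <= 0xF9)
                return digits[x - 0xF0];
        if (x >= 0xC1 && x <= 0xC9)
                return letters[x - 0xC1];
        if (x >= 0xD1 && x <= 0xD9)
                return letters[9 + (x - 0xD1)];
        if (x >= 0xE2 && x <= 0xE9)
                return letters[18 + (x - 0xE2)];
        return ' ';
}

int main(void)
{
        unsigned char ebcdic[] = { 0xC8, 0xC5, 0xD3, 0xD3, 0xD6, 0xF1 }; /* "HELLO1" */

        for (unsigned i = 0; i < sizeof(ebcdic); i++)
                putchar(e2a_tab(ebcdic[i]));
        putchar('\n');
        return 0;
}
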
diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c deleted file mode 100644 index 033643ab69e0..000000000000 --- a/arch/ppc64/lib/locks.c +++ /dev/null | |||
@@ -1,95 +0,0 @@ | |||
1 | /* | ||
2 | * Spin and read/write lock operations. | ||
3 | * | ||
4 | * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM | ||
5 | * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM | ||
6 | * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM | ||
7 | * Rework to support virtual processors | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/stringify.h> | ||
20 | #include <asm/hvcall.h> | ||
21 | #include <asm/iSeries/HvCall.h> | ||
22 | |||
23 | /* waiting for a spinlock... */ | ||
24 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) | ||
25 | |||
26 | void __spin_yield(raw_spinlock_t *lock) | ||
27 | { | ||
28 | unsigned int lock_value, holder_cpu, yield_count; | ||
29 | struct paca_struct *holder_paca; | ||
30 | |||
31 | lock_value = lock->slock; | ||
32 | if (lock_value == 0) | ||
33 | return; | ||
34 | holder_cpu = lock_value & 0xffff; | ||
35 | BUG_ON(holder_cpu >= NR_CPUS); | ||
36 | holder_paca = &paca[holder_cpu]; | ||
37 | yield_count = holder_paca->lppaca.yield_count; | ||
38 | if ((yield_count & 1) == 0) | ||
39 | return; /* virtual cpu is currently running */ | ||
40 | rmb(); | ||
41 | if (lock->slock != lock_value) | ||
42 | return; /* something has changed */ | ||
43 | #ifdef CONFIG_PPC_ISERIES | ||
44 | HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, | ||
45 | ((u64)holder_cpu << 32) | yield_count); | ||
46 | #else | ||
47 | plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), | ||
48 | yield_count); | ||
49 | #endif | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Waiting for a read lock or a write lock on a rwlock... | ||
54 | * This turns out to be the same for read and write locks, since | ||
55 | * we only know the holder if it is write-locked. | ||
56 | */ | ||
57 | void __rw_yield(raw_rwlock_t *rw) | ||
58 | { | ||
59 | int lock_value; | ||
60 | unsigned int holder_cpu, yield_count; | ||
61 | struct paca_struct *holder_paca; | ||
62 | |||
63 | lock_value = rw->lock; | ||
64 | if (lock_value >= 0) | ||
65 | return; /* no write lock at present */ | ||
66 | holder_cpu = lock_value & 0xffff; | ||
67 | BUG_ON(holder_cpu >= NR_CPUS); | ||
68 | holder_paca = &paca[holder_cpu]; | ||
69 | yield_count = holder_paca->lppaca.yield_count; | ||
70 | if ((yield_count & 1) == 0) | ||
71 | return; /* virtual cpu is currently running */ | ||
72 | rmb(); | ||
73 | if (rw->lock != lock_value) | ||
74 | return; /* something has changed */ | ||
75 | #ifdef CONFIG_PPC_ISERIES | ||
76 | HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, | ||
77 | ((u64)holder_cpu << 32) | yield_count); | ||
78 | #else | ||
79 | plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), | ||
80 | yield_count); | ||
81 | #endif | ||
82 | } | ||
83 | #endif | ||
84 | |||
85 | void __raw_spin_unlock_wait(raw_spinlock_t *lock) | ||
86 | { | ||
87 | while (lock->slock) { | ||
88 | HMT_low(); | ||
89 | if (SHARED_PROCESSOR) | ||
90 | __spin_yield(lock); | ||
91 | } | ||
92 | HMT_medium(); | ||
93 | } | ||
94 | |||
95 | EXPORT_SYMBOL(__raw_spin_unlock_wait); | ||
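
__spin_yield and __rw_yield share one idea: identify the holding virtual CPU, check its yield count for an odd value (meaning the holder is preempted), re-check the lock value, and only then confer the timeslice via the hypervisor. A control-flow sketch with the hypervisor and paca accesses replaced by hypothetical stubs:

#include <stdio.h>

/* Simplified lock: a nonzero value whose low bits identify the holder cpu. */
struct fake_lock { unsigned int slock; };

static unsigned int fake_yield_count[4] = { 0, 3, 0, 0 };   /* odd == preempted */

static void confer_to(unsigned int cpu, unsigned int yield_count)
{
        /* Stand-in for HvCall2(...) / plpar_hcall_norets(H_CONFER, ...). */
        printf("confer to cpu %u (yield_count %u)\n", cpu, yield_count);
}

static void spin_yield_sketch(struct fake_lock *lock)
{
        unsigned int lock_value = lock->slock;
        unsigned int holder_cpu, yield_count;

        if (lock_value == 0)
                return;                         /* lock is free, nothing to do */
        holder_cpu = lock_value & 0xffff;
        yield_count = fake_yield_count[holder_cpu];
        if ((yield_count & 1) == 0)
                return;                         /* holder is running, keep spinning */
        if (lock->slock != lock_value)
                return;                         /* lock changed hands meanwhile */
        confer_to(holder_cpu, yield_count);
}

int main(void)
{
        struct fake_lock l = { .slock = 1 };    /* held by "cpu 1" */

        spin_yield_sketch(&l);
        return 0;
}
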
diff --git a/arch/ppc64/lib/memcpy.S b/arch/ppc64/lib/memcpy.S deleted file mode 100644 index 9ccacdf5bcb9..000000000000 --- a/arch/ppc64/lib/memcpy.S +++ /dev/null | |||
@@ -1,172 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ppc64/lib/memcpy.S | ||
3 | * | ||
4 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/ppc_asm.h> | ||
13 | |||
14 | .align 7 | ||
15 | _GLOBAL(memcpy) | ||
16 | mtcrf 0x01,r5 | ||
17 | cmpldi cr1,r5,16 | ||
18 | neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry | ||
19 | andi. r6,r6,7 | ||
20 | dcbt 0,r4 | ||
21 | blt cr1,.Lshort_copy | ||
22 | bne .Ldst_unaligned | ||
23 | .Ldst_aligned: | ||
24 | andi. r0,r4,7 | ||
25 | addi r3,r3,-16 | ||
26 | bne .Lsrc_unaligned | ||
27 | srdi r7,r5,4 | ||
28 | ld r9,0(r4) | ||
29 | addi r4,r4,-8 | ||
30 | mtctr r7 | ||
31 | andi. r5,r5,7 | ||
32 | bf cr7*4+0,2f | ||
33 | addi r3,r3,8 | ||
34 | addi r4,r4,8 | ||
35 | mr r8,r9 | ||
36 | blt cr1,3f | ||
37 | 1: ld r9,8(r4) | ||
38 | std r8,8(r3) | ||
39 | 2: ldu r8,16(r4) | ||
40 | stdu r9,16(r3) | ||
41 | bdnz 1b | ||
42 | 3: std r8,8(r3) | ||
43 | beqlr | ||
44 | addi r3,r3,16 | ||
45 | ld r9,8(r4) | ||
46 | .Ldo_tail: | ||
47 | bf cr7*4+1,1f | ||
48 | rotldi r9,r9,32 | ||
49 | stw r9,0(r3) | ||
50 | addi r3,r3,4 | ||
51 | 1: bf cr7*4+2,2f | ||
52 | rotldi r9,r9,16 | ||
53 | sth r9,0(r3) | ||
54 | addi r3,r3,2 | ||
55 | 2: bf cr7*4+3,3f | ||
56 | rotldi r9,r9,8 | ||
57 | stb r9,0(r3) | ||
58 | 3: blr | ||
59 | |||
60 | .Lsrc_unaligned: | ||
61 | srdi r6,r5,3 | ||
62 | addi r5,r5,-16 | ||
63 | subf r4,r0,r4 | ||
64 | srdi r7,r5,4 | ||
65 | sldi r10,r0,3 | ||
66 | cmpdi cr6,r6,3 | ||
67 | andi. r5,r5,7 | ||
68 | mtctr r7 | ||
69 | subfic r11,r10,64 | ||
70 | add r5,r5,r0 | ||
71 | |||
72 | bt cr7*4+0,0f | ||
73 | |||
74 | ld r9,0(r4) # 3+2n loads, 2+2n stores | ||
75 | ld r0,8(r4) | ||
76 | sld r6,r9,r10 | ||
77 | ldu r9,16(r4) | ||
78 | srd r7,r0,r11 | ||
79 | sld r8,r0,r10 | ||
80 | or r7,r7,r6 | ||
81 | blt cr6,4f | ||
82 | ld r0,8(r4) | ||
83 | # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12 | ||
84 | b 2f | ||
85 | |||
86 | 0: ld r0,0(r4) # 4+2n loads, 3+2n stores | ||
87 | ldu r9,8(r4) | ||
88 | sld r8,r0,r10 | ||
89 | addi r3,r3,-8 | ||
90 | blt cr6,5f | ||
91 | ld r0,8(r4) | ||
92 | srd r12,r9,r11 | ||
93 | sld r6,r9,r10 | ||
94 | ldu r9,16(r4) | ||
95 | or r12,r8,r12 | ||
96 | srd r7,r0,r11 | ||
97 | sld r8,r0,r10 | ||
98 | addi r3,r3,16 | ||
99 | beq cr6,3f | ||
100 | |||
101 | # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9 | ||
102 | 1: or r7,r7,r6 | ||
103 | ld r0,8(r4) | ||
104 | std r12,8(r3) | ||
105 | 2: srd r12,r9,r11 | ||
106 | sld r6,r9,r10 | ||
107 | ldu r9,16(r4) | ||
108 | or r12,r8,r12 | ||
109 | stdu r7,16(r3) | ||
110 | srd r7,r0,r11 | ||
111 | sld r8,r0,r10 | ||
112 | bdnz 1b | ||
113 | |||
114 | 3: std r12,8(r3) | ||
115 | or r7,r7,r6 | ||
116 | 4: std r7,16(r3) | ||
117 | 5: srd r12,r9,r11 | ||
118 | or r12,r8,r12 | ||
119 | std r12,24(r3) | ||
120 | beqlr | ||
121 | cmpwi cr1,r5,8 | ||
122 | addi r3,r3,32 | ||
123 | sld r9,r9,r10 | ||
124 | ble cr1,.Ldo_tail | ||
125 | ld r0,8(r4) | ||
126 | srd r7,r0,r11 | ||
127 | or r9,r7,r9 | ||
128 | b .Ldo_tail | ||
129 | |||
130 | .Ldst_unaligned: | ||
131 | mtcrf 0x01,r6 # put #bytes to 8B bdry into cr7 | ||
132 | subf r5,r6,r5 | ||
133 | li r7,0 | ||
134 | cmpldi r1,r5,16 | ||
135 | bf cr7*4+3,1f | ||
136 | lbz r0,0(r4) | ||
137 | stb r0,0(r3) | ||
138 | addi r7,r7,1 | ||
139 | 1: bf cr7*4+2,2f | ||
140 | lhzx r0,r7,r4 | ||
141 | sthx r0,r7,r3 | ||
142 | addi r7,r7,2 | ||
143 | 2: bf cr7*4+1,3f | ||
144 | lwzx r0,r7,r4 | ||
145 | stwx r0,r7,r3 | ||
146 | 3: mtcrf 0x01,r5 | ||
147 | add r4,r6,r4 | ||
148 | add r3,r6,r3 | ||
149 | b .Ldst_aligned | ||
150 | |||
151 | .Lshort_copy: | ||
152 | bf cr7*4+0,1f | ||
153 | lwz r0,0(r4) | ||
154 | lwz r9,4(r4) | ||
155 | addi r4,r4,8 | ||
156 | stw r0,0(r3) | ||
157 | stw r9,4(r3) | ||
158 | addi r3,r3,8 | ||
159 | 1: bf cr7*4+1,2f | ||
160 | lwz r0,0(r4) | ||
161 | addi r4,r4,4 | ||
162 | stw r0,0(r3) | ||
163 | addi r3,r3,4 | ||
164 | 2: bf cr7*4+2,3f | ||
165 | lhz r0,0(r4) | ||
166 | addi r4,r4,2 | ||
167 | sth r0,0(r3) | ||
168 | addi r3,r3,2 | ||
169 | 3: bf cr7*4+3,4f | ||
170 | lbz r0,0(r4) | ||
171 | stb r0,0(r3) | ||
172 | 4: blr | ||
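
The deleted memcpy aligns the destination, moves doublewords in the main loop, and mops up the tail, with extra shift-and-merge (sld/srd) paths for a misaligned source. A structural C sketch of the aligned path only (a reference illustration, not the kernel routine):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Head bytes to reach 8-byte destination alignment, doublewords in the main
 * loop, then a byte tail. The misaligned-source merging is omitted here.
 */
static void *memcpy_sketch(void *dst, const void *src, size_t n)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        while (n && ((uintptr_t)d & 7)) {       /* head: align the destination */
                *d++ = *s++;
                n--;
        }
        while (n >= 8) {                        /* main loop: one doubleword per trip */
                uint64_t v;

                memcpy(&v, s, 8);               /* safe unaligned load of the source */
                memcpy(d, &v, 8);
                d += 8; s += 8; n -= 8;
        }
        while (n--)                             /* tail */
                *d++ = *s++;
        return dst;
}

int main(void)
{
        char src[] = "The quick brown fox jumps over the lazy dog";
        char dst[sizeof(src)];

        memcpy_sketch(dst, src, sizeof(src));
        puts(dst);
        return 0;
}
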
diff --git a/arch/ppc64/lib/sstep.c b/arch/ppc64/lib/sstep.c deleted file mode 100644 index e79123d1485c..000000000000 --- a/arch/ppc64/lib/sstep.c +++ /dev/null | |||
@@ -1,141 +0,0 @@ | |||
1 | /* | ||
2 | * Single-step support. | ||
3 | * | ||
4 | * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/ptrace.h> | ||
13 | #include <asm/sstep.h> | ||
14 | #include <asm/processor.h> | ||
15 | |||
16 | extern char system_call_common[]; | ||
17 | |||
18 | /* Bits in SRR1 that are copied from MSR */ | ||
19 | #define MSR_MASK 0xffffffff87c0ffff | ||
20 | |||
21 | /* | ||
22 | * Determine whether a conditional branch instruction would branch. | ||
23 | */ | ||
24 | static int branch_taken(unsigned int instr, struct pt_regs *regs) | ||
25 | { | ||
26 | unsigned int bo = (instr >> 21) & 0x1f; | ||
27 | unsigned int bi; | ||
28 | |||
29 | if ((bo & 4) == 0) { | ||
30 | /* decrement counter */ | ||
31 | --regs->ctr; | ||
32 | if (((bo >> 1) & 1) ^ (regs->ctr == 0)) | ||
33 | return 0; | ||
34 | } | ||
35 | if ((bo & 0x10) == 0) { | ||
36 | /* check bit from CR */ | ||
37 | bi = (instr >> 16) & 0x1f; | ||
38 | if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1)) | ||
39 | return 0; | ||
40 | } | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * Emulate instructions that cause a transfer of control. | ||
46 | * Returns 1 if the step was emulated, 0 if not, | ||
47 | * or -1 if the instruction is one that should not be stepped, | ||
48 | * such as an rfid, or a mtmsrd that would clear MSR_RI. | ||
49 | */ | ||
50 | int emulate_step(struct pt_regs *regs, unsigned int instr) | ||
51 | { | ||
52 | unsigned int opcode, rd; | ||
53 | unsigned long int imm; | ||
54 | |||
55 | opcode = instr >> 26; | ||
56 | switch (opcode) { | ||
57 | case 16: /* bc */ | ||
58 | imm = (signed short)(instr & 0xfffc); | ||
59 | if ((instr & 2) == 0) | ||
60 | imm += regs->nip; | ||
61 | regs->nip += 4; | ||
62 | if ((regs->msr & MSR_SF) == 0) | ||
63 | regs->nip &= 0xffffffffUL; | ||
64 | if (instr & 1) | ||
65 | regs->link = regs->nip; | ||
66 | if (branch_taken(instr, regs)) | ||
67 | regs->nip = imm; | ||
68 | return 1; | ||
69 | case 17: /* sc */ | ||
70 | /* | ||
71 | * N.B. this uses knowledge about how the syscall | ||
72 | * entry code works. If that is changed, this will | ||
73 | * need to be changed also. | ||
74 | */ | ||
75 | regs->gpr[9] = regs->gpr[13]; | ||
76 | regs->gpr[11] = regs->nip + 4; | ||
77 | regs->gpr[12] = regs->msr & MSR_MASK; | ||
78 | regs->gpr[13] = (unsigned long) get_paca(); | ||
79 | regs->nip = (unsigned long) &system_call_common; | ||
80 | regs->msr = MSR_KERNEL; | ||
81 | return 1; | ||
82 | case 18: /* b */ | ||
83 | imm = instr & 0x03fffffc; | ||
84 | if (imm & 0x02000000) | ||
85 | imm -= 0x04000000; | ||
86 | if ((instr & 2) == 0) | ||
87 | imm += regs->nip; | ||
88 | if (instr & 1) { | ||
89 | regs->link = regs->nip + 4; | ||
90 | if ((regs->msr & MSR_SF) == 0) | ||
91 | regs->link &= 0xffffffffUL; | ||
92 | } | ||
93 | if ((regs->msr & MSR_SF) == 0) | ||
94 | imm &= 0xffffffffUL; | ||
95 | regs->nip = imm; | ||
96 | return 1; | ||
97 | case 19: | ||
98 | switch (instr & 0x7fe) { | ||
99 | case 0x20: /* bclr */ | ||
100 | case 0x420: /* bcctr */ | ||
101 | imm = (instr & 0x400)? regs->ctr: regs->link; | ||
102 | regs->nip += 4; | ||
103 | if ((regs->msr & MSR_SF) == 0) { | ||
104 | regs->nip &= 0xffffffffUL; | ||
105 | imm &= 0xffffffffUL; | ||
106 | } | ||
107 | if (instr & 1) | ||
108 | regs->link = regs->nip; | ||
109 | if (branch_taken(instr, regs)) | ||
110 | regs->nip = imm; | ||
111 | return 1; | ||
112 | case 0x24: /* rfid, scary */ | ||
113 | return -1; | ||
114 | } | ||
115 | case 31: | ||
116 | rd = (instr >> 21) & 0x1f; | ||
117 | switch (instr & 0x7fe) { | ||
118 | case 0xa6: /* mfmsr */ | ||
119 | regs->gpr[rd] = regs->msr & MSR_MASK; | ||
120 | regs->nip += 4; | ||
121 | if ((regs->msr & MSR_SF) == 0) | ||
122 | regs->nip &= 0xffffffffUL; | ||
123 | return 1; | ||
124 | case 0x164: /* mtmsrd */ | ||
125 | /* only MSR_EE and MSR_RI get changed if bit 15 set */ | ||
126 | /* mtmsrd doesn't change MSR_HV and MSR_ME */ | ||
127 | imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL; | ||
128 | imm = (regs->msr & MSR_MASK & ~imm) | ||
129 | | (regs->gpr[rd] & imm); | ||
130 | if ((imm & MSR_RI) == 0) | ||
131 | /* can't step mtmsrd that would clear MSR_RI */ | ||
132 | return -1; | ||
133 | regs->msr = imm; | ||
134 | regs->nip += 4; | ||
135 | if ((imm & MSR_SF) == 0) | ||
136 | regs->nip &= 0xffffffffUL; | ||
137 | return 1; | ||
138 | } | ||
139 | } | ||
140 | return 0; | ||
141 | } | ||
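
emulate_step works by pulling fixed bit fields out of the instruction word: the 6-bit major opcode, the BO/BI fields of conditional branches, and the sign-extended 24-bit displacement of an unconditional branch. A small C example of that field extraction for an I-form branch (the sample encoding is chosen purely for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t instr = (18u << 26) | 8;       /* "b .+8": opcode 18, LI = 8, AA = LK = 0 */
        uint32_t opcode = instr >> 26;
        int32_t  li = instr & 0x03fffffc;       /* 24-bit displacement, word aligned */

        if (li & 0x02000000)                    /* sign-extend, as the code above does */
                li -= 0x04000000;

        printf("opcode=%u displacement=%d absolute=%u link=%u\n",
               opcode, li, instr & 2, instr & 1);
        return 0;
}
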
diff --git a/arch/ppc64/lib/strcase.c b/arch/ppc64/lib/strcase.c deleted file mode 100644 index e84f243368c0..000000000000 --- a/arch/ppc64/lib/strcase.c +++ /dev/null | |||
@@ -1,31 +0,0 @@ | |||
1 | /* | ||
2 | * c 2001 PPC 64 Team, IBM Corp | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | #include <linux/ctype.h> | ||
10 | |||
11 | int strcasecmp(const char *s1, const char *s2) | ||
12 | { | ||
13 | int c1, c2; | ||
14 | |||
15 | do { | ||
16 | c1 = tolower(*s1++); | ||
17 | c2 = tolower(*s2++); | ||
18 | } while (c1 == c2 && c1 != 0); | ||
19 | return c1 - c2; | ||
20 | } | ||
21 | |||
22 | int strncasecmp(const char *s1, const char *s2, int n) | ||
23 | { | ||
24 | int c1, c2; | ||
25 | |||
26 | do { | ||
27 | c1 = tolower(*s1++); | ||
28 | c2 = tolower(*s2++); | ||
29 | } while ((--n > 0) && c1 == c2 && c1 != 0); | ||
30 | return c1 - c2; | ||
31 | } | ||
diff --git a/arch/ppc64/lib/string.S b/arch/ppc64/lib/string.S index 813587e5c2ec..e21a0038a4d6 100644 --- a/arch/ppc64/lib/string.S +++ b/arch/ppc64/lib/string.S | |||
@@ -65,112 +65,6 @@ _GLOBAL(strlen) | |||
65 | subf r3,r3,r4 | 65 | subf r3,r3,r4 |
66 | blr | 66 | blr |
67 | 67 | ||
68 | _GLOBAL(memset) | ||
69 | neg r0,r3 | ||
70 | rlwimi r4,r4,8,16,23 | ||
71 | andi. r0,r0,7 /* # bytes to be 8-byte aligned */ | ||
72 | rlwimi r4,r4,16,0,15 | ||
73 | cmplw cr1,r5,r0 /* do we get that far? */ | ||
74 | rldimi r4,r4,32,0 | ||
75 | mtcrf 1,r0 | ||
76 | mr r6,r3 | ||
77 | blt cr1,8f | ||
78 | beq+ 3f /* if already 8-byte aligned */ | ||
79 | subf r5,r0,r5 | ||
80 | bf 31,1f | ||
81 | stb r4,0(r6) | ||
82 | addi r6,r6,1 | ||
83 | 1: bf 30,2f | ||
84 | sth r4,0(r6) | ||
85 | addi r6,r6,2 | ||
86 | 2: bf 29,3f | ||
87 | stw r4,0(r6) | ||
88 | addi r6,r6,4 | ||
89 | 3: srdi. r0,r5,6 | ||
90 | clrldi r5,r5,58 | ||
91 | mtctr r0 | ||
92 | beq 5f | ||
93 | 4: std r4,0(r6) | ||
94 | std r4,8(r6) | ||
95 | std r4,16(r6) | ||
96 | std r4,24(r6) | ||
97 | std r4,32(r6) | ||
98 | std r4,40(r6) | ||
99 | std r4,48(r6) | ||
100 | std r4,56(r6) | ||
101 | addi r6,r6,64 | ||
102 | bdnz 4b | ||
103 | 5: srwi. r0,r5,3 | ||
104 | clrlwi r5,r5,29 | ||
105 | mtcrf 1,r0 | ||
106 | beq 8f | ||
107 | bf 29,6f | ||
108 | std r4,0(r6) | ||
109 | std r4,8(r6) | ||
110 | std r4,16(r6) | ||
111 | std r4,24(r6) | ||
112 | addi r6,r6,32 | ||
113 | 6: bf 30,7f | ||
114 | std r4,0(r6) | ||
115 | std r4,8(r6) | ||
116 | addi r6,r6,16 | ||
117 | 7: bf 31,8f | ||
118 | std r4,0(r6) | ||
119 | addi r6,r6,8 | ||
120 | 8: cmpwi r5,0 | ||
121 | mtcrf 1,r5 | ||
122 | beqlr+ | ||
123 | bf 29,9f | ||
124 | stw r4,0(r6) | ||
125 | addi r6,r6,4 | ||
126 | 9: bf 30,10f | ||
127 | sth r4,0(r6) | ||
128 | addi r6,r6,2 | ||
129 | 10: bflr 31 | ||
130 | stb r4,0(r6) | ||
131 | blr | ||
132 | |||
133 | _GLOBAL(memmove) | ||
134 | cmplw 0,r3,r4 | ||
135 | bgt .backwards_memcpy | ||
136 | b .memcpy | ||
137 | |||
138 | _GLOBAL(backwards_memcpy) | ||
139 | rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ | ||
140 | add r6,r3,r5 | ||
141 | add r4,r4,r5 | ||
142 | beq 2f | ||
143 | andi. r0,r6,3 | ||
144 | mtctr r7 | ||
145 | bne 5f | ||
146 | 1: lwz r7,-4(r4) | ||
147 | lwzu r8,-8(r4) | ||
148 | stw r7,-4(r6) | ||
149 | stwu r8,-8(r6) | ||
150 | bdnz 1b | ||
151 | andi. r5,r5,7 | ||
152 | 2: cmplwi 0,r5,4 | ||
153 | blt 3f | ||
154 | lwzu r0,-4(r4) | ||
155 | subi r5,r5,4 | ||
156 | stwu r0,-4(r6) | ||
157 | 3: cmpwi 0,r5,0 | ||
158 | beqlr | ||
159 | mtctr r5 | ||
160 | 4: lbzu r0,-1(r4) | ||
161 | stbu r0,-1(r6) | ||
162 | bdnz 4b | ||
163 | blr | ||
164 | 5: mtctr r0 | ||
165 | 6: lbzu r7,-1(r4) | ||
166 | stbu r7,-1(r6) | ||
167 | bdnz 6b | ||
168 | subf r5,r0,r5 | ||
169 | rlwinm. r7,r5,32-3,3,31 | ||
170 | beq 2b | ||
171 | mtctr r7 | ||
172 | b 1b | ||
173 | |||
174 | _GLOBAL(memcmp) | 68 | _GLOBAL(memcmp) |
175 | cmpwi 0,r5,0 | 69 | cmpwi 0,r5,0 |
176 | ble- 2f | 70 | ble- 2f |
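
The removed memset begins by replicating the fill byte across a full doubleword (the rlwimi/rldimi sequence) so that the main loop can store 8 or 64 bytes per iteration. The same replication in portable C, as a rough analogue of those rotate-and-insert steps:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  c = 0xAB;
        uint64_t v = c;

        v |= v << 8;            /* byte      -> halfword   */
        v |= v << 16;           /* halfword  -> word       */
        v |= v << 32;           /* word      -> doubleword */

        printf("fill word = 0x%016llx\n", (unsigned long long)v);
        return 0;
}
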
diff --git a/arch/ppc64/lib/usercopy.c b/arch/ppc64/lib/usercopy.c deleted file mode 100644 index 5eea6f3c1e03..000000000000 --- a/arch/ppc64/lib/usercopy.c +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * Functions which are too large to be inlined. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | #include <linux/module.h> | ||
10 | #include <asm/uaccess.h> | ||
11 | |||
12 | unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) | ||
13 | { | ||
14 | if (likely(access_ok(VERIFY_READ, from, n))) | ||
15 | n = __copy_from_user(to, from, n); | ||
16 | else | ||
17 | memset(to, 0, n); | ||
18 | return n; | ||
19 | } | ||
20 | |||
21 | unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) | ||
22 | { | ||
23 | if (likely(access_ok(VERIFY_WRITE, to, n))) | ||
24 | n = __copy_to_user(to, from, n); | ||
25 | return n; | ||
26 | } | ||
27 | |||
28 | unsigned long copy_in_user(void __user *to, const void __user *from, | ||
29 | unsigned long n) | ||
30 | { | ||
31 | might_sleep(); | ||
32 | if (likely(access_ok(VERIFY_READ, from, n) && | ||
33 | access_ok(VERIFY_WRITE, to, n))) | ||
34 | n =__copy_tofrom_user(to, from, n); | ||
35 | return n; | ||
36 | } | ||
37 | |||
38 | EXPORT_SYMBOL(copy_from_user); | ||
39 | EXPORT_SYMBOL(copy_to_user); | ||
40 | EXPORT_SYMBOL(copy_in_user); | ||
41 | |||
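
These wrappers follow the usual convention: the return value is the number of bytes left uncopied, so 0 means success and callers typically map anything else to -EFAULT. A stand-alone usage sketch (fake_copy_from_user is a hypothetical stand-in that never faults; it only shows the checking pattern):

#include <stdio.h>
#include <string.h>

static unsigned long fake_copy_from_user(void *to, const void *from, unsigned long n)
{
        memcpy(to, from, n);
        return 0;                       /* 0 bytes left uncopied == success */
}

int main(void)
{
        char user_buf[] = "data from user space";
        char kbuf[32];

        if (fake_copy_from_user(kbuf, user_buf, sizeof(user_buf)) != 0) {
                fprintf(stderr, "would return -EFAULT\n");
                return 1;
        }
        printf("copied: %s\n", kbuf);
        return 0;
}
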