-rw-r--r--  arch/powerpc/kernel/process.c | 83
-rw-r--r--  include/asm-powerpc/elf.h     | 20
2 files changed, 0 insertions(+), 103 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1924b57bd241..85e557300d86 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -105,29 +105,6 @@ void enable_kernel_fp(void)
 }
 EXPORT_SYMBOL(enable_kernel_fp);

-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
-#ifdef CONFIG_VSX
-        int i;
-        elf_fpreg_t *reg;
-#endif
-
-        if (!tsk->thread.regs)
-                return 0;
-        flush_fp_to_thread(current);
-
-#ifdef CONFIG_VSX
-        reg = (elf_fpreg_t *)fpregs;
-        for (i = 0; i < ELF_NFPREG - 1; i++, reg++)
-                *reg = tsk->thread.TS_FPR(i);
-        memcpy(reg, &tsk->thread.fpscr, sizeof(elf_fpreg_t));
-#else
-        memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
-#endif
-
-        return 1;
-}
-
 #ifdef CONFIG_ALTIVEC
 void enable_kernel_altivec(void)
 {
@@ -161,35 +138,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
                 preempt_enable();
         }
 }
-
-int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
-{
-        /* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
-         * separately, see below */
-        const int nregs = ELF_NVRREG - 2;
-        elf_vrreg_t *reg;
-        u32 *dest;
-
-        if (tsk == current)
-                flush_altivec_to_thread(tsk);
-
-        reg = (elf_vrreg_t *)vrregs;
-
-        /* copy the 32 vr registers */
-        memcpy(reg, &tsk->thread.vr[0], nregs * sizeof(*reg));
-        reg += nregs;
-
-        /* copy the vscr */
-        memcpy(reg, &tsk->thread.vscr, sizeof(*reg));
-        reg++;
-
-        /* vrsave is stored in the high 32bit slot of the final 128bits */
-        memset(reg, 0, sizeof(*reg));
-        dest = (u32 *)reg;
-        *dest = tsk->thread.vrsave;
-
-        return 1;
-}
 #endif /* CONFIG_ALTIVEC */

 #ifdef CONFIG_VSX
@@ -224,29 +172,6 @@ void flush_vsx_to_thread(struct task_struct *tsk)
                 preempt_enable();
         }
 }
-
-/*
- * This dumps the lower half 64bits of the first 32 VSX registers.
- * This needs to be called with dump_task_fp and dump_task_altivec to
- * get all the VSX state.
- */
-int dump_task_vsx(struct task_struct *tsk, elf_vrreg_t *vrregs)
-{
-        elf_vrreg_t *reg;
-        double buf[32];
-        int i;
-
-        if (tsk == current)
-                flush_vsx_to_thread(tsk);
-
-        reg = (elf_vrreg_t *)vrregs;
-
-        for (i = 0; i < 32 ; i++)
-                buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
-        memcpy(reg, buf, sizeof(buf));
-
-        return 1;
-}
 #endif /* CONFIG_VSX */

 #ifdef CONFIG_SPE
@@ -279,14 +204,6 @@ void flush_spe_to_thread(struct task_struct *tsk)
                 preempt_enable();
         }
 }
-
-int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
-{
-        flush_spe_to_thread(current);
-        /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-        memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
-        return 1;
-}
 #endif /* CONFIG_SPE */

 #ifndef CONFIG_SMP
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 38a51728406f..89664675b469 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -204,28 +204,8 @@ static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
 }
 #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);

-static inline int dump_task_regs(struct task_struct *tsk,
-                                 elf_gregset_t *elf_regs)
-{
-        struct pt_regs *regs = tsk->thread.regs;
-        if (regs)
-                ppc_elf_core_copy_regs(*elf_regs, regs);
-
-        return 1;
-}
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
-
-extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
 typedef elf_vrregset_t elf_fpxregset_t;

-#ifdef CONFIG_ALTIVEC
-extern int dump_task_altivec(struct task_struct *, elf_vrregset_t *vrregs);
-#define ELF_CORE_COPY_XFPREGS(tsk, regs) dump_task_altivec(tsk, regs)
-#define ELF_CORE_XFPREG_TYPE NT_PPC_VMX
-#endif
-
 /* ELF_HWCAP yields a mask that user programs can use to figure out what
    instruction set this cpu supports.  This could be done in userspace,
    but it's not easy, and we've already done it here.  */
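
The removed dump_task_altivec() filled the ELF note buffer as 32 Altivec vector registers, then the VSCR in its own 128-bit slot, then one final 128-bit slot whose first 32-bit word carries VRSAVE with the rest zeroed. Below is a minimal userspace sketch of that layout only; vr128, vmx_state and fill_vmx_note are illustrative stand-ins, not kernel types or functions.

/*
 * Sketch of the NT_PPC_VMX note layout produced by the removed
 * dump_task_altivec(): 32 VRs, VSCR, then VRSAVE in the first word
 * of a zeroed final 128-bit slot (the high word on big endian).
 */
#include <stdint.h>
#include <string.h>

typedef struct { uint32_t w[4]; } vr128;   /* one 128-bit vector slot */

struct vmx_state {                         /* hypothetical source state */
        vr128    vr[32];                   /* VR0..VR31 */
        vr128    vscr;
        uint32_t vrsave;
};

/* out[] has 34 slots in total: 32 VRs + VSCR + VRSAVE */
static void fill_vmx_note(const struct vmx_state *st, vr128 out[34])
{
        memcpy(&out[0], st->vr, 32 * sizeof(vr128));          /* the 32 VRs */
        memcpy(&out[32], &st->vscr, sizeof(vr128));           /* the VSCR   */
        memset(&out[33], 0, sizeof(vr128));                   /* zero slot  */
        memcpy(&out[33].w[0], &st->vrsave, sizeof(uint32_t)); /* VRSAVE     */
}

The two extra slots beyond the 32 vector registers are why the removed kernel code copied only ELF_NVRREG - 2 registers before handling the VSCR and VRSAVE separately.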