author	Paul Mackerras <paulus@samba.org>	2013-09-10 06:21:10 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-10-11 02:26:50 -0400
commit	18461960cbf50bf345ef0667d45d5f64de8fb893 (patch)
tree	58947fe30598814684f7e33424675e204316e8ef /arch
parent	de79f7b9f6f92ec1bd6f61fa1f20de60728a5b5e (diff)
powerpc: Provide for giveup_fpu/altivec to save state in alternate location
This provides a facility which is intended for use by KVM, where the contents of the FP/VSX and VMX (Altivec) registers can be saved away to somewhere other than the thread_struct when kernel code wants to use floating point or VMX instructions. This is done by providing a pointer in the thread_struct to indicate where the state should be saved to. The giveup_fpu() and giveup_altivec() functions test these pointers and save state to the indicated location if they are non-NULL. Note that the MSR_FP/VEC bits in task->thread.regs->msr are still used to indicate whether the CPU register state is live, even when an alternate save location is being used.

This also provides load_fp_state() and load_vr_state() functions, which load up FP/VSX and VMX state from memory into the CPU registers, and corresponding store_fp_state() and store_vr_state() functions, which store FP/VSX and VMX state into memory from the CPU registers.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
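To make the intended calling pattern concrete, the sketch below shows how a consumer such as KVM might drive this facility. It is an illustration only, not code from this patch: the kvmppc_vcpu_fp structure and both kvmppc_* functions are invented names, the MSR bookkeeping is simplified, and preemption is assumed disabled around each sequence. Only fp_save_area/vr_save_area, load_fp_state()/load_vr_state() and giveup_fpu()/giveup_altivec() come from this commit; enable_kernel_fp() and enable_kernel_altivec() are pre-existing kernel interfaces.

#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/switch_to.h>

/* Hypothetical guest register buffers (names invented for this sketch). */
struct kvmppc_vcpu_fp {
	struct thread_fp_state fp;
	struct thread_vr_state vr;
};

/* Start running guest FP/VMX code: load guest state, redirect future saves. */
static void kvmppc_load_guest_fp(struct kvmppc_vcpu_fp *v)
{
	enable_kernel_fp();		/* flushes any user FP state, sets MSR_FP */
	load_fp_state(&v->fp);		/* requires MSR_FP, per the comment in fpu.S */
	/*
	 * From here on, a giveup_fpu() saves the live registers into the
	 * guest buffer instead of current->thread.fp_state.
	 */
	current->thread.fp_save_area = &v->fp;

	enable_kernel_altivec();	/* likewise sets MSR_VEC */
	load_vr_state(&v->vr);
	current->thread.vr_save_area = &v->vr;
}

/* Stop running guest code: flush live state to the buffers and detach them. */
static void kvmppc_put_guest_fp(void)
{
	giveup_fpu(current);		/* lands in fp_save_area (non-NULL here) */
	giveup_altivec(current);	/* lands in vr_save_area */
	current->thread.fp_save_area = NULL;
	current->thread.vr_save_area = NULL;
}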
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/processor.h	7
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	2
-rw-r--r--	arch/powerpc/kernel/fpu.S	25
-rw-r--r--	arch/powerpc/kernel/ppc_ksyms.c	4
-rw-r--r--	arch/powerpc/kernel/process.c	7
-rw-r--r--	arch/powerpc/kernel/vector.S	29
6 files changed, 71 insertions, 3 deletions
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index afe695e9feb8..ea88e7bd4a34 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -211,6 +211,7 @@ struct thread_struct {
 #endif
 #endif
 	struct thread_fp_state	fp_state;
+	struct thread_fp_state	*fp_save_area;
 	int		fpexc_mode;	/* floating-point exception mode */
 	unsigned int	align_ctl;	/* alignment handling control */
 #ifdef CONFIG_PPC64
@@ -229,6 +230,7 @@ struct thread_struct {
 	unsigned long	trap_nr;	/* last trap # on this thread */
 #ifdef CONFIG_ALTIVEC
 	struct thread_vr_state vr_state;
+	struct thread_vr_state *vr_save_area;
 	unsigned long	vrsave;
 	int		used_vr;	/* set if process has used altivec */
 #endif /* CONFIG_ALTIVEC */
@@ -357,6 +359,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
 extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
 extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
 
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
 static inline unsigned int __unpack_fe01(unsigned long msr_bits)
 {
 	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8d27b61c95b9..6278edddc3f8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,9 +91,11 @@ int main(void)
 #endif
 	DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
 	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
+	DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area));
 	DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
 #ifdef CONFIG_ALTIVEC
 	DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
+	DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area));
 	DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
 	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
 	DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 34b96e6d2f0d..4dca05e91e95 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -81,6 +81,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 /*
+ * Load state from memory into FP registers including FPSCR.
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(load_fp_state)
+	lfd	fr0,FPSTATE_FPSCR(r3)
+	MTFSF_L(fr0)
+	REST_32FPVSRS(0, R4, R3)
+	blr
+
+/*
+ * Store FP state into memory, including FPSCR
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(store_fp_state)
+	SAVE_32FPVSRS(0, R4, R3)
+	mffs	fr0
+	stfd	fr0,FPSTATE_FPSCR(r3)
+	blr
+
+/*
  * This task wants to use the FPU now.
  * On UP, disable FP for the task which had the FPU previously,
  * and save its floating-point registers in its thread_struct.
@@ -172,9 +192,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	PPC_LCMPI	0,r3,0
 	beqlr-			/* if no previous owner, done */
 	addi	r3,r3,THREAD	/* want THREAD of task */
+	PPC_LL	r6,THREAD_FPSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
-	PPC_LCMPI	0,r5,0
+	PPC_LCMPI	0,r6,0
+	bne	2f
 	addi	r6,r3,THREAD_FPSTATE
+2:	PPC_LCMPI	0,r5,0
 	SAVE_32FPVSRS(0, R4, R6)
 	mffs	fr0
 	stfd	fr0,FPSTATE_FPSCR(r6)
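For readers who don't read PowerPC assembly, the effect of the new save-area selection in giveup_fpu() above can be paraphrased in C roughly as follows. This is an illustrative sketch only: giveup_fpu_tail() is not a real function, and the MSR bookkeeping of the real routine is simplified.

/* Illustrative C paraphrase of the giveup_fpu() tail above; the
 * authoritative version is the assembly in fpu.S. */
static void giveup_fpu_tail(struct task_struct *tsk)
{
	/* Prefer the alternate save area if one has been registered... */
	struct thread_fp_state *dst = tsk->thread.fp_save_area;

	/* ...otherwise fall back to the usual thread_struct state. */
	if (!dst)
		dst = &tsk->thread.fp_state;

	store_fp_state(dst);	/* the SAVE_32FPVSRS + mffs/stfd sequence */

	/* The MSR_FP bit in tsk->thread.regs->msr still tracks whether the
	 * CPU registers are live; clearing it is unchanged by this patch
	 * (and outside the hunk shown). */
}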
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 21646dbe1bb3..56a4bec1b11a 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -98,9 +98,13 @@ EXPORT_SYMBOL(start_thread);
 
 #ifdef CONFIG_PPC_FPU
 EXPORT_SYMBOL(giveup_fpu);
+EXPORT_SYMBOL(load_fp_state);
+EXPORT_SYMBOL(store_fp_state);
 #endif
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
+EXPORT_SYMBOL(load_vr_state);
+EXPORT_SYMBOL(store_vr_state);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 EXPORT_SYMBOL(giveup_vsx);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a281416affb..8649a3d629e1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1008,6 +1008,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.ptrace_bps[0] = NULL;
 #endif
 
+	p->thread.fp_save_area = NULL;
+#ifdef CONFIG_ALTIVEC
+	p->thread.vr_save_area = NULL;
+#endif
+
 #ifdef CONFIG_PPC_STD_MMU_64
 	if (mmu_has_feature(MMU_FTR_SLB)) {
 		unsigned long sp_vsid;
@@ -1114,9 +1119,11 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.used_vsr = 0;
 #endif
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
+	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
 	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
 	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
+	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index a48df870b696..eacda4eea2d7 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -37,6 +37,28 @@ _GLOBAL(do_load_up_transact_altivec)
 #endif
 
 /*
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+	li	r4,VRSTATE_VSCR
+	lvx	vr0,r4,r3
+	mtvscr	vr0
+	REST_32VRS(0,r4,r3)
+	blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+	SAVE_32VRS(0, r4, r3)
+	mfvscr	vr0
+	li	r4, VRSTATE_VSCR
+	stvx	vr0, r4, r3
+	blr
+
+/*
  * Disable VMX for the task which had it previously,
  * and save its vector registers in its thread_struct.
  * Enables the VMX for use in the kernel on return.
@@ -144,9 +166,12 @@ _GLOBAL(giveup_altivec)
 	PPC_LCMPI	0,r3,0
 	beqlr			/* if no previous owner, done */
 	addi	r3,r3,THREAD	/* want THREAD of task */
-	addi	r7,r3,THREAD_VRSTATE
+	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
-	PPC_LCMPI	0,r5,0
+	PPC_LCMPI	0,r7,0
+	bne	2f
+	addi	r7,r3,THREAD_VRSTATE
+2:	PPC_LCMPI	0,r5,0
 	SAVE_32VRS(0,r4,r7)
 	mfvscr	vr0
 	li	r4,VRSTATE_VSCR