author		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>	2015-06-29 10:43:06 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>		2015-08-03 04:04:37 -0400
commit		d0164ee20d98847d3c777a0ae90e678e7ac1e416 (patch)
tree		524bb68d1e46da8876da38e683b5474a7ec7f7ac
parent		2a01bd1bd3d28d1eef26d5509c95d0923f7dc75c (diff)
s390/kernel: remove save_fpu_regs() parameter and use __LC_CURRENT instead
All calls to save_fpu_regs() specify the fpu structure of the current task
pointer as parameter.  The task pointer of the current task can also be
retrieved from the CPU lowcore directly.  Remove the parameter definition,
load the __LC_CURRENT task pointer from the CPU lowcore, and rebase the FPU
structure onto the task structure.  Apply the same approach for the
load_fpu_regs() function.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--	arch/s390/include/asm/fpu-internal.h	2
-rw-r--r--	arch/s390/include/asm/switch_to.h	2
-rw-r--r--	arch/s390/kernel/asm-offsets.c		8
-rw-r--r--	arch/s390/kernel/compat_signal.c	6
-rw-r--r--	arch/s390/kernel/entry.S		36
-rw-r--r--	arch/s390/kernel/process.c		4
-rw-r--r--	arch/s390/kernel/ptrace.c		12
-rw-r--r--	arch/s390/kernel/signal.c		6
-rw-r--r--	arch/s390/kernel/traps.c		6
-rw-r--r--	arch/s390/kvm/kvm-s390.c		10
10 files changed, 45 insertions, 47 deletions
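
The core of the change, sketched below as a stand-alone C model (a user-space approximation with made-up struct layouts and a fake lowcore, not the kernel's actual code): instead of every caller passing &current->thread.fpu, save_fpu_regs() now derives the current task from the CPU lowcore (__LC_CURRENT) and works on the FPU state embedded in that task.

#include <stdio.h>

/* Minimal stand-ins for the kernel structures involved (layouts assumed). */
struct fpu {
	unsigned int fpc;	/* floating-point control word */
	unsigned int flags;	/* e.g. FPU_USE_VX */
};

struct task_struct {
	struct {
		struct fpu fpu;	/* per-task FPU save area */
	} thread;
};

/* Model of the per-CPU lowcore: holds the current task pointer (__LC_CURRENT). */
static struct {
	struct task_struct *current_task;
} lowcore;

/* New interface: no parameter; the function locates current's FPU itself,
 * mirroring "lg %rX,__LC_CURRENT; aghi %rX,__TASK_thread" in entry.S. */
static void save_fpu_regs(void)
{
	struct fpu *fpu = &lowcore.current_task->thread.fpu;

	fpu->fpc = 0x42;	/* placeholder for storing the real FP/VX state */
}

int main(void)
{
	struct task_struct task = { .thread.fpu.fpc = 0 };

	lowcore.current_task = &task;	/* what the hardware lowcore provides */
	save_fpu_regs();		/* callers no longer name the task at all */
	printf("fpc saved for current task: %#x\n", task.thread.fpu.fpc);
	return 0;
}

The caller-side effect is visible in every hunk below: save_fpu_regs(&current->thread.fpu) and save_fpu_regs(&target->thread.fpu) collapse to plain save_fpu_regs().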
diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu-internal.h
index 237f8fcbe46b..55dc2c0fb40a 100644
--- a/arch/s390/include/asm/fpu-internal.h
+++ b/arch/s390/include/asm/fpu-internal.h
@@ -28,7 +28,7 @@ struct fpu {
 	};
 };
 
-void save_fpu_regs(struct fpu *fpu);
+void save_fpu_regs(void);
 
 #define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
 #define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 0a4a3150b7d7..dcadfde32265 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -30,7 +30,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 
 #define switch_to(prev,next,last) do {				\
 	if (prev->mm) {						\
-		save_fpu_regs(&prev->thread.fpu);		\
+		save_fpu_regs();				\
 		save_access_regs(&prev->thread.acrs[0]);	\
 		save_ri_cb(prev->thread.ri_cb);			\
 	} \
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 6bc42c08be09..48c9af7a7683 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -28,16 +28,14 @@ int main(void)
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
 	DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
-	DEFINE(__THREAD_fpu, offsetof(struct task_struct, thread.fpu));
+	DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc));
+	DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
+	DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs));
 	DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
 	DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
 	DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
 	DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
 	BLANK();
-	DEFINE(__FPU_fpc, offsetof(struct fpu, fpc));
-	DEFINE(__FPU_flags, offsetof(struct fpu, flags));
-	DEFINE(__FPU_regs, offsetof(struct fpu, regs));
-	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
 	DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 0b46fd4aa31e..eb4664238613 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -154,7 +154,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -286,7 +286,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -309,7 +309,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (compat_restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 21c1219122af..5a966dea937f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -183,7 +183,6 @@ ENTRY(sie64a)
 	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15)	# host id & reason
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU	# load guest fp/vx registers ?
 	jno	.Lsie_load_guest_gprs
-	lg	%r12,__LC_THREAD_INFO		# load fp/vx regs save area
 	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
 .Lsie_load_guest_gprs:
 	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
@@ -752,14 +751,16 @@ ENTRY(psw_idle)
  * of the register contents at system call or io return.
  */
 ENTRY(save_fpu_regs)
+	lg	%r2,__LC_CURRENT
+	aghi	%r2,__TASK_thread
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU
 	bor	%r14
-	stfpc	__FPU_fpc(%r2)
+	stfpc	__THREAD_FPU_fpc(%r2)
 .Lsave_fpu_regs_fpc_end:
-	lg	%r3,__FPU_regs(%r2)
+	lg	%r3,__THREAD_FPU_regs(%r2)
 	ltgr	%r3,%r3
 	jz	.Lsave_fpu_regs_done		# no save area -> set CIF_FPU
-	tm	__FPU_flags+3(%r2),FPU_USE_VX
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
 	jz	.Lsave_fpu_regs_fp		# no -> store FP regs
 .Lsave_fpu_regs_vx_low:
 	VSTM	%v0,%v15,0,%r3			# vstm 0,15,0(3)
@@ -794,20 +795,19 @@ ENTRY(save_fpu_regs)
  * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
  *
  * There are special calling conventions to fit into sysc and io return work:
- * %r12:	__LC_THREAD_INFO
  * %r15:	<kernel stack>
  * The function requires:
  * %r4 and __SF_EMPTY+32(%r15)
  */
 load_fpu_regs:
+	lg	%r4,__LC_CURRENT
+	aghi	%r4,__TASK_thread
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU
 	bnor	%r14
-	lg	%r4,__TI_task(%r12)
-	la	%r4,__THREAD_fpu(%r4)
-	lfpc	__FPU_fpc(%r4)
+	lfpc	__THREAD_FPU_fpc(%r4)
 	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
-	tm	__FPU_flags+3(%r4),FPU_USE_VX		# VX-enabled task ?
-	lg	%r4,__FPU_regs(%r4)			# %r4 <- reg save area
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)		# %r4 <- reg save area
 	jz	.Lload_fpu_regs_fp_ctl		# -> no VX, load FP regs
 .Lload_fpu_regs_vx_ctl:
 	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
@@ -1190,13 +1190,14 @@ cleanup_critical:
 	jhe	2f
 	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
 	jhe	1f
+	lg	%r2,__LC_CURRENT
 0:	# Store floating-point controls
-	stfpc	__FPU_fpc(%r2)
+	stfpc	__THREAD_FPU_fpc(%r2)
 1:	# Load register save area and check if VX is active
-	lg	%r3,__FPU_regs(%r2)
+	lg	%r3,__THREAD_FPU_regs(%r2)
 	ltgr	%r3,%r3
 	jz	5f				# no save area -> set CIF_FPU
-	tm	__FPU_flags+3(%r2),FPU_USE_VX
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
 	jz	4f				# no VX -> store FP regs
 2:	# Store vector registers (V0-V15)
 	VSTM	%v0,%v15,0,%r3			# vstm 0,15,0(3)
@@ -1250,11 +1251,10 @@ cleanup_critical:
 	jhe	5f
 	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
 	jhe	6f
-	lg	%r4,__TI_task(%r12)
-	la	%r4,__THREAD_fpu(%r4)
-	lfpc	__FPU_fpc(%r4)
-	tm	__FPU_flags+3(%r4),FPU_USE_VX		# VX-enabled task ?
-	lg	%r4,__FPU_regs(%r4)			# %r4 <- reg save area
+	lg	%r4,__LC_CURRENT
+	lfpc	__THREAD_FPU_fpc(%r4)
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)		# %r4 <- reg save area
 	jz	3f				# -> no VX, load FP regs
 6:	# Set VX-enablement control
 	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
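
To see what the asm-offsets.c change buys the assembly above, here is a hedged user-space sketch (miniature, assumed struct layouts, not the kernel's): the __THREAD_FPU_* constants are offsets within thread_struct, so once the task pointer has been advanced by __TASK_thread they can be used directly as displacements, replacing the old two-step __TI_task/__THREAD_fpu plus __FPU_* addressing.

#include <stddef.h>
#include <stdio.h>

/* Assumed miniature layouts, only to illustrate the offset arithmetic. */
struct fpu { unsigned int fpc; unsigned int flags; void *regs; };
struct thread_struct { unsigned long ksp; struct fpu fpu; };
struct task_struct { int pid; struct thread_struct thread; };

int main(void)
{
	/* The constants asm-offsets.c now generates for entry.S: */
	size_t task_thread     = offsetof(struct task_struct, thread);     /* __TASK_thread */
	size_t thread_fpu_fpc  = offsetof(struct thread_struct, fpu.fpc);  /* __THREAD_FPU_fpc */
	size_t thread_fpu_regs = offsetof(struct thread_struct, fpu.regs); /* __THREAD_FPU_regs */

	struct task_struct task = { .pid = 1 };
	char *r2 = (char *)&task;		/* lg   %r2,__LC_CURRENT   */
	r2 += task_thread;			/* aghi %r2,__TASK_thread  */

	/* stfpc __THREAD_FPU_fpc(%r2): store the FPC at that displacement. */
	*(unsigned int *)(r2 + thread_fpu_fpc) = 0;
	/* lg %r3,__THREAD_FPU_regs(%r2): fetch the register save area pointer. */
	void *save_area = *(void **)(r2 + thread_fpu_regs);

	printf("fpc offset from task: %zu, save area %p\n",
	       task_thread + thread_fpu_fpc, save_area);
	return 0;
}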
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 9cf0063f920e..f2dac9f0799d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -104,7 +104,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	 * The CIF_FPU flag is set in any case to lazy clear or restore a saved
 	 * state when switching to a different task or returning to user space.
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	dst->thread.fpu.fpc = current->thread.fpu.fpc;
 	if (is_vx_task(current))
 		convert_vx_to_fp(dst->thread.fpu.fprs,
@@ -196,7 +196,7 @@ asmlinkage void execve_tail(void)
  */
 int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
 {
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	fpregs->fpc = current->thread.fpu.fpc;
 	fpregs->pad = 0;
 	if (is_vx_task(current))
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8c525880a3ff..8b1c8e33f184 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -943,7 +943,7 @@ static int s390_fpregs_get(struct task_struct *target,
 	_s390_fp_regs fp_regs;
 
 	if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	fp_regs.fpc = target->thread.fpu.fpc;
 	fpregs_store(&fp_regs, &target->thread.fpu);
@@ -961,7 +961,7 @@ static int s390_fpregs_set(struct task_struct *target,
 	freg_t fprs[__NUM_FPRS];
 
 	if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	/* If setting FPC, must validate it first. */
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
@@ -1049,7 +1049,7 @@ static int s390_vxrs_low_get(struct task_struct *target,
 		return -ENODEV;
 	if (is_vx_task(target)) {
 		if (target == current)
-			save_fpu_regs(&target->thread.fpu);
+			save_fpu_regs();
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
 			vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
 	} else
@@ -1072,7 +1072,7 @@ static int s390_vxrs_low_set(struct task_struct *target,
 		if (rc)
 			return rc;
 	} else if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
@@ -1093,7 +1093,7 @@ static int s390_vxrs_high_get(struct task_struct *target,
 		return -ENODEV;
 	if (is_vx_task(target)) {
 		if (target == current)
-			save_fpu_regs(&target->thread.fpu);
+			save_fpu_regs();
 		memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
 		       sizeof(vxrs));
 	} else
@@ -1115,7 +1115,7 @@ static int s390_vxrs_high_set(struct task_struct *target,
 		if (rc)
 			return rc;
 	} else if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 2f4c7e2638c9..9549af102d75 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -105,7 +105,7 @@ struct rt_sigframe
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -222,7 +222,7 @@ SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -246,7 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 76f76932ccb9..9861613fb35a 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -236,7 +236,7 @@ int alloc_vector_registers(struct task_struct *tsk)
 		return -ENOMEM;
 	preempt_disable();
 	if (tsk == current)
-		save_fpu_regs(&tsk->thread.fpu);
+		save_fpu_regs();
 	/* Copy the 16 floating point registers */
 	convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
 	fprs = tsk->thread.fpu.fprs;
@@ -257,7 +257,7 @@ void vector_exception(struct pt_regs *regs)
 	}
 
 	/* get vector interrupt code from fpc */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
@@ -295,7 +295,7 @@ void data_exception(struct pt_regs *regs)
 
 	location = get_trap_ip(regs);
 
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	/* Check for vector register enablement */
 	if (MACHINE_HAS_VX && !is_vx_task(current) &&
 	    (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c0cceaf4a92e..1903f0212bd0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1224,7 +1224,7 @@ static inline void load_fpu_from(struct fpu *from)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	/* Save host register state */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	save_fpu_to(&vcpu->arch.host_fpregs);
 
 	if (test_kvm_facility(vcpu->kvm, 129)) {
@@ -1256,7 +1256,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 
 	if (test_kvm_facility(vcpu->kvm, 129))
 		/*
@@ -1671,7 +1671,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 		return -EINVAL;
 	memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	load_fpu_from(&vcpu->arch.guest_fpregs);
 	return 0;
 }
@@ -2241,7 +2241,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * copying in vcpu load/put. Lets update our copies before we save
 	 * it into the save area
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		/*
 		 * If the vector extension is available, the vector registers
@@ -2288,7 +2288,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 *
 	 * Let's update our copies before we save it into the save area.
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 
 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
 }