author		Cyril Bur <cyrilbur@gmail.com>	2016-09-23 02:18:11 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-10-04 01:43:07 -0400
commit		e909fb83d39292679118761426d7784715ad79ad (patch)
tree		02bc288a2692d60e06c216db9b7fed78b3df5ef4
parent		3cee070a13b141b8eb5727c3bfa9920092f87264 (diff)
powerpc: Never giveup a reclaimed thread when enabling kernel {fp, altivec, vsx}
After a thread is reclaimed from its active or suspended transactional
state, the checkpointed state exists on the CPU. This state (along with
the live/transactional state) has already been saved in its entirety by
the reclaiming process.

There exists a sequence of events that would cause the kernel to call
one of enable_kernel_fp(), enable_kernel_altivec() or
enable_kernel_vsx() after a thread has been reclaimed. These functions
save away any user state on the CPU so that the kernel can use the
registers. Not only is this saving away unnecessary at this point, it
is actually incorrect: it causes a save of the checkpointed state to
the live structures within the thread struct, thus destroying the true
live state for that thread.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--	arch/powerpc/kernel/process.c	| 39
1 file changed, 36 insertions(+), 3 deletions(-)
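The guard added below is identical in all three functions. As a minimal
sketch of the logic (illustrative only; it assumes the msr_tm_active()
helper introduced earlier in this series, a wrapper around the
MSR_TM_ACTIVE() bit test from asm/reg.h):

/*
 * Illustrative sketch, not part of the diff below.
 *
 * The failing sequence this patch guards against:
 *   1. A thread is transactional: the TS bits are set in its
 *      regs->msr.
 *   2. The reclaim path (tm_reclaim()) saves both the live and the
 *      checkpointed state; the CPU is left holding the checkpointed
 *      registers, and the CPU's MSR no longer shows an active or
 *      suspended transaction.
 *   3. The kernel calls enable_kernel_fp()/_altivec()/_vsx(), which
 *      previously did an unconditional giveup, saving the CPU's
 *      checkpointed registers over the thread's live state.
 *
 * Step 2 is detected by comparing the CPU's MSR with the thread's
 * saved regs->msr:
 */
cpumsr = msr_check_and_set(MSR_FP);	/* CPU MSR, TS bits included */
if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
	return;	/* reclaimed: state already saved; giveup would corrupt it */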
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 34ee5f2e3271..45b6ea069f92 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -205,12 +205,23 @@ EXPORT_SYMBOL_GPL(flush_fp_to_thread);
 
 void enable_kernel_fp(void)
 {
+	unsigned long cpumsr;
+
 	WARN_ON(preemptible());
 
-	msr_check_and_set(MSR_FP);
+	cpumsr = msr_check_and_set(MSR_FP);
 
 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
 		check_if_tm_restore_required(current);
+		/*
+		 * If a thread has already been reclaimed then the
+		 * checkpointed registers are on the CPU but have definitely
+		 * been saved by the reclaim code. Don't need to and *cannot*
+		 * giveup as this would save to the 'live' structure not the
+		 * checkpointed structure.
+		 */
+		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+			return;
 		__giveup_fpu(current);
 	}
 }
@@ -257,12 +268,23 @@ EXPORT_SYMBOL(giveup_altivec);
 
 void enable_kernel_altivec(void)
 {
+	unsigned long cpumsr;
+
 	WARN_ON(preemptible());
 
-	msr_check_and_set(MSR_VEC);
+	cpumsr = msr_check_and_set(MSR_VEC);
 
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
 		check_if_tm_restore_required(current);
+		/*
+		 * If a thread has already been reclaimed then the
+		 * checkpointed registers are on the CPU but have definitely
+		 * been saved by the reclaim code. Don't need to and *cannot*
+		 * giveup as this would save to the 'live' structure not the
+		 * checkpointed structure.
+		 */
+		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+			return;
 		__giveup_altivec(current);
 	}
 }
@@ -331,12 +353,23 @@ static void save_vsx(struct task_struct *tsk)
 
 void enable_kernel_vsx(void)
 {
+	unsigned long cpumsr;
+
 	WARN_ON(preemptible());
 
-	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
 		check_if_tm_restore_required(current);
+		/*
+		 * If a thread has already been reclaimed then the
+		 * checkpointed registers are on the CPU but have definitely
+		 * been saved by the reclaim code. Don't need to and *cannot*
+		 * giveup as this would save to the 'live' structure not the
+		 * checkpointed structure.
+		 */
+		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+			return;
 		if (current->thread.regs->msr & MSR_FP)
 			__giveup_fpu(current);
 		if (current->thread.regs->msr & MSR_VEC)