diff options
| -rw-r--r-- | arch/powerpc/kernel/process.c | 112 |
1 file changed, 112 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 1cc40533021b..48a987579e4f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -50,6 +50,7 @@ | |||
| 50 | #include <asm/runlatch.h> | 50 | #include <asm/runlatch.h> |
| 51 | #include <asm/syscalls.h> | 51 | #include <asm/syscalls.h> |
| 52 | #include <asm/switch_to.h> | 52 | #include <asm/switch_to.h> |
| 53 | #include <asm/tm.h> | ||
| 53 | #include <asm/debug.h> | 54 | #include <asm/debug.h> |
| 54 | #ifdef CONFIG_PPC64 | 55 | #ifdef CONFIG_PPC64 |
| 55 | #include <asm/firmware.h> | 56 | #include <asm/firmware.h> |
| @@ -467,6 +468,117 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a, | |||
| 467 | return false; | 468 | return false; |
| 468 | return true; | 469 | return true; |
| 469 | } | 470 | } |
| 471 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 472 | static inline void tm_reclaim_task(struct task_struct *tsk) | ||
| 473 | { | ||
| 474 | /* We have to work out if we're switching from/to a task that's in the | ||
| 475 | * middle of a transaction. | ||
| 476 | * | ||
| 477 | * In switching we need to maintain a 2nd register state as | ||
| 478 | * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the | ||
| 479 | * checkpointed (tbegin) state in ckpt_regs and saves the transactional | ||
| 480 | * (current) FPRs into oldtask->thread.transact_fpr[]. | ||
| 481 | * | ||
| 482 | * We also context switch (save) TFHAR/TEXASR/TFIAR in here. | ||
| 483 | */ | ||
| 484 | struct thread_struct *thr = &tsk->thread; | ||
| 485 | |||
| 486 | if (!thr->regs) | ||
| 487 | return; | ||
| 488 | |||
| 489 | if (!MSR_TM_ACTIVE(thr->regs->msr)) | ||
| 490 | goto out_and_saveregs; | ||
| 491 | |||
| 492 | /* Stash the original thread MSR, as giveup_fpu et al will | ||
| 493 | * modify it. We hold onto it to see whether the task used | ||
| 494 | * FP & vector regs. | ||
| 495 | */ | ||
| 496 | thr->tm_orig_msr = thr->regs->msr; | ||
| 497 | |||
| 498 | TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " | ||
| 499 | "ccr=%lx, msr=%lx, trap=%lx)\n", | ||
| 500 | tsk->pid, thr->regs->nip, | ||
| 501 | thr->regs->ccr, thr->regs->msr, | ||
| 502 | thr->regs->trap); | ||
| 503 | |||
| 504 | tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED); | ||
| 505 | |||
| 506 | TM_DEBUG("--- tm_reclaim on pid %d complete\n", | ||
| 507 | tsk->pid); | ||
| 508 | |||
| 509 | out_and_saveregs: | ||
| 510 | /* Always save the regs here, even if a transaction's not active. | ||
| 511 | * This context-switches a thread's TM info SPRs. We do it here to | ||
| 512 | * be consistent with the restore path (in recheckpoint) which | ||
| 513 | * cannot happen later in _switch(). | ||
| 514 | */ | ||
| 515 | tm_save_sprs(thr); | ||
| 516 | } | ||
| 517 | |||
| 518 | static inline void __maybe_unused tm_recheckpoint_new_task(struct task_struct *new) | ||
| 519 | { | ||
| 520 | unsigned long msr; | ||
| 521 | |||
| 522 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 523 | return; | ||
| 524 | |||
| 525 | /* Recheckpoint the registers of the thread we're about to switch to. | ||
| 526 | * | ||
| 527 | * If the task was using FP, we non-lazily reload both the original and | ||
| 528 | * the speculative FP register states. This is because the kernel | ||
| 529 | * doesn't see if/when a TM rollback occurs, so if we take an FP | ||
| 530 | * unavoidable later, we are unable to determine which set of FP regs | ||
| 531 | * need to be restored. | ||
| 532 | */ | ||
| 533 | if (!new->thread.regs) | ||
| 534 | return; | ||
| 535 | |||
| 536 | /* The TM SPRs are restored here, so that TEXASR.FS can be set | ||
| 537 | * before the trecheckpoint and no explosion occurs. | ||
| 538 | */ | ||
| 539 | tm_restore_sprs(&new->thread); | ||
| 540 | |||
| 541 | if (!MSR_TM_ACTIVE(new->thread.regs->msr)) | ||
| 542 | return; | ||
| 543 | msr = new->thread.tm_orig_msr; | ||
| 544 | /* Recheckpoint to restore original checkpointed register state. */ | ||
| 545 | TM_DEBUG("*** tm_recheckpoint of pid %d " | ||
| 546 | "(new->msr 0x%lx, new->origmsr 0x%lx)\n", | ||
| 547 | new->pid, new->thread.regs->msr, msr); | ||
| 548 | |||
| 549 | /* This loads the checkpointed FP/VEC state, if used */ | ||
| 550 | tm_recheckpoint(&new->thread, msr); | ||
| 551 | |||
| 552 | /* This loads the speculative FP/VEC state, if used */ | ||
| 553 | if (msr & MSR_FP) { | ||
| 554 | do_load_up_transact_fpu(&new->thread); | ||
| 555 | new->thread.regs->msr |= | ||
| 556 | (MSR_FP | new->thread.fpexc_mode); | ||
| 557 | } | ||
| 558 | if (msr & MSR_VEC) { | ||
| 559 | do_load_up_transact_altivec(&new->thread); | ||
| 560 | new->thread.regs->msr |= MSR_VEC; | ||
| 561 | } | ||
| 562 | /* We may as well turn on VSX too since all the state is restored now */ | ||
| 563 | if (msr & MSR_VSX) | ||
| 564 | new->thread.regs->msr |= MSR_VSX; | ||
| 565 | |||
| 566 | TM_DEBUG("*** tm_recheckpoint of pid %d complete " | ||
| 567 | "(kernel msr 0x%lx)\n", | ||
| 568 | new->pid, mfmsr()); | ||
| 569 | } | ||
| 570 | |||
| 571 | static inline void __switch_to_tm(struct task_struct *prev) | ||
| 572 | { | ||
| 573 | if (cpu_has_feature(CPU_FTR_TM)) { | ||
| 574 | tm_enable(); | ||
| 575 | tm_reclaim_task(prev); | ||
| 576 | } | ||
| 577 | } | ||
| 578 | #else | ||
| 579 | #define tm_recheckpoint_new_task(new) | ||
| 580 | #define __switch_to_tm(prev) | ||
| 581 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | ||
| 470 | 582 | ||
| 471 | struct task_struct *__switch_to(struct task_struct *prev, | 583 | struct task_struct *__switch_to(struct task_struct *prev, |
| 472 | struct task_struct *new) | 584 | struct task_struct *new) |
