diff options
| -rw-r--r-- | arch/powerpc/kernel/tm.S | 61 |
1 file changed, 44 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index bf8f34a58670..b7019b559ddb 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S | |||
| @@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim) | |||
| 110 | std r3, STK_PARAM(R3)(r1) | 110 | std r3, STK_PARAM(R3)(r1) |
| 111 | SAVE_NVGPRS(r1) | 111 | SAVE_NVGPRS(r1) |
| 112 | 112 | ||
| 113 | /* We need to setup MSR for VSX register save instructions. Here we | 113 | /* We need to setup MSR for VSX register save instructions. */ |
| 114 | * also clear the MSR RI since when we do the treclaim, we won't have a | ||
| 115 | * valid kernel pointer for a while. We clear RI here as it avoids | ||
| 116 | * adding another mtmsr closer to the treclaim. This makes the region | ||
| 117 | * maked as non-recoverable wider than it needs to be but it saves on | ||
| 118 | * inserting another mtmsrd later. | ||
| 119 | */ | ||
| 120 | mfmsr r14 | 114 | mfmsr r14 |
| 121 | mr r15, r14 | 115 | mr r15, r14 |
| 122 | ori r15, r15, MSR_FP | 116 | ori r15, r15, MSR_FP |
| 123 | li r16, MSR_RI | 117 | li r16, 0 |
| 124 | ori r16, r16, MSR_EE /* IRQs hard off */ | 118 | ori r16, r16, MSR_EE /* IRQs hard off */ |
| 125 | andc r15, r15, r16 | 119 | andc r15, r15, r16 |
| 126 | oris r15, r15, MSR_VEC@h | 120 | oris r15, r15, MSR_VEC@h |
| @@ -176,7 +170,17 @@ dont_backup_fp: | |||
| 176 | 1: tdeqi r6, 0 | 170 | 1: tdeqi r6, 0 |
| 177 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 | 171 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 |
| 178 | 172 | ||
| 179 | /* The moment we treclaim, ALL of our GPRs will switch | 173 | /* Clear MSR RI since we are about to change r1, EE is already off. */ |
| 174 | li r4, 0 | ||
| 175 | mtmsrd r4, 1 | ||
| 176 | |||
| 177 | /* | ||
| 178 | * BE CAREFUL HERE: | ||
| 179 | * At this point we can't take an SLB miss since we have MSR_RI | ||
| 180 | * off. Load only to/from the stack/paca which are in SLB bolted regions | ||
| 181 | * until we turn MSR RI back on. | ||
| 182 | * | ||
| 183 | * The moment we treclaim, ALL of our GPRs will switch | ||
| 180 | * to user register state. (FPRs, CCR etc. also!) | 184 | * to user register state. (FPRs, CCR etc. also!) |
| 181 | * Use an sprg and a tm_scratch in the PACA to shuffle. | 185 | * Use an sprg and a tm_scratch in the PACA to shuffle. |
| 182 | */ | 186 | */ |
| @@ -197,6 +201,11 @@ dont_backup_fp: | |||
| 197 | 201 | ||
| 198 | /* Store the PPR in r11 and reset to decent value */ | 202 | /* Store the PPR in r11 and reset to decent value */ |
| 199 | std r11, GPR11(r1) /* Temporary stash */ | 203 | std r11, GPR11(r1) /* Temporary stash */ |
| 204 | |||
| 205 | /* Reset MSR RI so we can take SLB faults again */ | ||
| 206 | li r11, MSR_RI | ||
| 207 | mtmsrd r11, 1 | ||
| 208 | |||
| 200 | mfspr r11, SPRN_PPR | 209 | mfspr r11, SPRN_PPR |
| 201 | HMT_MEDIUM | 210 | HMT_MEDIUM |
| 202 | 211 | ||
| @@ -397,11 +406,6 @@ restore_gprs: | |||
| 397 | ld r5, THREAD_TM_DSCR(r3) | 406 | ld r5, THREAD_TM_DSCR(r3) |
| 398 | ld r6, THREAD_TM_PPR(r3) | 407 | ld r6, THREAD_TM_PPR(r3) |
| 399 | 408 | ||
| 400 | /* Clear the MSR RI since we are about to change R1. EE is already off | ||
| 401 | */ | ||
| 402 | li r4, 0 | ||
| 403 | mtmsrd r4, 1 | ||
| 404 | |||
| 405 | REST_GPR(0, r7) /* GPR0 */ | 409 | REST_GPR(0, r7) /* GPR0 */ |
| 406 | REST_2GPRS(2, r7) /* GPR2-3 */ | 410 | REST_2GPRS(2, r7) /* GPR2-3 */ |
| 407 | REST_GPR(4, r7) /* GPR4 */ | 411 | REST_GPR(4, r7) /* GPR4 */ |
| @@ -439,10 +443,33 @@ restore_gprs: | |||
| 439 | ld r6, _CCR(r7) | 443 | ld r6, _CCR(r7) |
| 440 | mtcr r6 | 444 | mtcr r6 |
| 441 | 445 | ||
| 442 | REST_GPR(1, r7) /* GPR1 */ | ||
| 443 | REST_GPR(5, r7) /* GPR5-7 */ | ||
| 444 | REST_GPR(6, r7) | 446 | REST_GPR(6, r7) |
| 445 | ld r7, GPR7(r7) | 447 | |
| 448 | /* | ||
| 449 | * Store r1 and r5 on the stack so that we can access them | ||
| 450 | * after we clear MSR RI. | ||
| 451 | */ | ||
| 452 | |||
| 453 | REST_GPR(5, r7) | ||
| 454 | std r5, -8(r1) | ||
| 455 | ld r5, GPR1(r7) | ||
| 456 | std r5, -16(r1) | ||
| 457 | |||
| 458 | REST_GPR(7, r7) | ||
| 459 | |||
| 460 | /* Clear MSR RI since we are about to change r1. EE is already off */ | ||
| 461 | li r5, 0 | ||
| 462 | mtmsrd r5, 1 | ||
| 463 | |||
| 464 | /* | ||
| 465 | * BE CAREFUL HERE: | ||
| 466 | * At this point we can't take an SLB miss since we have MSR_RI | ||
| 467 | * off. Load only to/from the stack/paca which are in SLB bolted regions | ||
| 468 | * until we turn MSR RI back on. | ||
| 469 | */ | ||
| 470 | |||
| 471 | ld r5, -8(r1) | ||
| 472 | ld r1, -16(r1) | ||
| 446 | 473 | ||
| 447 | /* Commit register state as checkpointed state: */ | 474 | /* Commit register state as checkpointed state: */ |
| 448 | TRECHKPT | 475 | TRECHKPT |
