author	Paul Mackerras <paulus@samba.org>	2014-01-08 05:25:20 -0500
committer	Alexander Graf <agraf@suse.de>	2014-01-27 10:00:59 -0500
commit	e0b7ec058c0eb7ba8d5d937d81de2bd16db6970e (patch)
tree	32b266bf4e3c497ab0a306731c01761b2dde11ce /arch/powerpc
parent	eee7ff9d2cc0eaaa00496bdf4193144104c7dc63 (diff)
KVM: PPC: Book3S HV: Align physical and virtual CPU thread numbers
On a threaded processor such as POWER7, we group VCPUs into virtual cores and arrange that the VCPUs in a virtual core run on the same physical core. Currently we don't enforce any correspondence between virtual thread numbers within a virtual core and physical thread numbers. Physical threads are allocated starting at 0 on a first-come first-served basis to runnable virtual threads (VCPUs).

POWER8 implements a new "msgsndp" instruction which guest kernels can use to interrupt other threads in the same core or sub-core. Since the instruction takes the destination physical thread ID as a parameter, it becomes necessary to align the physical thread IDs with the virtual thread IDs, that is, to make sure virtual thread N within a virtual core always runs on physical thread N.

This means that it's possible that thread 0, which is where we call __kvmppc_vcore_entry, may end up running some other vcpu than the one whose task called kvmppc_run_core(), or it may end up running no vcpu at all, if for example thread 0 of the virtual core is currently executing in userspace. However, we do need thread 0 to be responsible for switching the MMU -- a previous version of this patch that had other threads switching the MMU was found to be responsible for occasional memory corruption and machine check interrupts in the guest on POWER7 machines.

To accommodate this, we no longer pass the vcpu pointer to __kvmppc_vcore_entry, but instead let the assembly code load it from the PACA. Since the assembly code will need to know the kvm pointer and the thread ID for threads which don't have a vcpu, we move the thread ID into the PACA and we add a kvm pointer to the virtual core structure.

In the case where thread 0 has no vcpu to run, it still calls into kvmppc_hv_entry in order to do the MMU switch, and then naps until either its vcpu is ready to run in the guest, or some other thread needs to exit the guest. In the latter case, thread 0 jumps to the code that switches the MMU back to the host. This control flow means that now we switch the MMU before loading any guest vcpu state. Similarly, on guest exit we now save all the guest vcpu state before switching the MMU back to the host. This has required substantial code movement, making the diff rather large.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
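For illustration, here is a minimal C sketch of the fixed virtual-to-physical thread mapping this patch establishes. The field names (first_vcpuid, ptid, vcpu_id, threads_per_core) are taken from the diff below; the wrapper structs and function are hypothetical and only stand in for the real kvmppc_vcore / kvm_vcpu code paths.

```c
/* Illustrative sketch only, not kernel code. */
struct vcore_sketch {
	int first_vcpuid;	/* vcpu_id of virtual thread 0 of this core */
};

struct vcpu_sketch {
	int vcpu_id;
	int ptid;		/* physical thread number within the core */
};

static void assign_ptid(struct vcpu_sketch *vcpu, struct vcore_sketch *vc,
			int core, int threads_per_core)
{
	vc->first_vcpuid = core * threads_per_core;
	/* Virtual thread N of a virtual core always runs on physical
	 * thread N, which is what the guest's msgsndp needs. */
	vcpu->ptid = vcpu->vcpu_id - vc->first_vcpuid;
}
```

With this fixed mapping, ptid is set once at vcpu creation rather than being handed out first-come first-served each time the virtual core runs.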
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s_asm.h	1
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	2
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	3
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	46
-rw-r--r--	arch/powerpc/kvm/book3s_hv_interrupts.S	6
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	676
6 files changed, 397 insertions, 337 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348a4db9..490b34f5d6bf 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -87,6 +87,7 @@ struct kvmppc_host_state {
87 u8 hwthread_req; 87 u8 hwthread_req;
88 u8 hwthread_state; 88 u8 hwthread_state;
89 u8 host_ipi; 89 u8 host_ipi;
90 u8 ptid;
90 struct kvm_vcpu *kvm_vcpu; 91 struct kvm_vcpu *kvm_vcpu;
91 struct kvmppc_vcore *kvm_vcore; 92 struct kvmppc_vcore *kvm_vcore;
92 unsigned long xics_phys; 93 unsigned long xics_phys;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2c2ca5faf7f2..b850544dbc3f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -288,6 +288,7 @@ struct kvmppc_vcore {
288 int n_woken; 288 int n_woken;
289 int nap_count; 289 int nap_count;
290 int napping_threads; 290 int napping_threads;
291 int first_vcpuid;
291 u16 pcpu; 292 u16 pcpu;
292 u16 last_cpu; 293 u16 last_cpu;
293 u8 vcore_state; 294 u8 vcore_state;
@@ -298,6 +299,7 @@ struct kvmppc_vcore {
298 u64 stolen_tb; 299 u64 stolen_tb;
299 u64 preempt_tb; 300 u64 preempt_tb;
300 struct kvm_vcpu *runner; 301 struct kvm_vcpu *runner;
302 struct kvm *kvm;
301 u64 tb_offset; /* guest timebase - host timebase */ 303 u64 tb_offset; /* guest timebase - host timebase */
302 ulong lpcr; 304 ulong lpcr;
303 u32 arch_compat; 305 u32 arch_compat;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 5e64c3d2149f..332ae66883e4 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -506,7 +506,6 @@ int main(void)
506 DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); 506 DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
507 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); 507 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
508 DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); 508 DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
509 DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
510 DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); 509 DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
511 DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); 510 DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
512 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); 511 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
@@ -514,6 +513,7 @@ int main(void)
514 DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); 513 DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
515 DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); 514 DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
516 DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); 515 DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
516 DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
517 DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset)); 517 DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
518 DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr)); 518 DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
519 DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr)); 519 DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
@@ -583,6 +583,7 @@ int main(void)
583 HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); 583 HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
584 HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); 584 HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
585 HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); 585 HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
586 HSTATE_FIELD(HSTATE_PTID, ptid);
586 HSTATE_FIELD(HSTATE_MMCR, host_mmcr); 587 HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
587 HSTATE_FIELD(HSTATE_PMC, host_pmc); 588 HSTATE_FIELD(HSTATE_PMC, host_pmc);
588 HSTATE_FIELD(HSTATE_PURR, host_purr); 589 HSTATE_FIELD(HSTATE_PURR, host_purr);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7e1813ceabc1..7da53cd215db 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -990,6 +990,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
990 init_waitqueue_head(&vcore->wq); 990 init_waitqueue_head(&vcore->wq);
991 vcore->preempt_tb = TB_NIL; 991 vcore->preempt_tb = TB_NIL;
992 vcore->lpcr = kvm->arch.lpcr; 992 vcore->lpcr = kvm->arch.lpcr;
993 vcore->first_vcpuid = core * threads_per_core;
994 vcore->kvm = kvm;
993 } 995 }
994 kvm->arch.vcores[core] = vcore; 996 kvm->arch.vcores[core] = vcore;
995 kvm->arch.online_vcores++; 997 kvm->arch.online_vcores++;
@@ -1003,6 +1005,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1003 ++vcore->num_threads; 1005 ++vcore->num_threads;
1004 spin_unlock(&vcore->lock); 1006 spin_unlock(&vcore->lock);
1005 vcpu->arch.vcore = vcore; 1007 vcpu->arch.vcore = vcore;
1008 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
1006 1009
1007 vcpu->arch.cpu_type = KVM_CPU_3S_64; 1010 vcpu->arch.cpu_type = KVM_CPU_3S_64;
1008 kvmppc_sanity_check(vcpu); 1011 kvmppc_sanity_check(vcpu);
@@ -1066,7 +1069,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
1066 } 1069 }
1067} 1070}
1068 1071
1069extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 1072extern void __kvmppc_vcore_entry(void);
1070 1073
1071static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, 1074static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
1072 struct kvm_vcpu *vcpu) 1075 struct kvm_vcpu *vcpu)
@@ -1140,15 +1143,16 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
1140 tpaca = &paca[cpu]; 1143 tpaca = &paca[cpu];
1141 tpaca->kvm_hstate.kvm_vcpu = vcpu; 1144 tpaca->kvm_hstate.kvm_vcpu = vcpu;
1142 tpaca->kvm_hstate.kvm_vcore = vc; 1145 tpaca->kvm_hstate.kvm_vcore = vc;
1143 tpaca->kvm_hstate.napping = 0; 1146 tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
1144 vcpu->cpu = vc->pcpu; 1147 vcpu->cpu = vc->pcpu;
1145 smp_wmb(); 1148 smp_wmb();
1146#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) 1149#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
1147 if (vcpu->arch.ptid) { 1150 if (cpu != smp_processor_id()) {
1148#ifdef CONFIG_KVM_XICS 1151#ifdef CONFIG_KVM_XICS
1149 xics_wake_cpu(cpu); 1152 xics_wake_cpu(cpu);
1150#endif 1153#endif
1151 ++vc->n_woken; 1154 if (vcpu->arch.ptid)
1155 ++vc->n_woken;
1152 } 1156 }
1153#endif 1157#endif
1154} 1158}
@@ -1205,10 +1209,10 @@ static int on_primary_thread(void)
1205 */ 1209 */
1206static void kvmppc_run_core(struct kvmppc_vcore *vc) 1210static void kvmppc_run_core(struct kvmppc_vcore *vc)
1207{ 1211{
1208 struct kvm_vcpu *vcpu, *vcpu0, *vnext; 1212 struct kvm_vcpu *vcpu, *vnext;
1209 long ret; 1213 long ret;
1210 u64 now; 1214 u64 now;
1211 int ptid, i, need_vpa_update; 1215 int i, need_vpa_update;
1212 int srcu_idx; 1216 int srcu_idx;
1213 struct kvm_vcpu *vcpus_to_update[threads_per_core]; 1217 struct kvm_vcpu *vcpus_to_update[threads_per_core];
1214 1218
@@ -1246,25 +1250,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1246 } 1250 }
1247 1251
1248 /* 1252 /*
1249 * Assign physical thread IDs, first to non-ceded vcpus
1250 * and then to ceded ones.
1251 */
1252 ptid = 0;
1253 vcpu0 = NULL;
1254 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
1255 if (!vcpu->arch.ceded) {
1256 if (!ptid)
1257 vcpu0 = vcpu;
1258 vcpu->arch.ptid = ptid++;
1259 }
1260 }
1261 if (!vcpu0)
1262 goto out; /* nothing to run; should never happen */
1263 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
1264 if (vcpu->arch.ceded)
1265 vcpu->arch.ptid = ptid++;
1266
1267 /*
1268 * Make sure we are running on thread 0, and that 1253 * Make sure we are running on thread 0, and that
1269 * secondary threads are offline. 1254 * secondary threads are offline.
1270 */ 1255 */
@@ -1280,15 +1265,19 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1280 kvmppc_create_dtl_entry(vcpu, vc); 1265 kvmppc_create_dtl_entry(vcpu, vc);
1281 } 1266 }
1282 1267
1268 /* Set this explicitly in case thread 0 doesn't have a vcpu */
1269 get_paca()->kvm_hstate.kvm_vcore = vc;
1270 get_paca()->kvm_hstate.ptid = 0;
1271
1283 vc->vcore_state = VCORE_RUNNING; 1272 vc->vcore_state = VCORE_RUNNING;
1284 preempt_disable(); 1273 preempt_disable();
1285 spin_unlock(&vc->lock); 1274 spin_unlock(&vc->lock);
1286 1275
1287 kvm_guest_enter(); 1276 kvm_guest_enter();
1288 1277
1289 srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu); 1278 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
1290 1279
1291 __kvmppc_vcore_entry(NULL, vcpu0); 1280 __kvmppc_vcore_entry();
1292 1281
1293 spin_lock(&vc->lock); 1282 spin_lock(&vc->lock);
1294 /* disable sending of IPIs on virtual external irqs */ 1283 /* disable sending of IPIs on virtual external irqs */
@@ -1303,7 +1292,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1303 vc->vcore_state = VCORE_EXITING; 1292 vc->vcore_state = VCORE_EXITING;
1304 spin_unlock(&vc->lock); 1293 spin_unlock(&vc->lock);
1305 1294
1306 srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx); 1295 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
1307 1296
1308 /* make sure updates to secondary vcpu structs are visible now */ 1297 /* make sure updates to secondary vcpu structs are visible now */
1309 smp_mb(); 1298 smp_mb();
@@ -1411,7 +1400,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1411 if (!signal_pending(current)) { 1400 if (!signal_pending(current)) {
1412 if (vc->vcore_state == VCORE_RUNNING && 1401 if (vc->vcore_state == VCORE_RUNNING &&
1413 VCORE_EXIT_COUNT(vc) == 0) { 1402 VCORE_EXIT_COUNT(vc) == 0) {
1414 vcpu->arch.ptid = vc->n_runnable - 1;
1415 kvmppc_create_dtl_entry(vcpu, vc); 1403 kvmppc_create_dtl_entry(vcpu, vc);
1416 kvmppc_start_thread(vcpu); 1404 kvmppc_start_thread(vcpu);
1417 } else if (vc->vcore_state == VCORE_SLEEPING) { 1405 } else if (vc->vcore_state == VCORE_SLEEPING) {
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 00b7ed41ea17..e873796b1a29 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -35,7 +35,7 @@
35 ****************************************************************************/ 35 ****************************************************************************/
36 36
37/* Registers: 37/* Registers:
38 * r4: vcpu pointer 38 * none
39 */ 39 */
40_GLOBAL(__kvmppc_vcore_entry) 40_GLOBAL(__kvmppc_vcore_entry)
41 41
@@ -71,7 +71,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
71 mtmsrd r10,1 71 mtmsrd r10,1
72 72
73 /* Save host PMU registers */ 73 /* Save host PMU registers */
74 /* R4 is live here (vcpu pointer) but not r3 or r5 */
75 li r3, 1 74 li r3, 1
76 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 75 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
77 mfspr r7, SPRN_MMCR0 /* save MMCR0 */ 76 mfspr r7, SPRN_MMCR0 /* save MMCR0 */
@@ -136,16 +135,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
136 * enters the guest with interrupts enabled. 135 * enters the guest with interrupts enabled.
137 */ 136 */
138BEGIN_FTR_SECTION 137BEGIN_FTR_SECTION
138 ld r4, HSTATE_KVM_VCPU(r13)
139 ld r0, VCPU_PENDING_EXC(r4) 139 ld r0, VCPU_PENDING_EXC(r4)
140 li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL) 140 li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
141 oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h 141 oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
142 and. r0, r0, r7 142 and. r0, r0, r7
143 beq 32f 143 beq 32f
144 mr r31, r4
145 lhz r3, PACAPACAINDEX(r13) 144 lhz r3, PACAPACAINDEX(r13)
146 bl smp_send_reschedule 145 bl smp_send_reschedule
147 nop 146 nop
148 mr r4, r31
14932: 14732:
150END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 148END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
151#endif /* CONFIG_SMP */ 149#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 66db71c9156a..8bbe91bdb6da 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,6 +33,10 @@
33#error Need to fix lppaca and SLB shadow accesses in little endian mode 33#error Need to fix lppaca and SLB shadow accesses in little endian mode
34#endif 34#endif
35 35
36/* Values in HSTATE_NAPPING(r13) */
37#define NAPPING_CEDE 1
38#define NAPPING_NOVCPU 2
39
36/* 40/*
37 * Call kvmppc_hv_entry in real mode. 41 * Call kvmppc_hv_entry in real mode.
38 * Must be called with interrupts hard-disabled. 42 * Must be called with interrupts hard-disabled.
@@ -57,6 +61,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
57 RFI 61 RFI
58 62
59kvmppc_call_hv_entry: 63kvmppc_call_hv_entry:
64 ld r4, HSTATE_KVM_VCPU(r13)
60 bl kvmppc_hv_entry 65 bl kvmppc_hv_entry
61 66
62 /* Back from guest - restore host state and return to caller */ 67 /* Back from guest - restore host state and return to caller */
@@ -73,15 +78,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
73 ld r3,PACA_SPRG3(r13) 78 ld r3,PACA_SPRG3(r13)
74 mtspr SPRN_SPRG3,r3 79 mtspr SPRN_SPRG3,r3
75 80
76 /*
77 * Reload DEC. HDEC interrupts were disabled when
78 * we reloaded the host's LPCR value.
79 */
80 ld r3, HSTATE_DECEXP(r13)
81 mftb r4
82 subf r4, r4, r3
83 mtspr SPRN_DEC, r4
84
85 /* Reload the host's PMU registers */ 81 /* Reload the host's PMU registers */
86 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ 82 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
87 lbz r4, LPPACA_PMCINUSE(r3) 83 lbz r4, LPPACA_PMCINUSE(r3)
@@ -117,6 +113,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
11723: 11323:
118 114
119 /* 115 /*
116 * Reload DEC. HDEC interrupts were disabled when
117 * we reloaded the host's LPCR value.
118 */
119 ld r3, HSTATE_DECEXP(r13)
120 mftb r4
121 subf r4, r4, r3
122 mtspr SPRN_DEC, r4
123
124 /*
120 * For external and machine check interrupts, we need 125 * For external and machine check interrupts, we need
121 * to call the Linux handler to process the interrupt. 126 * to call the Linux handler to process the interrupt.
122 * We do that by jumping to absolute address 0x500 for 127 * We do that by jumping to absolute address 0x500 for
@@ -156,15 +161,82 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
15613: b machine_check_fwnmi 16113: b machine_check_fwnmi
157 162
158 163
164kvmppc_primary_no_guest:
165 /* We handle this much like a ceded vcpu */
166 /* set our bit in napping_threads */
167 ld r5, HSTATE_KVM_VCORE(r13)
168 lbz r7, HSTATE_PTID(r13)
169 li r0, 1
170 sld r0, r0, r7
171 addi r6, r5, VCORE_NAPPING_THREADS
1721: lwarx r3, 0, r6
173 or r3, r3, r0
174 stwcx. r3, 0, r6
175 bne 1b
176 /* order napping_threads update vs testing entry_exit_count */
177 isync
178 li r12, 0
179 lwz r7, VCORE_ENTRY_EXIT(r5)
180 cmpwi r7, 0x100
181 bge kvm_novcpu_exit /* another thread already exiting */
182 li r3, NAPPING_NOVCPU
183 stb r3, HSTATE_NAPPING(r13)
184 li r3, 1
185 stb r3, HSTATE_HWTHREAD_REQ(r13)
186
187 b kvm_do_nap
188
189kvm_novcpu_wakeup:
190 ld r1, HSTATE_HOST_R1(r13)
191 ld r5, HSTATE_KVM_VCORE(r13)
192 li r0, 0
193 stb r0, HSTATE_NAPPING(r13)
194 stb r0, HSTATE_HWTHREAD_REQ(r13)
195
196 /* see if any other thread is already exiting */
197 li r12, 0
198 lwz r0, VCORE_ENTRY_EXIT(r5)
199 cmpwi r0, 0x100
200 bge kvm_novcpu_exit
201
202 /* clear our bit in napping_threads */
203 lbz r7, HSTATE_PTID(r13)
204 li r0, 1
205 sld r0, r0, r7
206 addi r6, r5, VCORE_NAPPING_THREADS
2074: lwarx r3, 0, r6
208 andc r3, r3, r0
209 stwcx. r3, 0, r6
210 bne 4b
211
212 /* Check the wake reason in SRR1 to see why we got here */
213 mfspr r3, SPRN_SRR1
214 rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */
215 cmpwi r3, 4 /* was it an external interrupt? */
216 bne kvm_novcpu_exit /* if not, exit the guest */
217
218 /* extern interrupt - read and handle it */
219 li r12, BOOK3S_INTERRUPT_EXTERNAL
220 bl kvmppc_read_intr
221 cmpdi r3, 0
222 bge kvm_novcpu_exit
223 li r12, 0
224
225 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
226 ld r4, HSTATE_KVM_VCPU(r13)
227 cmpdi r4, 0
228 bne kvmppc_got_guest
229
230kvm_novcpu_exit:
231 b hdec_soon
232
159/* 233/*
160 * We come in here when wakened from nap mode on a secondary hw thread. 234 * We come in here when wakened from nap mode.
161 * Relocation is off and most register values are lost. 235 * Relocation is off and most register values are lost.
162 * r13 points to the PACA. 236 * r13 points to the PACA.
163 */ 237 */
164 .globl kvm_start_guest 238 .globl kvm_start_guest
165kvm_start_guest: 239kvm_start_guest:
166 ld r1,PACAEMERGSP(r13)
167 subi r1,r1,STACK_FRAME_OVERHEAD
168 ld r2,PACATOC(r13) 240 ld r2,PACATOC(r13)
169 241
170 li r0,KVM_HWTHREAD_IN_KVM 242 li r0,KVM_HWTHREAD_IN_KVM
@@ -176,8 +248,13 @@ kvm_start_guest:
176 248
177 /* were we napping due to cede? */ 249 /* were we napping due to cede? */
178 lbz r0,HSTATE_NAPPING(r13) 250 lbz r0,HSTATE_NAPPING(r13)
179 cmpwi r0,0 251 cmpwi r0,NAPPING_CEDE
180 bne kvm_end_cede 252 beq kvm_end_cede
253 cmpwi r0,NAPPING_NOVCPU
254 beq kvm_novcpu_wakeup
255
256 ld r1,PACAEMERGSP(r13)
257 subi r1,r1,STACK_FRAME_OVERHEAD
181 258
182 /* 259 /*
183 * We weren't napping due to cede, so this must be a secondary 260 * We weren't napping due to cede, so this must be a secondary
@@ -220,7 +297,13 @@ kvm_start_guest:
220 stw r8,HSTATE_SAVED_XIRR(r13) 297 stw r8,HSTATE_SAVED_XIRR(r13)
221 b kvm_no_guest 298 b kvm_no_guest
222 299
22330: bl kvmppc_hv_entry 30030:
301 /* Set HSTATE_DSCR(r13) to something sensible */
302 LOAD_REG_ADDR(r6, dscr_default)
303 ld r6, 0(r6)
304 std r6, HSTATE_DSCR(r13)
305
306 bl kvmppc_hv_entry
224 307
225 /* Back from the guest, go back to nap */ 308 /* Back from the guest, go back to nap */
226 /* Clear our vcpu pointer so we don't come back in early */ 309 /* Clear our vcpu pointer so we don't come back in early */
@@ -252,6 +335,7 @@ kvm_start_guest:
252kvm_no_guest: 335kvm_no_guest:
253 li r0, KVM_HWTHREAD_IN_NAP 336 li r0, KVM_HWTHREAD_IN_NAP
254 stb r0, HSTATE_HWTHREAD_STATE(r13) 337 stb r0, HSTATE_HWTHREAD_STATE(r13)
338kvm_do_nap:
255 li r3, LPCR_PECE0 339 li r3, LPCR_PECE0
256 mfspr r4, SPRN_LPCR 340 mfspr r4, SPRN_LPCR
257 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 341 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -276,7 +360,7 @@ kvmppc_hv_entry:
276 360
277 /* Required state: 361 /* Required state:
278 * 362 *
279 * R4 = vcpu pointer 363 * R4 = vcpu pointer (or NULL)
280 * MSR = ~IR|DR 364 * MSR = ~IR|DR
281 * R13 = PACA 365 * R13 = PACA
282 * R1 = host R1 366 * R1 = host R1
@@ -286,124 +370,12 @@ kvmppc_hv_entry:
286 std r0, PPC_LR_STKOFF(r1) 370 std r0, PPC_LR_STKOFF(r1)
287 stdu r1, -112(r1) 371 stdu r1, -112(r1)
288 372
289BEGIN_FTR_SECTION
290 /* Set partition DABR */
291 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
292 li r5,3
293 ld r6,VCPU_DABR(r4)
294 mtspr SPRN_DABRX,r5
295 mtspr SPRN_DABR,r6
296 BEGIN_FTR_SECTION_NESTED(89)
297 isync
298 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
299END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
300
301 /* Load guest PMU registers */
302 /* R4 is live here (vcpu pointer) */
303 li r3, 1
304 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
305 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
306 isync
307 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
308 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
309 lwz r6, VCPU_PMC + 8(r4)
310 lwz r7, VCPU_PMC + 12(r4)
311 lwz r8, VCPU_PMC + 16(r4)
312 lwz r9, VCPU_PMC + 20(r4)
313BEGIN_FTR_SECTION
314 lwz r10, VCPU_PMC + 24(r4)
315 lwz r11, VCPU_PMC + 28(r4)
316END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
317 mtspr SPRN_PMC1, r3
318 mtspr SPRN_PMC2, r5
319 mtspr SPRN_PMC3, r6
320 mtspr SPRN_PMC4, r7
321 mtspr SPRN_PMC5, r8
322 mtspr SPRN_PMC6, r9
323BEGIN_FTR_SECTION
324 mtspr SPRN_PMC7, r10
325 mtspr SPRN_PMC8, r11
326END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
327 ld r3, VCPU_MMCR(r4)
328 ld r5, VCPU_MMCR + 8(r4)
329 ld r6, VCPU_MMCR + 16(r4)
330 ld r7, VCPU_SIAR(r4)
331 ld r8, VCPU_SDAR(r4)
332 mtspr SPRN_MMCR1, r5
333 mtspr SPRN_MMCRA, r6
334 mtspr SPRN_SIAR, r7
335 mtspr SPRN_SDAR, r8
336 mtspr SPRN_MMCR0, r3
337 isync
338
339 /* Load up FP, VMX and VSX registers */
340 bl kvmppc_load_fp
341
342 ld r14, VCPU_GPR(R14)(r4)
343 ld r15, VCPU_GPR(R15)(r4)
344 ld r16, VCPU_GPR(R16)(r4)
345 ld r17, VCPU_GPR(R17)(r4)
346 ld r18, VCPU_GPR(R18)(r4)
347 ld r19, VCPU_GPR(R19)(r4)
348 ld r20, VCPU_GPR(R20)(r4)
349 ld r21, VCPU_GPR(R21)(r4)
350 ld r22, VCPU_GPR(R22)(r4)
351 ld r23, VCPU_GPR(R23)(r4)
352 ld r24, VCPU_GPR(R24)(r4)
353 ld r25, VCPU_GPR(R25)(r4)
354 ld r26, VCPU_GPR(R26)(r4)
355 ld r27, VCPU_GPR(R27)(r4)
356 ld r28, VCPU_GPR(R28)(r4)
357 ld r29, VCPU_GPR(R29)(r4)
358 ld r30, VCPU_GPR(R30)(r4)
359 ld r31, VCPU_GPR(R31)(r4)
360
361BEGIN_FTR_SECTION
362 /* Switch DSCR to guest value */
363 ld r5, VCPU_DSCR(r4)
364 mtspr SPRN_DSCR, r5
365END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
366
367 /*
368 * Set the decrementer to the guest decrementer.
369 */
370 ld r8,VCPU_DEC_EXPIRES(r4)
371 mftb r7
372 subf r3,r7,r8
373 mtspr SPRN_DEC,r3
374 stw r3,VCPU_DEC(r4)
375
376 ld r5, VCPU_SPRG0(r4)
377 ld r6, VCPU_SPRG1(r4)
378 ld r7, VCPU_SPRG2(r4)
379 ld r8, VCPU_SPRG3(r4)
380 mtspr SPRN_SPRG0, r5
381 mtspr SPRN_SPRG1, r6
382 mtspr SPRN_SPRG2, r7
383 mtspr SPRN_SPRG3, r8
384
385 /* Save R1 in the PACA */ 373 /* Save R1 in the PACA */
386 std r1, HSTATE_HOST_R1(r13) 374 std r1, HSTATE_HOST_R1(r13)
387 375
388 /* Load up DAR and DSISR */
389 ld r5, VCPU_DAR(r4)
390 lwz r6, VCPU_DSISR(r4)
391 mtspr SPRN_DAR, r5
392 mtspr SPRN_DSISR, r6
393
394 li r6, KVM_GUEST_MODE_HOST_HV 376 li r6, KVM_GUEST_MODE_HOST_HV
395 stb r6, HSTATE_IN_GUEST(r13) 377 stb r6, HSTATE_IN_GUEST(r13)
396 378
397BEGIN_FTR_SECTION
398 /* Restore AMR and UAMOR, set AMOR to all 1s */
399 ld r5,VCPU_AMR(r4)
400 ld r6,VCPU_UAMOR(r4)
401 li r7,-1
402 mtspr SPRN_AMR,r5
403 mtspr SPRN_UAMOR,r6
404 mtspr SPRN_AMOR,r7
405END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
406
407 /* Clear out SLB */ 379 /* Clear out SLB */
408 li r6,0 380 li r6,0
409 slbmte r6,r6 381 slbmte r6,r6
@@ -429,8 +401,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
429 bne 21b 401 bne 21b
430 402
431 /* Primary thread switches to guest partition. */ 403 /* Primary thread switches to guest partition. */
432 ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ 404 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
433 lwz r6,VCPU_PTID(r4) 405 lbz r6,HSTATE_PTID(r13)
434 cmpwi r6,0 406 cmpwi r6,0
435 bne 20f 407 bne 20f
436 ld r6,KVM_SDR1(r9) 408 ld r6,KVM_SDR1(r9)
@@ -504,32 +476,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
504 mtspr SPRN_RMOR,r8 476 mtspr SPRN_RMOR,r8
505 isync 477 isync
506 478
507 /* Increment yield count if they have a VPA */
508 ld r3, VCPU_VPA(r4)
509 cmpdi r3, 0
510 beq 25f
511 lwz r5, LPPACA_YIELDCOUNT(r3)
512 addi r5, r5, 1
513 stw r5, LPPACA_YIELDCOUNT(r3)
514 li r6, 1
515 stb r6, VCPU_VPA_DIRTY(r4)
51625:
517 /* Check if HDEC expires soon */ 479 /* Check if HDEC expires soon */
518 mfspr r3,SPRN_HDEC 480 mfspr r3,SPRN_HDEC
519 cmpwi r3,10 481 cmpwi r3,512 /* 1 microsecond */
520 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 482 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
521 mr r9,r4
522 blt hdec_soon 483 blt hdec_soon
523
524 /* Save purr/spurr */
525 mfspr r5,SPRN_PURR
526 mfspr r6,SPRN_SPURR
527 std r5,HSTATE_PURR(r13)
528 std r6,HSTATE_SPURR(r13)
529 ld r7,VCPU_PURR(r4)
530 ld r8,VCPU_SPURR(r4)
531 mtspr SPRN_PURR,r7
532 mtspr SPRN_SPURR,r8
533 b 31f 484 b 31f
534 485
535 /* 486 /*
@@ -540,7 +491,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
540 * We also have to invalidate the TLB since its 491 * We also have to invalidate the TLB since its
541 * entries aren't tagged with the LPID. 492 * entries aren't tagged with the LPID.
542 */ 493 */
54330: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ 49430: ld r5,HSTATE_KVM_VCORE(r13)
495 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
544 496
545 /* first take native_tlbie_lock */ 497 /* first take native_tlbie_lock */
546 .section ".toc","aw" 498 .section ".toc","aw"
@@ -605,7 +557,6 @@ toc_tlbie_lock:
605 mfspr r3,SPRN_HDEC 557 mfspr r3,SPRN_HDEC
606 cmpwi r3,10 558 cmpwi r3,10
607 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 559 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
608 mr r9,r4
609 blt hdec_soon 560 blt hdec_soon
610 561
611 /* Enable HDEC interrupts */ 562 /* Enable HDEC interrupts */
@@ -620,9 +571,14 @@ toc_tlbie_lock:
620 mfspr r0,SPRN_HID0 571 mfspr r0,SPRN_HID0
621 mfspr r0,SPRN_HID0 572 mfspr r0,SPRN_HID0
622 mfspr r0,SPRN_HID0 573 mfspr r0,SPRN_HID0
57431:
575 /* Do we have a guest vcpu to run? */
576 cmpdi r4, 0
577 beq kvmppc_primary_no_guest
578kvmppc_got_guest:
623 579
624 /* Load up guest SLB entries */ 580 /* Load up guest SLB entries */
62531: lwz r5,VCPU_SLB_MAX(r4) 581 lwz r5,VCPU_SLB_MAX(r4)
626 cmpwi r5,0 582 cmpwi r5,0
627 beq 9f 583 beq 9f
628 mtctr r5 584 mtctr r5
@@ -633,6 +589,140 @@ toc_tlbie_lock:
633 addi r6,r6,VCPU_SLB_SIZE 589 addi r6,r6,VCPU_SLB_SIZE
634 bdnz 1b 590 bdnz 1b
6359: 5919:
592 /* Increment yield count if they have a VPA */
593 ld r3, VCPU_VPA(r4)
594 cmpdi r3, 0
595 beq 25f
596 lwz r5, LPPACA_YIELDCOUNT(r3)
597 addi r5, r5, 1
598 stw r5, LPPACA_YIELDCOUNT(r3)
599 li r6, 1
600 stb r6, VCPU_VPA_DIRTY(r4)
60125:
602
603BEGIN_FTR_SECTION
604 /* Save purr/spurr */
605 mfspr r5,SPRN_PURR
606 mfspr r6,SPRN_SPURR
607 std r5,HSTATE_PURR(r13)
608 std r6,HSTATE_SPURR(r13)
609 ld r7,VCPU_PURR(r4)
610 ld r8,VCPU_SPURR(r4)
611 mtspr SPRN_PURR,r7
612 mtspr SPRN_SPURR,r8
613END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
614
615BEGIN_FTR_SECTION
616 /* Set partition DABR */
617 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
618 li r5,3
619 ld r6,VCPU_DABR(r4)
620 mtspr SPRN_DABRX,r5
621 mtspr SPRN_DABR,r6
622 BEGIN_FTR_SECTION_NESTED(89)
623 isync
624 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
625END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
626
627 /* Load guest PMU registers */
628 /* R4 is live here (vcpu pointer) */
629 li r3, 1
630 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
631 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
632 isync
633 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
634 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
635 lwz r6, VCPU_PMC + 8(r4)
636 lwz r7, VCPU_PMC + 12(r4)
637 lwz r8, VCPU_PMC + 16(r4)
638 lwz r9, VCPU_PMC + 20(r4)
639BEGIN_FTR_SECTION
640 lwz r10, VCPU_PMC + 24(r4)
641 lwz r11, VCPU_PMC + 28(r4)
642END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
643 mtspr SPRN_PMC1, r3
644 mtspr SPRN_PMC2, r5
645 mtspr SPRN_PMC3, r6
646 mtspr SPRN_PMC4, r7
647 mtspr SPRN_PMC5, r8
648 mtspr SPRN_PMC6, r9
649BEGIN_FTR_SECTION
650 mtspr SPRN_PMC7, r10
651 mtspr SPRN_PMC8, r11
652END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
653 ld r3, VCPU_MMCR(r4)
654 ld r5, VCPU_MMCR + 8(r4)
655 ld r6, VCPU_MMCR + 16(r4)
656 ld r7, VCPU_SIAR(r4)
657 ld r8, VCPU_SDAR(r4)
658 mtspr SPRN_MMCR1, r5
659 mtspr SPRN_MMCRA, r6
660 mtspr SPRN_SIAR, r7
661 mtspr SPRN_SDAR, r8
662 mtspr SPRN_MMCR0, r3
663 isync
664
665 /* Load up FP, VMX and VSX registers */
666 bl kvmppc_load_fp
667
668 ld r14, VCPU_GPR(R14)(r4)
669 ld r15, VCPU_GPR(R15)(r4)
670 ld r16, VCPU_GPR(R16)(r4)
671 ld r17, VCPU_GPR(R17)(r4)
672 ld r18, VCPU_GPR(R18)(r4)
673 ld r19, VCPU_GPR(R19)(r4)
674 ld r20, VCPU_GPR(R20)(r4)
675 ld r21, VCPU_GPR(R21)(r4)
676 ld r22, VCPU_GPR(R22)(r4)
677 ld r23, VCPU_GPR(R23)(r4)
678 ld r24, VCPU_GPR(R24)(r4)
679 ld r25, VCPU_GPR(R25)(r4)
680 ld r26, VCPU_GPR(R26)(r4)
681 ld r27, VCPU_GPR(R27)(r4)
682 ld r28, VCPU_GPR(R28)(r4)
683 ld r29, VCPU_GPR(R29)(r4)
684 ld r30, VCPU_GPR(R30)(r4)
685 ld r31, VCPU_GPR(R31)(r4)
686
687BEGIN_FTR_SECTION
688 /* Switch DSCR to guest value */
689 ld r5, VCPU_DSCR(r4)
690 mtspr SPRN_DSCR, r5
691END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
692
693 /*
694 * Set the decrementer to the guest decrementer.
695 */
696 ld r8,VCPU_DEC_EXPIRES(r4)
697 mftb r7
698 subf r3,r7,r8
699 mtspr SPRN_DEC,r3
700 stw r3,VCPU_DEC(r4)
701
702 ld r5, VCPU_SPRG0(r4)
703 ld r6, VCPU_SPRG1(r4)
704 ld r7, VCPU_SPRG2(r4)
705 ld r8, VCPU_SPRG3(r4)
706 mtspr SPRN_SPRG0, r5
707 mtspr SPRN_SPRG1, r6
708 mtspr SPRN_SPRG2, r7
709 mtspr SPRN_SPRG3, r8
710
711 /* Load up DAR and DSISR */
712 ld r5, VCPU_DAR(r4)
713 lwz r6, VCPU_DSISR(r4)
714 mtspr SPRN_DAR, r5
715 mtspr SPRN_DSISR, r6
716
717BEGIN_FTR_SECTION
718 /* Restore AMR and UAMOR, set AMOR to all 1s */
719 ld r5,VCPU_AMR(r4)
720 ld r6,VCPU_UAMOR(r4)
721 li r7,-1
722 mtspr SPRN_AMR,r5
723 mtspr SPRN_UAMOR,r6
724 mtspr SPRN_AMOR,r7
725END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
636 726
637 /* Restore state of CTRL run bit; assume 1 on entry */ 727 /* Restore state of CTRL run bit; assume 1 on entry */
638 lwz r5,VCPU_CTRL(r4) 728 lwz r5,VCPU_CTRL(r4)
@@ -984,13 +1074,130 @@ BEGIN_FTR_SECTION
984 mtspr SPRN_SPURR,r4 1074 mtspr SPRN_SPURR,r4
985END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201) 1075END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
986 1076
1077 /* Save DEC */
1078 mfspr r5,SPRN_DEC
1079 mftb r6
1080 extsw r5,r5
1081 add r5,r5,r6
1082 std r5,VCPU_DEC_EXPIRES(r9)
1083
1084 /* Save and reset AMR and UAMOR before turning on the MMU */
1085BEGIN_FTR_SECTION
1086 mfspr r5,SPRN_AMR
1087 mfspr r6,SPRN_UAMOR
1088 std r5,VCPU_AMR(r9)
1089 std r6,VCPU_UAMOR(r9)
1090 li r6,0
1091 mtspr SPRN_AMR,r6
1092END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1093
1094 /* Switch DSCR back to host value */
1095BEGIN_FTR_SECTION
1096 mfspr r8, SPRN_DSCR
1097 ld r7, HSTATE_DSCR(r13)
1098 std r8, VCPU_DSCR(r9)
1099 mtspr SPRN_DSCR, r7
1100END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1101
1102 /* Save non-volatile GPRs */
1103 std r14, VCPU_GPR(R14)(r9)
1104 std r15, VCPU_GPR(R15)(r9)
1105 std r16, VCPU_GPR(R16)(r9)
1106 std r17, VCPU_GPR(R17)(r9)
1107 std r18, VCPU_GPR(R18)(r9)
1108 std r19, VCPU_GPR(R19)(r9)
1109 std r20, VCPU_GPR(R20)(r9)
1110 std r21, VCPU_GPR(R21)(r9)
1111 std r22, VCPU_GPR(R22)(r9)
1112 std r23, VCPU_GPR(R23)(r9)
1113 std r24, VCPU_GPR(R24)(r9)
1114 std r25, VCPU_GPR(R25)(r9)
1115 std r26, VCPU_GPR(R26)(r9)
1116 std r27, VCPU_GPR(R27)(r9)
1117 std r28, VCPU_GPR(R28)(r9)
1118 std r29, VCPU_GPR(R29)(r9)
1119 std r30, VCPU_GPR(R30)(r9)
1120 std r31, VCPU_GPR(R31)(r9)
1121
1122 /* Save SPRGs */
1123 mfspr r3, SPRN_SPRG0
1124 mfspr r4, SPRN_SPRG1
1125 mfspr r5, SPRN_SPRG2
1126 mfspr r6, SPRN_SPRG3
1127 std r3, VCPU_SPRG0(r9)
1128 std r4, VCPU_SPRG1(r9)
1129 std r5, VCPU_SPRG2(r9)
1130 std r6, VCPU_SPRG3(r9)
1131
1132 /* save FP state */
1133 mr r3, r9
1134 bl kvmppc_save_fp
1135
1136 /* Increment yield count if they have a VPA */
1137 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1138 cmpdi r8, 0
1139 beq 25f
1140 lwz r3, LPPACA_YIELDCOUNT(r8)
1141 addi r3, r3, 1
1142 stw r3, LPPACA_YIELDCOUNT(r8)
1143 li r3, 1
1144 stb r3, VCPU_VPA_DIRTY(r9)
114525:
1146 /* Save PMU registers if requested */
1147 /* r8 and cr0.eq are live here */
1148 li r3, 1
1149 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1150 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1151 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1152 mfspr r6, SPRN_MMCRA
1153BEGIN_FTR_SECTION
1154 /* On P7, clear MMCRA in order to disable SDAR updates */
1155 li r7, 0
1156 mtspr SPRN_MMCRA, r7
1157END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1158 isync
1159 beq 21f /* if no VPA, save PMU stuff anyway */
1160 lbz r7, LPPACA_PMCINUSE(r8)
1161 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1162 bne 21f
1163 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1164 b 22f
116521: mfspr r5, SPRN_MMCR1
1166 mfspr r7, SPRN_SIAR
1167 mfspr r8, SPRN_SDAR
1168 std r4, VCPU_MMCR(r9)
1169 std r5, VCPU_MMCR + 8(r9)
1170 std r6, VCPU_MMCR + 16(r9)
1171 std r7, VCPU_SIAR(r9)
1172 std r8, VCPU_SDAR(r9)
1173 mfspr r3, SPRN_PMC1
1174 mfspr r4, SPRN_PMC2
1175 mfspr r5, SPRN_PMC3
1176 mfspr r6, SPRN_PMC4
1177 mfspr r7, SPRN_PMC5
1178 mfspr r8, SPRN_PMC6
1179BEGIN_FTR_SECTION
1180 mfspr r10, SPRN_PMC7
1181 mfspr r11, SPRN_PMC8
1182END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1183 stw r3, VCPU_PMC(r9)
1184 stw r4, VCPU_PMC + 4(r9)
1185 stw r5, VCPU_PMC + 8(r9)
1186 stw r6, VCPU_PMC + 12(r9)
1187 stw r7, VCPU_PMC + 16(r9)
1188 stw r8, VCPU_PMC + 20(r9)
1189BEGIN_FTR_SECTION
1190 stw r10, VCPU_PMC + 24(r9)
1191 stw r11, VCPU_PMC + 28(r9)
1192END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
119322:
987 /* Clear out SLB */ 1194 /* Clear out SLB */
988 li r5,0 1195 li r5,0
989 slbmte r5,r5 1196 slbmte r5,r5
990 slbia 1197 slbia
991 ptesync 1198 ptesync
992 1199
993hdec_soon: /* r9 = vcpu, r12 = trap, r13 = paca */ 1200hdec_soon: /* r12 = trap, r13 = paca */
994BEGIN_FTR_SECTION 1201BEGIN_FTR_SECTION
995 b 32f 1202 b 32f
996END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1203END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
@@ -1024,8 +1231,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1024 */ 1231 */
1025 cmpwi r3,0x100 /* Are we the first here? */ 1232 cmpwi r3,0x100 /* Are we the first here? */
1026 bge 43f 1233 bge 43f
1027 cmpwi r3,1 /* Are any other threads in the guest? */
1028 ble 43f
1029 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1234 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1030 beq 40f 1235 beq 40f
1031 li r0,0 1236 li r0,0
@@ -1036,7 +1241,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1036 * doesn't wake CPUs up from nap. 1241 * doesn't wake CPUs up from nap.
1037 */ 1242 */
1038 lwz r3,VCORE_NAPPING_THREADS(r5) 1243 lwz r3,VCORE_NAPPING_THREADS(r5)
1039 lwz r4,VCPU_PTID(r9) 1244 lbz r4,HSTATE_PTID(r13)
1040 li r0,1 1245 li r0,1
1041 sld r0,r0,r4 1246 sld r0,r0,r4
1042 andc. r3,r3,r0 /* no sense IPI'ing ourselves */ 1247 andc. r3,r3,r0 /* no sense IPI'ing ourselves */
@@ -1053,10 +1258,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1053 addi r6,r6,PACA_SIZE 1258 addi r6,r6,PACA_SIZE
1054 bne 42b 1259 bne 42b
1055 1260
1261secondary_too_late:
1056 /* Secondary threads wait for primary to do partition switch */ 1262 /* Secondary threads wait for primary to do partition switch */
105743: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ 126343: ld r5,HSTATE_KVM_VCORE(r13)
1058 ld r5,HSTATE_KVM_VCORE(r13) 1264 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1059 lwz r3,VCPU_PTID(r9) 1265 lbz r3,HSTATE_PTID(r13)
1060 cmpwi r3,0 1266 cmpwi r3,0
1061 beq 15f 1267 beq 15f
1062 HMT_LOW 1268 HMT_LOW
@@ -1121,7 +1327,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1121 * We have to lock against concurrent tlbies, and 1327 * We have to lock against concurrent tlbies, and
1122 * we have to flush the whole TLB. 1328 * we have to flush the whole TLB.
1123 */ 1329 */
112432: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ 133032: ld r5,HSTATE_KVM_VCORE(r13)
1331 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1125 1332
1126 /* Take the guest's tlbie_lock */ 1333 /* Take the guest's tlbie_lock */
1127#ifdef __BIG_ENDIAN__ 1334#ifdef __BIG_ENDIAN__
@@ -1204,151 +1411,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
12041: addi r8,r8,16 14111: addi r8,r8,16
1205 .endr 1412 .endr
1206 1413
1207 /* Save DEC */
1208 mfspr r5,SPRN_DEC
1209 mftb r6
1210 extsw r5,r5
1211 add r5,r5,r6
1212 std r5,VCPU_DEC_EXPIRES(r9)
1213
1214 /* Save and reset AMR and UAMOR before turning on the MMU */
1215BEGIN_FTR_SECTION
1216 mfspr r5,SPRN_AMR
1217 mfspr r6,SPRN_UAMOR
1218 std r5,VCPU_AMR(r9)
1219 std r6,VCPU_UAMOR(r9)
1220 li r6,0
1221 mtspr SPRN_AMR,r6
1222END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1223
1224 /* Unset guest mode */ 1414 /* Unset guest mode */
1225 li r0, KVM_GUEST_MODE_NONE 1415 li r0, KVM_GUEST_MODE_NONE
1226 stb r0, HSTATE_IN_GUEST(r13) 1416 stb r0, HSTATE_IN_GUEST(r13)
1227 1417
1228 /* Switch DSCR back to host value */
1229BEGIN_FTR_SECTION
1230 mfspr r8, SPRN_DSCR
1231 ld r7, HSTATE_DSCR(r13)
1232 std r8, VCPU_DSCR(r9)
1233 mtspr SPRN_DSCR, r7
1234END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1235
1236 /* Save non-volatile GPRs */
1237 std r14, VCPU_GPR(R14)(r9)
1238 std r15, VCPU_GPR(R15)(r9)
1239 std r16, VCPU_GPR(R16)(r9)
1240 std r17, VCPU_GPR(R17)(r9)
1241 std r18, VCPU_GPR(R18)(r9)
1242 std r19, VCPU_GPR(R19)(r9)
1243 std r20, VCPU_GPR(R20)(r9)
1244 std r21, VCPU_GPR(R21)(r9)
1245 std r22, VCPU_GPR(R22)(r9)
1246 std r23, VCPU_GPR(R23)(r9)
1247 std r24, VCPU_GPR(R24)(r9)
1248 std r25, VCPU_GPR(R25)(r9)
1249 std r26, VCPU_GPR(R26)(r9)
1250 std r27, VCPU_GPR(R27)(r9)
1251 std r28, VCPU_GPR(R28)(r9)
1252 std r29, VCPU_GPR(R29)(r9)
1253 std r30, VCPU_GPR(R30)(r9)
1254 std r31, VCPU_GPR(R31)(r9)
1255
1256 /* Save SPRGs */
1257 mfspr r3, SPRN_SPRG0
1258 mfspr r4, SPRN_SPRG1
1259 mfspr r5, SPRN_SPRG2
1260 mfspr r6, SPRN_SPRG3
1261 std r3, VCPU_SPRG0(r9)
1262 std r4, VCPU_SPRG1(r9)
1263 std r5, VCPU_SPRG2(r9)
1264 std r6, VCPU_SPRG3(r9)
1265
1266 /* save FP state */
1267 mr r3, r9
1268 bl kvmppc_save_fp
1269
1270 /* Increment yield count if they have a VPA */
1271 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1272 cmpdi r8, 0
1273 beq 25f
1274 lwz r3, LPPACA_YIELDCOUNT(r8)
1275 addi r3, r3, 1
1276 stw r3, LPPACA_YIELDCOUNT(r8)
1277 li r3, 1
1278 stb r3, VCPU_VPA_DIRTY(r9)
127925:
1280 /* Save PMU registers if requested */
1281 /* r8 and cr0.eq are live here */
1282 li r3, 1
1283 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1284 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1285 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1286 mfspr r6, SPRN_MMCRA
1287BEGIN_FTR_SECTION
1288 /* On P7, clear MMCRA in order to disable SDAR updates */
1289 li r7, 0
1290 mtspr SPRN_MMCRA, r7
1291END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1292 isync
1293 beq 21f /* if no VPA, save PMU stuff anyway */
1294 lbz r7, LPPACA_PMCINUSE(r8)
1295 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1296 bne 21f
1297 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1298 b 22f
129921: mfspr r5, SPRN_MMCR1
1300 mfspr r7, SPRN_SIAR
1301 mfspr r8, SPRN_SDAR
1302 std r4, VCPU_MMCR(r9)
1303 std r5, VCPU_MMCR + 8(r9)
1304 std r6, VCPU_MMCR + 16(r9)
1305 std r7, VCPU_SIAR(r9)
1306 std r8, VCPU_SDAR(r9)
1307 mfspr r3, SPRN_PMC1
1308 mfspr r4, SPRN_PMC2
1309 mfspr r5, SPRN_PMC3
1310 mfspr r6, SPRN_PMC4
1311 mfspr r7, SPRN_PMC5
1312 mfspr r8, SPRN_PMC6
1313BEGIN_FTR_SECTION
1314 mfspr r10, SPRN_PMC7
1315 mfspr r11, SPRN_PMC8
1316END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1317 stw r3, VCPU_PMC(r9)
1318 stw r4, VCPU_PMC + 4(r9)
1319 stw r5, VCPU_PMC + 8(r9)
1320 stw r6, VCPU_PMC + 12(r9)
1321 stw r7, VCPU_PMC + 16(r9)
1322 stw r8, VCPU_PMC + 20(r9)
1323BEGIN_FTR_SECTION
1324 stw r10, VCPU_PMC + 24(r9)
1325 stw r11, VCPU_PMC + 28(r9)
1326END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
132722:
1328 ld r0, 112+PPC_LR_STKOFF(r1) 1418 ld r0, 112+PPC_LR_STKOFF(r1)
1329 addi r1, r1, 112 1419 addi r1, r1, 112
1330 mtlr r0 1420 mtlr r0
1331 blr 1421 blr
1332secondary_too_late:
1333 ld r5,HSTATE_KVM_VCORE(r13)
1334 HMT_LOW
133513: lbz r3,VCORE_IN_GUEST(r5)
1336 cmpwi r3,0
1337 bne 13b
1338 HMT_MEDIUM
1339 li r0, KVM_GUEST_MODE_NONE
1340 stb r0, HSTATE_IN_GUEST(r13)
1341 ld r11,PACA_SLBSHADOWPTR(r13)
1342
1343 .rept SLB_NUM_BOLTED
1344 ld r5,SLBSHADOW_SAVEAREA(r11)
1345 ld r6,SLBSHADOW_SAVEAREA+8(r11)
1346 andis. r7,r5,SLB_ESID_V@h
1347 beq 1f
1348 slbmte r6,r5
13491: addi r11,r11,16
1350 .endr
1351 b 22b
1352 1422
1353/* 1423/*
1354 * Check whether an HDSI is an HPTE not found fault or something else. 1424 * Check whether an HDSI is an HPTE not found fault or something else.
@@ -1649,7 +1719,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1649 * up to the host. 1719 * up to the host.
1650 */ 1720 */
1651 ld r5,HSTATE_KVM_VCORE(r13) 1721 ld r5,HSTATE_KVM_VCORE(r13)
1652 lwz r6,VCPU_PTID(r3) 1722 lbz r6,HSTATE_PTID(r13)
1653 lwz r8,VCORE_ENTRY_EXIT(r5) 1723 lwz r8,VCORE_ENTRY_EXIT(r5)
1654 clrldi r8,r8,56 1724 clrldi r8,r8,56
1655 li r0,1 1725 li r0,1
@@ -1662,7 +1732,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1662 bge kvm_cede_exit 1732 bge kvm_cede_exit
1663 stwcx. r4,0,r6 1733 stwcx. r4,0,r6
1664 bne 31b 1734 bne 31b
1665 li r0,1 1735 li r0,NAPPING_CEDE
1666 stb r0,HSTATE_NAPPING(r13) 1736 stb r0,HSTATE_NAPPING(r13)
1667 /* order napping_threads update vs testing entry_exit_count */ 1737 /* order napping_threads update vs testing entry_exit_count */
1668 lwsync 1738 lwsync
@@ -1751,7 +1821,7 @@ kvm_end_cede:
1751 1821
1752 /* clear our bit in vcore->napping_threads */ 1822 /* clear our bit in vcore->napping_threads */
175333: ld r5,HSTATE_KVM_VCORE(r13) 182333: ld r5,HSTATE_KVM_VCORE(r13)
1754 lwz r3,VCPU_PTID(r4) 1824 lbz r3,HSTATE_PTID(r13)
1755 li r0,1 1825 li r0,1
1756 sld r0,r0,r3 1826 sld r0,r0,r3
1757 addi r6,r5,VCORE_NAPPING_THREADS 1827 addi r6,r5,VCORE_NAPPING_THREADS