author		Mihai Caraman <mihai.caraman@freescale.com>	2014-09-01 05:01:58 -0400
committer	Alexander Graf <agraf@suse.de>	2014-09-22 04:11:35 -0400
commit		188e267ce249b491dfbb77d881996dcb5610dc90 (patch)
tree		c7d5c386562371f0e4dad2c3d885d9b2d58a3e6c /arch/powerpc/kvm/e500mc.c
parent		9333e6c4c15a4084dd5f4336cd4379afbf99e458 (diff)
KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core
ePAPR represents hardware threads as cpu node properties in the device
tree, so with existing QEMU each hardware thread is simply exposed as a
vcpu with one hardware thread.
The e6500 core shares TLBs between hardware threads. Since there is no
tlb write conditional instruction, the Linux kernel uses per-core
mechanisms to protect against duplicate TLB entries.
The guest is unable to detect real sibling threads, so it cannot use the
TLB protection mechanism. An alternative solution is to have the hypervisor
allocate different lpids to guest vcpus that run simultaneously on real
sibling threads. On systems with two threads per core, this patch halves
the size of the lpid pool that the allocator sees and uses two lpids per VM.
Even numbers are used to speed up vcpu lpid computation with consecutive
lpids per VM: vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
[agraf: fix spelling]
Signed-off-by: Alexander Graf <agraf@suse.de>
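
Note: the get_lpid() helper used throughout the diff below is introduced by
this patch in arch/powerpc/kvm/e500.h, a file hidden by the diffstat filter.
The following is a sketch reconstructed from the helper's call sites in this
diff, not the verbatim e500.h change; in particular, deriving the thread
index from smp_processor_id() is an assumption, though one consistent with
the preempt_disable() added below.

	/* Sketch (assumed): map a VM's even base lpid to the lpid of the
	 * hardware thread we are currently running on. */
	static inline u32 get_thread_specific_lpid(u32 lpid)
	{
		u32 vcpu_lpid = lpid;

		/* On dual-threaded cores each thread runs with its own lpid:
		 * the even base lpid on thread 0, base + 1 on thread 1. */
		if (threads_per_core == 2)
			vcpu_lpid |= smp_processor_id() & 1;

		return vcpu_lpid;
	}

	static inline u32 get_lpid(struct kvm_vcpu *vcpu)
	{
		return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
	}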
Diffstat (limited to 'arch/powerpc/kvm/e500mc.c')
-rw-r--r--	arch/powerpc/kvm/e500mc.c	46
1 file changed, 33 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 454934990672..bf8f99f6676a 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -48,10 +48,11 @@ void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
 		return;
 	}
 
-
-	tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
+	preempt_disable();
+	tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
 	mb();
 	ppc_msgsnd(dbell_type, 0, tag);
+	preempt_enable();
 }
 
 /* gtlbe must not be mapped by more than one host tlb entry */
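
The preempt_disable()/preempt_enable() pair added in this hunk is what makes
get_lpid() safe to call here: on a dual-threaded core its result depends on
which hardware thread the host is currently running on, so the computed
doorbell tag must not be carried across a migration. Annotated for
illustration (assuming get_lpid() reads smp_processor_id(), as in the sketch
above):

	preempt_disable();	/* pin to the current hardware thread */
	tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
	mb();			/* full barrier before sending the doorbell */
	ppc_msgsnd(dbell_type, 0, tag);
	preempt_enable();	/* migration is harmless again */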
@@ -60,12 +61,11 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
 {
 	unsigned int tid, ts;
 	gva_t eaddr;
-	u32 val, lpid;
+	u32 val;
 	unsigned long flags;
 
 	ts = get_tlb_ts(gtlbe);
 	tid = get_tlb_tid(gtlbe);
-	lpid = vcpu_e500->vcpu.kvm->arch.lpid;
 
 	/* We search the host TLB to invalidate its shadow TLB entry */
 	val = (tid << 16) | ts;
@@ -74,7 +74,7 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
 	local_irq_save(flags);
 
 	mtspr(SPRN_MAS6, val);
-	mtspr(SPRN_MAS5, MAS5_SGS | lpid);
+	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
 
 	asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
 	val = mfspr(SPRN_MAS1);
@@ -95,7 +95,7 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
+	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
 	asm volatile("tlbilxlpid");
 	mtspr(SPRN_MAS5, 0);
 	local_irq_restore(flags);
@@ -110,6 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
 }
 
+/* We use two lpids per VM */
 static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
 
 static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
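
Note that last_vcpu_of_lpid stays sized at KVMPPC_NR_LPIDS even though the
allocator now sees only half that many lpids: as the next hunks show, the
array is indexed by get_lpid(), i.e. by the thread-specific hardware lpid,
which continues to range over the full hardware lpid space.

	/* Indexed by get_lpid(), which spans [0, KVMPPC_NR_LPIDS); only the
	 * allocator's view is halved, not the index space. */
	static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);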
@@ -118,10 +119,12 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
 
 	kvmppc_booke_vcpu_load(vcpu, cpu);
 
-	mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
+	mtspr(SPRN_LPID, get_lpid(vcpu));
 	mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
 	mtspr(SPRN_GPIR, vcpu->vcpu_id);
 	mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
+	vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
+	vcpu->arch.epsc = vcpu->arch.eplc;
 	mtspr(SPRN_EPLC, vcpu->arch.eplc);
 	mtspr(SPRN_EPSC, vcpu->arch.epsc);
 
@@ -141,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
 	mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
 	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
-	    __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
+	    __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
 		kvmppc_e500_tlbil_all(vcpu_e500);
-		__get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
+		__get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
 	}
 }
 
@@ -193,8 +196,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
 #endif
 	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;
-	vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
-	vcpu->arch.epsc = vcpu->arch.eplc;
 
 	vcpu->arch.pvr = mfspr(SPRN_PVR);
 	vcpu_e500->svr = mfspr(SPRN_SVR);
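
The two assignments deleted here reappear in kvmppc_core_vcpu_load_e500mc()
above: because get_lpid() can return a different lpid each time a vcpu lands
on a different hardware thread, the external PID values can no longer be
computed once at vcpu setup and must be refreshed on every vcpu load.

	/* Recomputed on every vcpu_load, not once at setup: the lpid part of
	 * these values follows the hardware thread the vcpu runs on. */
	vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
	vcpu->arch.epsc = vcpu->arch.eplc;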
@@ -354,13 +355,26 @@ static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
 	if (lpid < 0)
 		return lpid;
 
+	/*
+	 * Use two lpids per VM on cores with two threads like e6500. Use
+	 * even numbers to speed up vcpu lpid computation with consecutive lpids
+	 * per VM: vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
+	 */
+	if (threads_per_core == 2)
+		lpid <<= 1;
+
 	kvm->arch.lpid = lpid;
 	return 0;
 }
 
 static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
 {
-	kvmppc_free_lpid(kvm->arch.lpid);
+	int lpid = kvm->arch.lpid;
+
+	if (threads_per_core == 2)
+		lpid >>= 1;
+
+	kvmppc_free_lpid(lpid);
 }
 
 static struct kvmppc_ops kvm_ops_e500mc = {
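
As a worked example of this numbering scheme (a hypothetical userspace
demonstration, not kernel code): the allocator hands out lpids 1, 2, 3, ...;
init_vm doubles them into even per-VM bases, each hardware thread ORs in its
thread index, and destroy_vm halves the base before returning it.

	#include <stdio.h>

	int main(void)
	{
		int threads_per_core = 2;	/* e6500 */

		/* allocator lpids 1..3, as kvmppc_alloc_lpid() might return them */
		for (int alloc = 1; alloc <= 3; alloc++) {
			int base = alloc << 1;	/* what init_vm stores in kvm->arch.lpid */

			for (int thread = 0; thread < threads_per_core; thread++)
				printf("vm%d thread%d -> lpid %d\n",
				       alloc, thread, base | thread);
		}
		return 0;
	}

This prints lpids 2 and 3 for vm1, 4 and 5 for vm2, and 6 and 7 for vm3,
matching the comment in the hunk above.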
@@ -388,7 +402,13 @@ static int __init kvmppc_e500mc_init(void)
 	if (r)
 		goto err_out;
 
-	kvmppc_init_lpid(64);
+	/*
+	 * Use two lpids per VM on dual threaded processors like e6500
+	 * to work around the lack of tlb write conditional instruction.
+	 * Expose half the number of available hardware lpids to the lpid
+	 * allocator.
+	 */
+	kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
 	kvmppc_claim_lpid(0); /* host */
 
 	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);