author    Mihai Caraman <mihai.caraman@freescale.com>  2014-09-01 05:01:58 -0400
committer Alexander Graf <agraf@suse.de>               2014-09-22 04:11:35 -0400
commit    188e267ce249b491dfbb77d881996dcb5610dc90 (patch)
tree      c7d5c386562371f0e4dad2c3d885d9b2d58a3e6c /arch
parent    9333e6c4c15a4084dd5f4336cd4379afbf99e458 (diff)
KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core
ePAPR represents hardware threads as cpu node properties in the device tree,
so with existing QEMU, hardware threads are simply exposed as vcpus with one
hardware thread each.

The e6500 core shares TLBs between hardware threads. Without a tlb write
conditional instruction, the Linux kernel uses per-core mechanisms to protect
against duplicate TLB entries. A guest is unable to detect its real sibling
threads, so it cannot use that TLB protection mechanism. An alternative
solution is to have the hypervisor allocate different lpids to guest vcpus
that run simultaneously on real sibling threads.

On systems with two threads per core, this patch halves the size of the lpid
pool that the allocator sees and uses two lpids per VM. Even numbers speed up
vcpu lpid computation with consecutive lpids per VM: vm1 will use lpids 2 and
3, vm2 lpids 4 and 5, and so on.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
[agraf: fix spelling]
Signed-off-by: Alexander Graf <agraf@suse.de>
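To make the scheme concrete, here is a minimal standalone C sketch of the
lpid arithmetic described above (illustrative only, not part of the patch;
vm_base_lpid and vcpu_lpid are hypothetical names):

/*
 * Illustration of the even-lpid pairing scheme: each VM gets an even
 * base lpid, and each hardware thread ORs in its thread bit.
 * Build with: gcc -Wall -o lpid_demo lpid_demo.c
 */
#include <stdio.h>

/* Mirrors the shift in kvmppc_core_init_vm_e500mc(): the id handed out
 * by the allocator is scaled to an even VM base lpid. */
static int vm_base_lpid(int allocated_id)
{
        return allocated_id << 1;       /* vm1 -> 2, vm2 -> 4, ... */
}

/* Mirrors get_thread_specific_lpid(): fold the hardware thread number
 * (low bit of the cpu id) into the VM's even base lpid. */
static int vcpu_lpid(int vm_lpid, int cpu_id)
{
        return vm_lpid | (cpu_id & 1);
}

int main(void)
{
        for (int vm = 1; vm <= 3; vm++) {
                int base = vm_base_lpid(vm);
                printf("vm%d: thread0 -> lpid %d, thread1 -> lpid %d\n",
                       vm, vcpu_lpid(base, 0), vcpu_lpid(base, 1));
        }
        return 0;       /* vm1 -> 2,3; vm2 -> 4,5; vm3 -> 6,7 */
}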
Diffstat (limited to 'arch'):
 arch/powerpc/include/asm/kvm_booke.h |  5
 arch/powerpc/kvm/e500.h              | 20
 arch/powerpc/kvm/e500_mmu_host.c     | 18
 arch/powerpc/kvm/e500mc.c            | 46
 4 files changed, 65 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index f7aa5cc395c4..630134d17f5d 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,7 +23,10 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 
-/* LPIDs we support with this build -- runtime limit may be lower */
+/*
+ * Number of available lpids. Only the low-order 6 bits of the LPID
+ * register are implemented on e500mc+ cores.
+ */
 #define KVMPPC_NR_LPIDS 64
 
 #define KVMPPC_INST_EHPRIV 0x7c00021c
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a326178bdea5..72920bed3ac6 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <asm/mmu-book3e.h>
 #include <asm/tlb.h>
+#include <asm/cputhreads.h>
 
 enum vcpu_ftr {
         VCPU_FTR_MMU_V2
@@ -289,6 +290,25 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
 #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)  get_tlb_tid(gtlbe)
 #define get_tlbmiss_tid(vcpu)                  get_cur_pid(vcpu)
 #define get_tlb_sts(gtlbe)                     (gtlbe->mas1 & MAS1_TS)
+
+/*
+ * These functions should be called with preemption disabled
+ * and the returned value is valid only in that context.
+ */
+static inline int get_thread_specific_lpid(int vm_lpid)
+{
+        int vcpu_lpid = vm_lpid;
+
+        if (threads_per_core == 2)
+                vcpu_lpid |= smp_processor_id() & 1;
+
+        return vcpu_lpid;
+}
+
+static inline int get_lpid(struct kvm_vcpu *vcpu)
+{
+        return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
+}
 #else
 unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
                                       struct kvm_book3e_206_tlb_entry *gtlbe);
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 08f14bb57897..c8795a64e935 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
  * writing shadow tlb entry to host TLB
  */
 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
-                                     uint32_t mas0)
+                                     uint32_t mas0,
+                                     uint32_t lpid)
 {
         unsigned long flags;
 
@@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
         mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
         mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
 #ifdef CONFIG_KVM_BOOKE_HV
-        mtspr(SPRN_MAS8, stlbe->mas8);
+        mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
 #endif
         asm volatile("isync; tlbwe" : : : "memory");
 
@@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 
         if (tlbsel == 0) {
                 mas0 = get_host_mas0(stlbe->mas2);
-                __write_host_tlbe(stlbe, mas0);
+                __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
         } else {
                 __write_host_tlbe(stlbe,
                                   MAS0_TLBSEL(1) |
-                                  MAS0_ESEL(to_htlb1_esel(sesel)));
+                                  MAS0_ESEL(to_htlb1_esel(sesel)),
+                                  vcpu_e500->vcpu.kvm->arch.lpid);
         }
 }
 
@@ -176,7 +178,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
                      MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
         magic.mas8 = 0;
 
-        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
         preempt_enable();
 }
 #endif
@@ -317,10 +319,6 @@ static void kvmppc_e500_setup_stlbe(
         stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
         stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                         e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
-
-#ifdef CONFIG_KVM_BOOKE_HV
-        stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
-#endif
 }
 
 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -633,7 +631,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
 
         local_irq_save(flags);
         mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
-        mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid);
+        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
         asm volatile("tlbsx 0, %[geaddr]\n" : :
                      [geaddr] "r" (geaddr));
         mtspr(SPRN_MAS5, 0);
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 454934990672..bf8f99f6676a 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -48,10 +48,11 @@ void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
                 return;
         }
 
-
-        tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
+        preempt_disable();
+        tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
         mb();
         ppc_msgsnd(dbell_type, 0, tag);
+        preempt_enable();
 }
 
 /* gtlbe must not be mapped by more than one host tlb entry */
@@ -60,12 +61,11 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
 {
         unsigned int tid, ts;
         gva_t eaddr;
-        u32 val, lpid;
+        u32 val;
         unsigned long flags;
 
         ts = get_tlb_ts(gtlbe);
         tid = get_tlb_tid(gtlbe);
-        lpid = vcpu_e500->vcpu.kvm->arch.lpid;
 
         /* We search the host TLB to invalidate its shadow TLB entry */
         val = (tid << 16) | ts;
@@ -74,7 +74,7 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
         local_irq_save(flags);
 
         mtspr(SPRN_MAS6, val);
-        mtspr(SPRN_MAS5, MAS5_SGS | lpid);
+        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
 
         asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
         val = mfspr(SPRN_MAS1);
@@ -95,7 +95,7 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
         unsigned long flags;
 
         local_irq_save(flags);
-        mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
+        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
         asm volatile("tlbilxlpid");
         mtspr(SPRN_MAS5, 0);
         local_irq_restore(flags);
@@ -110,6 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
 }
 
+/* We use two lpids per VM */
 static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
 
 static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
@@ -118,10 +119,12 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
 
         kvmppc_booke_vcpu_load(vcpu, cpu);
 
-        mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
+        mtspr(SPRN_LPID, get_lpid(vcpu));
         mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
         mtspr(SPRN_GPIR, vcpu->vcpu_id);
         mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
+        vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
+        vcpu->arch.epsc = vcpu->arch.eplc;
         mtspr(SPRN_EPLC, vcpu->arch.eplc);
         mtspr(SPRN_EPSC, vcpu->arch.epsc);
 
@@ -141,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
         mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
         if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
-            __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
+            __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
                 kvmppc_e500_tlbil_all(vcpu_e500);
-                __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
+                __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
         }
 }
 
149 152
@@ -193,8 +196,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
         vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
 #endif
         vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;
-        vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
-        vcpu->arch.epsc = vcpu->arch.eplc;
 
         vcpu->arch.pvr = mfspr(SPRN_PVR);
         vcpu_e500->svr = mfspr(SPRN_SVR);
@@ -354,13 +355,26 @@ static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
         if (lpid < 0)
                 return lpid;
 
+        /*
+         * Use two lpids per VM on cores with two threads like e6500. Use
+         * even numbers to speed up vcpu lpid computation with consecutive
+         * lpids per VM: vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
+         */
+        if (threads_per_core == 2)
+                lpid <<= 1;
+
         kvm->arch.lpid = lpid;
         return 0;
 }
 
 static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
 {
-        kvmppc_free_lpid(kvm->arch.lpid);
+        int lpid = kvm->arch.lpid;
+
+        if (threads_per_core == 2)
+                lpid >>= 1;
+
+        kvmppc_free_lpid(lpid);
 }
 
 static struct kvmppc_ops kvm_ops_e500mc = {
@@ -388,7 +402,13 @@ static int __init kvmppc_e500mc_init(void)
         if (r)
                 goto err_out;
 
-        kvmppc_init_lpid(64);
+        /*
+         * Use two lpids per VM on dual-threaded processors like e6500
+         * to work around the lack of a tlb write conditional instruction.
+         * Expose half the number of available hardware lpids to the lpid
+         * allocator.
+         */
+        kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
         kvmppc_claim_lpid(0); /* host */
 
         r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
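
As a sanity check on the sizing in the final hunk, a small standalone sketch
(illustrative only, not part of the patch) of the pool arithmetic:

/* With 64 hardware lpids and two threads per core, the allocator sees 32
 * ids; id 0 is claimed for the host, so VMs get ids 1..31, which after the
 * <<1 scaling own the even/odd pairs {2,3} .. {62,63} -- all within the 6
 * implemented LPID bits. */
#include <assert.h>

#define KVMPPC_NR_LPIDS 64

int main(void)
{
        int threads_per_core = 2;                       /* e6500 */
        int pool = KVMPPC_NR_LPIDS / threads_per_core;  /* 32 allocator ids */
        int last_vm_id = pool - 1;                      /* 31 */

        assert(((last_vm_id << 1) | 1) < KVMPPC_NR_LPIDS); /* 63 < 64 */
        return 0;
}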