path: root/arch/powerpc/kvm/e500_mmu_host.c
author    Mihai Caraman <mihai.caraman@freescale.com>    2014-09-01 05:01:58 -0400
committer Alexander Graf <agraf@suse.de>                 2014-09-22 04:11:35 -0400
commit    188e267ce249b491dfbb77d881996dcb5610dc90
tree      c7d5c386562371f0e4dad2c3d885d9b2d58a3e6c    /arch/powerpc/kvm/e500_mmu_host.c
parent    9333e6c4c15a4084dd5f4336cd4379afbf99e458
KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core
ePAPR represents hardware threads as cpu node properties in the device tree, so with existing QEMU, hardware threads are simply exposed as vcpus with one hardware thread each.

The e6500 core shares TLBs between hardware threads. Without a TLB write-conditional instruction, the Linux kernel uses per-core mechanisms to protect against duplicate TLB entries.

The guest is unable to detect real sibling threads, so it can't use the TLB protection mechanism. An alternative solution is for the hypervisor to allocate different lpids to guest vcpus that run simultaneously on real sibling threads. On systems with two threads per core, this patch halves the size of the lpid pool that the allocator sees and uses two lpids per VM. Even numbers are used to speed up vcpu lpid computation with consecutive lpids per VM: vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
[agraf: fix spelling]
Signed-off-by: Alexander Graf <agraf@suse.de>
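The allocation scheme described above is not part of this file's diff; it lives in the e500mc backend elsewhere in the patch. A minimal sketch of what it could look like, assuming the existing kvmppc_alloc_lpid() allocator is simply told about a pool of half the size, is:

    /* Sketch only: the e500mc side of the even/odd lpid pairing. */
    static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
    {
            int lpid;

            lpid = kvmppc_alloc_lpid();
            if (lpid < 0)
                    return lpid;

            /*
             * On cores with two hardware threads (e6500), give each VM a
             * consecutive even/odd lpid pair by shifting the allocated id:
             * vm1 -> lpids 2 and 3, vm2 -> lpids 4 and 5, and so on.
             */
            if (threads_per_core == 2)
                    lpid <<= 1;

            kvm->arch.lpid = lpid;
            return 0;
    }

Shifting rather than allocating two ids keeps the per-thread lpid computable with a single OR of the thread id, at the cost of halving the usable pool.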
Diffstat (limited to 'arch/powerpc/kvm/e500_mmu_host.c')
-rw-r--r--    arch/powerpc/kvm/e500_mmu_host.c    18
1 file changed, 8 insertions, 10 deletions
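The hunks below call two helpers, get_thread_specific_lpid() and get_lpid(), which are introduced elsewhere in this series rather than in this file. A sketch consistent with the even/odd pairing above, where the low bit of the running thread's id (from SPRN_PIR) selects within the VM's lpid pair, might be:

    /* Sketch only: map a VM lpid to the lpid for the current thread. */
    static inline u32 get_thread_specific_lpid(u32 vm_lpid)
    {
            u32 vcpu_lpid = vm_lpid;

            /* On two-thread cores, PIR's low bit identifies the sibling. */
            if (threads_per_core == 2)
                    vcpu_lpid |= mfspr(SPRN_PIR) & 1;

            return vcpu_lpid;
    }

    #define get_lpid(vcpu)  get_thread_specific_lpid((vcpu)->kvm->arch.lpid)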
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 08f14bb57897..c8795a64e935 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
  * writing shadow tlb entry to host TLB
  */
 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
-				     uint32_t mas0)
+				     uint32_t mas0,
+				     uint32_t lpid)
 {
 	unsigned long flags;
 
@@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
 	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
 #ifdef CONFIG_KVM_BOOKE_HV
-	mtspr(SPRN_MAS8, stlbe->mas8);
+	mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
 #endif
 	asm volatile("isync; tlbwe" : : : "memory");
 
@@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 	if (tlbsel == 0) {
 		mas0 = get_host_mas0(stlbe->mas2);
-		__write_host_tlbe(stlbe, mas0);
+		__write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
 	} else {
 		__write_host_tlbe(stlbe,
 				  MAS0_TLBSEL(1) |
-				  MAS0_ESEL(to_htlb1_esel(sesel)));
+				  MAS0_ESEL(to_htlb1_esel(sesel)),
+				  vcpu_e500->vcpu.kvm->arch.lpid);
 	}
 }
 
@@ -176,7 +178,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
 	magic.mas8 = 0;
 
-	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
 	preempt_enable();
 }
 #endif
@@ -317,10 +319,6 @@ static void kvmppc_e500_setup_stlbe(
 	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
 	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
 			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
-
-#ifdef CONFIG_KVM_BOOKE_HV
-	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
-#endif
 }
 
 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -633,7 +631,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
 
 	local_irq_save(flags);
 	mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
-	mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid);
+	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
 	asm volatile("tlbsx 0, %[geaddr]\n" : :
 		     [geaddr] "r" (geaddr));
 	mtspr(SPRN_MAS5, 0);