author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-03-22 20:21:14 -0400
committer	Alexander Graf <agraf@suse.de>	2012-05-16 09:02:11 -0400
commit	ffe3649282946547f1b938e02c0228aead407a18 (patch)
tree	bc95604d39cdefb135adc60c03fabffda4557604 /arch
parent	32c7dbfd479e73684b0d23fcb0a5cb04f19d86f4 (diff)
powerpc/kvm: Fix VSID usage in 64-bit "PR" KVM
The code forgot to scramble the VSIDs the way we normally do and was
basically using the "proto VSID" directly with the MMU.

This means that in practice, KVM used random VSIDs that could collide
with segments used by other user space programs.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[agraf: simplify ppc32 case]
Signed-off-by: Alexander Graf <agraf@suse.de>
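For context, a minimal sketch of the transformation the fix restores, assuming the multiplicative-hash form that vsid_scramble() took for 256M segments in kernels of that era; the multiplier and bit-width constants below are illustrative stand-ins for the kernel's VSID_MULTIPLIER_256M / VSID_MODULUS_256M, not authoritative values:

#include <stdint.h>

/* Illustrative stand-ins; the real constants live in asm/mmu-hash64.h. */
#define VSID_MULTIPLIER_256M	200730139UL
#define VSID_BITS_256M		36
#define VSID_MODULUS_256M	((1UL << VSID_BITS_256M) - 1)

/*
 * Multiplicative hash: spreads consecutive proto-VSIDs across the full
 * VSID space instead of handing the raw proto-VSID to the MMU.
 */
static inline uint64_t vsid_scramble_256m(uint64_t proto_vsid)
{
	return (proto_vsid * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M;
}

Without this step, map->host_vsid was the unscrambled proto-VSID, which is the same space the host derives its own user-space VSIDs from, hence the collisions the commit message describes.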
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	7
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_host.c	13
2 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index aa795ccef294..fd07f43d6622 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s {
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
-	u64 vsid_next;
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 vsid_pool[VSID_POOL_SIZE];
+	u32 vsid_next;
 #else
-	u64 vsid_first;
-	u64 vsid_max;
+	u64 proto_vsid_first;
+	u64 proto_vsid_max;
+	u64 proto_vsid_next;
 #endif
 	int context_id[SID_CONTEXTS];
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 6f87f39a1ac2..10fc8ec9d2a8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	backwards_map = !backwards_map;
 
 	/* Uh-oh ... out of mappings. Let's flush! */
-	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
-		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
+		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
 		memset(vcpu_book3s->sid_map, 0,
 		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 		kvmppc_mmu_flush_segments(vcpu);
 	}
-	map->host_vsid = vcpu_book3s->vsid_next++;
+	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);
 
 	map->guest_vsid = gvsid;
 	map->valid = true;
@@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 		return -1;
 	vcpu3s->context_id[0] = err;
 
-	vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
-	vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
-	vcpu3s->vsid_next = vcpu3s->vsid_first;
+	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+				  << USER_ESID_BITS) - 1;
+	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
 