author    Alexander Graf <agraf@suse.de>    2010-08-15 02:04:24 -0400
committer Avi Kivity <avi@redhat.com>       2010-10-24 04:52:15 -0400
commit    8b6db3bc965c204db6868d4005808b4fdc9c46d7 (patch)
tree      973d040d221d5fd3448c97b1c102d014fe6e6687 /arch
parent    ad0873763a83e7b31ba87a85ec2027dd6a9d7b55 (diff)
KVM: PPC: Implement correct SID mapping on Book3s_32
Up until now we were doing segment mappings wrong on Book3s_32. For Book3s_64 we were using a trick where we know that a single mmu_context gives us 16 bits of context ids.

The mm system on Book3s_32 instead uses a clever algorithm to distribute VSIDs across the available range, so a context id really only gives us 16 available VSIDs.

To keep at least a few guest processes in the SID shadow, let's map a number of contexts that we can use as a VSID pool. This makes the code actually correct and shouldn't hurt performance too much.

Signed-off-by: Alexander Graf <agraf@suse.de>
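To make the scheme concrete, here is a minimal standalone C sketch of the pool construction this patch performs (not the kernel code itself; fill_vsid_pool and ctx_ids are illustrative names, and ctx_ids[] stands in for the values __init_new_context() returns). Each of the SID_CONTEXTS host contexts is expanded into 16 VSIDs using the same distribution as mm/mmu_context_hash32.c, and the results are collected into one flat pool:

#include <stdint.h>

#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)

/* Same distribution as mm/mmu_context_hash32.c: each context id
 * yields 16 usable VSIDs spread across the 24-bit VSID space. */
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

/* Illustrative helper: expand SID_CONTEXTS context ids into a flat
 * pool of VSID_POOL_SIZE host VSIDs, as kvmppc_mmu_init() now does
 * on Book3s_32. */
static void fill_vsid_pool(uint32_t pool[VSID_POOL_SIZE],
			   const int ctx_ids[SID_CONTEXTS])
{
	int i, j;

	for (i = 0; i < SID_CONTEXTS; i++)
		for (j = 0; j < 16; j++)
			pool[(i * 16) + j] = CTX_TO_VSID(ctx_ids[i], j);
}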
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h  | 15
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c  | 57
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c  |  8
3 files changed, 48 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index be8aac24ba83..d62e703f1214 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -60,6 +60,13 @@ struct kvmppc_sid_map {
 #define SID_MAP_NUM (1 << SID_MAP_BITS)
 #define SID_MAP_MASK (SID_MAP_NUM - 1)
 
+#ifdef CONFIG_PPC_BOOK3S_64
+#define SID_CONTEXTS	1
+#else
+#define SID_CONTEXTS	128
+#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
+#endif
+
 struct kvmppc_vcpu_book3s {
 	struct kvm_vcpu vcpu;
 	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
@@ -78,10 +85,14 @@ struct kvmppc_vcpu_book3s {
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
-	u64 vsid_first;
 	u64 vsid_next;
+#ifdef CONFIG_PPC_BOOK3S_32
+	u32 vsid_pool[VSID_POOL_SIZE];
+#else
+	u64 vsid_first;
 	u64 vsid_max;
-	int context_id;
+#endif
+	int context_id[SID_CONTEXTS];
 	ulong prog_flags; /* flags to inject when giving a 700 trap */
 };
 
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 57dddeb23b9b..9fecbfbce773 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -275,18 +275,15 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 			backwards_map = !backwards_map;
 
 	/* Uh-oh ... out of mappings. Let's flush! */
-	if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
-		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
+		vcpu_book3s->vsid_next = 0;
 		memset(vcpu_book3s->sid_map, 0,
 		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 		kvmppc_mmu_flush_segments(vcpu);
 	}
-	map->host_vsid = vcpu_book3s->vsid_next;
-
-	/* Would have to be 111 to be completely aligned with the rest of
-	   Linux, but that is just way too little space! */
-	vcpu_book3s->vsid_next+=1;
+	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
+	vcpu_book3s->vsid_next++;
 
 	map->guest_vsid = gvsid;
 	map->valid = true;
@@ -333,40 +330,38 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
+	int i;
+
 	kvmppc_mmu_hpte_destroy(vcpu);
 	preempt_disable();
-	__destroy_context(to_book3s(vcpu)->context_id);
+	for (i = 0; i < SID_CONTEXTS; i++)
+		__destroy_context(to_book3s(vcpu)->context_id[i]);
 	preempt_enable();
 }
 
 /* From mm/mmu_context_hash32.c */
-#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)
+#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
 
 int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int err;
 	ulong sdr1;
+	int i;
+	int j;
 
-	err = __init_new_context();
-	if (err < 0)
-		return -1;
-	vcpu3s->context_id = err;
-
-	vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
-	vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);
-
-#if 0 /* XXX still doesn't guarantee uniqueness */
-	/* We could collide with the Linux vsid space because the vsid
-	 * wraps around at 24 bits. We're safe if we do our own space
-	 * though, so let's always set the highest bit. */
+	for (i = 0; i < SID_CONTEXTS; i++) {
+		err = __init_new_context();
+		if (err < 0)
+			goto init_fail;
+		vcpu3s->context_id[i] = err;
 
-	vcpu3s->vsid_max |= 0x00800000;
-	vcpu3s->vsid_first |= 0x00800000;
-#endif
-	BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);
+		/* Remember context id for this combination */
+		for (j = 0; j < 16; j++)
+			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
+	}
 
-	vcpu3s->vsid_next = vcpu3s->vsid_first;
+	vcpu3s->vsid_next = 0;
 
 	/* Remember where the HTAB is */
 	asm ( "mfsdr1 %0" : "=r"(sdr1) );
@@ -376,4 +371,14 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	kvmppc_mmu_hpte_init(vcpu);
 
 	return 0;
+
+init_fail:
+	for (j = 0; j < i; j++) {
+		if (!vcpu3s->context_id[j])
+			continue;
+
+		__destroy_context(to_book3s(vcpu)->context_id[j]);
+	}
+
+	return -1;
 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 4040c8d16ad5..fa2f08434ba5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -286,7 +286,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
 	kvmppc_mmu_hpte_destroy(vcpu);
-	__destroy_context(to_book3s(vcpu)->context_id);
+	__destroy_context(to_book3s(vcpu)->context_id[0]);
 }
 
 int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
@@ -297,10 +297,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	err = __init_new_context();
 	if (err < 0)
 		return -1;
-	vcpu3s->context_id = err;
+	vcpu3s->context_id[0] = err;
 
-	vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
-	vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
+	vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
+	vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
 	vcpu3s->vsid_next = vcpu3s->vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
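
For completeness, a similarly simplified sketch of the Book3s_32 allocation path after this patch, reusing the definitions from the sketch above: host VSIDs are handed out round-robin from the pool, and once the pool is exhausted all shadow mappings are flushed and allocation restarts from the beginning (alloc_host_vsid and flush_all_shadow_state are illustrative names; the latter stands in for the sid_map reset plus the kvmppc_mmu_pte_flush/kvmppc_mmu_flush_segments calls in create_sid_map()):

/* Illustrative stand-in for resetting the sid_map and flushing
 * shadow PTEs and segments. */
static void flush_all_shadow_state(void)
{
}

/* Hand out the next host VSID from the pool; wrap and flush when
 * the pool runs dry, as create_sid_map() now does. */
static uint32_t alloc_host_vsid(uint32_t pool[VSID_POOL_SIZE],
				unsigned int *vsid_next)
{
	if (*vsid_next >= VSID_POOL_SIZE) {
		*vsid_next = 0;
		flush_all_shadow_state();
	}
	return pool[(*vsid_next)++];
}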