author    Paul Mackerras <paulus@samba.org>    2011-06-28 20:17:33 -0400
committer Avi Kivity <avi@redhat.com>          2011-07-12 06:16:46 -0400
commit    c4befc58a0cc5a8cc5b4a7234d67b6b16dec4e70
tree      3f2fbc510d7bb613fd5362acd8f0c16809f7a8af /arch/powerpc/kvm/book3s_64_mmu.c
parent    149dbdb1859be46a063a5b1b0aa99a5f999b7632
KVM: PPC: Move fields between struct kvm_vcpu_arch and kvmppc_vcpu_book3s
This moves the slb field, which represents the state of the emulated SLB, from the kvmppc_vcpu_book3s struct to the kvm_vcpu_arch, and the hpte_hash_[v]pte[_long] fields from kvm_vcpu_arch to kvmppc_vcpu_book3s. This is in accord with the principle that the kvm_vcpu_arch struct represents the state of the emulated CPU, and the kvmppc_vcpu_book3s struct holds the auxiliary data structures used in the emulation.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
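The diff below only covers the SLB side of the move. As a reading aid, here is a minimal sketch of where the fields end up after this change; only the field names (slb, slb_nr, hpte_hash_[v]pte[_long]) come from this commit, while the array sizes and hlist_head types are assumptions for illustration, not copied from the patched headers.

/* Sketch only: layout after the move, types and sizes assumed. */
struct kvm_vcpu_arch {
        /* ... other emulated-CPU state ... */
        struct kvmppc_slb slb[64];      /* emulated SLB, now part of the CPU state */
        int slb_nr;                     /* number of entries in the emulated SLB */
        /* ... */
};

struct kvmppc_vcpu_book3s {
        /* ... other auxiliary emulation data ... */
        struct hlist_head hpte_hash_pte[/* HPTEG_HASH_NUM_PTE */ 1];            /* moved here */
        struct hlist_head hpte_hash_vpte[/* HPTEG_HASH_NUM_VPTE */ 1];          /* moved here */
        struct hlist_head hpte_hash_vpte_long[/* HPTEG_HASH_NUM_VPTE_LONG */ 1];/* moved here */
        /* ... */
};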
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu.c')
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu.c   54
1 file changed, 25 insertions, 29 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index d7889ef3211e..c6d3e194b6b4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -41,36 +41,36 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
 }
 
 static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
-                               struct kvmppc_vcpu_book3s *vcpu_book3s,
+                               struct kvm_vcpu *vcpu,
                                gva_t eaddr)
 {
        int i;
        u64 esid = GET_ESID(eaddr);
        u64 esid_1t = GET_ESID_1T(eaddr);
 
-       for (i = 0; i < vcpu_book3s->slb_nr; i++) {
+       for (i = 0; i < vcpu->arch.slb_nr; i++) {
                u64 cmp_esid = esid;
 
-               if (!vcpu_book3s->slb[i].valid)
+               if (!vcpu->arch.slb[i].valid)
                        continue;
 
-               if (vcpu_book3s->slb[i].tb)
+               if (vcpu->arch.slb[i].tb)
                        cmp_esid = esid_1t;
 
-               if (vcpu_book3s->slb[i].esid == cmp_esid)
-                       return &vcpu_book3s->slb[i];
+               if (vcpu->arch.slb[i].esid == cmp_esid)
+                       return &vcpu->arch.slb[i];
        }
 
        dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
                eaddr, esid, esid_1t);
-       for (i = 0; i < vcpu_book3s->slb_nr; i++) {
-               if (vcpu_book3s->slb[i].vsid)
+       for (i = 0; i < vcpu->arch.slb_nr; i++) {
+               if (vcpu->arch.slb[i].vsid)
                        dprintk(" %d: %c%c%c %llx %llx\n", i,
-                               vcpu_book3s->slb[i].valid ? 'v' : ' ',
-                               vcpu_book3s->slb[i].large ? 'l' : ' ',
-                               vcpu_book3s->slb[i].tb ? 't' : ' ',
-                               vcpu_book3s->slb[i].esid,
-                               vcpu_book3s->slb[i].vsid);
+                               vcpu->arch.slb[i].valid ? 'v' : ' ',
+                               vcpu->arch.slb[i].large ? 'l' : ' ',
+                               vcpu->arch.slb[i].tb ? 't' : ' ',
+                               vcpu->arch.slb[i].esid,
+                               vcpu->arch.slb[i].vsid);
        }
 
        return NULL;
@@ -81,7 +81,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 {
        struct kvmppc_slb *slb;
 
-       slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
+       slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slb)
                return 0;
 
@@ -180,7 +180,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                return 0;
        }
 
-       slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
+       slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slbe)
                goto no_seg_found;
 
@@ -320,10 +320,10 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;
 
-       if (slb_nr > vcpu_book3s->slb_nr)
+       if (slb_nr > vcpu->arch.slb_nr)
                return;
 
-       slbe = &vcpu_book3s->slb[slb_nr];
+       slbe = &vcpu->arch.slb[slb_nr];
 
        slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
        slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
@@ -344,38 +344,35 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 
 static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
 {
-       struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;
 
-       if (slb_nr > vcpu_book3s->slb_nr)
+       if (slb_nr > vcpu->arch.slb_nr)
                return 0;
 
-       slbe = &vcpu_book3s->slb[slb_nr];
+       slbe = &vcpu->arch.slb[slb_nr];
 
        return slbe->orige;
 }
 
 static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
 {
-       struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;
 
-       if (slb_nr > vcpu_book3s->slb_nr)
+       if (slb_nr > vcpu->arch.slb_nr)
                return 0;
 
-       slbe = &vcpu_book3s->slb[slb_nr];
+       slbe = &vcpu->arch.slb[slb_nr];
 
        return slbe->origv;
 }
 
 static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
 {
-       struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;
 
        dprintk("KVM MMU: slbie(0x%llx)\n", ea);
 
-       slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);
+       slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 
        if (!slbe)
                return;
@@ -389,13 +386,12 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
 
 static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
 {
-       struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        int i;
 
        dprintk("KVM MMU: slbia()\n");
 
-       for (i = 1; i < vcpu_book3s->slb_nr; i++)
-               vcpu_book3s->slb[i].valid = false;
+       for (i = 1; i < vcpu->arch.slb_nr; i++)
+               vcpu->arch.slb[i].valid = false;
 
        if (vcpu->arch.shared->msr & MSR_IR) {
                kvmppc_mmu_flush_segments(vcpu);
@@ -464,7 +460,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
        ulong mp_ea = vcpu->arch.magic_page_ea;
 
        if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
-               slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+               slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
                if (slb)
                        gvsid = slb->vsid;
        }