author     Paul Mackerras <paulus@samba.org>  2011-06-28 20:17:33 -0400
committer  Avi Kivity <avi@redhat.com>        2011-07-12 06:16:46 -0400
commit     c4befc58a0cc5a8cc5b4a7234d67b6b16dec4e70 (patch)
tree       3f2fbc510d7bb613fd5362acd8f0c16809f7a8af /arch/powerpc/kvm
parent     149dbdb1859be46a063a5b1b0aa99a5f999b7632 (diff)
KVM: PPC: Move fields between struct kvm_vcpu_arch and kvmppc_vcpu_book3s
This moves the slb field, which represents the state of the emulated
SLB, from the kvmppc_vcpu_book3s struct to the kvm_vcpu_arch, and the
hpte_hash_[v]pte[_long] fields from kvm_vcpu_arch to kvmppc_vcpu_book3s.
This is in accord with the principle that the kvm_vcpu_arch struct
represents the state of the emulated CPU, and the kvmppc_vcpu_book3s
struct holds the auxiliary data structures used in the emulation.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
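For orientation, a minimal sketch of where the moved fields land. This is illustrative only, not the full definitions: the real structs in arch/powerpc/include/asm/kvm_host.h and asm/kvm_book3s.h carry many more fields, and of the HPTEG_HASH_NUM_* array sizes only HPTEG_HASH_NUM_VPTE_LONG is visible in the hunks below; the other three names are assumed by analogy.

    /* Architected per-vcpu state: the guest's SLB image lives here now.
     * The 64-entry size matches the slb_nr = 64 set in
     * kvmppc_core_vcpu_create() below. */
    struct kvm_vcpu_arch {
            /* ... registers, MSR, PVR, ... */
            struct kvmppc_slb slb[64];  /* moved in from kvmppc_vcpu_book3s */
            int slb_nr;                 /* moved in from kvmppc_vcpu_book3s */
    };

    /* Auxiliary emulation data: the shadow-HPTE lookup hashes, plus the
     * lock and cache counter that guard them, move the other way. */
    struct kvmppc_vcpu_book3s {
            /* ... sdr1 and other shadow-MMU bookkeeping ... */
            struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
            struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
            struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
            struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
            int hpte_cache_count;       /* moved in from kvm_vcpu_arch */
            spinlock_t mmu_lock;        /* moved in from kvm_vcpu_arch */
    };

Callers that need the auxiliary structures now obtain them via to_book3s(vcpu), as book3s_mmu_hpte.c does below, while SLB state is read directly from vcpu->arch.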
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c           |  9
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c    | 54
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c  | 71
-rw-r--r--  arch/powerpc/kvm/trace.h            |  2
4 files changed, 73 insertions(+), 63 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 83500fb62c83..5d0babefe913 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -17,7 +17,6 @@
 #include <linux/kvm_host.h>
 #include <linux/err.h>
 #include <linux/slab.h>
-#include "trace.h"
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -34,6 +33,8 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 
+#include "trace.h"
+
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 /* #define EXIT_DEBUG */
@@ -1191,8 +1192,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
 		for (i = 0; i < 64; i++) {
-			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
-			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
+			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
+			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
 		}
 	} else {
 		for (i = 0; i < 16; i++)
@@ -1340,7 +1341,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.pvr = 0x84202;
 #endif
 	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
-	vcpu_book3s->slb_nr = 64;
+	vcpu->arch.slb_nr = 64;
 
 	/* remember where some real-mode handlers are */
 	vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index d7889ef3211e..c6d3e194b6b4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -41,36 +41,36 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
 }
 
 static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
-				struct kvmppc_vcpu_book3s *vcpu_book3s,
+				struct kvm_vcpu *vcpu,
 				gva_t eaddr)
 {
 	int i;
 	u64 esid = GET_ESID(eaddr);
 	u64 esid_1t = GET_ESID_1T(eaddr);
 
-	for (i = 0; i < vcpu_book3s->slb_nr; i++) {
+	for (i = 0; i < vcpu->arch.slb_nr; i++) {
 		u64 cmp_esid = esid;
 
-		if (!vcpu_book3s->slb[i].valid)
+		if (!vcpu->arch.slb[i].valid)
 			continue;
 
-		if (vcpu_book3s->slb[i].tb)
+		if (vcpu->arch.slb[i].tb)
 			cmp_esid = esid_1t;
 
-		if (vcpu_book3s->slb[i].esid == cmp_esid)
-			return &vcpu_book3s->slb[i];
+		if (vcpu->arch.slb[i].esid == cmp_esid)
+			return &vcpu->arch.slb[i];
 	}
 
 	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
 		eaddr, esid, esid_1t);
-	for (i = 0; i < vcpu_book3s->slb_nr; i++) {
-		if (vcpu_book3s->slb[i].vsid)
+	for (i = 0; i < vcpu->arch.slb_nr; i++) {
+		if (vcpu->arch.slb[i].vsid)
 			dprintk("  %d: %c%c%c %llx %llx\n", i,
-				vcpu_book3s->slb[i].valid ? 'v' : ' ',
-				vcpu_book3s->slb[i].large ? 'l' : ' ',
-				vcpu_book3s->slb[i].tb ? 't' : ' ',
-				vcpu_book3s->slb[i].esid,
-				vcpu_book3s->slb[i].vsid);
+				vcpu->arch.slb[i].valid ? 'v' : ' ',
+				vcpu->arch.slb[i].large ? 'l' : ' ',
+				vcpu->arch.slb[i].tb ? 't' : ' ',
+				vcpu->arch.slb[i].esid,
+				vcpu->arch.slb[i].vsid);
 	}
 
 	return NULL;
@@ -81,7 +81,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 {
 	struct kvmppc_slb *slb;
 
-	slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
+	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
 	if (!slb)
 		return 0;
 
@@ -180,7 +180,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		return 0;
 	}
 
-	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
+	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
 	if (!slbe)
 		goto no_seg_found;
 
@@ -320,10 +320,10 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 	esid_1t = GET_ESID_1T(rb);
 	slb_nr = rb & 0xfff;
 
-	if (slb_nr > vcpu_book3s->slb_nr)
+	if (slb_nr > vcpu->arch.slb_nr)
 		return;
 
-	slbe = &vcpu_book3s->slb[slb_nr];
+	slbe = &vcpu->arch.slb[slb_nr];
 
 	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
 	slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
@@ -344,38 +344,35 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 
 static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_slb *slbe;
 
-	if (slb_nr > vcpu_book3s->slb_nr)
+	if (slb_nr > vcpu->arch.slb_nr)
 		return 0;
 
-	slbe = &vcpu_book3s->slb[slb_nr];
+	slbe = &vcpu->arch.slb[slb_nr];
 
 	return slbe->orige;
 }
 
 static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_slb *slbe;
 
-	if (slb_nr > vcpu_book3s->slb_nr)
+	if (slb_nr > vcpu->arch.slb_nr)
 		return 0;
 
-	slbe = &vcpu_book3s->slb[slb_nr];
+	slbe = &vcpu->arch.slb[slb_nr];
 
 	return slbe->origv;
 }
 
 static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_slb *slbe;
 
 	dprintk("KVM MMU: slbie(0x%llx)\n", ea);
 
-	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);
+	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 
 	if (!slbe)
 		return;
@@ -389,13 +386,12 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
389 386
390static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) 387static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
391{ 388{
392 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
393 int i; 389 int i;
394 390
395 dprintk("KVM MMU: slbia()\n"); 391 dprintk("KVM MMU: slbia()\n");
396 392
397 for (i = 1; i < vcpu_book3s->slb_nr; i++) 393 for (i = 1; i < vcpu->arch.slb_nr; i++)
398 vcpu_book3s->slb[i].valid = false; 394 vcpu->arch.slb[i].valid = false;
399 395
400 if (vcpu->arch.shared->msr & MSR_IR) { 396 if (vcpu->arch.shared->msr & MSR_IR) {
401 kvmppc_mmu_flush_segments(vcpu); 397 kvmppc_mmu_flush_segments(vcpu);
@@ -464,7 +460,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
-		slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 		if (slb)
 			gvsid = slb->vsid;
 	}
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 79751d8dd131..41cb0017e757 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -21,7 +21,6 @@
 #include <linux/kvm_host.h>
 #include <linux/hash.h>
 #include <linux/slab.h>
-#include "trace.h"
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -29,6 +28,8 @@
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
 
+#include "trace.h"
+
 #define PTE_SIZE 12
 
 static struct kmem_cache *hpte_cache;
@@ -58,30 +59,31 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
 void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	u64 index;
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 
 	trace_kvm_book3s_mmu_map(pte);
 
-	spin_lock(&vcpu->arch.mmu_lock);
+	spin_lock(&vcpu3s->mmu_lock);
 
 	/* Add to ePTE list */
 	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
-	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);
 
 	/* Add to ePTE_long list */
 	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
 	hlist_add_head_rcu(&pte->list_pte_long,
-			   &vcpu->arch.hpte_hash_pte_long[index]);
+			   &vcpu3s->hpte_hash_pte_long[index]);
 
 	/* Add to vPTE list */
 	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
-	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);
 
 	/* Add to vPTE_long list */
 	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
 	hlist_add_head_rcu(&pte->list_vpte_long,
-			   &vcpu->arch.hpte_hash_vpte_long[index]);
+			   &vcpu3s->hpte_hash_vpte_long[index]);
 
-	spin_unlock(&vcpu->arch.mmu_lock);
+	spin_unlock(&vcpu3s->mmu_lock);
 }
 
 static void free_pte_rcu(struct rcu_head *head)
@@ -92,16 +94,18 @@ static void free_pte_rcu(struct rcu_head *head)
 
 static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
 	trace_kvm_book3s_mmu_invalidate(pte);
 
 	/* Different for 32 and 64 bit */
 	kvmppc_mmu_invalidate_pte(vcpu, pte);
 
-	spin_lock(&vcpu->arch.mmu_lock);
+	spin_lock(&vcpu3s->mmu_lock);
 
 	/* pte already invalidated in between? */
 	if (hlist_unhashed(&pte->list_pte)) {
-		spin_unlock(&vcpu->arch.mmu_lock);
+		spin_unlock(&vcpu3s->mmu_lock);
 		return;
 	}
 
@@ -115,14 +119,15 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 	else
 		kvm_release_pfn_clean(pte->pfn);
 
-	spin_unlock(&vcpu->arch.mmu_lock);
+	spin_unlock(&vcpu3s->mmu_lock);
 
-	vcpu->arch.hpte_cache_count--;
+	vcpu3s->hpte_cache_count--;
 	call_rcu(&pte->rcu_head, free_pte_rcu);
 }
 
 static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
 	struct hlist_node *node;
 	int i;
@@ -130,7 +135,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 	rcu_read_lock();
 
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
-		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
 		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
 			invalidate_pte(vcpu, pte);
@@ -141,12 +146,13 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 
 static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
-	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
 
 	rcu_read_lock();
 
@@ -160,12 +166,13 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 
 static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
-	list = &vcpu->arch.hpte_hash_pte_long[
+	list = &vcpu3s->hpte_hash_pte_long[
 			kvmppc_mmu_hash_pte_long(guest_ea)];
 
 	rcu_read_lock();
@@ -203,12 +210,13 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
 /* Flush with mask 0xfffffffff */
 static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xfffffffffULL;
 
-	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
 
 	rcu_read_lock();
 
@@ -223,12 +231,13 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 /* Flush with mask 0xffffff000 */
 static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xffffff000ULL;
 
-	list = &vcpu->arch.hpte_hash_vpte_long[
+	list = &vcpu3s->hpte_hash_vpte_long[
 			kvmppc_mmu_hash_vpte_long(guest_vp)];
 
 	rcu_read_lock();
@@ -261,6 +270,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
 
 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_node *node;
 	struct hpte_cache *pte;
 	int i;
@@ -270,7 +280,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 	rcu_read_lock();
 
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
-		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
 		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
 			if ((pte->pte.raddr >= pa_start) &&
@@ -283,12 +293,13 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 
 struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
 
 	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
-	vcpu->arch.hpte_cache_count++;
+	vcpu3s->hpte_cache_count++;
 
-	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
 		kvmppc_mmu_pte_flush_all(vcpu);
 
 	return pte;
@@ -309,17 +320,19 @@ static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
 
 int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
 	/* init hpte lookup hashes */
-	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
-				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
-	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
-				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
-	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
-				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
-	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
-				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
+	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
+				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
 
-	spin_lock_init(&vcpu->arch.mmu_lock);
+	spin_lock_init(&vcpu3s->mmu_lock);
 
 	return 0;
 }
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 3aca1b042b8c..d62a14b2cd0f 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -252,7 +252,7 @@ TRACE_EVENT(kvm_book3s_mmu_flush,
 	),
 
 	TP_fast_assign(
-		__entry->count		= vcpu->arch.hpte_cache_count;
+		__entry->count		= to_book3s(vcpu)->hpte_cache_count;
 		__entry->p1		= p1;
 		__entry->p2		= p2;
 		__entry->type		= type;