aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm/44x_tlb.c
diff options
context:
space:
mode:
author    Hollis Blanchard <hollisb@us.ibm.com>  2008-11-05 10:36:18 -0500
committer Avi Kivity <avi@redhat.com>  2008-12-31 09:52:22 -0500
commit    db93f5745d836f81cef0b4101a7c2685eeb55efb (patch)
tree      970b0dfc93dbbe25eb988b008bbbeffd866f3f23 /arch/powerpc/kvm/44x_tlb.c
parent    5cbb5106f50b4515815cd32cf944958c0d4da83f (diff)
KVM: ppc: create struct kvm_vcpu_44x and introduce container_of() accessor
This patch doesn't yet move all 44x-specific data into the new structure,
but is the first step down that path. In the future we may also want to
create a struct kvm_vcpu_booke.

Based on patch from Liu Yu <yu.liu@freescale.com>.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/44x_tlb.c')
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index bb6da134cadb..8b65fbd6c57d 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -24,6 +24,7 @@
24#include <linux/highmem.h> 24#include <linux/highmem.h>
25#include <asm/mmu-44x.h> 25#include <asm/mmu-44x.h>
26#include <asm/kvm_ppc.h> 26#include <asm/kvm_ppc.h>
27#include <asm/kvm_44x.h>
27 28
28#include "44x_tlb.h" 29#include "44x_tlb.h"
29 30
@@ -43,7 +44,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
43 "nr", "tid", "word0", "word1", "word2"); 44 "nr", "tid", "word0", "word1", "word2");
44 45
45 for (i = 0; i < PPC44x_TLB_SIZE; i++) { 46 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
46 tlbe = &vcpu->arch.guest_tlb[i]; 47 tlbe = &vcpu_44x->guest_tlb[i];
47 if (tlbe->word0 & PPC44x_TLB_VALID) 48 if (tlbe->word0 & PPC44x_TLB_VALID)
48 printk(" G%2d | %02X | %08X | %08X | %08X |\n", 49 printk(" G%2d | %02X | %08X | %08X | %08X |\n",
49 i, tlbe->tid, tlbe->word0, tlbe->word1, 50 i, tlbe->tid, tlbe->word0, tlbe->word1,
@@ -51,7 +52,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
51 } 52 }
52 53
53 for (i = 0; i < PPC44x_TLB_SIZE; i++) { 54 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
54 tlbe = &vcpu->arch.shadow_tlb[i]; 55 tlbe = &vcpu_44x->shadow_tlb[i];
55 if (tlbe->word0 & PPC44x_TLB_VALID) 56 if (tlbe->word0 & PPC44x_TLB_VALID)
56 printk(" S%2d | %02X | %08X | %08X | %08X |\n", 57 printk(" S%2d | %02X | %08X | %08X | %08X |\n",
57 i, tlbe->tid, tlbe->word0, tlbe->word1, 58 i, tlbe->tid, tlbe->word0, tlbe->word1,
@@ -82,11 +83,12 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
82int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, 83int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
83 unsigned int as) 84 unsigned int as)
84{ 85{
86 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
85 int i; 87 int i;
86 88
87 /* XXX Replace loop with fancy data structures. */ 89 /* XXX Replace loop with fancy data structures. */
88 for (i = 0; i < PPC44x_TLB_SIZE; i++) { 90 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
89 struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[i]; 91 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
90 unsigned int tid; 92 unsigned int tid;
91 93
92 if (eaddr < get_tlb_eaddr(tlbe)) 94 if (eaddr < get_tlb_eaddr(tlbe))
@@ -114,25 +116,27 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
114struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, 116struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
115 gva_t eaddr) 117 gva_t eaddr)
116{ 118{
119 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
117 unsigned int as = !!(vcpu->arch.msr & MSR_IS); 120 unsigned int as = !!(vcpu->arch.msr & MSR_IS);
118 unsigned int index; 121 unsigned int index;
119 122
120 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); 123 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
121 if (index == -1) 124 if (index == -1)
122 return NULL; 125 return NULL;
123 return &vcpu->arch.guest_tlb[index]; 126 return &vcpu_44x->guest_tlb[index];
124} 127}
125 128
126struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, 129struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
127 gva_t eaddr) 130 gva_t eaddr)
128{ 131{
132 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
129 unsigned int as = !!(vcpu->arch.msr & MSR_DS); 133 unsigned int as = !!(vcpu->arch.msr & MSR_DS);
130 unsigned int index; 134 unsigned int index;
131 135
132 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); 136 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
133 if (index == -1) 137 if (index == -1)
134 return NULL; 138 return NULL;
135 return &vcpu->arch.guest_tlb[index]; 139 return &vcpu_44x->guest_tlb[index];
136} 140}
137 141
138static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe) 142static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
@@ -143,8 +147,9 @@ static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
143static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, 147static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
144 unsigned int index) 148 unsigned int index)
145{ 149{
146 struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[index]; 150 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
147 struct page *page = vcpu->arch.shadow_pages[index]; 151 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
152 struct page *page = vcpu_44x->shadow_pages[index];
148 153
149 if (get_tlb_v(stlbe)) { 154 if (get_tlb_v(stlbe)) {
150 if (kvmppc_44x_tlbe_is_writable(stlbe)) 155 if (kvmppc_44x_tlbe_is_writable(stlbe))
@@ -164,7 +169,9 @@ void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
164 169
165void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i) 170void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
166{ 171{
167 vcpu->arch.shadow_tlb_mod[i] = 1; 172 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
173
174 vcpu_44x->shadow_tlb_mod[i] = 1;
168} 175}
169 176
170/* Caller must ensure that the specified guest TLB entry is safe to insert into 177/* Caller must ensure that the specified guest TLB entry is safe to insert into
@@ -172,6 +179,7 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
172void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, 179void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
173 u32 flags) 180 u32 flags)
174{ 181{
182 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
175 struct page *new_page; 183 struct page *new_page;
176 struct kvmppc_44x_tlbe *stlbe; 184 struct kvmppc_44x_tlbe *stlbe;
177 hpa_t hpaddr; 185 hpa_t hpaddr;
@@ -182,7 +190,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
182 victim = kvmppc_tlb_44x_pos++; 190 victim = kvmppc_tlb_44x_pos++;
183 if (kvmppc_tlb_44x_pos > tlb_44x_hwater) 191 if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
184 kvmppc_tlb_44x_pos = 0; 192 kvmppc_tlb_44x_pos = 0;
185 stlbe = &vcpu->arch.shadow_tlb[victim]; 193 stlbe = &vcpu_44x->shadow_tlb[victim];
186 194
187 /* Get reference to new page. */ 195 /* Get reference to new page. */
188 new_page = gfn_to_page(vcpu->kvm, gfn); 196 new_page = gfn_to_page(vcpu->kvm, gfn);
@@ -196,7 +204,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
196 /* Drop reference to old page. */ 204 /* Drop reference to old page. */
197 kvmppc_44x_shadow_release(vcpu, victim); 205 kvmppc_44x_shadow_release(vcpu, victim);
198 206
199 vcpu->arch.shadow_pages[victim] = new_page; 207 vcpu_44x->shadow_pages[victim] = new_page;
200 208
201 /* XXX Make sure (va, size) doesn't overlap any other 209 /* XXX Make sure (va, size) doesn't overlap any other
202 * entries. 440x6 user manual says the result would be 210 * entries. 440x6 user manual says the result would be
@@ -224,12 +232,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
224static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, 232static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
225 gva_t eend, u32 asid) 233 gva_t eend, u32 asid)
226{ 234{
235 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
227 unsigned int pid = !(asid & 0xff); 236 unsigned int pid = !(asid & 0xff);
228 int i; 237 int i;
229 238
230 /* XXX Replace loop with fancy data structures. */ 239 /* XXX Replace loop with fancy data structures. */
231 for (i = 0; i <= tlb_44x_hwater; i++) { 240 for (i = 0; i <= tlb_44x_hwater; i++) {
232 struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; 241 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
233 unsigned int tid; 242 unsigned int tid;
234 243
235 if (!get_tlb_v(stlbe)) 244 if (!get_tlb_v(stlbe))
@@ -259,12 +268,13 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
259 * switching address spaces. */ 268 * switching address spaces. */
260void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) 269void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
261{ 270{
271 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
262 int i; 272 int i;
263 273
264 if (vcpu->arch.swap_pid) { 274 if (vcpu->arch.swap_pid) {
265 /* XXX Replace loop with fancy data structures. */ 275 /* XXX Replace loop with fancy data structures. */
266 for (i = 0; i <= tlb_44x_hwater; i++) { 276 for (i = 0; i <= tlb_44x_hwater; i++) {
267 struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; 277 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
268 278
269 /* Future optimization: clear only userspace mappings. */ 279 /* Future optimization: clear only userspace mappings. */
270 kvmppc_44x_shadow_release(vcpu, i); 280 kvmppc_44x_shadow_release(vcpu, i);
@@ -303,6 +313,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
303 313
304int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) 314int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
305{ 315{
316 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
306 u64 eaddr; 317 u64 eaddr;
307 u64 raddr; 318 u64 raddr;
308 u64 asid; 319 u64 asid;
@@ -317,7 +328,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
317 return EMULATE_FAIL; 328 return EMULATE_FAIL;
318 } 329 }
319 330
320 tlbe = &vcpu->arch.guest_tlb[index]; 331 tlbe = &vcpu_44x->guest_tlb[index];
321 332
322 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ 333 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
323 if (tlbe->word0 & PPC44x_TLB_VALID) { 334 if (tlbe->word0 & PPC44x_TLB_VALID) {