Diffstat (limited to 'arch/powerpc/kvm/44x_tlb.c')
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 256
1 file changed, 125 insertions(+), 131 deletions(-)
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index d49dc66ab3c3..2981ebea3d1f 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -22,6 +22,8 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/highmem.h>
+
+#include <asm/tlbflush.h>
 #include <asm/mmu-44x.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_44x.h>
@@ -40,8 +42,6 @@
 #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
 #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
 
-static unsigned int kvmppc_tlb_44x_pos;
-
 #ifdef DEBUG
 void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
 {
@@ -52,24 +52,49 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
 	printk("| %2s | %3s | %8s | %8s | %8s |\n",
 	       "nr", "tid", "word0", "word1", "word2");
 
-	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
 		tlbe = &vcpu_44x->guest_tlb[i];
 		if (tlbe->word0 & PPC44x_TLB_VALID)
 			printk(" G%2d | %02X | %08X | %08X | %08X |\n",
 			       i, tlbe->tid, tlbe->word0, tlbe->word1,
 			       tlbe->word2);
 	}
-
-	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
-		tlbe = &vcpu_44x->shadow_tlb[i];
-		if (tlbe->word0 & PPC44x_TLB_VALID)
-			printk(" S%2d | %02X | %08X | %08X | %08X |\n",
-			       i, tlbe->tid, tlbe->word0, tlbe->word1,
-			       tlbe->word2);
-	}
 }
 #endif
 
+static inline void kvmppc_44x_tlbie(unsigned int index)
+{
+	/* 0 <= index < 64, so the V bit is clear and we can use the index as
+	 * word0. */
+	asm volatile(
+		"tlbwe %[index], %[index], 0\n"
+	:
+	: [index] "r"(index)
+	);
+}
+
+static inline void kvmppc_44x_tlbwe(unsigned int index,
+                                    struct kvmppc_44x_tlbe *stlbe)
+{
+	unsigned long tmp;
+
+	asm volatile(
+		"mfspr %[tmp], %[sprn_mmucr]\n"
+		"rlwimi %[tmp], %[tid], 0, 0xff\n"
+		"mtspr %[sprn_mmucr], %[tmp]\n"
+		"tlbwe %[word0], %[index], 0\n"
+		"tlbwe %[word1], %[index], 1\n"
+		"tlbwe %[word2], %[index], 2\n"
+	: [tmp]   "=&r"(tmp)
+	: [word0] "r"(stlbe->word0),
+	  [word1] "r"(stlbe->word1),
+	  [word2] "r"(stlbe->word2),
+	  [tid]   "r"(stlbe->tid),
+	  [index] "r"(index),
+	  [sprn_mmucr] "i"(SPRN_MMUCR)
+	);
+}
+
 static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
 {
 	/* We only care about the guest's permission and user bits. */
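The new kvmppc_44x_tlbwe helper above folds the entry's TID into MMUCR before issuing the three tlbwe instructions. As a clarification only, and not part of the patch, here is a rough C rendering of that asm, assuming the kernel's mfspr()/mtspr() accessors and the word0/word1/word2/tid fields the patch shows struct kvmppc_44x_tlbe to have:

static inline void kvmppc_44x_tlbwe_c_sketch(unsigned int index,
					     const struct kvmppc_44x_tlbe *stlbe)
{
	u32 mmucr = mfspr(SPRN_MMUCR);

	/* "rlwimi ..., 0, 0xff" inserts the low byte of tid into MMUCR[STID]. */
	mmucr = (mmucr & ~0xffu) | (stlbe->tid & 0xffu);
	mtspr(SPRN_MMUCR, mmucr);

	/* The three tlbwe instructions then write word0/1/2 of hardware TLB
	 * entry 'index'; there is no C-level equivalent for tlbwe itself. */
}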
@@ -99,7 +124,7 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
 	int i;
 
 	/* XXX Replace loop with fancy data structures. */
-	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
 		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
 		unsigned int tid;
 
@@ -125,65 +150,53 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
 	return -1;
 }
 
-struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
-                                               gva_t eaddr)
+int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
-	unsigned int index;
 
-	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
-	if (index == -1)
-		return NULL;
-	return &vcpu_44x->guest_tlb[index];
+	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
-struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
-                                               gva_t eaddr)
+int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
-	unsigned int index;
 
-	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
-	if (index == -1)
-		return NULL;
-	return &vcpu_44x->guest_tlb[index];
+	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
-static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
+static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
+                                      unsigned int stlb_index)
 {
-	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
-}
+	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];
 
-static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
-                                      unsigned int index)
-{
-	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
-	struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
-	struct page *page = vcpu_44x->shadow_pages[index];
+	if (!ref->page)
+		return;
 
-	if (get_tlb_v(stlbe)) {
-		if (kvmppc_44x_tlbe_is_writable(stlbe))
-			kvm_release_page_dirty(page);
-		else
-			kvm_release_page_clean(page);
-	}
-}
+	/* Discard from the TLB. */
+	/* Note: we could actually invalidate a host mapping, if the host overwrote
+	 * this TLB entry since we inserted a guest mapping. */
+	kvmppc_44x_tlbie(stlb_index);
 
-void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
-{
-	int i;
+	/* Now release the page. */
+	if (ref->writeable)
+		kvm_release_page_dirty(ref->page);
+	else
+		kvm_release_page_clean(ref->page);
 
-	for (i = 0; i <= tlb_44x_hwater; i++)
-		kvmppc_44x_shadow_release(vcpu, i);
+	ref->page = NULL;
+
+	/* XXX set tlb_44x_index to stlb_index? */
+
+	KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
 }
 
-void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
+void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	int i;
 
-	vcpu_44x->shadow_tlb_mod[i] = 1;
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_44x_shadow_release(vcpu_44x, i);
 }
 
 /**
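The release path above now works from a per-entry bookkeeping record rather than from a software copy of the shadow TLB. The patch does not show the definition of struct kvmppc_44x_shadow_ref (it lives in the 44x headers), but from the accesses here it must carry at least the following fields; this is an inferred sketch, not the header's actual definition, and the field types are assumptions:

/* Hypothetical sketch of the per-shadow-entry bookkeeping record. */
struct kvmppc_44x_shadow_ref_sketch {
	struct page *page;	/* host page backing the shadow entry */
	u16 gtlb_index;		/* guest TLB entry this shadow entry maps */
	u8 writeable;		/* mapped with PPC44x_TLB_UW: release as dirty */
	u8 tid;			/* host translation ID used for the entry */
};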
@@ -199,21 +212,24 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
  * the shadow TLB.
  */
 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
-                    u32 flags, u32 max_bytes)
+                    u32 flags, u32 max_bytes, unsigned int gtlb_index)
 {
+	struct kvmppc_44x_tlbe stlbe;
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	struct kvmppc_44x_shadow_ref *ref;
 	struct page *new_page;
-	struct kvmppc_44x_tlbe *stlbe;
 	hpa_t hpaddr;
 	gfn_t gfn;
 	unsigned int victim;
 
-	/* Future optimization: don't overwrite the TLB entry containing the
-	 * current PC (or stack?). */
-	victim = kvmppc_tlb_44x_pos++;
-	if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
-		kvmppc_tlb_44x_pos = 0;
-	stlbe = &vcpu_44x->shadow_tlb[victim];
+	/* Select TLB entry to clobber. Indirectly guard against races with the TLB
+	 * miss handler by disabling interrupts. */
+	local_irq_disable();
+	victim = ++tlb_44x_index;
+	if (victim > tlb_44x_hwater)
+		victim = 0;
+	tlb_44x_index = victim;
+	local_irq_enable();
 
 	/* Get reference to new page. */
 	gfn = gpaddr >> PAGE_SHIFT;
@@ -225,10 +241,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
 	}
 	hpaddr = page_to_phys(new_page);
 
-	/* Drop reference to old page. */
-	kvmppc_44x_shadow_release(vcpu, victim);
-
-	vcpu_44x->shadow_pages[victim] = new_page;
+	/* Invalidate any previous shadow mappings. */
+	kvmppc_44x_shadow_release(vcpu_44x, victim);
 
 	/* XXX Make sure (va, size) doesn't overlap any other
 	 * entries. 440x6 user manual says the result would be
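The victim selection added above shares the host's tlb_44x_index cursor with the host TLB miss handler, which is why the increment runs with interrupts off. For clarity only, the same round-robin policy in isolation, using a hypothetical next_victim() helper that is not part of the patch:

/* Hypothetical helper: advance the shared replacement cursor, wrapping at
 * tlb_44x_hwater, the last hardware TLB entry KVM may clobber. */
static unsigned int next_victim(void)
{
	unsigned int victim;

	local_irq_disable();		/* don't race the host TLB miss handler */
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();

	return victim;
}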
@@ -236,21 +250,19 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
 
 	/* XXX what about AS? */
 
-	stlbe->tid = !(asid & 0xff);
-
 	/* Force TS=1 for all guest mappings. */
-	stlbe->word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
+	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
 
 	if (max_bytes >= PAGE_SIZE) {
 		/* Guest mapping is larger than or equal to host page size. We can use
 		 * a "native" host mapping. */
-		stlbe->word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
+		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
 	} else {
 		/* Guest mapping is smaller than host page size. We must restrict the
 		 * size of the mapping to be at most the smaller of the two, but for
 		 * simplicity we fall back to a 4K mapping (this is probably what the
 		 * guest is using anyways). */
-		stlbe->word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
+		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
 
 		/* 'hpaddr' is a host page, which is larger than the mapping we're
 		 * inserting here. To compensate, we must add the in-page offset to the
@@ -258,47 +270,36 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
 		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
 	}
 
-	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
-	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
+	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
+	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
 	                                           vcpu->arch.msr & MSR_PR);
-	kvmppc_tlbe_set_modified(vcpu, victim);
+	stlbe.tid = !(asid & 0xff);
 
-	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
-		    stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
-		    handler);
+	/* Keep track of the reference so we can properly release it later. */
+	ref = &vcpu_44x->shadow_refs[victim];
+	ref->page = new_page;
+	ref->gtlb_index = gtlb_index;
+	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
+	ref->tid = stlbe.tid;
+
+	/* Insert shadow mapping into hardware TLB. */
+	kvmppc_44x_tlbwe(victim, &stlbe);
+	KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
+		    stlbe.word2, handler);
 }
 
-static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                                  gva_t eend, u32 asid)
+/* For a particular guest TLB entry, invalidate the corresponding host TLB
+ * mappings and release the host pages. */
+static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
+                                  unsigned int gtlb_index)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
-	unsigned int pid = !(asid & 0xff);
 	int i;
 
-	/* XXX Replace loop with fancy data structures. */
-	for (i = 0; i <= tlb_44x_hwater; i++) {
-		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
-		unsigned int tid;
-
-		if (!get_tlb_v(stlbe))
-			continue;
-
-		if (eend < get_tlb_eaddr(stlbe))
-			continue;
-
-		if (eaddr > get_tlb_end(stlbe))
-			continue;
-
-		tid = get_tlb_tid(stlbe);
-		if (tid && (tid != pid))
-			continue;
-
-		kvmppc_44x_shadow_release(vcpu, i);
-		stlbe->word0 = 0;
-		kvmppc_tlbe_set_modified(vcpu, i);
-		KVMTRACE_5D(STLB_INVAL, vcpu, i,
-			    stlbe->tid, stlbe->word0, stlbe->word1,
-			    stlbe->word2, handler);
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
+		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
+		if (ref->gtlb_index == gtlb_index)
+			kvmppc_44x_shadow_release(vcpu_44x, i);
 	}
 }
 
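The two address manipulations in kvmppc_mmu_map are easy to misread, so here is a small standalone worked example, not part of the patch: the guest's offset within the host page is folded into hpaddr, and word1 then packs the page-aligned low 32 bits of the real address with physical-address bits 32-35 in the low nibble (ERPN). The concrete values and the 64K host page size are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t host_page_mask = ~0xffffULL;	/* PAGE_MASK, assuming 64K pages */
	const uint64_t page_mask_4k   = ~0x0fffULL;	/* PAGE_MASK_4K */
	uint64_t gpaddr = 0x00056789ULL;		/* hypothetical guest physical address */
	uint64_t hpaddr = 0x2abcd0000ULL;		/* hypothetical 36-bit host page address */
	uint32_t word1;

	/* Fold the guest's in-page offset into the 4K shadow mapping. */
	hpaddr |= gpaddr & (host_page_mask ^ page_mask_4k);

	/* word1: real page number from the low 32 bits, bits 32-35 in the low nibble. */
	word1 = (uint32_t)(hpaddr & 0xfffffc00) | (uint32_t)((hpaddr >> 32) & 0xf);

	printf("hpaddr = 0x%09llx, word1 = 0x%08x\n",
	       (unsigned long long)hpaddr, word1);	/* 0x2abcd6000, 0xabcd6002 */
	return 0;
}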
@@ -321,14 +322,11 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
 	 * can't access guest kernel mappings (TID=1). When we switch to a new
 	 * guest PID, which will also use host PID=0, we must discard the old guest
 	 * userspace mappings. */
-	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_tlb); i++) {
-		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
+		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
 
-		if (get_tlb_tid(stlbe) == 0) {
-			kvmppc_44x_shadow_release(vcpu, i);
-			stlbe->word0 = 0;
-			kvmppc_tlbe_set_modified(vcpu, i);
-		}
+		if (ref->tid == 0)
+			kvmppc_44x_shadow_release(vcpu_44x, i);
 	}
 }
 
@@ -356,26 +354,21 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
-	gva_t eaddr;
-	u64 asid;
 	struct kvmppc_44x_tlbe *tlbe;
-	unsigned int index;
+	unsigned int gtlb_index;
 
-	index = vcpu->arch.gpr[ra];
-	if (index > PPC44x_TLB_SIZE) {
-		printk("%s: index %d\n", __func__, index);
+	gtlb_index = vcpu->arch.gpr[ra];
+	if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
+		printk("%s: index %d\n", __func__, gtlb_index);
 		kvmppc_dump_vcpu(vcpu);
 		return EMULATE_FAIL;
 	}
 
-	tlbe = &vcpu_44x->guest_tlb[index];
+	tlbe = &vcpu_44x->guest_tlb[gtlb_index];
 
-	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
-	if (tlbe->word0 & PPC44x_TLB_VALID) {
-		eaddr = get_tlb_eaddr(tlbe);
-		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
-		kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
-	}
+	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
+	if (tlbe->word0 & PPC44x_TLB_VALID)
+		kvmppc_44x_invalidate(vcpu, gtlb_index);
 
 	switch (ws) {
 	case PPC44x_TLB_PAGEID:
@@ -396,6 +389,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	}
 
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
+		u64 asid;
+		gva_t eaddr;
 		gpa_t gpaddr;
 		u32 flags;
 		u32 bytes;
@@ -411,12 +406,11 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
 		flags = tlbe->word2 & 0xffff;
 
-		kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes);
+		kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
 	}
 
-	KVMTRACE_5D(GTLB_WRITE, vcpu, index,
-		    tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
-		    handler);
+	KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
+		    tlbe->word1, tlbe->word2, handler);
 
 	return EMULATE_DONE;
 }
@@ -424,7 +418,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
 {
 	u32 ea;
-	int index;
+	int gtlb_index;
 	unsigned int as = get_mmucr_sts(vcpu);
 	unsigned int pid = get_mmucr_stid(vcpu);
 
@@ -432,14 +426,14 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
 	if (ra)
 		ea += vcpu->arch.gpr[ra];
 
-	index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
+	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
 	if (rc) {
-		if (index < 0)
+		if (gtlb_index < 0)
 			vcpu->arch.cr &= ~0x20000000;
 		else
 			vcpu->arch.cr |= 0x20000000;
 	}
-	vcpu->arch.gpr[rt] = index;
+	vcpu->arch.gpr[rt] = gtlb_index;
 
 	return EMULATE_DONE;
 }
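For reference, the 0x20000000 constant manipulated in the tlbsx emulation above is the EQ bit of CR field 0, which the record form of tlbsx sets on a hit and clears on a miss. A tiny standalone sketch of the same update, with made-up values and not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cr = 0;
	int gtlb_index = 5;		/* pretend the search matched entry 5 */

	if (gtlb_index < 0)
		cr &= ~0x20000000u;	/* miss: clear CR0[EQ] */
	else
		cr |= 0x20000000u;	/* hit: set CR0[EQ] */

	printf("cr = 0x%08x\n", cr);	/* prints 0x20000000 */
	return 0;
}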