Diffstat (limited to 'arch/powerpc/kvm/44x_tlb.c')
-rw-r--r--	arch/powerpc/kvm/44x_tlb.c	528
1 file changed, 0 insertions, 528 deletions
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
deleted file mode 100644
index 0deef1082e02..000000000000
--- a/arch/powerpc/kvm/44x_tlb.c
+++ /dev/null
@@ -1,528 +0,0 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"
#include "trace.h"

#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
	       "nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d | %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif

static inline void kvmppc_44x_tlbie(unsigned int index)
{
	/* 0 <= index < 64, so the V bit is clear and we can use the index as
	 * word0. */
	asm volatile(
		"tlbwe %[index], %[index], 0\n"
	:
	: [index] "r"(index)
	);
}
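
/* Editorial note (not in the original file), assuming the mmu-44x.h bit
 * layout where PPC44x_TLB_VALID is 0x00000200: any index below 64 (at most
 * 0x3f) doubles as a word0 value with V clear, so the single tlbwe above
 * both selects the entry and invalidates it, with no scratch register. */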

static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
	/* On 440, a tlbre of word 0 copies the entry's TID into MMUCR[STID],
	 * which is where the mfspr below picks it up. */
	asm volatile(
		"tlbre %[word0], %[index], 0\n"
		"mfspr %[tid], %[sprn_mmucr]\n"
		"andi. %[tid], %[tid], 0xff\n"
		"tlbre %[word1], %[index], 1\n"
		"tlbre %[word2], %[index], 2\n"
	: [word0] "=r"(tlbe->word0),
	  [word1] "=r"(tlbe->word1),
	  [word2] "=r"(tlbe->word2),
	  [tid] "=r"(tlbe->tid)
	: [index] "r"(index),
	  [sprn_mmucr] "i"(SPRN_MMUCR)
	: "cc"
	);
}

static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
	unsigned long tmp;

	asm volatile(
		"mfspr %[tmp], %[sprn_mmucr]\n"
		"rlwimi %[tmp], %[tid], 0, 0xff\n"
		"mtspr %[sprn_mmucr], %[tmp]\n"
		"tlbwe %[word0], %[index], 0\n"
		"tlbwe %[word1], %[index], 1\n"
		"tlbwe %[word2], %[index], 2\n"
	: [tmp]   "=&r"(tmp)
	: [word0] "r"(stlbe->word0),
	  [word1] "r"(stlbe->word1),
	  [word2] "r"(stlbe->word2),
	  [tid]   "r"(stlbe->tid),
	  [index] "r"(index),
	  [sprn_mmucr] "i"(SPRN_MMUCR)
	);
}
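
/* Editorial note (not in the original file): "rlwimi tmp, tid, 0, 0xff"
 * merges the low byte of 'tid' into 'tmp' while preserving the rest, e.g.
 * tmp = 0x00010000 and tid = 0x2a leave MMUCR = 0x0001002a before the
 * three tlbwe writes install the entry under that TID. */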

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	/* WIMGE = 0b00100 */
	attrib |= PPC44x_TLB_M;

	return attrib;
}
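
/* Minimal sketch (not in the original file): a hypothetical helper that
 * walks the translation above on concrete values, assuming the usual
 * mmu-44x.h bit layout where each U permission bit sits three bits above
 * its S counterpart (SR=0x01, SW=0x02, SX=0x04, UR=0x08, UW=0x10, UX=0x20). */
static inline u32 example_supervisor_shadow_attrib(void)
{
	/* Guest supervisor entry with SR|SX (0x05), usermode == 0:
	 *   0x05 | (0x05 << 3)          = 0x2d   (UR|UX added)
	 *   0x2d | SX|SR|SW             = 0x2f   (host access forced)
	 *   0x2f | PPC44x_TLB_M (0x200) = 0x22f  (coherent, WIMGE = 0b00100)
	 */
	return kvmppc_44x_tlb_shadow_attrib(PPC44x_TLB_SR | PPC44x_TLB_SX, 0);
}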

/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbwe(i, stlbe);
	}
}

static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
	vcpu_44x->shadow_tlb_mod[i] = 1;
}

/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (vcpu_44x->shadow_tlb_mod[i])
			kvmppc_44x_tlbre(i, stlbe);

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbie(i);
	}
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
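
/* Worked example (not in the original file): for a 16K guest entry
 * (get_tlb_bytes() == 0x4000) with raddr 0x00004000, translating
 * eaddr 0xc0005234 gives pgmask = 0x3fff and
 *   gpa = 0x00004000 | (0xc0005234 & 0x3fff) = 0x00005234.
 */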

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

	if (!ref->page)
		return;

	/* Discard from the TLB. */
	/* Note: we could actually invalidate a host mapping, if the host overwrote
	 * this TLB entry since we inserted a guest mapping. */
	kvmppc_44x_tlbie(stlb_index);

	/* Now release the page. */
	if (ref->writeable)
		kvm_release_page_dirty(ref->page);
	else
		kvm_release_page_clean(ref->page);

	ref->page = NULL;

	/* XXX set tlb_44x_index to stlb_index? */

	trace_kvm_stlb_inval(stlb_index);
}

void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu_44x, i);
}

/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
	struct kvmppc_44x_tlbe stlbe;
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	struct kvmppc_44x_shadow_ref *ref;
	struct page *new_page;
	hpa_t hpaddr;
	gfn_t gfn;
	u32 asid = gtlbe->tid;
	u32 flags = gtlbe->word2;
	u32 max_bytes = get_tlb_bytes(gtlbe);
	unsigned int victim;

	/* Select TLB entry to clobber. Indirectly guard against races with the TLB
	 * miss handler by disabling interrupts. */
	local_irq_disable();
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();
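
	/* Editorial note (not in the original file): replacement is plain
	 * round-robin over the entries below the host kernel's pinned
	 * region, so the victim index simply wraps from tlb_44x_hwater
	 * back to 0. */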

	/* Get reference to new page. */
	gfn = gpaddr >> PAGE_SHIFT;
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
		       (unsigned long long)gfn);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Invalidate any previous shadow mappings. */
	kvmppc_44x_shadow_release(vcpu_44x, victim);

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

	/* Force TS=1 for all guest mappings. */
	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

	if (max_bytes >= PAGE_SIZE) {
		/* Guest mapping is larger than or equal to host page size. We can use
		 * a "native" host mapping. */
		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
	} else {
		/* Guest mapping is smaller than host page size. We must restrict the
		 * size of the mapping to be at most the smaller of the two, but for
		 * simplicity we fall back to a 4K mapping (this is probably what the
		 * guest is using anyways). */
		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

		/* 'hpaddr' is a host page, which is larger than the mapping we're
		 * inserting here. To compensate, we must add the in-page offset to the
		 * sub-page. */
		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
	}
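
	/* Worked example (not in the original file), assuming a 64K host
	 * PAGE_SIZE: PAGE_MASK ^ PAGE_MASK_4K == 0xf000, so for gpaddr
	 * 0x12345678 the 4K shadow entry points at host physical
	 * page_to_phys(new_page) | 0x5000, i.e. the matching 4K slice of
	 * the 64K host page. */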

	/* word1 holds the RPN plus the 4-bit ERPN covering the 440's 36-bit
	 * physical address space. */
	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                           vcpu->arch.shared->msr & MSR_PR);
	stlbe.tid = !(asid & 0xff);

	/* Keep track of the reference so we can properly release it later. */
	ref = &vcpu_44x->shadow_refs[victim];
	ref->page = new_page;
	ref->gtlb_index = gtlb_index;
	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
	ref->tid = stlbe.tid;

	/* Insert shadow mapping into hardware TLB. */
	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
	kvmppc_44x_tlbwe(victim, &stlbe);
	trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
	                     stlbe.word2);
}

/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
		if (ref->gtlb_index == gtlb_index)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	int usermode = vcpu->arch.shared->msr & MSR_PR;

	vcpu->arch.shadow_pid = !usermode;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old guest
	 * userspace mappings. */
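	/* Editorial note (not in the original file): e.g. when the guest
	 * switches from PID 5 to PID 7, both contexts run under host PID 0,
	 * so stale TID=0 shadow entries created for PID 5 must be released
	 * here or they would leak into the new context. */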
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		if (ref->tid == 0)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}

static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;
	int idx;

	gtlb_index = kvmppc_get_gpr(vcpu, ra);
	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[gtlb_index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
	if (tlbe->word0 & PPC44x_TLB_VALID)
		kvmppc_44x_invalidate(vcpu, gtlb_index);

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
		break;

	default:
		return EMULATE_FAIL;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gva_t eaddr;
		gpa_t gpaddr;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
	                     tlbe->word2);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int gtlb_index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
	if (rc) {
		/* Rc form: report hit/miss in CR0[EQ] (mask 0x20000000). */
		u32 cr = kvmppc_get_cr(vcpu);

		if (gtlb_index < 0)
			kvmppc_set_cr(vcpu, cr & ~0x20000000);
		else
			kvmppc_set_cr(vcpu, cr | 0x20000000);
	}
	kvmppc_set_gpr(vcpu, rt, gtlb_index);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}