author | Xiantao Zhang <xiantao.zhang@intel.com> | 2008-04-01 02:50:59 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2008-04-27 05:01:05 -0400 |
commit | 964cd94a2ae3b20f9da9bd43b31aac32c4fe9aee (patch) | |
tree | c9d6b5c3af1b6a1d8f48e1aac4484a6a8d2ee9fa /arch/ia64/kvm | |
parent | bb46fb4af160ec7ae6e5102a79a3b2518eaee7af (diff) |
KVM: ia64: Add TLB virtualization support
vtlb.c implements guest TLB and VHPT virtualization.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r-- | arch/ia64/kvm/vtlb.c | 636 |
1 files changed, 636 insertions, 0 deletions
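For orientation before the patch body: the heart of this file is the long-format VHPT hash used by vsa_thash() below. The following user-space sketch is illustrative only (not part of the patch) and shows that index/tag computation; the pta size, page shift, RID, and address values are assumptions chosen for the example.

/*
 * Illustrative sketch only, not part of the patch: the hash-index and
 * tag computation that vsa_thash() performs for the long-format VHPT.
 * Each entry is 32 bytes (hence the "<< 5"); the low 8 RID bits are
 * folded into the index and the next 16 RID bits into the tag.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pta_size = 16;          /* assumed log2 of VHPT size in bytes */
	unsigned long page_shift = 14;        /* assumed 16KB region page size */
	unsigned long rid = 0x123456;         /* assumed region id */
	unsigned long va_offset = 0x40000000; /* assumed region offset of va */

	unsigned long pfn_bits = pta_size - 5 - 8;
	unsigned long pfn = va_offset >> page_shift;
	unsigned long index = ((rid & 0xff) << pfn_bits) |
			      (pfn & ((1UL << pfn_bits) - 1));
	unsigned long tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);

	printf("entry offset = %#lx, tag = %#lx\n", index << 5, tag);
	return 0;
}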
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
new file mode 100644
index 000000000000..def4576d22b1
--- /dev/null
+++ b/arch/ia64/kvm/vtlb.c
@@ -0,0 +1,636 @@
1 | /* | ||
2 | * vtlb.c: guest virtual tlb handling module. | ||
3 | * Copyright (c) 2004, Intel Corporation. | ||
4 | * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com> | ||
5 | * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> | ||
6 | * | ||
7 | * Copyright (c) 2007, Intel Corporation. | ||
8 | * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> | ||
9 | * Xiantao Zhang <xiantao.zhang@intel.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms and conditions of the GNU General Public License, | ||
13 | * version 2, as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along with | ||
21 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
22 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "vcpu.h" | ||
27 | |||
28 | #include <linux/rwsem.h> | ||
29 | |||
30 | #include <asm/tlb.h> | ||
31 | |||
32 | /* | ||
33 | * Check to see if the address rid:va is translated by the TLB | ||
34 | */ | ||
35 | |||
36 | static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va) | ||
37 | { | ||
38 | return ((trp->p) && (trp->rid == rid) | ||
39 | && ((va-trp->vadr) < PSIZE(trp->ps))); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * Only for GUEST TR format. | ||
44 | */ | ||
45 | static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva) | ||
46 | { | ||
47 | u64 sa1, ea1; | ||
48 | |||
49 | if (!trp->p || trp->rid != rid) | ||
50 | return 0; | ||
51 | |||
52 | sa1 = trp->vadr; | ||
53 | ea1 = sa1 + PSIZE(trp->ps) - 1; | ||
54 | eva -= 1; | ||
55 | if ((sva > ea1) || (sa1 > eva)) | ||
56 | return 0; | ||
57 | else | ||
58 | return 1; | ||
59 | |||
60 | } | ||
61 | |||
62 | void machine_tlb_purge(u64 va, u64 ps) | ||
63 | { | ||
64 | ia64_ptcl(va, ps << 2); | ||
65 | } | ||
66 | |||
67 | void local_flush_tlb_all(void) | ||
68 | { | ||
69 | int i, j; | ||
70 | unsigned long flags, count0, count1; | ||
71 | unsigned long stride0, stride1, addr; | ||
72 | |||
73 | addr = current_vcpu->arch.ptce_base; | ||
74 | count0 = current_vcpu->arch.ptce_count[0]; | ||
75 | count1 = current_vcpu->arch.ptce_count[1]; | ||
76 | stride0 = current_vcpu->arch.ptce_stride[0]; | ||
77 | stride1 = current_vcpu->arch.ptce_stride[1]; | ||
78 | |||
79 | local_irq_save(flags); | ||
80 | for (i = 0; i < count0; ++i) { | ||
81 | for (j = 0; j < count1; ++j) { | ||
82 | ia64_ptce(addr); | ||
83 | addr += stride1; | ||
84 | } | ||
85 | addr += stride0; | ||
86 | } | ||
87 | local_irq_restore(flags); | ||
88 | ia64_srlz_i(); /* srlz.i implies srlz.d */ | ||
89 | } | ||
90 | |||
91 | int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref) | ||
92 | { | ||
93 | union ia64_rr vrr; | ||
94 | union ia64_pta vpta; | ||
95 | struct ia64_psr vpsr; | ||
96 | |||
97 | vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); | ||
98 | vrr.val = vcpu_get_rr(vcpu, vadr); | ||
99 | vpta.val = vcpu_get_pta(vcpu); | ||
100 | |||
101 | if (vrr.ve & vpta.ve) { | ||
102 | switch (ref) { | ||
103 | case DATA_REF: | ||
104 | case NA_REF: | ||
105 | return vpsr.dt; | ||
106 | case INST_REF: | ||
107 | return vpsr.dt && vpsr.it && vpsr.ic; | ||
108 | case RSE_REF: | ||
109 | return vpsr.dt && vpsr.rt; | ||
110 | |||
111 | } | ||
112 | } | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag) | ||
117 | { | ||
118 | u64 index, pfn, rid, pfn_bits; | ||
119 | |||
120 | pfn_bits = vpta.size - 5 - 8; | ||
121 | pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr); | ||
122 | rid = _REGION_ID(vrr); | ||
123 | index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1)); | ||
124 | *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16); | ||
125 | |||
126 | return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) + | ||
127 | (index << 5)); | ||
128 | } | ||
129 | |||
130 | struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type) | ||
131 | { | ||
132 | |||
133 | struct thash_data *trp; | ||
134 | int i; | ||
135 | u64 rid; | ||
136 | |||
137 | rid = vcpu_get_rr(vcpu, va); | ||
138 | rid = rid & RR_RID_MASK; | ||
139 | if (type == D_TLB) { | ||
140 | if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { | ||
141 | for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; | ||
142 | i < NDTRS; i++, trp++) { | ||
143 | if (__is_tr_translated(trp, rid, va)) | ||
144 | return trp; | ||
145 | } | ||
146 | } | ||
147 | } else { | ||
148 | if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { | ||
149 | for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; | ||
150 | i < NITRS; i++, trp++) { | ||
151 | if (__is_tr_translated(trp, rid, va)) | ||
152 | return trp; | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | |||
157 | return NULL; | ||
158 | } | ||
159 | |||
160 | static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte) | ||
161 | { | ||
162 | union ia64_rr rr; | ||
163 | struct thash_data *head; | ||
164 | unsigned long ps, gpaddr; | ||
165 | |||
166 | ps = itir_ps(itir); | ||
167 | |||
168 | gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | | ||
169 | (ifa & ((1UL << ps) - 1)); | ||
170 | |||
171 | rr.val = ia64_get_rr(ifa); | ||
172 | head = (struct thash_data *)ia64_thash(ifa); | ||
173 | head->etag = INVALID_TI_TAG; | ||
174 | ia64_mf(); | ||
175 | head->page_flags = pte & ~PAGE_FLAGS_RV_MASK; | ||
176 | head->itir = rr.ps << 2; | ||
177 | head->etag = ia64_ttag(ifa); | ||
178 | head->gpaddr = gpaddr; | ||
179 | } | ||
180 | |||
181 | void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps) | ||
182 | { | ||
183 | u64 i, dirty_pages = 1; | ||
184 | u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; | ||
185 | spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); | ||
186 | void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE) | ||
187 | + KVM_MEM_DIRTY_LOG_OFS; | ||
188 | dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; | ||
189 | |||
190 | vmm_spin_lock(lock); | ||
191 | for (i = 0; i < dirty_pages; i++) { | ||
192 | /* avoid RMW */ | ||
193 | if (!test_bit(base_gfn + i, dirty_bitmap)) | ||
194 | set_bit(base_gfn + i, dirty_bitmap); | ||
195 | } | ||
196 | vmm_spin_unlock(lock); | ||
197 | } | ||
198 | |||
199 | void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) | ||
200 | { | ||
201 | u64 phy_pte, psr; | ||
202 | union ia64_rr mrr; | ||
203 | |||
204 | mrr.val = ia64_get_rr(va); | ||
205 | phy_pte = translate_phy_pte(&pte, itir, va); | ||
206 | |||
207 | if (itir_ps(itir) >= mrr.ps) { | ||
208 | vhpt_insert(phy_pte, itir, va, pte); | ||
209 | } else { | ||
210 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | ||
211 | psr = ia64_clear_ic(); | ||
212 | ia64_itc(type, va, phy_pte, itir_ps(itir)); | ||
213 | ia64_set_psr(psr); | ||
214 | } | ||
215 | |||
216 | if (!(pte&VTLB_PTE_IO)) | ||
217 | mark_pages_dirty(v, pte, itir_ps(itir)); | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * vhpt lookup | ||
222 | */ | ||
223 | struct thash_data *vhpt_lookup(u64 va) | ||
224 | { | ||
225 | struct thash_data *head; | ||
226 | u64 tag; | ||
227 | |||
228 | head = (struct thash_data *)ia64_thash(va); | ||
229 | tag = ia64_ttag(va); | ||
230 | if (head->etag == tag) | ||
231 | return head; | ||
232 | return NULL; | ||
233 | } | ||
234 | |||
235 | u64 guest_vhpt_lookup(u64 iha, u64 *pte) | ||
236 | { | ||
237 | u64 ret; | ||
238 | struct thash_data *data; | ||
239 | |||
240 | data = __vtr_lookup(current_vcpu, iha, D_TLB); | ||
241 | if (data != NULL) | ||
242 | thash_vhpt_insert(current_vcpu, data->page_flags, | ||
243 | data->itir, iha, D_TLB); | ||
244 | |||
245 | asm volatile ("rsm psr.ic|psr.i;;" | ||
246 | "srlz.d;;" | ||
247 | "ld8.s r9=[%1];;" | ||
248 | "tnat.nz p6,p7=r9;;" | ||
249 | "(p6) mov %0=1;" | ||
250 | "(p6) mov r9=r0;" | ||
251 | "(p7) extr.u r9=r9,0,53;;" | ||
252 | "(p7) mov %0=r0;" | ||
253 | "(p7) st8 [%2]=r9;;" | ||
254 | "ssm psr.ic;;" | ||
255 | "srlz.d;;" | ||
256 | /* "ssm psr.i;;" Re-enable once the VMM runs with interrupts enabled. */ | ||
257 | : "=r"(ret) : "r"(iha), "r"(pte):"memory"); | ||
258 | |||
259 | return ret; | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * purge software guest tlb | ||
264 | */ | ||
265 | |||
266 | static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps) | ||
267 | { | ||
268 | struct thash_data *cur; | ||
269 | u64 start, curadr, size, psbits, tag, rr_ps, num; | ||
270 | union ia64_rr vrr; | ||
271 | struct thash_cb *hcb = &v->arch.vtlb; | ||
272 | |||
273 | vrr.val = vcpu_get_rr(v, va); | ||
274 | psbits = VMX(v, psbits[(va >> 61)]); | ||
275 | start = va & ~((1UL << ps) - 1); | ||
276 | while (psbits) { | ||
277 | curadr = start; | ||
278 | rr_ps = __ffs(psbits); | ||
279 | psbits &= ~(1UL << rr_ps); | ||
280 | num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps)); | ||
281 | size = PSIZE(rr_ps); | ||
282 | vrr.ps = rr_ps; | ||
283 | while (num) { | ||
284 | cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag); | ||
285 | if (cur->etag == tag && cur->ps == rr_ps) | ||
286 | cur->etag = INVALID_TI_TAG; | ||
287 | curadr += size; | ||
288 | num--; | ||
289 | } | ||
290 | } | ||
291 | } | ||
292 | |||
293 | |||
294 | /* | ||
295 | * purge VHPT and machine TLB | ||
296 | */ | ||
297 | static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps) | ||
298 | { | ||
299 | struct thash_data *cur; | ||
300 | u64 start, size, tag, num; | ||
301 | union ia64_rr rr; | ||
302 | |||
303 | start = va & ~((1UL << ps) - 1); | ||
304 | rr.val = ia64_get_rr(va); | ||
305 | size = PSIZE(rr.ps); | ||
306 | num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps)); | ||
307 | while (num) { | ||
308 | cur = (struct thash_data *)ia64_thash(start); | ||
309 | tag = ia64_ttag(start); | ||
310 | if (cur->etag == tag) | ||
311 | cur->etag = INVALID_TI_TAG; | ||
312 | start += size; | ||
313 | num--; | ||
314 | } | ||
315 | machine_tlb_purge(va, ps); | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Insert an entry into hash TLB or VHPT. | ||
320 | * NOTES: | ||
321 | * 1: When inserting a VHPT entry into the hash, "va" must be an | ||
322 | * address covered by the inserted machine VHPT entry. | ||
323 | * 2: The entry format is always the TLB format. | ||
324 | * 3: The caller needs to make sure the new entry does not overlap | ||
325 | * with any existing entry. | ||
326 | */ | ||
327 | void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va) | ||
328 | { | ||
329 | struct thash_data *head; | ||
330 | union ia64_rr vrr; | ||
331 | u64 tag; | ||
332 | struct thash_cb *hcb = &v->arch.vtlb; | ||
333 | |||
334 | vrr.val = vcpu_get_rr(v, va); | ||
335 | vrr.ps = itir_ps(itir); | ||
336 | VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps); | ||
337 | head = vsa_thash(hcb->pta, va, vrr.val, &tag); | ||
338 | head->page_flags = pte; | ||
339 | head->itir = itir; | ||
340 | head->etag = tag; | ||
341 | } | ||
342 | |||
343 | int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type) | ||
344 | { | ||
345 | struct thash_data *trp; | ||
346 | int i; | ||
347 | u64 end, rid; | ||
348 | |||
349 | rid = vcpu_get_rr(vcpu, va); | ||
350 | rid = rid & RR_RID_MASK; | ||
351 | end = va + PSIZE(ps); | ||
352 | if (type == D_TLB) { | ||
353 | if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) { | ||
354 | for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0; | ||
355 | i < NDTRS; i++, trp++) { | ||
356 | if (__is_tr_overlap(trp, rid, va, end)) | ||
357 | return i; | ||
358 | } | ||
359 | } | ||
360 | } else { | ||
361 | if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) { | ||
362 | for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0; | ||
363 | i < NITRS; i++, trp++) { | ||
364 | if (__is_tr_overlap(trp, rid, va, end)) | ||
365 | return i; | ||
366 | } | ||
367 | } | ||
368 | } | ||
369 | return -1; | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * Purge entries in VTLB and VHPT | ||
374 | */ | ||
375 | void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps) | ||
376 | { | ||
377 | if (vcpu_quick_region_check(v->arch.tc_regions, va)) | ||
378 | vtlb_purge(v, va, ps); | ||
379 | vhpt_purge(v, va, ps); | ||
380 | } | ||
381 | |||
382 | void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps) | ||
383 | { | ||
384 | u64 old_va = va; | ||
385 | va = REGION_OFFSET(va); | ||
386 | if (vcpu_quick_region_check(v->arch.tc_regions, old_va)) | ||
387 | vtlb_purge(v, va, ps); | ||
388 | vhpt_purge(v, va, ps); | ||
389 | } | ||
390 | |||
391 | u64 translate_phy_pte(u64 *pte, u64 itir, u64 va) | ||
392 | { | ||
393 | u64 ps, ps_mask, paddr, maddr; | ||
394 | union pte_flags phy_pte; | ||
395 | |||
396 | ps = itir_ps(itir); | ||
397 | ps_mask = ~((1UL << ps) - 1); | ||
398 | phy_pte.val = *pte; | ||
399 | paddr = *pte; | ||
400 | paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask); | ||
401 | maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT); | ||
402 | if (maddr & GPFN_IO_MASK) { | ||
403 | *pte |= VTLB_PTE_IO; | ||
404 | return -1; | ||
405 | } | ||
406 | maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | | ||
407 | (paddr & ~PAGE_MASK); | ||
408 | phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT; | ||
409 | return phy_pte.val; | ||
410 | } | ||
411 | |||
412 | /* | ||
413 | * Purge overlapping TCs and then insert the new entry to emulate itc ops. | ||
414 | * Notes: Only TC entries can be purged and inserted. | ||
415 | * A return value of 1 indicates the mapping is MMIO. | ||
416 | */ | ||
417 | int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, | ||
418 | u64 ifa, int type) | ||
419 | { | ||
420 | u64 ps; | ||
421 | u64 phy_pte; | ||
422 | union ia64_rr vrr, mrr; | ||
423 | int ret = 0; | ||
424 | |||
425 | ps = itir_ps(itir); | ||
426 | vrr.val = vcpu_get_rr(v, ifa); | ||
427 | mrr.val = ia64_get_rr(ifa); | ||
428 | |||
429 | phy_pte = translate_phy_pte(&pte, itir, ifa); | ||
430 | |||
431 | /* Ensure the WB attribute if the pte refers to a normal memory page, | ||
432 | * which is required by VGA acceleration since QEMU maps the shared | ||
433 | * VRAM buffer with WB. | ||
434 | */ | ||
435 | if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) { | ||
436 | pte &= ~_PAGE_MA_MASK; | ||
437 | phy_pte &= ~_PAGE_MA_MASK; | ||
438 | } | ||
439 | |||
440 | if (pte & VTLB_PTE_IO) | ||
441 | ret = 1; | ||
442 | |||
443 | vtlb_purge(v, ifa, ps); | ||
444 | vhpt_purge(v, ifa, ps); | ||
445 | |||
446 | if (ps == mrr.ps) { | ||
447 | if (!(pte&VTLB_PTE_IO)) { | ||
448 | vhpt_insert(phy_pte, itir, ifa, pte); | ||
449 | } else { | ||
450 | vtlb_insert(v, pte, itir, ifa); | ||
451 | vcpu_quick_region_set(VMX(v, tc_regions), ifa); | ||
452 | } | ||
453 | } else if (ps > mrr.ps) { | ||
454 | vtlb_insert(v, pte, itir, ifa); | ||
455 | vcpu_quick_region_set(VMX(v, tc_regions), ifa); | ||
456 | if (!(pte&VTLB_PTE_IO)) | ||
457 | vhpt_insert(phy_pte, itir, ifa, pte); | ||
458 | } else { | ||
459 | u64 psr; | ||
460 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | ||
461 | psr = ia64_clear_ic(); | ||
462 | ia64_itc(type, ifa, phy_pte, ps); | ||
463 | ia64_set_psr(psr); | ||
464 | } | ||
465 | if (!(pte&VTLB_PTE_IO)) | ||
466 | mark_pages_dirty(v, pte, ps); | ||
467 | |||
468 | return ret; | ||
469 | } | ||
470 | |||
471 | /* | ||
472 | * Purge all TCs or VHPT entries, including those in the hash table. | ||
473 | * | ||
474 | */ | ||
475 | |||
476 | void thash_purge_all(struct kvm_vcpu *v) | ||
477 | { | ||
478 | int i; | ||
479 | struct thash_data *head; | ||
480 | struct thash_cb *vtlb, *vhpt; | ||
481 | vtlb = &v->arch.vtlb; | ||
482 | vhpt = &v->arch.vhpt; | ||
483 | |||
484 | for (i = 0; i < 8; i++) | ||
485 | VMX(v, psbits[i]) = 0; | ||
486 | |||
487 | head = vtlb->hash; | ||
488 | for (i = 0; i < vtlb->num; i++) { | ||
489 | head->page_flags = 0; | ||
490 | head->etag = INVALID_TI_TAG; | ||
491 | head->itir = 0; | ||
492 | head->next = 0; | ||
493 | head++; | ||
494 | } | ||
495 | |||
496 | head = vhpt->hash; | ||
497 | for (i = 0; i < vhpt->num; i++) { | ||
498 | head->page_flags = 0; | ||
499 | head->etag = INVALID_TI_TAG; | ||
500 | head->itir = 0; | ||
501 | head->next = 0; | ||
502 | head++; | ||
503 | } | ||
504 | |||
505 | local_flush_tlb_all(); | ||
506 | } | ||
507 | |||
508 | |||
509 | /* | ||
510 | * Look up the hash table and its collision chain to find an entry | ||
511 | * covering the address rid:va. | ||
512 | * | ||
513 | * INPUT: | ||
514 | * in: entries are in TLB format for both the VHPT and the vTLB. | ||
515 | */ | ||
516 | |||
517 | struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) | ||
518 | { | ||
519 | struct thash_data *cch; | ||
520 | u64 psbits, ps, tag; | ||
521 | union ia64_rr vrr; | ||
522 | |||
523 | struct thash_cb *hcb = &v->arch.vtlb; | ||
524 | |||
525 | cch = __vtr_lookup(v, va, is_data); | ||
526 | if (cch) | ||
527 | return cch; | ||
528 | |||
529 | if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0) | ||
530 | return NULL; | ||
531 | |||
532 | psbits = VMX(v, psbits[(va >> 61)]); | ||
533 | vrr.val = vcpu_get_rr(v, va); | ||
534 | while (psbits) { | ||
535 | ps = __ffs(psbits); | ||
536 | psbits &= ~(1UL << ps); | ||
537 | vrr.ps = ps; | ||
538 | cch = vsa_thash(hcb->pta, va, vrr.val, &tag); | ||
539 | if (cch->etag == tag && cch->ps == ps) | ||
540 | return cch; | ||
541 | } | ||
542 | |||
543 | return NULL; | ||
544 | } | ||
545 | |||
546 | |||
547 | /* | ||
548 | * Initialize internal control data before use. | ||
549 | */ | ||
550 | void thash_init(struct thash_cb *hcb, u64 sz) | ||
551 | { | ||
552 | int i; | ||
553 | struct thash_data *head; | ||
554 | |||
555 | hcb->pta.val = (unsigned long)hcb->hash; | ||
556 | hcb->pta.vf = 1; | ||
557 | hcb->pta.ve = 1; | ||
558 | hcb->pta.size = sz; | ||
559 | head = hcb->hash; | ||
560 | for (i = 0; i < hcb->num; i++) { | ||
561 | head->page_flags = 0; | ||
562 | head->itir = 0; | ||
563 | head->etag = INVALID_TI_TAG; | ||
564 | head->next = 0; | ||
565 | head++; | ||
566 | } | ||
567 | } | ||
568 | |||
569 | u64 kvm_lookup_mpa(u64 gpfn) | ||
570 | { | ||
571 | u64 *base = (u64 *) KVM_P2M_BASE; | ||
572 | return *(base + gpfn); | ||
573 | } | ||
574 | |||
575 | u64 kvm_gpa_to_mpa(u64 gpa) | ||
576 | { | ||
577 | u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT); | ||
578 | return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK); | ||
579 | } | ||
580 | |||
581 | |||
582 | /* | ||
583 | * Fetch guest bundle code. | ||
584 | * INPUT: | ||
585 | * gip: guest ip | ||
586 | * pbundle: used to return fetched bundle. | ||
587 | */ | ||
588 | int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle) | ||
589 | { | ||
590 | u64 gpip = 0; /* guest physical IP*/ | ||
591 | u64 *vpa; | ||
592 | struct thash_data *tlb; | ||
593 | u64 maddr; | ||
594 | |||
595 | if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) { | ||
596 | /* I-side physical mode */ | ||
597 | gpip = gip; | ||
598 | } else { | ||
599 | tlb = vtlb_lookup(vcpu, gip, I_TLB); | ||
600 | if (tlb) | ||
601 | gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | | ||
602 | (gip & (PSIZE(tlb->ps) - 1)); | ||
603 | } | ||
604 | if (gpip) { | ||
605 | maddr = kvm_gpa_to_mpa(gpip); | ||
606 | } else { | ||
607 | tlb = vhpt_lookup(gip); | ||
608 | if (tlb == NULL) { | ||
609 | ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2); | ||
610 | return IA64_FAULT; | ||
611 | } | ||
612 | maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | ||
613 | | (gip & (PSIZE(tlb->ps) - 1)); | ||
614 | } | ||
615 | vpa = (u64 *)__kvm_va(maddr); | ||
616 | |||
617 | pbundle->i64[0] = *vpa++; | ||
618 | pbundle->i64[1] = *vpa; | ||
619 | |||
620 | return IA64_NO_FAULT; | ||
621 | } | ||
622 | |||
623 | |||
624 | void kvm_init_vhpt(struct kvm_vcpu *v) | ||
625 | { | ||
626 | v->arch.vhpt.num = VHPT_NUM_ENTRIES; | ||
627 | thash_init(&v->arch.vhpt, VHPT_SHIFT); | ||
628 | ia64_set_pta(v->arch.vhpt.pta.val); | ||
629 | /* Enable VHPT here? */ | ||
630 | } | ||
631 | |||
632 | void kvm_init_vtlb(struct kvm_vcpu *v) | ||
633 | { | ||
634 | v->arch.vtlb.num = VTLB_NUM_ENTRIES; | ||
635 | thash_init(&v->arch.vtlb, VTLB_SHIFT); | ||
636 | } | ||
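As a usage note on the bottom of the file: kvm_lookup_mpa()/kvm_gpa_to_mpa() (also used by translate_phy_pte()) split a guest physical address into frame number and page offset, translate the frame through the p2m table, and recombine. The sketch below is illustrative only, not part of the patch; the page shift, the toy one-entry p2m table, and the sample frame numbers are assumptions.

/*
 * Illustrative sketch only, not part of the patch: the address arithmetic
 * mirroring kvm_gpa_to_mpa() above, with a toy one-entry p2m table.
 */
#include <stdio.h>

#define TOY_PAGE_SHIFT	14UL			/* assumed 16KB pages */
#define TOY_PAGE_SIZE	(1UL << TOY_PAGE_SHIFT)
#define TOY_PAGE_MASK	(~(TOY_PAGE_SIZE - 1))

static unsigned long toy_lookup_mpa(unsigned long gpfn)
{
	/* pretend guest frame 0x10 is backed by machine frame 0x8000 */
	return (gpfn == 0x10) ? (0x8000UL << TOY_PAGE_SHIFT) : 0;
}

static unsigned long toy_gpa_to_mpa(unsigned long gpa)
{
	unsigned long pte = toy_lookup_mpa(gpa >> TOY_PAGE_SHIFT);

	/* machine frame from the p2m entry, byte offset from the gpa */
	return (pte & TOY_PAGE_MASK) | (gpa & ~TOY_PAGE_MASK);
}

int main(void)
{
	unsigned long gpa = (0x10UL << TOY_PAGE_SHIFT) + 0x123;

	printf("gpa %#lx -> mpa %#lx\n", gpa, toy_gpa_to_mpa(gpa));
	return 0;
}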