Diffstat (limited to 'arch/mips/kvm/kvm_tlb.c'):
 -rw-r--r--  arch/mips/kvm/kvm_tlb.c | 61
 1 file changed, 41 insertions(+), 20 deletions(-)
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 89511a9258d3..c777dd36d4a8 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -17,6 +17,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -51,13 +53,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 }
 
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
 }
 
 inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
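
(Aside: every ASID_MASK hunk in this patch follows the same pattern, dropping the function-style ASID_MASK() macro in favour of ANDing with a plain mask constant at the call site. A minimal sketch of the new style, with an illustrative mask value; the real ASID_MASK comes from <asm/mmu_context.h> and may vary by CPU type.)

	/* Sketch only: 0xff is the classic R4k-style ASID field width;
	 * the real ASID_MASK is defined in <asm/mmu_context.h>. */
	#define EXAMPLE_ASID_MASK 0xffUL

	static inline unsigned long example_asid(unsigned long entryhi)
	{
		return entryhi & EXAMPLE_ASID_MASK;	/* new-style call site */
	}
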
@@ -84,7 +86,7 @@ void kvm_mips_dump_host_tlbs(void)
 	old_pagemask = read_c0_pagemask();
 
 	printk("HOST TLBs:\n");
-	printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -169,21 +171,27 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
+	int srcu_idx, err = 0;
 	pfn_t pfn;
 
 	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
-		return;
+		return 0;
 
+	srcu_idx = srcu_read_lock(&kvm->srcu);
 	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
 
 	if (kvm_mips_is_error_pfn(pfn)) {
-		panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+		err = -EFAULT;
+		goto out;
 	}
 
 	kvm->arch.guest_pmap[gfn] = pfn;
-	return;
+out:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	return err;
 }
 
 /* Translate guest KSEG0 addresses to Host PA */
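
The reworked kvm_mips_map_page() above wraps the gfn-to-pfn lookup in an SRCU read-side critical section and reports failure instead of calling panic(). A rough sketch of that pattern, using the generic gfn_to_pfn()/is_error_pfn() helpers as stand-ins for the kvm_mips_* wrappers:

	/* Sketch, not the patch itself: memslot lookups must run between
	 * srcu_read_lock(&kvm->srcu) and the matching unlock so a
	 * concurrent memslot update cannot free the slots mid-lookup. */
	static int example_map_gfn(struct kvm *kvm, gfn_t gfn, pfn_t *out)
	{
		int idx, err = 0;
		pfn_t pfn;

		idx = srcu_read_lock(&kvm->srcu);	/* enter read side  */
		pfn = gfn_to_pfn(kvm, gfn);		/* memslot access   */
		if (is_error_pfn(pfn))
			err = -EFAULT;			/* report, no panic */
		else
			*out = pfn;
		srcu_read_unlock(&kvm->srcu, idx);	/* always balanced  */
		return err;
	}
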
@@ -207,7 +215,10 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 			gva);
 		return KVM_INVALID_PAGE;
 	}
-	kvm_mips_map_page(vcpu->kvm, gfn);
+
+	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+		return KVM_INVALID_ADDR;
+
 	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
 
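With the error now propagated, a caller of the KSEG0 translation helper would be expected to check both sentinels before using the result; a hypothetical call site:

	/* Hypothetical caller: refuse to use a bogus host physical address.
	 * KVM_INVALID_PAGE covers a bad gva, KVM_INVALID_ADDR a map failure. */
	unsigned long hpa = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, gva);

	if (hpa == KVM_INVALID_PAGE || hpa == KVM_INVALID_ADDR)
		return -EFAULT;
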
@@ -310,8 +321,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 	even = !(gfn & 0x1);
 	vaddr = badvaddr & (PAGE_MASK << 1);
 
-	kvm_mips_map_page(vcpu->kvm, gfn);
-	kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+		return -1;
+
+	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+		return -1;
 
 	if (even) {
 		pfn0 = kvm->arch.guest_pmap[gfn];
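
Both map calls are needed because a MIPS TLB entry covers a pair of adjacent pages (EntryLo0 for the even page, EntryLo1 for the odd one), so the handler must back both halves before writing the entry. The pairing arithmetic, sketched without the guest-physical conversion the real code performs:

	/* Sketch of the even/odd pairing used above. */
	gfn_t gfn = badvaddr >> PAGE_SHIFT;		/* faulting page      */
	gfn_t buddy = gfn ^ 0x1;			/* other half of pair */
	int even = !(gfn & 0x1);			/* which EntryLo slot */
	unsigned long vaddr = badvaddr & (PAGE_MASK << 1); /* pair-aligned VA */
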
@@ -389,8 +403,11 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 		pfn0 = 0;
 		pfn1 = 0;
 	} else {
-		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
-		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+			return -1;
+
+		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+			return -1;
 
 		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
 		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
@@ -428,7 +445,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 
 	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
 			index = i;
 			break;
 		}
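
The loop condition is dense; restated, a guest TLB entry hits when the page-mask-adjusted VPN2s agree and the entry is either global or tagged with the same ASID as EntryHi. A hypothetical helper expressing the same predicate:

	/* Hypothetical restatement of the match condition above. */
	static inline int example_tlb_hit(struct kvm_mips_tlb *t,
					  unsigned long entryhi)
	{
		unsigned long vpn2 = TLB_VPN2(*t) & ~t->tlb_mask;
		unsigned long want = (entryhi & VPN2_MASK) & ~t->tlb_mask;

		return vpn2 == want &&
		       (TLB_IS_GLOBAL(*t) ||
			TLB_ASID(*t) == (entryhi & ASID_MASK));
	}
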
@@ -626,7 +643,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
 	unsigned long asid = asid_cache(cpu);
 
-	if (!(ASID_MASK(ASID_INC(asid)))) {
+	if (!((asid += ASID_INC) & ASID_MASK)) {
 		if (cpu_has_vtag_icache) {
 			flush_icache_all();
 		}
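
The rewritten check restores the classic MIPS ASID-generation idiom: the bits of asid above ASID_MASK act as a version number, so when asid += ASID_INC wraps the low field to zero, every ASID in the current generation has been handed out and the TLB (and a virtually tagged I-cache, if present) must be flushed. Worked through with illustrative 8-bit field values:

	/* Sketch with made-up widths; real values come from <asm/mmu_context.h>. */
	#define EX_ASID_INC	0x1UL
	#define EX_ASID_MASK	0xffUL

	unsigned long asid = 0x1ffUL;	/* version 1, ASID field == 0xff  */
	asid += EX_ASID_INC;		/* 0x200: low field wrapped to 0  */
	if (!(asid & EX_ASID_MASK)) {
		/* new generation: flush TLB, hand out ASIDs from 1 again */
	}
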
@@ -804,7 +821,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (!newasid) {
 		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
 		if (current->flags & PF_VCPU) {
-			write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+			write_c0_entryhi(vcpu->arch.
+					 preempt_entryhi & ASID_MASK);
 			ehb();
 		}
 	} else {
@@ -816,11 +834,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		if (current->flags & PF_VCPU) {
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
-				write_c0_entryhi(ASID_MASK(vcpu->arch.
-						 guest_kernel_asid[cpu]));
+				write_c0_entryhi(vcpu->arch.
+						 guest_kernel_asid[cpu] &
+						 ASID_MASK);
 			else
-				write_c0_entryhi(ASID_MASK(vcpu->arch.
-						 guest_user_asid[cpu]));
+				write_c0_entryhi(vcpu->arch.
+						 guest_user_asid[cpu] &
+						 ASID_MASK);
 			ehb();
 		}
 	}
@@ -879,7 +899,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 			kvm_mips_guest_tlb_lookup(vcpu,
 				((unsigned long) opc & VPN2_MASK)
 				|
-				ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+				(kvm_read_c0_guest_entryhi
+				 (cop0) & ASID_MASK));
 		if (index < 0) {
 			kvm_err
 			("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",