Diffstat (limited to 'arch/mips/kvm/tlb.c')
-rw-r--r--  arch/mips/kvm/tlb.c  77
1 file changed, 20 insertions(+), 57 deletions(-)
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 570479c03bdc..a08c43946247 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -35,17 +35,17 @@
 #define PRIx64 "llx"
 
 atomic_t kvm_mips_instance;
-EXPORT_SYMBOL(kvm_mips_instance);
+EXPORT_SYMBOL_GPL(kvm_mips_instance);
 
 /* These function pointers are initialized once the KVM module is loaded */
 kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);
 
 void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);
 
 bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
-EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
@@ -111,7 +111,7 @@ void kvm_mips_dump_host_tlbs(void)
         mtc0_tlbw_hazard();
         local_irq_restore(flags);
 }
-EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
 
 void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 {
@@ -139,7 +139,7 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
                          (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
         }
 }
-EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
 
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
@@ -191,7 +191,7 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 
         return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
-EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
 
 /* XXXKYMA: Must be called with interrupts disabled */
 /* set flush_dcache_mask == 0 if no dcache flush required */
@@ -308,7 +308,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
         return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                        flush_dcache_mask);
 }
-EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
 
 int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                        struct kvm_vcpu *vcpu)
@@ -351,7 +351,7 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 
         return 0;
 }
-EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
 
 int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                          struct kvm_mips_tlb *tlb,
@@ -401,7 +401,7 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
         return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                        tlb->tlb_mask);
 }
-EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
 
 int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 {
@@ -422,7 +422,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 
         return index;
 }
-EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
 
 int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 {
@@ -458,7 +458,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 
         return idx;
 }
-EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);
 
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 {
@@ -505,44 +505,7 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 
         return 0;
 }
-EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
-
-/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
-int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
-{
-        unsigned long flags, old_entryhi;
-
-        if (index >= current_cpu_data.tlbsize)
-                BUG();
-
-        local_irq_save(flags);
-
-        old_entryhi = read_c0_entryhi();
-
-        write_c0_entryhi(UNIQUE_ENTRYHI(index));
-        mtc0_tlbw_hazard();
-
-        write_c0_index(index);
-        mtc0_tlbw_hazard();
-
-        write_c0_entrylo0(0);
-        mtc0_tlbw_hazard();
-
-        write_c0_entrylo1(0);
-        mtc0_tlbw_hazard();
-
-        tlb_write_indexed();
-        mtc0_tlbw_hazard();
-        tlbw_use_hazard();
-
-        write_c0_entryhi(old_entryhi);
-        mtc0_tlbw_hazard();
-        tlbw_use_hazard();
-
-        local_irq_restore(flags);
-
-        return 0;
-}
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
 
 void kvm_mips_flush_host_tlb(int skip_kseg0)
 {
@@ -594,7 +557,7 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
 
         local_irq_restore(flags);
 }
-EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
 
 void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                              struct kvm_vcpu *vcpu)
@@ -642,7 +605,7 @@ void kvm_local_flush_tlb_all(void)
 
         local_irq_restore(flags);
 }
-EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
 
 /**
  * kvm_mips_migrate_count() - Migrate timer.
@@ -673,8 +636,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
         local_irq_save(flags);
 
-        if (((vcpu->arch.
-              guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
+        if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+                                                ASID_VERSION_MASK) {
                 kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                 vcpu->arch.guest_kernel_asid[cpu] =
                         vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -739,7 +702,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         local_irq_restore(flags);
 
 }
-EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
 
 /* ASID can change if another task is scheduled during preemption */
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -768,7 +731,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
         local_irq_restore(flags);
 }
-EXPORT_SYMBOL(kvm_arch_vcpu_put);
+EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);
 
 uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 {
@@ -813,4 +776,4 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 
         return inst;
 }
-EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL_GPL(kvm_get_inst);
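
The change applied throughout this patch is the switch from EXPORT_SYMBOL() to EXPORT_SYMBOL_GPL(), which limits the exported symbols to modules with a GPL-compatible license. A minimal sketch of the pattern follows; the function name my_helper is illustrative only and is not part of this file.

#include <linux/module.h>

/* Hypothetical helper, shown only to illustrate the export macros. */
int my_helper(int x)
{
        return x + 1;
}

/*
 * EXPORT_SYMBOL() would make my_helper visible to any loadable module;
 * EXPORT_SYMBOL_GPL(), as used throughout this patch, restricts it to
 * modules that declare a GPL-compatible MODULE_LICENSE().
 */
EXPORT_SYMBOL_GPL(my_helper);

MODULE_LICENSE("GPL");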