author     Paolo Bonzini <pbonzini@redhat.com>   2017-02-07 12:18:13 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>   2017-02-07 12:18:13 -0500
commit     d9c0e59f92d491a7be5172eaf2d600b4953a0bd4
tree       0823b289a65ae4a3fffa69571fe7d72f51aa2aa3
parent     d5b798c15fb97136dc13ac5a9629f91e88d5d565
parent     12ed1faece3f141c2604b5b3a8377ba71d23ec9d
Merge tag 'kvm_mips_4.11_1' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/kvm-mips into HEAD
KVM: MIPS: GVA/GPA page tables, dirty logging, SYNC_MMU etc
Numerous MIPS KVM fixes, improvements, and features for 4.11, many of
which continue to pave the way for VZ support. The most interesting of
these are:
- Add GVA->HPA page tables for T&E, to cache GVA mappings.
- Generate a fast-path TLB refill exception handler which loads host TLB
entries from the GVA page table, avoiding repeated guest memory
translation and guest TLB lookups.
- Use uaccess macros when T&E needs to access guest memory; combined with
the GVA page tables and the Linux TLB refill handler this improves
robustness against TLB faults and fixes EVA hosts.
- Use BadInstr/BadInstrP registers when available to obtain instruction
encodings after a synchronous trap.
- Add GPA->HPA page tables to replace the inflexible linear array,
allowing for multiple sparsely arranged memory regions.
- Properly implement dirty page logging.
- Add KVM_CAP_SYNC_MMU support so that changes in GPA mappings become
effective in guests even if they are already running, allowing for
copy-on-write, KSM, idle page tracking, swapping, and guest memory
ballooning.
- Add KVM_CAP_READONLY_MEM support, so writes to specified memory
regions are treated as MMIO.
- Implement proper CP0_EBase support in T&E.
- Expose a few more missing CP0 registers to userland.
- Add KVM_CAP_NR_VCPUS and KVM_CAP_MAX_VCPUS support, and allow up to 8
VCPUs to be created in a VM (see the capability-query sketch after this
list).
- Various cleanups and dropping of dead and duplicated code.
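
The new capability bits called out above are discoverable from userland
through the standard KVM_CHECK_EXTENSION ioctl on /dev/kvm. A minimal,
hedged sketch (not part of this patch; the capability constants come from
the generic <linux/kvm.h>, and a zero return simply means the running
kernel lacks that capability):

/* Query the MIPS-relevant capabilities advertised by this series. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	printf("KVM_CAP_NR_VCPUS:     %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS));
	printf("KVM_CAP_MAX_VCPUS:    %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS));
	printf("KVM_CAP_SYNC_MMU:     %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU));
	printf("KVM_CAP_READONLY_MEM: %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_READONLY_MEM));
	return 0;
}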
-rw-r--r--  Documentation/virtual/kvm/api.txt    |   10
-rw-r--r--  arch/mips/include/asm/kvm_host.h     |  183
-rw-r--r--  arch/mips/include/asm/mmu_context.h  |    9
-rw-r--r--  arch/mips/include/asm/pgalloc.h      |   16
-rw-r--r--  arch/mips/include/asm/r4kcache.h     |   55
-rw-r--r--  arch/mips/include/asm/tlbex.h        |   26
-rw-r--r--  arch/mips/include/asm/uasm.h         |    5
-rw-r--r--  arch/mips/include/uapi/asm/kvm.h     |    2
-rw-r--r--  arch/mips/kvm/Kconfig                |    2
-rw-r--r--  arch/mips/kvm/dyntrans.c             |   52
-rw-r--r--  arch/mips/kvm/emulate.c              |  432
-rw-r--r--  arch/mips/kvm/entry.c                |  155
-rw-r--r--  arch/mips/kvm/interrupt.c            |    5
-rw-r--r--  arch/mips/kvm/mips.c                 |  496
-rw-r--r--  arch/mips/kvm/mmu.c                  | 1329
-rw-r--r--  arch/mips/kvm/tlb.c                  |  291
-rw-r--r--  arch/mips/kvm/trap_emul.c            |  734
-rw-r--r--  arch/mips/mm/Makefile                |    2
-rw-r--r--  arch/mips/mm/init.c                  |    1
-rw-r--r--  arch/mips/mm/pgtable-64.c            |    2
-rw-r--r--  arch/mips/mm/pgtable.c               |   25
-rw-r--r--  arch/mips/mm/tlbex.c                 |   38
22 files changed, 2602 insertions(+), 1268 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index f1945d8cbccb..e4f2cdcf78eb 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
@@ -2061,6 +2061,8 @@ registers, find a list below: | |||
2061 | MIPS | KVM_REG_MIPS_LO | 64 | 2061 | MIPS | KVM_REG_MIPS_LO | 64 |
2062 | MIPS | KVM_REG_MIPS_PC | 64 | 2062 | MIPS | KVM_REG_MIPS_PC | 64 |
2063 | MIPS | KVM_REG_MIPS_CP0_INDEX | 32 | 2063 | MIPS | KVM_REG_MIPS_CP0_INDEX | 32 |
2064 | MIPS | KVM_REG_MIPS_CP0_ENTRYLO0 | 64 | ||
2065 | MIPS | KVM_REG_MIPS_CP0_ENTRYLO1 | 64 | ||
2064 | MIPS | KVM_REG_MIPS_CP0_CONTEXT | 64 | 2066 | MIPS | KVM_REG_MIPS_CP0_CONTEXT | 64 |
2065 | MIPS | KVM_REG_MIPS_CP0_USERLOCAL | 64 | 2067 | MIPS | KVM_REG_MIPS_CP0_USERLOCAL | 64 |
2066 | MIPS | KVM_REG_MIPS_CP0_PAGEMASK | 32 | 2068 | MIPS | KVM_REG_MIPS_CP0_PAGEMASK | 32 |
@@ -2071,9 +2073,11 @@ registers, find a list below: | |||
2071 | MIPS | KVM_REG_MIPS_CP0_ENTRYHI | 64 | 2073 | MIPS | KVM_REG_MIPS_CP0_ENTRYHI | 64 |
2072 | MIPS | KVM_REG_MIPS_CP0_COMPARE | 32 | 2074 | MIPS | KVM_REG_MIPS_CP0_COMPARE | 32 |
2073 | MIPS | KVM_REG_MIPS_CP0_STATUS | 32 | 2075 | MIPS | KVM_REG_MIPS_CP0_STATUS | 32 |
2076 | MIPS | KVM_REG_MIPS_CP0_INTCTL | 32 | ||
2074 | MIPS | KVM_REG_MIPS_CP0_CAUSE | 32 | 2077 | MIPS | KVM_REG_MIPS_CP0_CAUSE | 32 |
2075 | MIPS | KVM_REG_MIPS_CP0_EPC | 64 | 2078 | MIPS | KVM_REG_MIPS_CP0_EPC | 64 |
2076 | MIPS | KVM_REG_MIPS_CP0_PRID | 32 | 2079 | MIPS | KVM_REG_MIPS_CP0_PRID | 32 |
2080 | MIPS | KVM_REG_MIPS_CP0_EBASE | 64 | ||
2077 | MIPS | KVM_REG_MIPS_CP0_CONFIG | 32 | 2081 | MIPS | KVM_REG_MIPS_CP0_CONFIG | 32 |
2078 | MIPS | KVM_REG_MIPS_CP0_CONFIG1 | 32 | 2082 | MIPS | KVM_REG_MIPS_CP0_CONFIG1 | 32 |
2079 | MIPS | KVM_REG_MIPS_CP0_CONFIG2 | 32 | 2083 | MIPS | KVM_REG_MIPS_CP0_CONFIG2 | 32 |
@@ -2148,6 +2152,12 @@ patterns depending on whether they're 32-bit or 64-bit registers: | |||
2148 | 0x7020 0000 0001 00 <reg:5> <sel:3> (32-bit) | 2152 | 0x7020 0000 0001 00 <reg:5> <sel:3> (32-bit) |
2149 | 0x7030 0000 0001 00 <reg:5> <sel:3> (64-bit) | 2153 | 0x7030 0000 0001 00 <reg:5> <sel:3> (64-bit) |
2150 | 2154 | ||
2155 | Note: KVM_REG_MIPS_CP0_ENTRYLO0 and KVM_REG_MIPS_CP0_ENTRYLO1 are the MIPS64 | ||
2156 | versions of the EntryLo registers regardless of the word size of the host | ||
2157 | hardware, host kernel, guest, and whether XPA is present in the guest, i.e. | ||
2158 | with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and | ||
2159 | the PFNX field starting at bit 30. | ||
2160 | |||
2151 | MIPS KVM control registers (see above) have the following id bit patterns: | 2161 | MIPS KVM control registers (see above) have the following id bit patterns: |
2152 | 0x7030 0000 0002 <reg:16> | 2162 | 0x7030 0000 0002 <reg:16> |
2153 | 2163 | ||
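
The id bit patterns and the EntryLo note above are enough for userland to
build a one_reg access. A hedged sketch (assumes an already-created VCPU
fd; EntryLo0 is CP0 register 2, select 0; the MIPS_CP0_64_ID macro is
purely illustrative and not defined by this patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* 0x7030 0000 0001 00 <reg:5> <sel:3>  -- 64-bit CP0 register ids */
#define MIPS_CP0_64_ID(reg, sel) \
	(0x7030000000010000ULL | ((uint64_t)(reg) << 3) | (sel))

static int read_entrylo0(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = MIPS_CP0_64_ID(2, 0),	/* CP0_EntryLo0 */
		.addr = (uintptr_t)val,
	};

	/* The value comes back in the MIPS64 EntryLo layout described above. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}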
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index bebec370324f..05e785fc061d 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) | 43 | #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) |
44 | #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) | 44 | #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) |
45 | #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) | 45 | #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) |
46 | #define KVM_REG_MIPS_CP0_INTCTL MIPS_CP0_32(12, 1) | ||
46 | #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) | 47 | #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) |
47 | #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0) | 48 | #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0) |
48 | #define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0) | 49 | #define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0) |
@@ -64,7 +65,7 @@ | |||
64 | #define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7) | 65 | #define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7) |
65 | 66 | ||
66 | 67 | ||
67 | #define KVM_MAX_VCPUS 1 | 68 | #define KVM_MAX_VCPUS 8 |
68 | #define KVM_USER_MEM_SLOTS 8 | 69 | #define KVM_USER_MEM_SLOTS 8 |
69 | /* memory slots that does not exposed to userspace */ | 70 | /* memory slots that does not exposed to userspace */ |
70 | #define KVM_PRIVATE_MEM_SLOTS 0 | 71 | #define KVM_PRIVATE_MEM_SLOTS 0 |
@@ -88,6 +89,7 @@ | |||
88 | 89 | ||
89 | #define KVM_GUEST_KUSEG 0x00000000UL | 90 | #define KVM_GUEST_KUSEG 0x00000000UL |
90 | #define KVM_GUEST_KSEG0 0x40000000UL | 91 | #define KVM_GUEST_KSEG0 0x40000000UL |
92 | #define KVM_GUEST_KSEG1 0x40000000UL | ||
91 | #define KVM_GUEST_KSEG23 0x60000000UL | 93 | #define KVM_GUEST_KSEG23 0x60000000UL |
92 | #define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000) | 94 | #define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000) |
93 | #define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) | 95 | #define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) |
@@ -104,7 +106,6 @@ | |||
104 | #define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) | 106 | #define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) |
105 | 107 | ||
106 | #define KVM_INVALID_PAGE 0xdeadbeef | 108 | #define KVM_INVALID_PAGE 0xdeadbeef |
107 | #define KVM_INVALID_INST 0xdeadbeef | ||
108 | #define KVM_INVALID_ADDR 0xdeadbeef | 109 | #define KVM_INVALID_ADDR 0xdeadbeef |
109 | 110 | ||
110 | /* | 111 | /* |
@@ -121,8 +122,6 @@ static inline bool kvm_is_error_hva(unsigned long addr) | |||
121 | return IS_ERR_VALUE(addr); | 122 | return IS_ERR_VALUE(addr); |
122 | } | 123 | } |
123 | 124 | ||
124 | extern atomic_t kvm_mips_instance; | ||
125 | |||
126 | struct kvm_vm_stat { | 125 | struct kvm_vm_stat { |
127 | ulong remote_tlb_flush; | 126 | ulong remote_tlb_flush; |
128 | }; | 127 | }; |
@@ -156,12 +155,8 @@ struct kvm_arch_memory_slot { | |||
156 | }; | 155 | }; |
157 | 156 | ||
158 | struct kvm_arch { | 157 | struct kvm_arch { |
159 | /* Guest GVA->HPA page table */ | 158 | /* Guest physical mm */ |
160 | unsigned long *guest_pmap; | 159 | struct mm_struct gpa_mm; |
161 | unsigned long guest_pmap_npages; | ||
162 | |||
163 | /* Wired host TLB used for the commpage */ | ||
164 | int commpage_tlb; | ||
165 | }; | 160 | }; |
166 | 161 | ||
167 | #define N_MIPS_COPROC_REGS 32 | 162 | #define N_MIPS_COPROC_REGS 32 |
@@ -233,6 +228,7 @@ enum emulation_result { | |||
233 | EMULATE_FAIL, /* can't emulate this instruction */ | 228 | EMULATE_FAIL, /* can't emulate this instruction */ |
234 | EMULATE_WAIT, /* WAIT instruction */ | 229 | EMULATE_WAIT, /* WAIT instruction */ |
235 | EMULATE_PRIV_FAIL, | 230 | EMULATE_PRIV_FAIL, |
231 | EMULATE_EXCEPT, /* A guest exception has been generated */ | ||
236 | }; | 232 | }; |
237 | 233 | ||
238 | #define mips3_paddr_to_tlbpfn(x) \ | 234 | #define mips3_paddr_to_tlbpfn(x) \ |
@@ -250,6 +246,7 @@ enum emulation_result { | |||
250 | #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) | 246 | #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) |
251 | #define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1) | 247 | #define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1) |
252 | #define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V) | 248 | #define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V) |
249 | #define TLB_IS_DIRTY(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D) | ||
253 | #define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \ | 250 | #define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \ |
254 | ((y) & VPN2_MASK & ~(x).tlb_mask)) | 251 | ((y) & VPN2_MASK & ~(x).tlb_mask)) |
255 | #define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \ | 252 | #define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \ |
@@ -261,6 +258,17 @@ struct kvm_mips_tlb { | |||
261 | long tlb_lo[2]; | 258 | long tlb_lo[2]; |
262 | }; | 259 | }; |
263 | 260 | ||
261 | #define KVM_NR_MEM_OBJS 4 | ||
262 | |||
263 | /* | ||
264 | * We don't want allocation failures within the mmu code, so we preallocate | ||
265 | * enough memory for a single page fault in a cache. | ||
266 | */ | ||
267 | struct kvm_mmu_memory_cache { | ||
268 | int nobjs; | ||
269 | void *objects[KVM_NR_MEM_OBJS]; | ||
270 | }; | ||
271 | |||
264 | #define KVM_MIPS_AUX_FPU 0x1 | 272 | #define KVM_MIPS_AUX_FPU 0x1 |
265 | #define KVM_MIPS_AUX_MSA 0x2 | 273 | #define KVM_MIPS_AUX_MSA 0x2 |
266 | 274 | ||
@@ -275,6 +283,8 @@ struct kvm_vcpu_arch { | |||
275 | unsigned long host_cp0_badvaddr; | 283 | unsigned long host_cp0_badvaddr; |
276 | unsigned long host_cp0_epc; | 284 | unsigned long host_cp0_epc; |
277 | u32 host_cp0_cause; | 285 | u32 host_cp0_cause; |
286 | u32 host_cp0_badinstr; | ||
287 | u32 host_cp0_badinstrp; | ||
278 | 288 | ||
279 | /* GPRS */ | 289 | /* GPRS */ |
280 | unsigned long gprs[32]; | 290 | unsigned long gprs[32]; |
@@ -318,20 +328,18 @@ struct kvm_vcpu_arch { | |||
318 | /* Bitmask of pending exceptions to be cleared */ | 328 | /* Bitmask of pending exceptions to be cleared */ |
319 | unsigned long pending_exceptions_clr; | 329 | unsigned long pending_exceptions_clr; |
320 | 330 | ||
321 | /* Save/Restore the entryhi register when are are preempted/scheduled back in */ | ||
322 | unsigned long preempt_entryhi; | ||
323 | |||
324 | /* S/W Based TLB for guest */ | 331 | /* S/W Based TLB for guest */ |
325 | struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE]; | 332 | struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE]; |
326 | 333 | ||
327 | /* Cached guest kernel/user ASIDs */ | 334 | /* Guest kernel/user [partial] mm */ |
328 | u32 guest_user_asid[NR_CPUS]; | ||
329 | u32 guest_kernel_asid[NR_CPUS]; | ||
330 | struct mm_struct guest_kernel_mm, guest_user_mm; | 335 | struct mm_struct guest_kernel_mm, guest_user_mm; |
331 | 336 | ||
332 | /* Guest ASID of last user mode execution */ | 337 | /* Guest ASID of last user mode execution */ |
333 | unsigned int last_user_gasid; | 338 | unsigned int last_user_gasid; |
334 | 339 | ||
340 | /* Cache some mmu pages needed inside spinlock regions */ | ||
341 | struct kvm_mmu_memory_cache mmu_page_cache; | ||
342 | |||
335 | int last_sched_cpu; | 343 | int last_sched_cpu; |
336 | 344 | ||
337 | /* WAIT executed */ | 345 | /* WAIT executed */ |
@@ -339,14 +347,15 @@ struct kvm_vcpu_arch { | |||
339 | 347 | ||
340 | u8 fpu_enabled; | 348 | u8 fpu_enabled; |
341 | u8 msa_enabled; | 349 | u8 msa_enabled; |
342 | u8 kscratch_enabled; | ||
343 | }; | 350 | }; |
344 | 351 | ||
345 | 352 | ||
346 | #define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0]) | 353 | #define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0]) |
347 | #define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val) | 354 | #define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val) |
348 | #define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0]) | 355 | #define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0]) |
356 | #define kvm_write_c0_guest_entrylo0(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO0][0] = (val)) | ||
349 | #define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0]) | 357 | #define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0]) |
358 | #define kvm_write_c0_guest_entrylo1(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO1][0] = (val)) | ||
350 | #define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0]) | 359 | #define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0]) |
351 | #define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val)) | 360 | #define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val)) |
352 | #define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2]) | 361 | #define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2]) |
@@ -522,9 +531,17 @@ struct kvm_mips_callbacks { | |||
522 | int (*handle_msa_fpe)(struct kvm_vcpu *vcpu); | 531 | int (*handle_msa_fpe)(struct kvm_vcpu *vcpu); |
523 | int (*handle_fpe)(struct kvm_vcpu *vcpu); | 532 | int (*handle_fpe)(struct kvm_vcpu *vcpu); |
524 | int (*handle_msa_disabled)(struct kvm_vcpu *vcpu); | 533 | int (*handle_msa_disabled)(struct kvm_vcpu *vcpu); |
525 | int (*vm_init)(struct kvm *kvm); | ||
526 | int (*vcpu_init)(struct kvm_vcpu *vcpu); | 534 | int (*vcpu_init)(struct kvm_vcpu *vcpu); |
535 | void (*vcpu_uninit)(struct kvm_vcpu *vcpu); | ||
527 | int (*vcpu_setup)(struct kvm_vcpu *vcpu); | 536 | int (*vcpu_setup)(struct kvm_vcpu *vcpu); |
537 | void (*flush_shadow_all)(struct kvm *kvm); | ||
538 | /* | ||
539 | * Must take care of flushing any cached GPA PTEs (e.g. guest entries in | ||
540 | * VZ root TLB, or T&E GVA page tables and corresponding root TLB | ||
541 | * mappings). | ||
542 | */ | ||
543 | void (*flush_shadow_memslot)(struct kvm *kvm, | ||
544 | const struct kvm_memory_slot *slot); | ||
528 | gpa_t (*gva_to_gpa)(gva_t gva); | 545 | gpa_t (*gva_to_gpa)(gva_t gva); |
529 | void (*queue_timer_int)(struct kvm_vcpu *vcpu); | 546 | void (*queue_timer_int)(struct kvm_vcpu *vcpu); |
530 | void (*dequeue_timer_int)(struct kvm_vcpu *vcpu); | 547 | void (*dequeue_timer_int)(struct kvm_vcpu *vcpu); |
@@ -542,8 +559,10 @@ struct kvm_mips_callbacks { | |||
542 | const struct kvm_one_reg *reg, s64 *v); | 559 | const struct kvm_one_reg *reg, s64 *v); |
543 | int (*set_one_reg)(struct kvm_vcpu *vcpu, | 560 | int (*set_one_reg)(struct kvm_vcpu *vcpu, |
544 | const struct kvm_one_reg *reg, s64 v); | 561 | const struct kvm_one_reg *reg, s64 v); |
545 | int (*vcpu_get_regs)(struct kvm_vcpu *vcpu); | 562 | int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); |
546 | int (*vcpu_set_regs)(struct kvm_vcpu *vcpu); | 563 | int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu); |
564 | int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); | ||
565 | void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu); | ||
547 | }; | 566 | }; |
548 | extern struct kvm_mips_callbacks *kvm_mips_callbacks; | 567 | extern struct kvm_mips_callbacks *kvm_mips_callbacks; |
549 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); | 568 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); |
@@ -556,6 +575,7 @@ extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu); | |||
556 | /* Building of entry/exception code */ | 575 | /* Building of entry/exception code */ |
557 | int kvm_mips_entry_setup(void); | 576 | int kvm_mips_entry_setup(void); |
558 | void *kvm_mips_build_vcpu_run(void *addr); | 577 | void *kvm_mips_build_vcpu_run(void *addr); |
578 | void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler); | ||
559 | void *kvm_mips_build_exception(void *addr, void *handler); | 579 | void *kvm_mips_build_exception(void *addr, void *handler); |
560 | void *kvm_mips_build_exit(void *addr); | 580 | void *kvm_mips_build_exit(void *addr); |
561 | 581 | ||
@@ -580,54 +600,125 @@ u32 kvm_get_user_asid(struct kvm_vcpu *vcpu); | |||
580 | u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu); | 600 | u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu); |
581 | 601 | ||
582 | extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr, | 602 | extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr, |
583 | struct kvm_vcpu *vcpu); | 603 | struct kvm_vcpu *vcpu, |
604 | bool write_fault); | ||
584 | 605 | ||
585 | extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | 606 | extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, |
586 | struct kvm_vcpu *vcpu); | 607 | struct kvm_vcpu *vcpu); |
587 | 608 | ||
588 | extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | 609 | extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, |
589 | struct kvm_mips_tlb *tlb); | 610 | struct kvm_mips_tlb *tlb, |
611 | unsigned long gva, | ||
612 | bool write_fault); | ||
590 | 613 | ||
591 | extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, | 614 | extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, |
592 | u32 *opc, | 615 | u32 *opc, |
593 | struct kvm_run *run, | 616 | struct kvm_run *run, |
594 | struct kvm_vcpu *vcpu); | 617 | struct kvm_vcpu *vcpu, |
595 | 618 | bool write_fault); | |
596 | extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause, | ||
597 | u32 *opc, | ||
598 | struct kvm_run *run, | ||
599 | struct kvm_vcpu *vcpu); | ||
600 | 619 | ||
601 | extern void kvm_mips_dump_host_tlbs(void); | 620 | extern void kvm_mips_dump_host_tlbs(void); |
602 | extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); | 621 | extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); |
603 | extern int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | 622 | extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi, |
604 | unsigned long entrylo0, | 623 | bool user, bool kernel); |
605 | unsigned long entrylo1, | ||
606 | int flush_dcache_mask); | ||
607 | extern void kvm_mips_flush_host_tlb(int skip_kseg0); | ||
608 | extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi); | ||
609 | 624 | ||
610 | extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, | 625 | extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, |
611 | unsigned long entryhi); | 626 | unsigned long entryhi); |
612 | extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr); | 627 | |
613 | extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, | 628 | void kvm_mips_suspend_mm(int cpu); |
614 | unsigned long gva); | 629 | void kvm_mips_resume_mm(int cpu); |
615 | extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, | 630 | |
616 | struct kvm_vcpu *vcpu); | 631 | /* MMU handling */ |
617 | extern void kvm_local_flush_tlb_all(void); | 632 | |
618 | extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu); | 633 | /** |
619 | extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | 634 | * enum kvm_mips_flush - Types of MMU flushes. |
620 | extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu); | 635 | * @KMF_USER: Flush guest user virtual memory mappings. |
636 | * Guest USeg only. | ||
637 | * @KMF_KERN: Flush guest kernel virtual memory mappings. | ||
638 | * Guest USeg and KSeg2/3. | ||
639 | * @KMF_GPA: Flush guest physical memory mappings. | ||
640 | * Also includes KSeg0 if KMF_KERN is set. | ||
641 | */ | ||
642 | enum kvm_mips_flush { | ||
643 | KMF_USER = 0x0, | ||
644 | KMF_KERN = 0x1, | ||
645 | KMF_GPA = 0x2, | ||
646 | }; | ||
647 | void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags); | ||
648 | bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); | ||
649 | int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); | ||
650 | pgd_t *kvm_pgd_alloc(void); | ||
651 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); | ||
652 | void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, | ||
653 | bool user); | ||
654 | void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu); | ||
655 | void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu); | ||
656 | |||
657 | enum kvm_mips_fault_result { | ||
658 | KVM_MIPS_MAPPED = 0, | ||
659 | KVM_MIPS_GVA, | ||
660 | KVM_MIPS_GPA, | ||
661 | KVM_MIPS_TLB, | ||
662 | KVM_MIPS_TLBINV, | ||
663 | KVM_MIPS_TLBMOD, | ||
664 | }; | ||
665 | enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, | ||
666 | unsigned long gva, | ||
667 | bool write); | ||
668 | |||
669 | #define KVM_ARCH_WANT_MMU_NOTIFIER | ||
670 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | ||
671 | int kvm_unmap_hva_range(struct kvm *kvm, | ||
672 | unsigned long start, unsigned long end); | ||
673 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | ||
674 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | ||
675 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | ||
676 | |||
677 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | ||
678 | unsigned long address) | ||
679 | { | ||
680 | } | ||
621 | 681 | ||
622 | /* Emulation */ | 682 | /* Emulation */ |
623 | u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu); | 683 | int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); |
624 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); | 684 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); |
685 | int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); | ||
686 | int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); | ||
687 | |||
688 | /** | ||
689 | * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault. | ||
690 | * @vcpu: Virtual CPU. | ||
691 | * | ||
692 | * Returns: Whether the TLBL exception was likely due to an instruction | ||
693 | * fetch fault rather than a data load fault. | ||
694 | */ | ||
695 | static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu) | ||
696 | { | ||
697 | unsigned long badvaddr = vcpu->host_cp0_badvaddr; | ||
698 | unsigned long epc = msk_isa16_mode(vcpu->pc); | ||
699 | u32 cause = vcpu->host_cp0_cause; | ||
700 | |||
701 | if (epc == badvaddr) | ||
702 | return true; | ||
703 | |||
704 | /* | ||
705 | * Branches may be 32-bit or 16-bit instructions. | ||
706 | * This isn't exact, but we don't really support MIPS16 or microMIPS yet | ||
707 | * in KVM anyway. | ||
708 | */ | ||
709 | if ((cause & CAUSEF_BD) && badvaddr - epc <= 4) | ||
710 | return true; | ||
711 | |||
712 | return false; | ||
713 | } | ||
625 | 714 | ||
626 | extern enum emulation_result kvm_mips_emulate_inst(u32 cause, | 715 | extern enum emulation_result kvm_mips_emulate_inst(u32 cause, |
627 | u32 *opc, | 716 | u32 *opc, |
628 | struct kvm_run *run, | 717 | struct kvm_run *run, |
629 | struct kvm_vcpu *vcpu); | 718 | struct kvm_vcpu *vcpu); |
630 | 719 | ||
720 | long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu); | ||
721 | |||
631 | extern enum emulation_result kvm_mips_emulate_syscall(u32 cause, | 722 | extern enum emulation_result kvm_mips_emulate_syscall(u32 cause, |
632 | u32 *opc, | 723 | u32 *opc, |
633 | struct kvm_run *run, | 724 | struct kvm_run *run, |
@@ -761,10 +852,6 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} | |||
761 | static inline void kvm_arch_free_memslot(struct kvm *kvm, | 852 | static inline void kvm_arch_free_memslot(struct kvm *kvm, |
762 | struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} | 853 | struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} |
763 | static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} | 854 | static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} |
764 | static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} | ||
765 | static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | ||
766 | struct kvm_memory_slot *slot) {} | ||
767 | static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} | ||
768 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} | 855 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} |
769 | static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} | 856 | static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} |
770 | static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} | 857 | static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} |
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index ddd57ade1aa8..2abf94f72c0a 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h | |||
@@ -29,9 +29,11 @@ do { \ | |||
29 | } \ | 29 | } \ |
30 | } while (0) | 30 | } while (0) |
31 | 31 | ||
32 | extern void tlbmiss_handler_setup_pgd(unsigned long); | ||
33 | |||
34 | /* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */ | ||
32 | #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ | 35 | #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ |
33 | do { \ | 36 | do { \ |
34 | extern void tlbmiss_handler_setup_pgd(unsigned long); \ | ||
35 | tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \ | 37 | tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \ |
36 | htw_set_pwbase((unsigned long)pgd); \ | 38 | htw_set_pwbase((unsigned long)pgd); \ |
37 | } while (0) | 39 | } while (0) |
@@ -97,17 +99,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
97 | static inline void | 99 | static inline void |
98 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | 100 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) |
99 | { | 101 | { |
100 | extern void kvm_local_flush_tlb_all(void); | ||
101 | unsigned long asid = asid_cache(cpu); | 102 | unsigned long asid = asid_cache(cpu); |
102 | 103 | ||
103 | if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { | 104 | if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { |
104 | if (cpu_has_vtag_icache) | 105 | if (cpu_has_vtag_icache) |
105 | flush_icache_all(); | 106 | flush_icache_all(); |
106 | #ifdef CONFIG_KVM | ||
107 | kvm_local_flush_tlb_all(); /* start new asid cycle */ | ||
108 | #else | ||
109 | local_flush_tlb_all(); /* start new asid cycle */ | 107 | local_flush_tlb_all(); /* start new asid cycle */ |
110 | #endif | ||
111 | if (!asid) /* fix version if needed */ | 108 | if (!asid) /* fix version if needed */ |
112 | asid = asid_first_version(cpu); | 109 | asid = asid_first_version(cpu); |
113 | } | 110 | } |
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index a03e86969f78..a8705f6c8180 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h | |||
@@ -43,21 +43,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
43 | * Initialize a new pgd / pmd table with invalid pointers. | 43 | * Initialize a new pgd / pmd table with invalid pointers. |
44 | */ | 44 | */ |
45 | extern void pgd_init(unsigned long page); | 45 | extern void pgd_init(unsigned long page); |
46 | 46 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | |
47 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | ||
48 | { | ||
49 | pgd_t *ret, *init; | ||
50 | |||
51 | ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); | ||
52 | if (ret) { | ||
53 | init = pgd_offset(&init_mm, 0UL); | ||
54 | pgd_init((unsigned long)ret); | ||
55 | memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, | ||
56 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | ||
57 | } | ||
58 | |||
59 | return ret; | ||
60 | } | ||
61 | 47 | ||
62 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 48 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
63 | { | 49 | { |
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index b42b513007a2..7227c158cbf8 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h | |||
@@ -147,49 +147,64 @@ static inline void flush_scache_line(unsigned long addr) | |||
147 | } | 147 | } |
148 | 148 | ||
149 | #define protected_cache_op(op,addr) \ | 149 | #define protected_cache_op(op,addr) \ |
150 | ({ \ | ||
151 | int __err = 0; \ | ||
150 | __asm__ __volatile__( \ | 152 | __asm__ __volatile__( \ |
151 | " .set push \n" \ | 153 | " .set push \n" \ |
152 | " .set noreorder \n" \ | 154 | " .set noreorder \n" \ |
153 | " .set "MIPS_ISA_ARCH_LEVEL" \n" \ | 155 | " .set "MIPS_ISA_ARCH_LEVEL" \n" \ |
154 | "1: cache %0, (%1) \n" \ | 156 | "1: cache %1, (%2) \n" \ |
155 | "2: .set pop \n" \ | 157 | "2: .set pop \n" \ |
158 | " .section .fixup,\"ax\" \n" \ | ||
159 | "3: li %0, %3 \n" \ | ||
160 | " j 2b \n" \ | ||
161 | " .previous \n" \ | ||
156 | " .section __ex_table,\"a\" \n" \ | 162 | " .section __ex_table,\"a\" \n" \ |
157 | " "STR(PTR)" 1b, 2b \n" \ | 163 | " "STR(PTR)" 1b, 3b \n" \ |
158 | " .previous" \ | 164 | " .previous" \ |
159 | : \ | 165 | : "+r" (__err) \ |
160 | : "i" (op), "r" (addr)) | 166 | : "i" (op), "r" (addr), "i" (-EFAULT)); \ |
167 | __err; \ | ||
168 | }) | ||
169 | |||
161 | 170 | ||
162 | #define protected_cachee_op(op,addr) \ | 171 | #define protected_cachee_op(op,addr) \ |
172 | ({ \ | ||
173 | int __err = 0; \ | ||
163 | __asm__ __volatile__( \ | 174 | __asm__ __volatile__( \ |
164 | " .set push \n" \ | 175 | " .set push \n" \ |
165 | " .set noreorder \n" \ | 176 | " .set noreorder \n" \ |
166 | " .set mips0 \n" \ | 177 | " .set mips0 \n" \ |
167 | " .set eva \n" \ | 178 | " .set eva \n" \ |
168 | "1: cachee %0, (%1) \n" \ | 179 | "1: cachee %1, (%2) \n" \ |
169 | "2: .set pop \n" \ | 180 | "2: .set pop \n" \ |
181 | " .section .fixup,\"ax\" \n" \ | ||
182 | "3: li %0, %3 \n" \ | ||
183 | " j 2b \n" \ | ||
184 | " .previous \n" \ | ||
170 | " .section __ex_table,\"a\" \n" \ | 185 | " .section __ex_table,\"a\" \n" \ |
171 | " "STR(PTR)" 1b, 2b \n" \ | 186 | " "STR(PTR)" 1b, 3b \n" \ |
172 | " .previous" \ | 187 | " .previous" \ |
173 | : \ | 188 | : "+r" (__err) \ |
174 | : "i" (op), "r" (addr)) | 189 | : "i" (op), "r" (addr), "i" (-EFAULT)); \ |
190 | __err; \ | ||
191 | }) | ||
175 | 192 | ||
176 | /* | 193 | /* |
177 | * The next two are for badland addresses like signal trampolines. | 194 | * The next two are for badland addresses like signal trampolines. |
178 | */ | 195 | */ |
179 | static inline void protected_flush_icache_line(unsigned long addr) | 196 | static inline int protected_flush_icache_line(unsigned long addr) |
180 | { | 197 | { |
181 | switch (boot_cpu_type()) { | 198 | switch (boot_cpu_type()) { |
182 | case CPU_LOONGSON2: | 199 | case CPU_LOONGSON2: |
183 | protected_cache_op(Hit_Invalidate_I_Loongson2, addr); | 200 | return protected_cache_op(Hit_Invalidate_I_Loongson2, addr); |
184 | break; | ||
185 | 201 | ||
186 | default: | 202 | default: |
187 | #ifdef CONFIG_EVA | 203 | #ifdef CONFIG_EVA |
188 | protected_cachee_op(Hit_Invalidate_I, addr); | 204 | return protected_cachee_op(Hit_Invalidate_I, addr); |
189 | #else | 205 | #else |
190 | protected_cache_op(Hit_Invalidate_I, addr); | 206 | return protected_cache_op(Hit_Invalidate_I, addr); |
191 | #endif | 207 | #endif |
192 | break; | ||
193 | } | 208 | } |
194 | } | 209 | } |
195 | 210 | ||
@@ -199,21 +214,21 @@ static inline void protected_flush_icache_line(unsigned long addr) | |||
199 | * caches. We're talking about one cacheline unnecessarily getting invalidated | 214 | * caches. We're talking about one cacheline unnecessarily getting invalidated |
200 | * here so the penalty isn't overly hard. | 215 | * here so the penalty isn't overly hard. |
201 | */ | 216 | */ |
202 | static inline void protected_writeback_dcache_line(unsigned long addr) | 217 | static inline int protected_writeback_dcache_line(unsigned long addr) |
203 | { | 218 | { |
204 | #ifdef CONFIG_EVA | 219 | #ifdef CONFIG_EVA |
205 | protected_cachee_op(Hit_Writeback_Inv_D, addr); | 220 | return protected_cachee_op(Hit_Writeback_Inv_D, addr); |
206 | #else | 221 | #else |
207 | protected_cache_op(Hit_Writeback_Inv_D, addr); | 222 | return protected_cache_op(Hit_Writeback_Inv_D, addr); |
208 | #endif | 223 | #endif |
209 | } | 224 | } |
210 | 225 | ||
211 | static inline void protected_writeback_scache_line(unsigned long addr) | 226 | static inline int protected_writeback_scache_line(unsigned long addr) |
212 | { | 227 | { |
213 | #ifdef CONFIG_EVA | 228 | #ifdef CONFIG_EVA |
214 | protected_cachee_op(Hit_Writeback_Inv_SD, addr); | 229 | return protected_cachee_op(Hit_Writeback_Inv_SD, addr); |
215 | #else | 230 | #else |
216 | protected_cache_op(Hit_Writeback_Inv_SD, addr); | 231 | return protected_cache_op(Hit_Writeback_Inv_SD, addr); |
217 | #endif | 232 | #endif |
218 | } | 233 | } |
219 | 234 | ||
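
The protected cache-op helpers above now report -EFAULT instead of
silently eating the fault, so callers can retry or bail out. A small
illustrative sketch of propagating that error (sync_icache_line is
hypothetical, not part of this patch):

#include <asm/r4kcache.h>

/* Write back the D-cache line for addr and invalidate the matching
 * I-cache line, forwarding -EFAULT if the address turns out to be
 * unmapped. */
static int sync_icache_line(unsigned long addr)
{
	int err;

	err = protected_writeback_dcache_line(addr);
	if (err)
		return err;

	return protected_flush_icache_line(addr);
}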
diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h new file mode 100644 index 000000000000..53050e9dd2c9 --- /dev/null +++ b/arch/mips/include/asm/tlbex.h | |||
@@ -0,0 +1,26 @@ | |||
1 | #ifndef __ASM_TLBEX_H | ||
2 | #define __ASM_TLBEX_H | ||
3 | |||
4 | #include <asm/uasm.h> | ||
5 | |||
6 | /* | ||
7 | * Write random or indexed TLB entry, and care about the hazards from | ||
8 | * the preceding mtc0 and for the following eret. | ||
9 | */ | ||
10 | enum tlb_write_entry { | ||
11 | tlb_random, | ||
12 | tlb_indexed | ||
13 | }; | ||
14 | |||
15 | extern int pgd_reg; | ||
16 | |||
17 | void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | ||
18 | unsigned int tmp, unsigned int ptr); | ||
19 | void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr); | ||
20 | void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr); | ||
21 | void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep); | ||
22 | void build_tlb_write_entry(u32 **p, struct uasm_label **l, | ||
23 | struct uasm_reloc **r, | ||
24 | enum tlb_write_entry wmode); | ||
25 | |||
26 | #endif /* __ASM_TLBEX_H */ | ||
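
These newly exported tlbex building blocks are what KVM's entry.c uses to
generate the fast-path GVA TLB refill handler mentioned in the commit
message. A loose sketch of how they compose (registers 26/27 are k0/k1;
build_tiny_refill, the buffer sizes, and the omission of relocation
resolution are illustrative simplifications, not the real KVM handler):

#include <asm/tlbex.h>
#include <asm/uasm.h>

/* Illustrative only: emit "walk the current page tables for BadVAddr,
 * load the PTE pair into EntryLo0/1, write a random TLB entry, eret". */
static u32 *build_tiny_refill(u32 *p)
{
	struct uasm_label labels[8] = { }, *l = labels;
	struct uasm_reloc relocs[8] = { }, *r = relocs;

	build_get_pgde32(&p, 26, 27);		/* 27 (k1) = pgd entry */
	build_get_ptep(&p, 26, 27);		/* 27 (k1) = PTE pair pointer */
	build_update_entries(&p, 26, 27);	/* fill EntryLo0/EntryLo1 */
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_i_eret(&p);			/* leave the refill handler */

	return p;
}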
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index f7929f65f7ca..e9a9e2ade1d2 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h | |||
@@ -9,6 +9,9 @@ | |||
9 | * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. | 9 | * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef __ASM_UASM_H | ||
13 | #define __ASM_UASM_H | ||
14 | |||
12 | #include <linux/types.h> | 15 | #include <linux/types.h> |
13 | 16 | ||
14 | #ifdef CONFIG_EXPORT_UASM | 17 | #ifdef CONFIG_EXPORT_UASM |
@@ -309,3 +312,5 @@ void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); | |||
309 | void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, | 312 | void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, |
310 | unsigned int reg2, int lid); | 313 | unsigned int reg2, int lid); |
311 | void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); | 314 | void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); |
315 | |||
316 | #endif /* __ASM_UASM_H */ | ||
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h index 6985eb59b085..a8a0199bf760 100644 --- a/arch/mips/include/uapi/asm/kvm.h +++ b/arch/mips/include/uapi/asm/kvm.h | |||
@@ -19,6 +19,8 @@ | |||
19 | * Some parts derived from the x86 version of this file. | 19 | * Some parts derived from the x86 version of this file. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #define __KVM_HAVE_READONLY_MEM | ||
23 | |||
22 | /* | 24 | /* |
23 | * for KVM_GET_REGS and KVM_SET_REGS | 25 | * for KVM_GET_REGS and KVM_SET_REGS |
24 | * | 26 | * |
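
Defining __KVM_HAVE_READONLY_MEM here is what lets the generic KVM code
accept KVM_MEM_READONLY memslots on MIPS, so guest writes to such regions
exit to userland as MMIO. A hedged userland sketch (vm_fd and the backing
mapping are assumed to already exist; map_readonly_slot is illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int map_readonly_slot(int vm_fd, uint32_t slot, uint64_t gpa,
			     uint64_t size, void *backing)
{
	struct kvm_userspace_memory_region region = {
		.slot            = slot,
		.flags           = KVM_MEM_READONLY,	/* writes become MMIO exits */
		.guest_phys_addr = gpa,
		.memory_size     = size,
		.userspace_addr  = (uintptr_t)backing,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}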
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index 7c56d6b124d1..65067327db12 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig | |||
@@ -20,7 +20,9 @@ config KVM | |||
20 | select EXPORT_UASM | 20 | select EXPORT_UASM |
21 | select PREEMPT_NOTIFIERS | 21 | select PREEMPT_NOTIFIERS |
22 | select ANON_INODES | 22 | select ANON_INODES |
23 | select KVM_GENERIC_DIRTYLOG_READ_PROTECT | ||
23 | select KVM_MMIO | 24 | select KVM_MMIO |
25 | select MMU_NOTIFIER | ||
24 | select SRCU | 26 | select SRCU |
25 | ---help--- | 27 | ---help--- |
26 | Support for hosting Guest kernels. | 28 | Support for hosting Guest kernels. |
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c index 010cef240688..f8e772564d74 100644 --- a/arch/mips/kvm/dyntrans.c +++ b/arch/mips/kvm/dyntrans.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/highmem.h> | 14 | #include <linux/highmem.h> |
15 | #include <linux/kvm_host.h> | 15 | #include <linux/kvm_host.h> |
16 | #include <linux/uaccess.h> | ||
16 | #include <linux/vmalloc.h> | 17 | #include <linux/vmalloc.h> |
17 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
18 | #include <linux/bootmem.h> | 19 | #include <linux/bootmem.h> |
@@ -29,28 +30,37 @@ | |||
29 | static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc, | 30 | static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc, |
30 | union mips_instruction replace) | 31 | union mips_instruction replace) |
31 | { | 32 | { |
32 | unsigned long paddr, flags; | 33 | unsigned long vaddr = (unsigned long)opc; |
33 | void *vaddr; | 34 | int err; |
34 | 35 | ||
35 | if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG0) { | 36 | retry: |
36 | paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, | 37 | /* The GVA page table is still active so use the Linux TLB handlers */ |
37 | (unsigned long)opc); | 38 | kvm_trap_emul_gva_lockless_begin(vcpu); |
38 | vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr))); | 39 | err = put_user(replace.word, opc); |
39 | vaddr += paddr & ~PAGE_MASK; | 40 | kvm_trap_emul_gva_lockless_end(vcpu); |
40 | memcpy(vaddr, (void *)&replace, sizeof(u32)); | 41 | |
41 | local_flush_icache_range((unsigned long)vaddr, | 42 | if (unlikely(err)) { |
42 | (unsigned long)vaddr + 32); | 43 | /* |
43 | kunmap_atomic(vaddr); | 44 | * We write protect clean pages in GVA page table so normal |
44 | } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { | 45 | * Linux TLB mod handler doesn't silently dirty the page. |
45 | local_irq_save(flags); | 46 | * Its also possible we raced with a GVA invalidation. |
46 | memcpy((void *)opc, (void *)&replace, sizeof(u32)); | 47 | * Try to force the page to become dirty. |
47 | __local_flush_icache_user_range((unsigned long)opc, | 48 | */ |
48 | (unsigned long)opc + 32); | 49 | err = kvm_trap_emul_gva_fault(vcpu, vaddr, true); |
49 | local_irq_restore(flags); | 50 | if (unlikely(err)) { |
50 | } else { | 51 | kvm_info("%s: Address unwriteable: %p\n", |
51 | kvm_err("%s: Invalid address: %p\n", __func__, opc); | 52 | __func__, opc); |
52 | return -EFAULT; | 53 | return -EFAULT; |
54 | } | ||
55 | |||
56 | /* | ||
57 | * Try again. This will likely trigger a TLB refill, which will | ||
58 | * fetch the new dirty entry from the GVA page table, which | ||
59 | * should then succeed. | ||
60 | */ | ||
61 | goto retry; | ||
53 | } | 62 | } |
63 | __local_flush_icache_user_range(vaddr, vaddr + 4); | ||
54 | 64 | ||
55 | return 0; | 65 | return 0; |
56 | } | 66 | } |
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index aa0937423e28..d40cfaad4529 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c | |||
@@ -38,23 +38,25 @@ | |||
38 | * Compute the return address and do emulate branch simulation, if required. | 38 | * Compute the return address and do emulate branch simulation, if required. |
39 | * This function should be called only in branch delay slot active. | 39 | * This function should be called only in branch delay slot active. |
40 | */ | 40 | */ |
41 | unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | 41 | static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc, |
42 | unsigned long instpc) | 42 | unsigned long *out) |
43 | { | 43 | { |
44 | unsigned int dspcontrol; | 44 | unsigned int dspcontrol; |
45 | union mips_instruction insn; | 45 | union mips_instruction insn; |
46 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 46 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
47 | long epc = instpc; | 47 | long epc = instpc; |
48 | long nextpc = KVM_INVALID_INST; | 48 | long nextpc; |
49 | int err; | ||
49 | 50 | ||
50 | if (epc & 3) | 51 | if (epc & 3) { |
51 | goto unaligned; | 52 | kvm_err("%s: unaligned epc\n", __func__); |
53 | return -EINVAL; | ||
54 | } | ||
52 | 55 | ||
53 | /* Read the instruction */ | 56 | /* Read the instruction */ |
54 | insn.word = kvm_get_inst((u32 *) epc, vcpu); | 57 | err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word); |
55 | 58 | if (err) | |
56 | if (insn.word == KVM_INVALID_INST) | 59 | return err; |
57 | return KVM_INVALID_INST; | ||
58 | 60 | ||
59 | switch (insn.i_format.opcode) { | 61 | switch (insn.i_format.opcode) { |
60 | /* jr and jalr are in r_format format. */ | 62 | /* jr and jalr are in r_format format. */ |
@@ -66,6 +68,8 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
66 | case jr_op: | 68 | case jr_op: |
67 | nextpc = arch->gprs[insn.r_format.rs]; | 69 | nextpc = arch->gprs[insn.r_format.rs]; |
68 | break; | 70 | break; |
71 | default: | ||
72 | return -EINVAL; | ||
69 | } | 73 | } |
70 | break; | 74 | break; |
71 | 75 | ||
@@ -114,8 +118,11 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
114 | nextpc = epc; | 118 | nextpc = epc; |
115 | break; | 119 | break; |
116 | case bposge32_op: | 120 | case bposge32_op: |
117 | if (!cpu_has_dsp) | 121 | if (!cpu_has_dsp) { |
118 | goto sigill; | 122 | kvm_err("%s: DSP branch but not DSP ASE\n", |
123 | __func__); | ||
124 | return -EINVAL; | ||
125 | } | ||
119 | 126 | ||
120 | dspcontrol = rddsp(0x01); | 127 | dspcontrol = rddsp(0x01); |
121 | 128 | ||
@@ -125,6 +132,8 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
125 | epc += 8; | 132 | epc += 8; |
126 | nextpc = epc; | 133 | nextpc = epc; |
127 | break; | 134 | break; |
135 | default: | ||
136 | return -EINVAL; | ||
128 | } | 137 | } |
129 | break; | 138 | break; |
130 | 139 | ||
@@ -189,7 +198,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
189 | /* And now the FPA/cp1 branch instructions. */ | 198 | /* And now the FPA/cp1 branch instructions. */ |
190 | case cop1_op: | 199 | case cop1_op: |
191 | kvm_err("%s: unsupported cop1_op\n", __func__); | 200 | kvm_err("%s: unsupported cop1_op\n", __func__); |
192 | break; | 201 | return -EINVAL; |
193 | 202 | ||
194 | #ifdef CONFIG_CPU_MIPSR6 | 203 | #ifdef CONFIG_CPU_MIPSR6 |
195 | /* R6 added the following compact branches with forbidden slots */ | 204 | /* R6 added the following compact branches with forbidden slots */ |
@@ -198,19 +207,19 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
198 | /* only rt == 0 isn't compact branch */ | 207 | /* only rt == 0 isn't compact branch */ |
199 | if (insn.i_format.rt != 0) | 208 | if (insn.i_format.rt != 0) |
200 | goto compact_branch; | 209 | goto compact_branch; |
201 | break; | 210 | return -EINVAL; |
202 | case pop10_op: | 211 | case pop10_op: |
203 | case pop30_op: | 212 | case pop30_op: |
204 | /* only rs == rt == 0 is reserved, rest are compact branches */ | 213 | /* only rs == rt == 0 is reserved, rest are compact branches */ |
205 | if (insn.i_format.rs != 0 || insn.i_format.rt != 0) | 214 | if (insn.i_format.rs != 0 || insn.i_format.rt != 0) |
206 | goto compact_branch; | 215 | goto compact_branch; |
207 | break; | 216 | return -EINVAL; |
208 | case pop66_op: | 217 | case pop66_op: |
209 | case pop76_op: | 218 | case pop76_op: |
210 | /* only rs == 0 isn't compact branch */ | 219 | /* only rs == 0 isn't compact branch */ |
211 | if (insn.i_format.rs != 0) | 220 | if (insn.i_format.rs != 0) |
212 | goto compact_branch; | 221 | goto compact_branch; |
213 | break; | 222 | return -EINVAL; |
214 | compact_branch: | 223 | compact_branch: |
215 | /* | 224 | /* |
216 | * If we've hit an exception on the forbidden slot, then | 225 | * If we've hit an exception on the forbidden slot, then |
@@ -221,42 +230,74 @@ compact_branch: | |||
221 | break; | 230 | break; |
222 | #else | 231 | #else |
223 | compact_branch: | 232 | compact_branch: |
224 | /* Compact branches not supported before R6 */ | 233 | /* Fall through - Compact branches not supported before R6 */ |
225 | break; | ||
226 | #endif | 234 | #endif |
235 | default: | ||
236 | return -EINVAL; | ||
227 | } | 237 | } |
228 | 238 | ||
229 | return nextpc; | 239 | *out = nextpc; |
230 | 240 | return 0; | |
231 | unaligned: | ||
232 | kvm_err("%s: unaligned epc\n", __func__); | ||
233 | return nextpc; | ||
234 | |||
235 | sigill: | ||
236 | kvm_err("%s: DSP branch but not DSP ASE\n", __func__); | ||
237 | return nextpc; | ||
238 | } | 241 | } |
239 | 242 | ||
240 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause) | 243 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause) |
241 | { | 244 | { |
242 | unsigned long branch_pc; | 245 | int err; |
243 | enum emulation_result er = EMULATE_DONE; | ||
244 | 246 | ||
245 | if (cause & CAUSEF_BD) { | 247 | if (cause & CAUSEF_BD) { |
246 | branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); | 248 | err = kvm_compute_return_epc(vcpu, vcpu->arch.pc, |
247 | if (branch_pc == KVM_INVALID_INST) { | 249 | &vcpu->arch.pc); |
248 | er = EMULATE_FAIL; | 250 | if (err) |
249 | } else { | 251 | return EMULATE_FAIL; |
250 | vcpu->arch.pc = branch_pc; | 252 | } else { |
251 | kvm_debug("BD update_pc(): New PC: %#lx\n", | ||
252 | vcpu->arch.pc); | ||
253 | } | ||
254 | } else | ||
255 | vcpu->arch.pc += 4; | 253 | vcpu->arch.pc += 4; |
254 | } | ||
256 | 255 | ||
257 | kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); | 256 | kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); |
258 | 257 | ||
259 | return er; | 258 | return EMULATE_DONE; |
259 | } | ||
260 | |||
261 | /** | ||
262 | * kvm_get_badinstr() - Get bad instruction encoding. | ||
263 | * @opc: Guest pointer to faulting instruction. | ||
264 | * @vcpu: KVM VCPU information. | ||
265 | * | ||
266 | * Gets the instruction encoding of the faulting instruction, using the saved | ||
267 | * BadInstr register value if it exists, otherwise falling back to reading guest | ||
268 | * memory at @opc. | ||
269 | * | ||
270 | * Returns: The instruction encoding of the faulting instruction. | ||
271 | */ | ||
272 | int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) | ||
273 | { | ||
274 | if (cpu_has_badinstr) { | ||
275 | *out = vcpu->arch.host_cp0_badinstr; | ||
276 | return 0; | ||
277 | } else { | ||
278 | return kvm_get_inst(opc, vcpu, out); | ||
279 | } | ||
280 | } | ||
281 | |||
282 | /** | ||
283 | * kvm_get_badinstrp() - Get bad prior instruction encoding. | ||
284 | * @opc: Guest pointer to prior faulting instruction. | ||
285 | * @vcpu: KVM VCPU information. | ||
286 | * | ||
287 | * Gets the instruction encoding of the prior faulting instruction (the branch | ||
288 | * containing the delay slot which faulted), using the saved BadInstrP register | ||
289 | * value if it exists, otherwise falling back to reading guest memory at @opc. | ||
290 | * | ||
291 | * Returns: The instruction encoding of the prior faulting instruction. | ||
292 | */ | ||
293 | int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) | ||
294 | { | ||
295 | if (cpu_has_badinstrp) { | ||
296 | *out = vcpu->arch.host_cp0_badinstrp; | ||
297 | return 0; | ||
298 | } else { | ||
299 | return kvm_get_inst(opc, vcpu, out); | ||
300 | } | ||
260 | } | 301 | } |
261 | 302 | ||
262 | /** | 303 | /** |
@@ -856,22 +897,30 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) | |||
856 | static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, | 897 | static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, |
857 | struct kvm_mips_tlb *tlb) | 898 | struct kvm_mips_tlb *tlb) |
858 | { | 899 | { |
900 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
901 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
859 | int cpu, i; | 902 | int cpu, i; |
860 | bool user; | 903 | bool user; |
861 | 904 | ||
862 | /* No need to flush for entries which are already invalid */ | 905 | /* No need to flush for entries which are already invalid */ |
863 | if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) | 906 | if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) |
864 | return; | 907 | return; |
908 | /* Don't touch host kernel page tables or TLB mappings */ | ||
909 | if ((unsigned long)tlb->tlb_hi > 0x7fffffff) | ||
910 | return; | ||
865 | /* User address space doesn't need flushing for KSeg2/3 changes */ | 911 | /* User address space doesn't need flushing for KSeg2/3 changes */ |
866 | user = tlb->tlb_hi < KVM_GUEST_KSEG0; | 912 | user = tlb->tlb_hi < KVM_GUEST_KSEG0; |
867 | 913 | ||
868 | preempt_disable(); | 914 | preempt_disable(); |
869 | 915 | ||
916 | /* Invalidate page table entries */ | ||
917 | kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user); | ||
918 | |||
870 | /* | 919 | /* |
871 | * Probe the shadow host TLB for the entry being overwritten, if one | 920 | * Probe the shadow host TLB for the entry being overwritten, if one |
872 | * matches, invalidate it | 921 | * matches, invalidate it |
873 | */ | 922 | */ |
874 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | 923 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true); |
875 | 924 | ||
876 | /* Invalidate the whole ASID on other CPUs */ | 925 | /* Invalidate the whole ASID on other CPUs */ |
877 | cpu = smp_processor_id(); | 926 | cpu = smp_processor_id(); |
@@ -879,8 +928,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, | |||
879 | if (i == cpu) | 928 | if (i == cpu) |
880 | continue; | 929 | continue; |
881 | if (user) | 930 | if (user) |
882 | vcpu->arch.guest_user_asid[i] = 0; | 931 | cpu_context(i, user_mm) = 0; |
883 | vcpu->arch.guest_kernel_asid[i] = 0; | 932 | cpu_context(i, kern_mm) = 0; |
884 | } | 933 | } |
885 | 934 | ||
886 | preempt_enable(); | 935 | preempt_enable(); |
@@ -1017,7 +1066,7 @@ unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) | |||
1017 | unsigned int mask = MIPS_CONF_M; | 1066 | unsigned int mask = MIPS_CONF_M; |
1018 | 1067 | ||
1019 | /* KScrExist */ | 1068 | /* KScrExist */ |
1020 | mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16; | 1069 | mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT; |
1021 | 1070 | ||
1022 | return mask; | 1071 | return mask; |
1023 | } | 1072 | } |
@@ -1056,6 +1105,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, | |||
1056 | struct kvm_vcpu *vcpu) | 1105 | struct kvm_vcpu *vcpu) |
1057 | { | 1106 | { |
1058 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1107 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1108 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
1059 | enum emulation_result er = EMULATE_DONE; | 1109 | enum emulation_result er = EMULATE_DONE; |
1060 | u32 rt, rd, sel; | 1110 | u32 rt, rd, sel; |
1061 | unsigned long curr_pc; | 1111 | unsigned long curr_pc; |
@@ -1150,14 +1200,13 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, | |||
1150 | er = EMULATE_FAIL; | 1200 | er = EMULATE_FAIL; |
1151 | break; | 1201 | break; |
1152 | } | 1202 | } |
1153 | #define C0_EBASE_CORE_MASK 0xff | ||
1154 | if ((rd == MIPS_CP0_PRID) && (sel == 1)) { | 1203 | if ((rd == MIPS_CP0_PRID) && (sel == 1)) { |
1155 | /* Preserve CORE number */ | 1204 | /* |
1156 | kvm_change_c0_guest_ebase(cop0, | 1205 | * Preserve core number, and keep the exception |
1157 | ~(C0_EBASE_CORE_MASK), | 1206 | * base in guest KSeg0. |
1207 | */ | ||
1208 | kvm_change_c0_guest_ebase(cop0, 0x1ffff000, | ||
1158 | vcpu->arch.gprs[rt]); | 1209 | vcpu->arch.gprs[rt]); |
1159 | kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", | ||
1160 | kvm_read_c0_guest_ebase(cop0)); | ||
1161 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { | 1210 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { |
1162 | u32 nasid = | 1211 | u32 nasid = |
1163 | vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID; | 1212 | vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID; |
@@ -1169,6 +1218,17 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, | |||
1169 | nasid); | 1218 | nasid); |
1170 | 1219 | ||
1171 | /* | 1220 | /* |
1221 | * Flush entries from the GVA page | ||
1222 | * tables. | ||
1223 | * Guest user page table will get | ||
1224 | * flushed lazily on re-entry to guest | ||
1225 | * user if the guest ASID actually | ||
1226 | * changes. | ||
1227 | */ | ||
1228 | kvm_mips_flush_gva_pt(kern_mm->pgd, | ||
1229 | KMF_KERN); | ||
1230 | |||
1231 | /* | ||
1172 | * Regenerate/invalidate kernel MMU | 1232 | * Regenerate/invalidate kernel MMU |
1173 | * context. | 1233 | * context. |
1174 | * The user MMU context will be | 1234 | * The user MMU context will be |
@@ -1178,13 +1238,10 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, | |||
1178 | */ | 1238 | */ |
1179 | preempt_disable(); | 1239 | preempt_disable(); |
1180 | cpu = smp_processor_id(); | 1240 | cpu = smp_processor_id(); |
1181 | kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, | 1241 | get_new_mmu_context(kern_mm, cpu); |
1182 | cpu, vcpu); | ||
1183 | vcpu->arch.guest_kernel_asid[cpu] = | ||
1184 | vcpu->arch.guest_kernel_mm.context.asid[cpu]; | ||
1185 | for_each_possible_cpu(i) | 1242 | for_each_possible_cpu(i) |
1186 | if (i != cpu) | 1243 | if (i != cpu) |
1187 | vcpu->arch.guest_kernel_asid[i] = 0; | 1244 | cpu_context(i, kern_mm) = 0; |
1188 | preempt_enable(); | 1245 | preempt_enable(); |
1189 | } | 1246 | } |
1190 | kvm_write_c0_guest_entryhi(cop0, | 1247 | kvm_write_c0_guest_entryhi(cop0, |
@@ -1639,12 +1696,56 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, | |||
1639 | return er; | 1696 | return er; |
1640 | } | 1697 | } |
1641 | 1698 | ||
1699 | static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long), | ||
1700 | unsigned long curr_pc, | ||
1701 | unsigned long addr, | ||
1702 | struct kvm_run *run, | ||
1703 | struct kvm_vcpu *vcpu, | ||
1704 | u32 cause) | ||
1705 | { | ||
1706 | int err; | ||
1707 | |||
1708 | for (;;) { | ||
1709 | /* Carefully attempt the cache operation */ | ||
1710 | kvm_trap_emul_gva_lockless_begin(vcpu); | ||
1711 | err = fn(addr); | ||
1712 | kvm_trap_emul_gva_lockless_end(vcpu); | ||
1713 | |||
1714 | if (likely(!err)) | ||
1715 | return EMULATE_DONE; | ||
1716 | |||
1717 | /* | ||
1718 | * Try to handle the fault and retry, maybe we just raced with a | ||
1719 | * GVA invalidation. | ||
1720 | */ | ||
1721 | switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) { | ||
1722 | case KVM_MIPS_GVA: | ||
1723 | case KVM_MIPS_GPA: | ||
1724 | /* bad virtual or physical address */ | ||
1725 | return EMULATE_FAIL; | ||
1726 | case KVM_MIPS_TLB: | ||
1727 | /* no matching guest TLB */ | ||
1728 | vcpu->arch.host_cp0_badvaddr = addr; | ||
1729 | vcpu->arch.pc = curr_pc; | ||
1730 | kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu); | ||
1731 | return EMULATE_EXCEPT; | ||
1732 | case KVM_MIPS_TLBINV: | ||
1733 | /* invalid matching guest TLB */ | ||
1734 | vcpu->arch.host_cp0_badvaddr = addr; | ||
1735 | vcpu->arch.pc = curr_pc; | ||
1736 | kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu); | ||
1737 | return EMULATE_EXCEPT; | ||
1738 | default: | ||
1739 | break; | ||
1740 | }; | ||
1741 | } | ||
1742 | } | ||
1743 | |||
1642 | enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, | 1744 | enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, |
1643 | u32 *opc, u32 cause, | 1745 | u32 *opc, u32 cause, |
1644 | struct kvm_run *run, | 1746 | struct kvm_run *run, |
1645 | struct kvm_vcpu *vcpu) | 1747 | struct kvm_vcpu *vcpu) |
1646 | { | 1748 | { |
1647 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1648 | enum emulation_result er = EMULATE_DONE; | 1749 | enum emulation_result er = EMULATE_DONE; |
1649 | u32 cache, op_inst, op, base; | 1750 | u32 cache, op_inst, op, base; |
1650 | s16 offset; | 1751 | s16 offset; |
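kvm_mips_guest_cache_op() above wraps a faultable cache-line operation in a retry loop: on failure it classifies the fault, bails out for genuinely bad addresses, queues a guest TLB exception for guest TLB miss/invalid cases, and simply retries when it merely raced with a GVA invalidation that has since been repaired. The sketch below models only the retry skeleton, with hypothetical stand-in names (try_op, classify_fault); in the real helper the TLB cases return EMULATE_EXCEPT to the caller rather than looping:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical fault classes, loosely mirroring the KVM_MIPS_* results above. */
enum fault { FAULT_NONE, FAULT_FATAL, FAULT_RETRYABLE };

static int attempts;

static int try_op(void)                 /* stand-in for the protected cache op */
{
        return ++attempts < 3 ? -1 : 0; /* fail twice, then succeed */
}

static enum fault classify_fault(void)  /* stand-in for the fault lookup */
{
        return FAULT_RETRYABLE;         /* pretend we only ever race with invalidation */
}

static bool do_op_with_retry(void)
{
        for (;;) {
                if (!try_op())
                        return true;    /* done */
                switch (classify_fault()) {
                case FAULT_FATAL:
                        return false;   /* give up, like EMULATE_FAIL */
                case FAULT_RETRYABLE:
                default:
                        continue;       /* mapping fixed up, try again */
                }
        }
}

int main(void)
{
        printf("%s after %d attempts\n", do_op_with_retry() ? "ok" : "failed", attempts);
        return 0;
}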
@@ -1701,80 +1802,16 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, | |||
1701 | goto done; | 1802 | goto done; |
1702 | } | 1803 | } |
1703 | 1804 | ||
1704 | preempt_disable(); | ||
1705 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { | ||
1706 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 && | ||
1707 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) { | ||
1708 | kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n", | ||
1709 | __func__, va, vcpu, read_c0_entryhi()); | ||
1710 | er = EMULATE_FAIL; | ||
1711 | preempt_enable(); | ||
1712 | goto done; | ||
1713 | } | ||
1714 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || | ||
1715 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { | ||
1716 | int index; | ||
1717 | |||
1718 | /* If an entry already exists then skip */ | ||
1719 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) | ||
1720 | goto skip_fault; | ||
1721 | |||
1722 | /* | ||
1723 | * If address not in the guest TLB, then give the guest a fault, | ||
1724 | * the resulting handler will do the right thing | ||
1725 | */ | ||
1726 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | | ||
1727 | (kvm_read_c0_guest_entryhi | ||
1728 | (cop0) & KVM_ENTRYHI_ASID)); | ||
1729 | |||
1730 | if (index < 0) { | ||
1731 | vcpu->arch.host_cp0_badvaddr = va; | ||
1732 | vcpu->arch.pc = curr_pc; | ||
1733 | er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, | ||
1734 | vcpu); | ||
1735 | preempt_enable(); | ||
1736 | goto dont_update_pc; | ||
1737 | } else { | ||
1738 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | ||
1739 | /* | ||
1740 | * Check if the entry is valid, if not then setup a TLB | ||
1741 | * invalid exception to the guest | ||
1742 | */ | ||
1743 | if (!TLB_IS_VALID(*tlb, va)) { | ||
1744 | vcpu->arch.host_cp0_badvaddr = va; | ||
1745 | vcpu->arch.pc = curr_pc; | ||
1746 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, | ||
1747 | run, vcpu); | ||
1748 | preempt_enable(); | ||
1749 | goto dont_update_pc; | ||
1750 | } | ||
1751 | /* | ||
1752 | * We fault an entry from the guest tlb to the | ||
1753 | * shadow host TLB | ||
1754 | */ | ||
1755 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) { | ||
1756 | kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", | ||
1757 | __func__, va, index, vcpu, | ||
1758 | read_c0_entryhi()); | ||
1759 | er = EMULATE_FAIL; | ||
1760 | preempt_enable(); | ||
1761 | goto done; | ||
1762 | } | ||
1763 | } | ||
1764 | } else { | ||
1765 | kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | ||
1766 | cache, op, base, arch->gprs[base], offset); | ||
1767 | er = EMULATE_FAIL; | ||
1768 | preempt_enable(); | ||
1769 | goto done; | ||
1770 | |||
1771 | } | ||
1772 | |||
1773 | skip_fault: | ||
1774 | /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ | 1805 | /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ |
1775 | if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) { | 1806 | if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) { |
1776 | flush_dcache_line(va); | 1807 | /* |
1777 | 1808 | * Perform the dcache part of icache synchronisation on the | |
1809 | * guest's behalf. | ||
1810 | */ | ||
1811 | er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, | ||
1812 | curr_pc, va, run, vcpu, cause); | ||
1813 | if (er != EMULATE_DONE) | ||
1814 | goto done; | ||
1778 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1815 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1779 | /* | 1816 | /* |
1780 | * Replace the CACHE instruction, with a SYNCI, not the same, | 1817 | * Replace the CACHE instruction, with a SYNCI, not the same, |
@@ -1783,8 +1820,15 @@ skip_fault: | |||
1783 | kvm_mips_trans_cache_va(inst, opc, vcpu); | 1820 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1784 | #endif | 1821 | #endif |
1785 | } else if (op_inst == Hit_Invalidate_I) { | 1822 | } else if (op_inst == Hit_Invalidate_I) { |
1786 | flush_dcache_line(va); | 1823 | /* Perform the icache synchronisation on the guest's behalf */ |
1787 | flush_icache_line(va); | 1824 | er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, |
1825 | curr_pc, va, run, vcpu, cause); | ||
1826 | if (er != EMULATE_DONE) | ||
1827 | goto done; | ||
1828 | er = kvm_mips_guest_cache_op(protected_flush_icache_line, | ||
1829 | curr_pc, va, run, vcpu, cause); | ||
1830 | if (er != EMULATE_DONE) | ||
1831 | goto done; | ||
1788 | 1832 | ||
1789 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1833 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1790 | /* Replace the CACHE instruction, with a SYNCI */ | 1834 | /* Replace the CACHE instruction, with a SYNCI */ |
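The two-step sequence above, write back the dcache line and then invalidate the icache line, is how MIPS makes freshly written instructions visible to instruction fetch, and the emulation now performs both halves on the guest's behalf through the protected cache-op helpers. As a loose user-space analogue (an illustration, not the KVM path), GCC and Clang expose the equivalent whole-range operation through the __builtin___clear_cache() builtin:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Pretend 'buf' holds freshly generated machine code. */
        static unsigned char buf[64];

        memset(buf, 0, sizeof(buf));

        /*
         * Write back the data cache and invalidate the instruction cache for
         * the range, roughly what Hit_Writeback_Inv_D + Hit_Invalidate_I do
         * per cache line on MIPS.
         */
        __builtin___clear_cache((char *)buf, (char *)buf + sizeof(buf));

        printf("icache synchronised for %zu bytes\n", sizeof(buf));
        return 0;
}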
@@ -1796,17 +1840,13 @@ skip_fault: | |||
1796 | er = EMULATE_FAIL; | 1840 | er = EMULATE_FAIL; |
1797 | } | 1841 | } |
1798 | 1842 | ||
1799 | preempt_enable(); | ||
1800 | done: | 1843 | done: |
1801 | /* Rollback PC only if emulation was unsuccessful */ | 1844 | /* Rollback PC only if emulation was unsuccessful */ |
1802 | if (er == EMULATE_FAIL) | 1845 | if (er == EMULATE_FAIL) |
1803 | vcpu->arch.pc = curr_pc; | 1846 | vcpu->arch.pc = curr_pc; |
1804 | 1847 | /* Guest exception needs guest to resume */ | |
1805 | dont_update_pc: | 1848 | if (er == EMULATE_EXCEPT) |
1806 | /* | 1849 | er = EMULATE_DONE; |
1807 | * This is for exceptions whose emulation updates the PC, so do not | ||
1808 | * overwrite the PC under any circumstances | ||
1809 | */ | ||
1810 | 1850 | ||
1811 | return er; | 1851 | return er; |
1812 | } | 1852 | } |
@@ -1817,12 +1857,14 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, | |||
1817 | { | 1857 | { |
1818 | union mips_instruction inst; | 1858 | union mips_instruction inst; |
1819 | enum emulation_result er = EMULATE_DONE; | 1859 | enum emulation_result er = EMULATE_DONE; |
1860 | int err; | ||
1820 | 1861 | ||
1821 | /* Fetch the instruction. */ | 1862 | /* Fetch the instruction. */ |
1822 | if (cause & CAUSEF_BD) | 1863 | if (cause & CAUSEF_BD) |
1823 | opc += 1; | 1864 | opc += 1; |
1824 | 1865 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | |
1825 | inst.word = kvm_get_inst(opc, vcpu); | 1866 | if (err) |
1867 | return EMULATE_FAIL; | ||
1826 | 1868 | ||
1827 | switch (inst.r_format.opcode) { | 1869 | switch (inst.r_format.opcode) { |
1828 | case cop0_op: | 1870 | case cop0_op: |
@@ -1874,6 +1916,22 @@ unknown: | |||
1874 | return er; | 1916 | return er; |
1875 | } | 1917 | } |
1876 | 1918 | ||
1919 | /** | ||
1920 | * kvm_mips_guest_exception_base() - Find guest exception vector base address. | ||
1921 | * | ||
1922 | * Returns: The base address of the current guest exception vector, taking | ||
1923 | * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account. | ||
1924 | */ | ||
1925 | long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu) | ||
1926 | { | ||
1927 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1928 | |||
1929 | if (kvm_read_c0_guest_status(cop0) & ST0_BEV) | ||
1930 | return KVM_GUEST_CKSEG1ADDR(0x1fc00200); | ||
1931 | else | ||
1932 | return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE; | ||
1933 | } | ||
1934 | |||
1877 | enum emulation_result kvm_mips_emulate_syscall(u32 cause, | 1935 | enum emulation_result kvm_mips_emulate_syscall(u32 cause, |
1878 | u32 *opc, | 1936 | u32 *opc, |
1879 | struct kvm_run *run, | 1937 | struct kvm_run *run, |
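kvm_mips_guest_exception_base(), added just above, selects between the bootstrap vector base (when Guest.Status.BEV is set) and the base field of Guest.CP0_EBase. A stand-alone sketch of that selection follows; note that KVM_GUEST_CKSEG1ADDR() maps into the guest's remapped segment layout, while the sketch uses the architectural CKSEG1 base purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define ST0_BEV        (1u << 22)
#define EBASE_BASEMASK 0xfffff000u          /* EBase base field, bits 31:12 */
#define CKSEG1ADDR(pa) (0xa0000000u | (pa)) /* uncached, unmapped segment */

/* User-space mirror of the base selection. */
static uint32_t exception_base(uint32_t status, uint32_t ebase)
{
        if (status & ST0_BEV)
                return CKSEG1ADDR(0x1fc00200);  /* bootstrap vectors */
        return ebase & EBASE_BASEMASK;
}

int main(void)
{
        printf("BEV=1: 0x%08x\n", exception_base(ST0_BEV, 0x80000000));
        printf("BEV=0: 0x%08x\n", exception_base(0, 0x80001003));
        /* general exceptions are then delivered at base + 0x180 */
        return 0;
}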
@@ -1899,7 +1957,7 @@ enum emulation_result kvm_mips_emulate_syscall(u32 cause, | |||
1899 | (EXCCODE_SYS << CAUSEB_EXCCODE)); | 1957 | (EXCCODE_SYS << CAUSEB_EXCCODE)); |
1900 | 1958 | ||
1901 | /* Set PC to the exception entry point */ | 1959 | /* Set PC to the exception entry point */ |
1902 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1960 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
1903 | 1961 | ||
1904 | } else { | 1962 | } else { |
1905 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); | 1963 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); |
@@ -1933,13 +1991,13 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, | |||
1933 | arch->pc); | 1991 | arch->pc); |
1934 | 1992 | ||
1935 | /* set pc to the exception entry point */ | 1993 | /* set pc to the exception entry point */ |
1936 | arch->pc = KVM_GUEST_KSEG0 + 0x0; | 1994 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; |
1937 | 1995 | ||
1938 | } else { | 1996 | } else { |
1939 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | 1997 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", |
1940 | arch->pc); | 1998 | arch->pc); |
1941 | 1999 | ||
1942 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2000 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
1943 | } | 2001 | } |
1944 | 2002 | ||
1945 | kvm_change_c0_guest_cause(cop0, (0xff), | 2003 | kvm_change_c0_guest_cause(cop0, (0xff), |
@@ -1949,8 +2007,6 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, | |||
1949 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 2007 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
1950 | /* XXXKYMA: is the context register used by linux??? */ | 2008 | /* XXXKYMA: is the context register used by linux??? */ |
1951 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 2009 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
1952 | /* Blow away the shadow host TLBs */ | ||
1953 | kvm_mips_flush_host_tlb(1); | ||
1954 | 2010 | ||
1955 | return EMULATE_DONE; | 2011 | return EMULATE_DONE; |
1956 | } | 2012 | } |
@@ -1978,16 +2034,14 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, | |||
1978 | 2034 | ||
1979 | kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", | 2035 | kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", |
1980 | arch->pc); | 2036 | arch->pc); |
1981 | |||
1982 | /* set pc to the exception entry point */ | ||
1983 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
1984 | |||
1985 | } else { | 2037 | } else { |
1986 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | 2038 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", |
1987 | arch->pc); | 2039 | arch->pc); |
1988 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
1989 | } | 2040 | } |
1990 | 2041 | ||
2042 | /* set pc to the exception entry point */ | ||
2043 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2044 | |||
1991 | kvm_change_c0_guest_cause(cop0, (0xff), | 2045 | kvm_change_c0_guest_cause(cop0, (0xff), |
1992 | (EXCCODE_TLBL << CAUSEB_EXCCODE)); | 2046 | (EXCCODE_TLBL << CAUSEB_EXCCODE)); |
1993 | 2047 | ||
@@ -1995,8 +2049,6 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, | |||
1995 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 2049 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
1996 | /* XXXKYMA: is the context register used by linux??? */ | 2050 | /* XXXKYMA: is the context register used by linux??? */ |
1997 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 2051 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
1998 | /* Blow away the shadow host TLBs */ | ||
1999 | kvm_mips_flush_host_tlb(1); | ||
2000 | 2052 | ||
2001 | return EMULATE_DONE; | 2053 | return EMULATE_DONE; |
2002 | } | 2054 | } |
@@ -2025,11 +2077,11 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, | |||
2025 | arch->pc); | 2077 | arch->pc); |
2026 | 2078 | ||
2027 | /* Set PC to the exception entry point */ | 2079 | /* Set PC to the exception entry point */ |
2028 | arch->pc = KVM_GUEST_KSEG0 + 0x0; | 2080 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; |
2029 | } else { | 2081 | } else { |
2030 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | 2082 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", |
2031 | arch->pc); | 2083 | arch->pc); |
2032 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2084 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2033 | } | 2085 | } |
2034 | 2086 | ||
2035 | kvm_change_c0_guest_cause(cop0, (0xff), | 2087 | kvm_change_c0_guest_cause(cop0, (0xff), |
@@ -2039,8 +2091,6 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, | |||
2039 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 2091 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
2040 | /* XXXKYMA: is the context register used by linux??? */ | 2092 | /* XXXKYMA: is the context register used by linux??? */ |
2041 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 2093 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
2042 | /* Blow away the shadow host TLBs */ | ||
2043 | kvm_mips_flush_host_tlb(1); | ||
2044 | 2094 | ||
2045 | return EMULATE_DONE; | 2095 | return EMULATE_DONE; |
2046 | } | 2096 | } |
@@ -2067,15 +2117,14 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, | |||
2067 | 2117 | ||
2068 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", | 2118 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", |
2069 | arch->pc); | 2119 | arch->pc); |
2070 | |||
2071 | /* Set PC to the exception entry point */ | ||
2072 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
2073 | } else { | 2120 | } else { |
2074 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | 2121 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", |
2075 | arch->pc); | 2122 | arch->pc); |
2076 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
2077 | } | 2123 | } |
2078 | 2124 | ||
2125 | /* Set PC to the exception entry point */ | ||
2126 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2127 | |||
2079 | kvm_change_c0_guest_cause(cop0, (0xff), | 2128 | kvm_change_c0_guest_cause(cop0, (0xff), |
2080 | (EXCCODE_TLBS << CAUSEB_EXCCODE)); | 2129 | (EXCCODE_TLBS << CAUSEB_EXCCODE)); |
2081 | 2130 | ||
@@ -2083,41 +2132,10 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, | |||
2083 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 2132 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
2084 | /* XXXKYMA: is the context register used by linux??? */ | 2133 | /* XXXKYMA: is the context register used by linux??? */ |
2085 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 2134 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
2086 | /* Blow away the shadow host TLBs */ | ||
2087 | kvm_mips_flush_host_tlb(1); | ||
2088 | 2135 | ||
2089 | return EMULATE_DONE; | 2136 | return EMULATE_DONE; |
2090 | } | 2137 | } |
2091 | 2138 | ||
2092 | /* TLBMOD: store into address matching TLB with Dirty bit off */ | ||
2093 | enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc, | ||
2094 | struct kvm_run *run, | ||
2095 | struct kvm_vcpu *vcpu) | ||
2096 | { | ||
2097 | enum emulation_result er = EMULATE_DONE; | ||
2098 | #ifdef DEBUG | ||
2099 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2100 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | ||
2101 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); | ||
2102 | int index; | ||
2103 | |||
2104 | /* If address not in the guest TLB, then we are in trouble */ | ||
2105 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | ||
2106 | if (index < 0) { | ||
2107 | /* XXXKYMA Invalidate and retry */ | ||
2108 | kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); | ||
2109 | kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", | ||
2110 | __func__, entryhi); | ||
2111 | kvm_mips_dump_guest_tlbs(vcpu); | ||
2112 | kvm_mips_dump_host_tlbs(); | ||
2113 | return EMULATE_FAIL; | ||
2114 | } | ||
2115 | #endif | ||
2116 | |||
2117 | er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); | ||
2118 | return er; | ||
2119 | } | ||
2120 | |||
2121 | enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, | 2139 | enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, |
2122 | u32 *opc, | 2140 | u32 *opc, |
2123 | struct kvm_run *run, | 2141 | struct kvm_run *run, |
@@ -2140,14 +2158,13 @@ enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, | |||
2140 | 2158 | ||
2141 | kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", | 2159 | kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", |
2142 | arch->pc); | 2160 | arch->pc); |
2143 | |||
2144 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
2145 | } else { | 2161 | } else { |
2146 | kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", | 2162 | kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", |
2147 | arch->pc); | 2163 | arch->pc); |
2148 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | ||
2149 | } | 2164 | } |
2150 | 2165 | ||
2166 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2167 | |||
2151 | kvm_change_c0_guest_cause(cop0, (0xff), | 2168 | kvm_change_c0_guest_cause(cop0, (0xff), |
2152 | (EXCCODE_MOD << CAUSEB_EXCCODE)); | 2169 | (EXCCODE_MOD << CAUSEB_EXCCODE)); |
2153 | 2170 | ||
@@ -2155,8 +2172,6 @@ enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, | |||
2155 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 2172 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
2156 | /* XXXKYMA: is the context register used by linux??? */ | 2173 | /* XXXKYMA: is the context register used by linux??? */ |
2157 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 2174 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
2158 | /* Blow away the shadow host TLBs */ | ||
2159 | kvm_mips_flush_host_tlb(1); | ||
2160 | 2175 | ||
2161 | return EMULATE_DONE; | 2176 | return EMULATE_DONE; |
2162 | } | 2177 | } |
@@ -2181,7 +2196,7 @@ enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, | |||
2181 | 2196 | ||
2182 | } | 2197 | } |
2183 | 2198 | ||
2184 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2199 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2185 | 2200 | ||
2186 | kvm_change_c0_guest_cause(cop0, (0xff), | 2201 | kvm_change_c0_guest_cause(cop0, (0xff), |
2187 | (EXCCODE_CPU << CAUSEB_EXCCODE)); | 2202 | (EXCCODE_CPU << CAUSEB_EXCCODE)); |
@@ -2215,7 +2230,7 @@ enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, | |||
2215 | (EXCCODE_RI << CAUSEB_EXCCODE)); | 2230 | (EXCCODE_RI << CAUSEB_EXCCODE)); |
2216 | 2231 | ||
2217 | /* Set PC to the exception entry point */ | 2232 | /* Set PC to the exception entry point */ |
2218 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2233 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2219 | 2234 | ||
2220 | } else { | 2235 | } else { |
2221 | kvm_err("Trying to deliver RI when EXL is already set\n"); | 2236 | kvm_err("Trying to deliver RI when EXL is already set\n"); |
@@ -2250,7 +2265,7 @@ enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, | |||
2250 | (EXCCODE_BP << CAUSEB_EXCCODE)); | 2265 | (EXCCODE_BP << CAUSEB_EXCCODE)); |
2251 | 2266 | ||
2252 | /* Set PC to the exception entry point */ | 2267 | /* Set PC to the exception entry point */ |
2253 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2268 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2254 | 2269 | ||
2255 | } else { | 2270 | } else { |
2256 | kvm_err("Trying to deliver BP when EXL is already set\n"); | 2271 | kvm_err("Trying to deliver BP when EXL is already set\n"); |
@@ -2285,7 +2300,7 @@ enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, | |||
2285 | (EXCCODE_TR << CAUSEB_EXCCODE)); | 2300 | (EXCCODE_TR << CAUSEB_EXCCODE)); |
2286 | 2301 | ||
2287 | /* Set PC to the exception entry point */ | 2302 | /* Set PC to the exception entry point */ |
2288 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2303 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2289 | 2304 | ||
2290 | } else { | 2305 | } else { |
2291 | kvm_err("Trying to deliver TRAP when EXL is already set\n"); | 2306 | kvm_err("Trying to deliver TRAP when EXL is already set\n"); |
@@ -2320,7 +2335,7 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, | |||
2320 | (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); | 2335 | (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); |
2321 | 2336 | ||
2322 | /* Set PC to the exception entry point */ | 2337 | /* Set PC to the exception entry point */ |
2323 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2338 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2324 | 2339 | ||
2325 | } else { | 2340 | } else { |
2326 | kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); | 2341 | kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); |
@@ -2355,7 +2370,7 @@ enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, | |||
2355 | (EXCCODE_FPE << CAUSEB_EXCCODE)); | 2370 | (EXCCODE_FPE << CAUSEB_EXCCODE)); |
2356 | 2371 | ||
2357 | /* Set PC to the exception entry point */ | 2372 | /* Set PC to the exception entry point */ |
2358 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2373 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2359 | 2374 | ||
2360 | } else { | 2375 | } else { |
2361 | kvm_err("Trying to deliver FPE when EXL is already set\n"); | 2376 | kvm_err("Trying to deliver FPE when EXL is already set\n"); |
@@ -2390,7 +2405,7 @@ enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, | |||
2390 | (EXCCODE_MSADIS << CAUSEB_EXCCODE)); | 2405 | (EXCCODE_MSADIS << CAUSEB_EXCCODE)); |
2391 | 2406 | ||
2392 | /* Set PC to the exception entry point */ | 2407 | /* Set PC to the exception entry point */ |
2393 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2408 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2394 | 2409 | ||
2395 | } else { | 2410 | } else { |
2396 | kvm_err("Trying to deliver MSADIS when EXL is already set\n"); | 2411 | kvm_err("Trying to deliver MSADIS when EXL is already set\n"); |
@@ -2409,6 +2424,7 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, | |||
2409 | enum emulation_result er = EMULATE_DONE; | 2424 | enum emulation_result er = EMULATE_DONE; |
2410 | unsigned long curr_pc; | 2425 | unsigned long curr_pc; |
2411 | union mips_instruction inst; | 2426 | union mips_instruction inst; |
2427 | int err; | ||
2412 | 2428 | ||
2413 | /* | 2429 | /* |
2414 | * Update PC and hold onto current PC in case there is | 2430 | * Update PC and hold onto current PC in case there is |
@@ -2422,11 +2438,9 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, | |||
2422 | /* Fetch the instruction. */ | 2438 | /* Fetch the instruction. */ |
2423 | if (cause & CAUSEF_BD) | 2439 | if (cause & CAUSEF_BD) |
2424 | opc += 1; | 2440 | opc += 1; |
2425 | 2441 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | |
2426 | inst.word = kvm_get_inst(opc, vcpu); | 2442 | if (err) { |
2427 | 2443 | kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); | |
2428 | if (inst.word == KVM_INVALID_INST) { | ||
2429 | kvm_err("%s: Cannot get inst @ %p\n", __func__, opc); | ||
2430 | return EMULATE_FAIL; | 2444 | return EMULATE_FAIL; |
2431 | } | 2445 | } |
2432 | 2446 | ||
@@ -2557,7 +2571,7 @@ static enum emulation_result kvm_mips_emulate_exc(u32 cause, | |||
2557 | (exccode << CAUSEB_EXCCODE)); | 2571 | (exccode << CAUSEB_EXCCODE)); |
2558 | 2572 | ||
2559 | /* Set PC to the exception entry point */ | 2573 | /* Set PC to the exception entry point */ |
2560 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2574 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2561 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 2575 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
2562 | 2576 | ||
2563 | kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", | 2577 | kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", |
@@ -2670,7 +2684,8 @@ enum emulation_result kvm_mips_check_privilege(u32 cause, | |||
2670 | enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, | 2684 | enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, |
2671 | u32 *opc, | 2685 | u32 *opc, |
2672 | struct kvm_run *run, | 2686 | struct kvm_run *run, |
2673 | struct kvm_vcpu *vcpu) | 2687 | struct kvm_vcpu *vcpu, |
2688 | bool write_fault) | ||
2674 | { | 2689 | { |
2675 | enum emulation_result er = EMULATE_DONE; | 2690 | enum emulation_result er = EMULATE_DONE; |
2676 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2691 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
@@ -2726,7 +2741,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, | |||
2726 | * OK we have a Guest TLB entry, now inject it into the | 2741 | * OK we have a Guest TLB entry, now inject it into the |
2727 | * shadow host TLB | 2742 | * shadow host TLB |
2728 | */ | 2743 | */ |
2729 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) { | 2744 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, |
2745 | write_fault)) { | ||
2730 | kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", | 2746 | kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", |
2731 | __func__, va, index, vcpu, | 2747 | __func__, va, index, vcpu, |
2732 | read_c0_entryhi()); | 2748 | read_c0_entryhi()); |
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c index e92fb190e2d6..c5b254c4d0da 100644 --- a/arch/mips/kvm/entry.c +++ b/arch/mips/kvm/entry.c | |||
@@ -12,8 +12,11 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/kvm_host.h> | 14 | #include <linux/kvm_host.h> |
15 | #include <linux/log2.h> | ||
16 | #include <asm/mmu_context.h> | ||
15 | #include <asm/msa.h> | 17 | #include <asm/msa.h> |
16 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
19 | #include <asm/tlbex.h> | ||
17 | #include <asm/uasm.h> | 20 | #include <asm/uasm.h> |
18 | 21 | ||
19 | /* Register names */ | 22 | /* Register names */ |
@@ -50,6 +53,8 @@ | |||
50 | /* Some CP0 registers */ | 53 | /* Some CP0 registers */ |
51 | #define C0_HWRENA 7, 0 | 54 | #define C0_HWRENA 7, 0 |
52 | #define C0_BADVADDR 8, 0 | 55 | #define C0_BADVADDR 8, 0 |
56 | #define C0_BADINSTR 8, 1 | ||
57 | #define C0_BADINSTRP 8, 2 | ||
53 | #define C0_ENTRYHI 10, 0 | 58 | #define C0_ENTRYHI 10, 0 |
54 | #define C0_STATUS 12, 0 | 59 | #define C0_STATUS 12, 0 |
55 | #define C0_CAUSE 13, 0 | 60 | #define C0_CAUSE 13, 0 |
@@ -89,6 +94,21 @@ static void *kvm_mips_build_ret_from_exit(void *addr); | |||
89 | static void *kvm_mips_build_ret_to_guest(void *addr); | 94 | static void *kvm_mips_build_ret_to_guest(void *addr); |
90 | static void *kvm_mips_build_ret_to_host(void *addr); | 95 | static void *kvm_mips_build_ret_to_host(void *addr); |
91 | 96 | ||
97 | /* | ||
98 | * The version of this function in tlbex.c uses current_cpu_type(), but for KVM | ||
99 | * we assume symmetry. | ||
100 | */ | ||
101 | static int c0_kscratch(void) | ||
102 | { | ||
103 | switch (boot_cpu_type()) { | ||
104 | case CPU_XLP: | ||
105 | case CPU_XLR: | ||
106 | return 22; | ||
107 | default: | ||
108 | return 31; | ||
109 | } | ||
110 | } | ||
111 | |||
92 | /** | 112 | /** |
93 | * kvm_mips_entry_setup() - Perform global setup for entry code. | 113 | * kvm_mips_entry_setup() - Perform global setup for entry code. |
94 | * | 114 | * |
@@ -103,18 +123,21 @@ int kvm_mips_entry_setup(void) | |||
103 | * We prefer to use KScratchN registers if they are available over the | 123 | * We prefer to use KScratchN registers if they are available over the |
104 | * defaults above, which may not work on all cores. | 124 | * defaults above, which may not work on all cores. |
105 | */ | 125 | */ |
106 | unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc; | 126 | unsigned int kscratch_mask = cpu_data[0].kscratch_mask; |
127 | |||
128 | if (pgd_reg != -1) | ||
129 | kscratch_mask &= ~BIT(pgd_reg); | ||
107 | 130 | ||
108 | /* Pick a scratch register for storing VCPU */ | 131 | /* Pick a scratch register for storing VCPU */ |
109 | if (kscratch_mask) { | 132 | if (kscratch_mask) { |
110 | scratch_vcpu[0] = 31; | 133 | scratch_vcpu[0] = c0_kscratch(); |
111 | scratch_vcpu[1] = ffs(kscratch_mask) - 1; | 134 | scratch_vcpu[1] = ffs(kscratch_mask) - 1; |
112 | kscratch_mask &= ~BIT(scratch_vcpu[1]); | 135 | kscratch_mask &= ~BIT(scratch_vcpu[1]); |
113 | } | 136 | } |
114 | 137 | ||
115 | /* Pick a scratch register to use as a temp for saving state */ | 138 | /* Pick a scratch register to use as a temp for saving state */ |
116 | if (kscratch_mask) { | 139 | if (kscratch_mask) { |
117 | scratch_tmp[0] = 31; | 140 | scratch_tmp[0] = c0_kscratch(); |
118 | scratch_tmp[1] = ffs(kscratch_mask) - 1; | 141 | scratch_tmp[1] = ffs(kscratch_mask) - 1; |
119 | kscratch_mask &= ~BIT(scratch_tmp[1]); | 142 | kscratch_mask &= ~BIT(scratch_tmp[1]); |
120 | } | 143 | } |
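kvm_mips_entry_setup() above claims its two scratch registers by repeatedly taking the lowest set bit of the KScratch availability mask, after first reserving the select already used for the Linux pgd pointer (pgd_reg). A stand-alone sketch of that bit picking, with an arbitrary example mask:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int kscratch_mask = 0x2c;      /* say CP0 $31 selects 2, 3 and 5 are usable */
        int vcpu_reg = -1, tmp_reg = -1;

        if (kscratch_mask) {                    /* pick a register to hold the VCPU pointer */
                vcpu_reg = ffs(kscratch_mask) - 1;
                kscratch_mask &= ~(1u << vcpu_reg);
        }
        if (kscratch_mask) {                    /* and another as a temporary */
                tmp_reg = ffs(kscratch_mask) - 1;
                kscratch_mask &= ~(1u << tmp_reg);
        }

        printf("vcpu scratch sel %d, tmp scratch sel %d\n", vcpu_reg, tmp_reg);
        return 0;
}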
@@ -130,7 +153,7 @@ static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp, | |||
130 | UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame); | 153 | UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame); |
131 | 154 | ||
132 | /* Save the temp scratch register value in cp0_cause of stack frame */ | 155 | /* Save the temp scratch register value in cp0_cause of stack frame */ |
133 | if (scratch_tmp[0] == 31) { | 156 | if (scratch_tmp[0] == c0_kscratch()) { |
134 | UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]); | 157 | UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]); |
135 | UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame); | 158 | UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame); |
136 | } | 159 | } |
@@ -146,7 +169,7 @@ static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp, | |||
146 | UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame); | 169 | UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame); |
147 | UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]); | 170 | UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]); |
148 | 171 | ||
149 | if (scratch_tmp[0] == 31) { | 172 | if (scratch_tmp[0] == c0_kscratch()) { |
150 | UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame); | 173 | UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame); |
151 | UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]); | 174 | UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]); |
152 | } | 175 | } |
@@ -286,23 +309,26 @@ static void *kvm_mips_build_enter_guest(void *addr) | |||
286 | uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL); | 309 | uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL); |
287 | uasm_i_xori(&p, T0, T0, KSU_USER); | 310 | uasm_i_xori(&p, T0, T0, KSU_USER); |
288 | uasm_il_bnez(&p, &r, T0, label_kernel_asid); | 311 | uasm_il_bnez(&p, &r, T0, label_kernel_asid); |
289 | UASM_i_ADDIU(&p, T1, K1, | 312 | UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, |
290 | offsetof(struct kvm_vcpu_arch, guest_kernel_asid)); | 313 | guest_kernel_mm.context.asid)); |
291 | /* else user */ | 314 | /* else user */ |
292 | UASM_i_ADDIU(&p, T1, K1, | 315 | UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, |
293 | offsetof(struct kvm_vcpu_arch, guest_user_asid)); | 316 | guest_user_mm.context.asid)); |
294 | uasm_l_kernel_asid(&l, p); | 317 | uasm_l_kernel_asid(&l, p); |
295 | 318 | ||
296 | /* t1: contains the base of the ASID array, need to get the cpu id */ | 319 | /* t1: contains the base of the ASID array, need to get the cpu id */ |
297 | /* smp_processor_id */ | 320 | /* smp_processor_id */ |
298 | uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP); | 321 | uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP); |
299 | /* x4 */ | 322 | /* index the ASID array */ |
300 | uasm_i_sll(&p, T2, T2, 2); | 323 | uasm_i_sll(&p, T2, T2, ilog2(sizeof(long))); |
301 | UASM_i_ADDU(&p, T3, T1, T2); | 324 | UASM_i_ADDU(&p, T3, T1, T2); |
302 | uasm_i_lw(&p, K0, 0, T3); | 325 | UASM_i_LW(&p, K0, 0, T3); |
303 | #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE | 326 | #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE |
304 | /* x sizeof(struct cpuinfo_mips)/4 */ | 327 | /* |
305 | uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4); | 328 | * reuse ASID array offset |
329 | * cpuinfo_mips is a multiple of sizeof(long) | ||
330 | */ | ||
331 | uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long)); | ||
306 | uasm_i_mul(&p, T2, T2, T3); | 332 | uasm_i_mul(&p, T2, T2, T3); |
307 | 333 | ||
308 | UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask); | 334 | UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask); |
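The shift change above (a fixed 2 becomes ilog2(sizeof(long))) matters because the ASID values now come from mm->context.asid[], an array of unsigned long, so the CPU index has to be scaled by 8 on 64-bit kernels rather than 4. An equivalent C view of the address arithmetic the generated code performs, with hypothetical values:

#include <stdio.h>

#define NR_CPUS 4

struct mm_context {
        unsigned long asid[NR_CPUS];
};

int main(void)
{
        struct mm_context ctx = { .asid = { 0x101, 0x102, 0x103, 0x104 } };
        int cpu = 2;

        /* base + cpu * sizeof(long), emitted as a shift by ilog2(sizeof(long)) */
        unsigned long *slot = (unsigned long *)((char *)ctx.asid +
                                                (size_t)cpu * sizeof(long));

        printf("asid[%d] = 0x%lx\n", cpu, *slot);       /* same as ctx.asid[cpu] */
        return 0;
}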
@@ -312,7 +338,20 @@ static void *kvm_mips_build_enter_guest(void *addr) | |||
312 | #else | 338 | #else |
313 | uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); | 339 | uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); |
314 | #endif | 340 | #endif |
315 | uasm_i_mtc0(&p, K0, C0_ENTRYHI); | 341 | |
342 | /* | ||
343 | * Set up KVM T&E GVA pgd. | ||
344 | * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): | ||
345 | * - call tlbmiss_handler_setup_pgd(mm->pgd) | ||
346 | * - but skips write into CP0_PWBase for now | ||
347 | */ | ||
348 | UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) - | ||
349 | (int)offsetof(struct mm_struct, context.asid), T1); | ||
350 | |||
351 | UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); | ||
352 | uasm_i_jalr(&p, RA, T9); | ||
353 | uasm_i_mtc0(&p, K0, C0_ENTRYHI); | ||
354 | |||
316 | uasm_i_ehb(&p); | 355 | uasm_i_ehb(&p); |
317 | 356 | ||
318 | /* Disable RDHWR access */ | 357 | /* Disable RDHWR access */ |
@@ -348,6 +387,80 @@ static void *kvm_mips_build_enter_guest(void *addr) | |||
348 | } | 387 | } |
349 | 388 | ||
350 | /** | 389 | /** |
390 | * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler. | ||
391 | * @addr: Address to start writing code. | ||
392 | * @handler: Address of common handler (within range of @addr). | ||
393 | * | ||
394 | * Assemble TLB refill exception fast path handler for guest execution. | ||
395 | * | ||
396 | * Returns: Next address after end of written function. | ||
397 | */ | ||
398 | void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler) | ||
399 | { | ||
400 | u32 *p = addr; | ||
401 | struct uasm_label labels[2]; | ||
402 | struct uasm_reloc relocs[2]; | ||
403 | struct uasm_label *l = labels; | ||
404 | struct uasm_reloc *r = relocs; | ||
405 | |||
406 | memset(labels, 0, sizeof(labels)); | ||
407 | memset(relocs, 0, sizeof(relocs)); | ||
408 | |||
409 | /* Save guest k1 into scratch register */ | ||
410 | UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]); | ||
411 | |||
412 | /* Get the VCPU pointer from the VCPU scratch register */ | ||
413 | UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]); | ||
414 | |||
415 | /* Save guest k0 into VCPU structure */ | ||
416 | UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1); | ||
417 | |||
418 | /* | ||
419 | * Some of the common tlbex code uses current_cpu_type(). For KVM we | ||
420 | * assume symmetry and just disable preemption to silence the warning. | ||
421 | */ | ||
422 | preempt_disable(); | ||
423 | |||
424 | /* | ||
425 | * Now for the actual refill bit. A lot of this can be common with the | ||
426 | * Linux TLB refill handler, however we don't need to handle so many | ||
427 | * cases. We only need to handle user mode refills, and user mode runs | ||
428 | * with 32-bit addressing. | ||
429 | * | ||
430 | * Therefore the branch to label_vmalloc generated by build_get_pmde64() | ||
431 | * that isn't resolved should never actually get taken and is harmless | ||
432 | * to leave in place for now. | ||
433 | */ | ||
434 | |||
435 | #ifdef CONFIG_64BIT | ||
436 | build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ | ||
437 | #else | ||
438 | build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ | ||
439 | #endif | ||
440 | |||
441 | /* we don't support huge pages yet */ | ||
442 | |||
443 | build_get_ptep(&p, K0, K1); | ||
444 | build_update_entries(&p, K0, K1); | ||
445 | build_tlb_write_entry(&p, &l, &r, tlb_random); | ||
446 | |||
447 | preempt_enable(); | ||
448 | |||
449 | /* Get the VCPU pointer from the VCPU scratch register again */ | ||
450 | UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]); | ||
451 | |||
452 | /* Restore the guest's k0/k1 registers */ | ||
453 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1); | ||
454 | uasm_i_ehb(&p); | ||
455 | UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]); | ||
456 | |||
457 | /* Jump to guest */ | ||
458 | uasm_i_eret(&p); | ||
459 | |||
460 | return p; | ||
461 | } | ||
462 | |||
463 | /** | ||
351 | * kvm_mips_build_exception() - Assemble first level guest exception handler. | 464 | * kvm_mips_build_exception() - Assemble first level guest exception handler. |
352 | * @addr: Address to start writing code. | 465 | * @addr: Address to start writing code. |
353 | * @handler: Address of common handler (within range of @addr). | 466 | * @handler: Address of common handler (within range of @addr). |
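The refill handler assembled above follows the normal Linux software-refill shape: locate the pgd entry for the faulting address, locate the pte pair, write both halves into the TLB and eret back to the guest; build_get_pgde32()/build_get_pmde64() and build_get_ptep() emit the real lookups. The sketch below is a deliberately simplified, hypothetical two-level walk (a made-up 10/10/12-bit split, single pte per lookup) just to show the indexing, not the kernel's actual layout:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy 2-level table: 10-bit pgd index, 10-bit pte index, 4 KiB pages. */
#define PGDIR_SHIFT 22
#define PTRS        1024
#define PAGE_SHIFT  12
#define PAGE_MASK   (~0xfffu)

typedef uint32_t pte_t;

static pte_t *walk(pte_t **pgd, uint32_t badvaddr)
{
        pte_t *pte_page = pgd[badvaddr >> PGDIR_SHIFT];         /* pgd lookup */

        if (!pte_page)
                return NULL;                                    /* would drop to the slow path */
        return &pte_page[(badvaddr >> PAGE_SHIFT) & (PTRS - 1)]; /* pte lookup */
}

int main(void)
{
        static pte_t *pgd[PTRS];
        pte_t *ptes = calloc(PTRS, sizeof(*ptes));
        uint32_t va = 0x00403000;
        pte_t *p;

        pgd[va >> PGDIR_SHIFT] = ptes;
        ptes[(va >> PAGE_SHIFT) & (PTRS - 1)] = (0x12345000 & PAGE_MASK) | 0x7;

        p = walk(pgd, va);
        printf("pte for 0x%08x = 0x%08x\n", va, p ? *p : 0);
        free(ptes);
        return 0;
}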
@@ -468,6 +581,18 @@ void *kvm_mips_build_exit(void *addr) | |||
468 | uasm_i_mfc0(&p, K0, C0_CAUSE); | 581 | uasm_i_mfc0(&p, K0, C0_CAUSE); |
469 | uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1); | 582 | uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1); |
470 | 583 | ||
584 | if (cpu_has_badinstr) { | ||
585 | uasm_i_mfc0(&p, K0, C0_BADINSTR); | ||
586 | uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, | ||
587 | host_cp0_badinstr), K1); | ||
588 | } | ||
589 | |||
590 | if (cpu_has_badinstrp) { | ||
591 | uasm_i_mfc0(&p, K0, C0_BADINSTRP); | ||
592 | uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, | ||
593 | host_cp0_badinstrp), K1); | ||
594 | } | ||
595 | |||
471 | /* Now restore the host state just enough to run the handlers */ | 596 | /* Now restore the host state just enough to run the handlers */ |
472 | 597 | ||
473 | /* Switch EBASE to the one used by Linux */ | 598 | /* Switch EBASE to the one used by Linux */ |
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c index e88403b3dcdd..aa0a1a00faf6 100644 --- a/arch/mips/kvm/interrupt.c +++ b/arch/mips/kvm/interrupt.c | |||
@@ -183,10 +183,11 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | |||
183 | (exccode << CAUSEB_EXCCODE)); | 183 | (exccode << CAUSEB_EXCCODE)); |
184 | 184 | ||
185 | /* XXXSL Set PC to the interrupt exception entry point */ | 185 | /* XXXSL Set PC to the interrupt exception entry point */ |
186 | arch->pc = kvm_mips_guest_exception_base(vcpu); | ||
186 | if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV) | 187 | if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV) |
187 | arch->pc = KVM_GUEST_KSEG0 + 0x200; | 188 | arch->pc += 0x200; |
188 | else | 189 | else |
189 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 190 | arch->pc += 0x180; |
190 | 191 | ||
191 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 192 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
192 | } | 193 | } |
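The interrupt delivery change above loads the common exception base first and then applies the vector offset: 0x200 when Cause.IV requests the special interrupt vector, 0x180 otherwise. A minimal sketch of that selection (CAUSEF_IV here is assumed to be Cause bit 23, as on MIPS32/64):

#include <stdint.h>
#include <stdio.h>

#define CAUSEF_IV (1u << 23)

static uint32_t irq_vector(uint32_t base, uint32_t cause)
{
        return base + ((cause & CAUSEF_IV) ? 0x200 : 0x180);
}

int main(void)
{
        printf("IV=0: 0x%08x\n", irq_vector(0x80000000, 0));
        printf("IV=1: 0x%08x\n", irq_vector(0x80000000, CAUSEF_IV));
        return 0;
}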
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 29ec9ab3fd55..31ee5ee0010b 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/page.h> | 22 | #include <asm/page.h> |
23 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
24 | #include <asm/mmu_context.h> | 24 | #include <asm/mmu_context.h> |
25 | #include <asm/pgalloc.h> | ||
25 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
26 | 27 | ||
27 | #include <linux/kvm_host.h> | 28 | #include <linux/kvm_host.h> |
@@ -63,18 +64,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
63 | {NULL} | 64 | {NULL} |
64 | }; | 65 | }; |
65 | 66 | ||
66 | static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) | ||
67 | { | ||
68 | int i; | ||
69 | |||
70 | for_each_possible_cpu(i) { | ||
71 | vcpu->arch.guest_kernel_asid[i] = 0; | ||
72 | vcpu->arch.guest_user_asid[i] = 0; | ||
73 | } | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | /* | 67 | /* |
79 | * XXXKYMA: We are simulating a processor that has the WII bit set in | 68 |

80 | * Config7, so we are "runnable" if interrupts are pending | 69 | * Config7, so we are "runnable" if interrupts are pending |
@@ -104,39 +93,12 @@ void kvm_arch_check_processor_compat(void *rtn) | |||
104 | *(int *)rtn = 0; | 93 | *(int *)rtn = 0; |
105 | } | 94 | } |
106 | 95 | ||
107 | static void kvm_mips_init_tlbs(struct kvm *kvm) | ||
108 | { | ||
109 | unsigned long wired; | ||
110 | |||
111 | /* | ||
112 | * Add a wired entry to the TLB, it is used to map the commpage to | ||
113 | * the Guest kernel | ||
114 | */ | ||
115 | wired = read_c0_wired(); | ||
116 | write_c0_wired(wired + 1); | ||
117 | mtc0_tlbw_hazard(); | ||
118 | kvm->arch.commpage_tlb = wired; | ||
119 | |||
120 | kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(), | ||
121 | kvm->arch.commpage_tlb); | ||
122 | } | ||
123 | |||
124 | static void kvm_mips_init_vm_percpu(void *arg) | ||
125 | { | ||
126 | struct kvm *kvm = (struct kvm *)arg; | ||
127 | |||
128 | kvm_mips_init_tlbs(kvm); | ||
129 | kvm_mips_callbacks->vm_init(kvm); | ||
130 | |||
131 | } | ||
132 | |||
133 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | 96 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
134 | { | 97 | { |
135 | if (atomic_inc_return(&kvm_mips_instance) == 1) { | 98 | /* Allocate page table to map GPA -> RPA */ |
136 | kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n", | 99 | kvm->arch.gpa_mm.pgd = kvm_pgd_alloc(); |
137 | __func__); | 100 | if (!kvm->arch.gpa_mm.pgd) |
138 | on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); | 101 | return -ENOMEM; |
139 | } | ||
140 | 102 | ||
141 | return 0; | 103 | return 0; |
142 | } | 104 | } |
@@ -156,13 +118,6 @@ void kvm_mips_free_vcpus(struct kvm *kvm) | |||
156 | unsigned int i; | 118 | unsigned int i; |
157 | struct kvm_vcpu *vcpu; | 119 | struct kvm_vcpu *vcpu; |
158 | 120 | ||
159 | /* Put the pages we reserved for the guest pmap */ | ||
160 | for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { | ||
161 | if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) | ||
162 | kvm_release_pfn_clean(kvm->arch.guest_pmap[i]); | ||
163 | } | ||
164 | kfree(kvm->arch.guest_pmap); | ||
165 | |||
166 | kvm_for_each_vcpu(i, vcpu, kvm) { | 121 | kvm_for_each_vcpu(i, vcpu, kvm) { |
167 | kvm_arch_vcpu_free(vcpu); | 122 | kvm_arch_vcpu_free(vcpu); |
168 | } | 123 | } |
@@ -177,25 +132,17 @@ void kvm_mips_free_vcpus(struct kvm *kvm) | |||
177 | mutex_unlock(&kvm->lock); | 132 | mutex_unlock(&kvm->lock); |
178 | } | 133 | } |
179 | 134 | ||
180 | static void kvm_mips_uninit_tlbs(void *arg) | 135 | static void kvm_mips_free_gpa_pt(struct kvm *kvm) |
181 | { | 136 | { |
182 | /* Restore wired count */ | 137 | /* It should always be safe to remove after flushing the whole range */ |
183 | write_c0_wired(0); | 138 | WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0)); |
184 | mtc0_tlbw_hazard(); | 139 | pgd_free(NULL, kvm->arch.gpa_mm.pgd); |
185 | /* Clear out all the TLBs */ | ||
186 | kvm_local_flush_tlb_all(); | ||
187 | } | 140 | } |
188 | 141 | ||
189 | void kvm_arch_destroy_vm(struct kvm *kvm) | 142 | void kvm_arch_destroy_vm(struct kvm *kvm) |
190 | { | 143 | { |
191 | kvm_mips_free_vcpus(kvm); | 144 | kvm_mips_free_vcpus(kvm); |
192 | 145 | kvm_mips_free_gpa_pt(kvm); | |
193 | /* If this is the last instance, restore wired count */ | ||
194 | if (atomic_dec_return(&kvm_mips_instance) == 0) { | ||
195 | kvm_debug("%s: last KVM instance, restoring TLB parameters\n", | ||
196 | __func__); | ||
197 | on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1); | ||
198 | } | ||
199 | } | 146 | } |
200 | 147 | ||
201 | long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, | 148 | long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, |
@@ -210,6 +157,32 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, | |||
210 | return 0; | 157 | return 0; |
211 | } | 158 | } |
212 | 159 | ||
160 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | ||
161 | { | ||
162 | /* Flush whole GPA */ | ||
163 | kvm_mips_flush_gpa_pt(kvm, 0, ~0); | ||
164 | |||
165 | /* Let implementation do the rest */ | ||
166 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
167 | } | ||
168 | |||
169 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | ||
170 | struct kvm_memory_slot *slot) | ||
171 | { | ||
172 | /* | ||
173 | * The slot has been made invalid (ready for moving or deletion), so we | ||
174 | * need to ensure that it can no longer be accessed by any guest VCPUs. | ||
175 | */ | ||
176 | |||
177 | spin_lock(&kvm->mmu_lock); | ||
178 | /* Flush slot from GPA */ | ||
179 | kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, | ||
180 | slot->base_gfn + slot->npages - 1); | ||
181 | /* Let implementation do the rest */ | ||
182 | kvm_mips_callbacks->flush_shadow_memslot(kvm, slot); | ||
183 | spin_unlock(&kvm->mmu_lock); | ||
184 | } | ||
185 | |||
213 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 186 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
214 | struct kvm_memory_slot *memslot, | 187 | struct kvm_memory_slot *memslot, |
215 | const struct kvm_userspace_memory_region *mem, | 188 | const struct kvm_userspace_memory_region *mem, |
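kvm_arch_flush_shadow_memslot() above flushes exactly the slot's gfn range (base_gfn through base_gfn + npages - 1, inclusive) from the GPA page table under mmu_lock before asking the implementation to drop any derived mappings. A rough stand-alone sketch of that range computation, with stubbed locking and a hypothetical flush_gpa_range():

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins; the real structures and flush live in the KVM MMU code. */
struct memslot {
        uint64_t base_gfn;
        uint64_t npages;
};

static void flush_gpa_range(uint64_t start_gfn, uint64_t end_gfn)
{
        printf("flush GPA gfns 0x%llx..0x%llx\n",
               (unsigned long long)start_gfn, (unsigned long long)end_gfn);
}

int main(void)
{
        struct memslot slot = { .base_gfn = 0x100, .npages = 0x40 };

        /* take mmu_lock ... */
        flush_gpa_range(slot.base_gfn, slot.base_gfn + slot.npages - 1);
        /* ... tell the implementation to flush derived mappings, drop mmu_lock */
        return 0;
}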
@@ -224,35 +197,32 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
224 | const struct kvm_memory_slot *new, | 197 | const struct kvm_memory_slot *new, |
225 | enum kvm_mr_change change) | 198 | enum kvm_mr_change change) |
226 | { | 199 | { |
227 | unsigned long npages = 0; | 200 | int needs_flush; |
228 | int i; | ||
229 | 201 | ||
230 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", | 202 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", |
231 | __func__, kvm, mem->slot, mem->guest_phys_addr, | 203 | __func__, kvm, mem->slot, mem->guest_phys_addr, |
232 | mem->memory_size, mem->userspace_addr); | 204 | mem->memory_size, mem->userspace_addr); |
233 | 205 | ||
234 | /* Setup Guest PMAP table */ | 206 | /* |
235 | if (!kvm->arch.guest_pmap) { | 207 | * If dirty page logging is enabled, write protect all pages in the slot |
236 | if (mem->slot == 0) | 208 | * ready for dirty logging. |
237 | npages = mem->memory_size >> PAGE_SHIFT; | 209 | * |
238 | 210 | * There is no need to do this in any of the following cases: | |
239 | if (npages) { | 211 | * CREATE: No dirty mappings will already exist. |
240 | kvm->arch.guest_pmap_npages = npages; | 212 | * MOVE/DELETE: The old mappings will already have been cleaned up by |
241 | kvm->arch.guest_pmap = | 213 | * kvm_arch_flush_shadow_memslot() |
242 | kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); | 214 | */ |
243 | 215 | if (change == KVM_MR_FLAGS_ONLY && | |
244 | if (!kvm->arch.guest_pmap) { | 216 | (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && |
245 | kvm_err("Failed to allocate guest PMAP\n"); | 217 | new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { |
246 | return; | 218 | spin_lock(&kvm->mmu_lock); |
247 | } | 219 | /* Write protect GPA page table entries */ |
248 | 220 | needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, | |
249 | kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", | 221 | new->base_gfn + new->npages - 1); |
250 | npages, kvm->arch.guest_pmap); | 222 | /* Let implementation do the rest */ |
251 | 223 | if (needs_flush) | |
252 | /* Now setup the page table */ | 224 | kvm_mips_callbacks->flush_shadow_memslot(kvm, new); |
253 | for (i = 0; i < npages; i++) | 225 | spin_unlock(&kvm->mmu_lock); |
254 | kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; | ||
255 | } | ||
256 | } | 226 | } |
257 | } | 227 | } |
258 | 228 | ||
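The rewritten kvm_arch_commit_memory_region() above only write-protects when nothing but the flags changed and KVM_MEM_LOG_DIRTY_PAGES went from clear to set; CREATE, MOVE and DELETE need no work here, as the comment explains. A small sketch of that transition test, with stand-in flag and change values:

#include <stdbool.h>
#include <stdio.h>

#define MEM_LOG_DIRTY_PAGES 0x1u        /* stand-in for KVM_MEM_LOG_DIRTY_PAGES */

enum mr_change { MR_CREATE, MR_DELETE, MR_MOVE, MR_FLAGS_ONLY };

static bool needs_write_protect(enum mr_change change,
                                unsigned int old_flags, unsigned int new_flags)
{
        return change == MR_FLAGS_ONLY &&
               !(old_flags & MEM_LOG_DIRTY_PAGES) &&
               (new_flags & MEM_LOG_DIRTY_PAGES);
}

int main(void)
{
        printf("%d\n", needs_write_protect(MR_FLAGS_ONLY, 0, MEM_LOG_DIRTY_PAGES)); /* 1 */
        printf("%d\n", needs_write_protect(MR_CREATE, 0, MEM_LOG_DIRTY_PAGES));     /* 0 */
        printf("%d\n", needs_write_protect(MR_FLAGS_ONLY, MEM_LOG_DIRTY_PAGES,
                                           MEM_LOG_DIRTY_PAGES));                   /* 0 */
        return 0;
}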
@@ -276,7 +246,7 @@ static inline void dump_handler(const char *symbol, void *start, void *end) | |||
276 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | 246 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
277 | { | 247 | { |
278 | int err, size; | 248 | int err, size; |
279 | void *gebase, *p, *handler; | 249 | void *gebase, *p, *handler, *refill_start, *refill_end; |
280 | int i; | 250 | int i; |
281 | 251 | ||
282 | struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); | 252 | struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); |
@@ -329,8 +299,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
329 | /* Build guest exception vectors dynamically in unmapped memory */ | 299 | /* Build guest exception vectors dynamically in unmapped memory */ |
330 | handler = gebase + 0x2000; | 300 | handler = gebase + 0x2000; |
331 | 301 | ||
332 | /* TLB Refill, EXL = 0 */ | 302 | /* TLB refill */ |
333 | kvm_mips_build_exception(gebase, handler); | 303 | refill_start = gebase; |
304 | refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler); | ||
334 | 305 | ||
335 | /* General Exception Entry point */ | 306 | /* General Exception Entry point */ |
336 | kvm_mips_build_exception(gebase + 0x180, handler); | 307 | kvm_mips_build_exception(gebase + 0x180, handler); |
@@ -356,6 +327,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
356 | pr_debug("#include <asm/regdef.h>\n"); | 327 | pr_debug("#include <asm/regdef.h>\n"); |
357 | pr_debug("\n"); | 328 | pr_debug("\n"); |
358 | dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); | 329 | dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); |
330 | dump_handler("kvm_tlb_refill", refill_start, refill_end); | ||
359 | dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200); | 331 | dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200); |
360 | dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); | 332 | dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); |
361 | 333 | ||
@@ -406,6 +378,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | |||
406 | 378 | ||
407 | kvm_mips_dump_stats(vcpu); | 379 | kvm_mips_dump_stats(vcpu); |
408 | 380 | ||
381 | kvm_mmu_free_memory_caches(vcpu); | ||
409 | kfree(vcpu->arch.guest_ebase); | 382 | kfree(vcpu->arch.guest_ebase); |
410 | kfree(vcpu->arch.kseg0_commpage); | 383 | kfree(vcpu->arch.kseg0_commpage); |
411 | kfree(vcpu); | 384 | kfree(vcpu); |
@@ -422,34 +395,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
422 | return -ENOIOCTLCMD; | 395 | return -ENOIOCTLCMD; |
423 | } | 396 | } |
424 | 397 | ||
425 | /* Must be called with preemption disabled, just before entering guest */ | ||
426 | static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) | ||
427 | { | ||
428 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
429 | int i, cpu = smp_processor_id(); | ||
430 | unsigned int gasid; | ||
431 | |||
432 | /* | ||
433 | * Lazy host ASID regeneration for guest user mode. | ||
434 | * If the guest ASID has changed since the last guest usermode | ||
435 | * execution, regenerate the host ASID so as to invalidate stale TLB | ||
436 | * entries. | ||
437 | */ | ||
438 | if (!KVM_GUEST_KERNEL_MODE(vcpu)) { | ||
439 | gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; | ||
440 | if (gasid != vcpu->arch.last_user_gasid) { | ||
441 | kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, | ||
442 | vcpu); | ||
443 | vcpu->arch.guest_user_asid[cpu] = | ||
444 | vcpu->arch.guest_user_mm.context.asid[cpu]; | ||
445 | for_each_possible_cpu(i) | ||
446 | if (i != cpu) | ||
447 | vcpu->arch.guest_user_asid[cpu] = 0; | ||
448 | vcpu->arch.last_user_gasid = gasid; | ||
449 | } | ||
450 | } | ||
451 | } | ||
452 | |||
453 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | 398 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
454 | { | 399 | { |
455 | int r = 0; | 400 | int r = 0; |
@@ -467,25 +412,20 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
467 | lose_fpu(1); | 412 | lose_fpu(1); |
468 | 413 | ||
469 | local_irq_disable(); | 414 | local_irq_disable(); |
470 | /* Check if we have any exceptions/interrupts pending */ | ||
471 | kvm_mips_deliver_interrupts(vcpu, | ||
472 | kvm_read_c0_guest_cause(vcpu->arch.cop0)); | ||
473 | |||
474 | guest_enter_irqoff(); | 415 | guest_enter_irqoff(); |
475 | |||
476 | /* Disable hardware page table walking while in guest */ | ||
477 | htw_stop(); | ||
478 | |||
479 | trace_kvm_enter(vcpu); | 416 | trace_kvm_enter(vcpu); |
480 | 417 | ||
481 | kvm_mips_check_asids(vcpu); | 418 | /* |
419 | * Make sure the read of VCPU requests in vcpu_run() callback is not | ||
420 | * reordered ahead of the write to vcpu->mode, or we could miss a TLB | ||
421 | * flush request while the requester sees the VCPU as outside of guest | ||
422 | * mode and not needing an IPI. | ||
423 | */ | ||
424 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); | ||
482 | 425 | ||
483 | r = vcpu->arch.vcpu_run(run, vcpu); | 426 | r = kvm_mips_callbacks->vcpu_run(run, vcpu); |
484 | trace_kvm_out(vcpu); | ||
485 | |||
486 | /* Re-enable HTW before enabling interrupts */ | ||
487 | htw_start(); | ||
488 | 427 | ||
428 | trace_kvm_out(vcpu); | ||
489 | guest_exit_irqoff(); | 429 | guest_exit_irqoff(); |
490 | local_irq_enable(); | 430 | local_irq_enable(); |
491 | 431 | ||
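The smp_store_mb() above pairs with the requesting side: the entering CPU publishes vcpu->mode and only then reads pending requests, while a requester queues its request and only then reads the mode, so at least one side always observes the other and a TLB flush request cannot be lost. A user-space C11 sketch of that store-then-full-fence-then-load pattern (names and the single-threaded demo are illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int mode;         /* 0 = outside guest, 1 = in guest */
static atomic_int requests;     /* pending request bits */

/* Entering side: publish the mode, then (after a full barrier) check requests. */
static int enter_guest(void)
{
        atomic_store_explicit(&mode, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* like smp_store_mb()'s barrier */
        return atomic_load_explicit(&requests, memory_order_relaxed);
}

/* Requesting side: queue the request, then (after a full barrier) check the mode. */
static int make_request(void)
{
        atomic_fetch_or_explicit(&requests, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&mode, memory_order_relaxed); /* 1 => kick with an IPI */
}

int main(void)
{
        /* Single-threaded demo: at least one side sees the other's write. */
        printf("pending on entry: %d\n", enter_guest());
        printf("target in guest:  %d\n", make_request());
        return 0;
}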
@@ -580,33 +520,6 @@ static u64 kvm_mips_get_one_regs[] = { | |||
580 | KVM_REG_MIPS_LO, | 520 | KVM_REG_MIPS_LO, |
581 | #endif | 521 | #endif |
582 | KVM_REG_MIPS_PC, | 522 | KVM_REG_MIPS_PC, |
583 | |||
584 | KVM_REG_MIPS_CP0_INDEX, | ||
585 | KVM_REG_MIPS_CP0_CONTEXT, | ||
586 | KVM_REG_MIPS_CP0_USERLOCAL, | ||
587 | KVM_REG_MIPS_CP0_PAGEMASK, | ||
588 | KVM_REG_MIPS_CP0_WIRED, | ||
589 | KVM_REG_MIPS_CP0_HWRENA, | ||
590 | KVM_REG_MIPS_CP0_BADVADDR, | ||
591 | KVM_REG_MIPS_CP0_COUNT, | ||
592 | KVM_REG_MIPS_CP0_ENTRYHI, | ||
593 | KVM_REG_MIPS_CP0_COMPARE, | ||
594 | KVM_REG_MIPS_CP0_STATUS, | ||
595 | KVM_REG_MIPS_CP0_CAUSE, | ||
596 | KVM_REG_MIPS_CP0_EPC, | ||
597 | KVM_REG_MIPS_CP0_PRID, | ||
598 | KVM_REG_MIPS_CP0_CONFIG, | ||
599 | KVM_REG_MIPS_CP0_CONFIG1, | ||
600 | KVM_REG_MIPS_CP0_CONFIG2, | ||
601 | KVM_REG_MIPS_CP0_CONFIG3, | ||
602 | KVM_REG_MIPS_CP0_CONFIG4, | ||
603 | KVM_REG_MIPS_CP0_CONFIG5, | ||
604 | KVM_REG_MIPS_CP0_CONFIG7, | ||
605 | KVM_REG_MIPS_CP0_ERROREPC, | ||
606 | |||
607 | KVM_REG_MIPS_COUNT_CTL, | ||
608 | KVM_REG_MIPS_COUNT_RESUME, | ||
609 | KVM_REG_MIPS_COUNT_HZ, | ||
610 | }; | 523 | }; |
611 | 524 | ||
612 | static u64 kvm_mips_get_one_regs_fpu[] = { | 525 | static u64 kvm_mips_get_one_regs_fpu[] = { |
@@ -619,15 +532,6 @@ static u64 kvm_mips_get_one_regs_msa[] = { | |||
619 | KVM_REG_MIPS_MSA_CSR, | 532 | KVM_REG_MIPS_MSA_CSR, |
620 | }; | 533 | }; |
621 | 534 | ||
622 | static u64 kvm_mips_get_one_regs_kscratch[] = { | ||
623 | KVM_REG_MIPS_CP0_KSCRATCH1, | ||
624 | KVM_REG_MIPS_CP0_KSCRATCH2, | ||
625 | KVM_REG_MIPS_CP0_KSCRATCH3, | ||
626 | KVM_REG_MIPS_CP0_KSCRATCH4, | ||
627 | KVM_REG_MIPS_CP0_KSCRATCH5, | ||
628 | KVM_REG_MIPS_CP0_KSCRATCH6, | ||
629 | }; | ||
630 | |||
631 | static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) | 535 | static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) |
632 | { | 536 | { |
633 | unsigned long ret; | 537 | unsigned long ret; |
@@ -641,7 +545,6 @@ static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) | |||
641 | } | 545 | } |
642 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) | 546 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) |
643 | ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32; | 547 | ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32; |
644 | ret += __arch_hweight8(vcpu->arch.kscratch_enabled); | ||
645 | ret += kvm_mips_callbacks->num_regs(vcpu); | 548 | ret += kvm_mips_callbacks->num_regs(vcpu); |
646 | 549 | ||
647 | return ret; | 550 | return ret; |
@@ -694,16 +597,6 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) | |||
694 | } | 597 | } |
695 | } | 598 | } |
696 | 599 | ||
697 | for (i = 0; i < 6; ++i) { | ||
698 | if (!(vcpu->arch.kscratch_enabled & BIT(i + 2))) | ||
699 | continue; | ||
700 | |||
701 | if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i], | ||
702 | sizeof(kvm_mips_get_one_regs_kscratch[i]))) | ||
703 | return -EFAULT; | ||
704 | ++indices; | ||
705 | } | ||
706 | |||
707 | return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); | 600 | return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); |
708 | } | 601 | } |
709 | 602 | ||
@@ -794,95 +687,6 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |||
794 | v = fpu->msacsr; | 687 | v = fpu->msacsr; |
795 | break; | 688 | break; |
796 | 689 | ||
797 | /* Co-processor 0 registers */ | ||
798 | case KVM_REG_MIPS_CP0_INDEX: | ||
799 | v = (long)kvm_read_c0_guest_index(cop0); | ||
800 | break; | ||
801 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
802 | v = (long)kvm_read_c0_guest_context(cop0); | ||
803 | break; | ||
804 | case KVM_REG_MIPS_CP0_USERLOCAL: | ||
805 | v = (long)kvm_read_c0_guest_userlocal(cop0); | ||
806 | break; | ||
807 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
808 | v = (long)kvm_read_c0_guest_pagemask(cop0); | ||
809 | break; | ||
810 | case KVM_REG_MIPS_CP0_WIRED: | ||
811 | v = (long)kvm_read_c0_guest_wired(cop0); | ||
812 | break; | ||
813 | case KVM_REG_MIPS_CP0_HWRENA: | ||
814 | v = (long)kvm_read_c0_guest_hwrena(cop0); | ||
815 | break; | ||
816 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
817 | v = (long)kvm_read_c0_guest_badvaddr(cop0); | ||
818 | break; | ||
819 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
820 | v = (long)kvm_read_c0_guest_entryhi(cop0); | ||
821 | break; | ||
822 | case KVM_REG_MIPS_CP0_COMPARE: | ||
823 | v = (long)kvm_read_c0_guest_compare(cop0); | ||
824 | break; | ||
825 | case KVM_REG_MIPS_CP0_STATUS: | ||
826 | v = (long)kvm_read_c0_guest_status(cop0); | ||
827 | break; | ||
828 | case KVM_REG_MIPS_CP0_CAUSE: | ||
829 | v = (long)kvm_read_c0_guest_cause(cop0); | ||
830 | break; | ||
831 | case KVM_REG_MIPS_CP0_EPC: | ||
832 | v = (long)kvm_read_c0_guest_epc(cop0); | ||
833 | break; | ||
834 | case KVM_REG_MIPS_CP0_PRID: | ||
835 | v = (long)kvm_read_c0_guest_prid(cop0); | ||
836 | break; | ||
837 | case KVM_REG_MIPS_CP0_CONFIG: | ||
838 | v = (long)kvm_read_c0_guest_config(cop0); | ||
839 | break; | ||
840 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
841 | v = (long)kvm_read_c0_guest_config1(cop0); | ||
842 | break; | ||
843 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
844 | v = (long)kvm_read_c0_guest_config2(cop0); | ||
845 | break; | ||
846 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
847 | v = (long)kvm_read_c0_guest_config3(cop0); | ||
848 | break; | ||
849 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
850 | v = (long)kvm_read_c0_guest_config4(cop0); | ||
851 | break; | ||
852 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
853 | v = (long)kvm_read_c0_guest_config5(cop0); | ||
854 | break; | ||
855 | case KVM_REG_MIPS_CP0_CONFIG7: | ||
856 | v = (long)kvm_read_c0_guest_config7(cop0); | ||
857 | break; | ||
858 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
859 | v = (long)kvm_read_c0_guest_errorepc(cop0); | ||
860 | break; | ||
861 | case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6: | ||
862 | idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; | ||
863 | if (!(vcpu->arch.kscratch_enabled & BIT(idx))) | ||
864 | return -EINVAL; | ||
865 | switch (idx) { | ||
866 | case 2: | ||
867 | v = (long)kvm_read_c0_guest_kscratch1(cop0); | ||
868 | break; | ||
869 | case 3: | ||
870 | v = (long)kvm_read_c0_guest_kscratch2(cop0); | ||
871 | break; | ||
872 | case 4: | ||
873 | v = (long)kvm_read_c0_guest_kscratch3(cop0); | ||
874 | break; | ||
875 | case 5: | ||
876 | v = (long)kvm_read_c0_guest_kscratch4(cop0); | ||
877 | break; | ||
878 | case 6: | ||
879 | v = (long)kvm_read_c0_guest_kscratch5(cop0); | ||
880 | break; | ||
881 | case 7: | ||
882 | v = (long)kvm_read_c0_guest_kscratch6(cop0); | ||
883 | break; | ||
884 | } | ||
885 | break; | ||
886 | /* registers to be handled specially */ | 690 | /* registers to be handled specially */ |
887 | default: | 691 | default: |
888 | ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); | 692 | ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); |
@@ -1014,68 +818,6 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
1014 | fpu->msacsr = v; | 818 | fpu->msacsr = v; |
1015 | break; | 819 | break; |
1016 | 820 | ||
1017 | /* Co-processor 0 registers */ | ||
1018 | case KVM_REG_MIPS_CP0_INDEX: | ||
1019 | kvm_write_c0_guest_index(cop0, v); | ||
1020 | break; | ||
1021 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
1022 | kvm_write_c0_guest_context(cop0, v); | ||
1023 | break; | ||
1024 | case KVM_REG_MIPS_CP0_USERLOCAL: | ||
1025 | kvm_write_c0_guest_userlocal(cop0, v); | ||
1026 | break; | ||
1027 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
1028 | kvm_write_c0_guest_pagemask(cop0, v); | ||
1029 | break; | ||
1030 | case KVM_REG_MIPS_CP0_WIRED: | ||
1031 | kvm_write_c0_guest_wired(cop0, v); | ||
1032 | break; | ||
1033 | case KVM_REG_MIPS_CP0_HWRENA: | ||
1034 | kvm_write_c0_guest_hwrena(cop0, v); | ||
1035 | break; | ||
1036 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
1037 | kvm_write_c0_guest_badvaddr(cop0, v); | ||
1038 | break; | ||
1039 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
1040 | kvm_write_c0_guest_entryhi(cop0, v); | ||
1041 | break; | ||
1042 | case KVM_REG_MIPS_CP0_STATUS: | ||
1043 | kvm_write_c0_guest_status(cop0, v); | ||
1044 | break; | ||
1045 | case KVM_REG_MIPS_CP0_EPC: | ||
1046 | kvm_write_c0_guest_epc(cop0, v); | ||
1047 | break; | ||
1048 | case KVM_REG_MIPS_CP0_PRID: | ||
1049 | kvm_write_c0_guest_prid(cop0, v); | ||
1050 | break; | ||
1051 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
1052 | kvm_write_c0_guest_errorepc(cop0, v); | ||
1053 | break; | ||
1054 | case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6: | ||
1055 | idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; | ||
1056 | if (!(vcpu->arch.kscratch_enabled & BIT(idx))) | ||
1057 | return -EINVAL; | ||
1058 | switch (idx) { | ||
1059 | case 2: | ||
1060 | kvm_write_c0_guest_kscratch1(cop0, v); | ||
1061 | break; | ||
1062 | case 3: | ||
1063 | kvm_write_c0_guest_kscratch2(cop0, v); | ||
1064 | break; | ||
1065 | case 4: | ||
1066 | kvm_write_c0_guest_kscratch3(cop0, v); | ||
1067 | break; | ||
1068 | case 5: | ||
1069 | kvm_write_c0_guest_kscratch4(cop0, v); | ||
1070 | break; | ||
1071 | case 6: | ||
1072 | kvm_write_c0_guest_kscratch5(cop0, v); | ||
1073 | break; | ||
1074 | case 7: | ||
1075 | kvm_write_c0_guest_kscratch6(cop0, v); | ||
1076 | break; | ||
1077 | } | ||
1078 | break; | ||
1079 | /* registers to be handled specially */ | 821 | /* registers to be handled specially */ |
1080 | default: | 822 | default: |
1081 | return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); | 823 | return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); |
@@ -1144,18 +886,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, | |||
1144 | return -E2BIG; | 886 | return -E2BIG; |
1145 | return kvm_mips_copy_reg_indices(vcpu, user_list->reg); | 887 | return kvm_mips_copy_reg_indices(vcpu, user_list->reg); |
1146 | } | 888 | } |
1147 | case KVM_NMI: | ||
1148 | /* Treat the NMI as a CPU reset */ | ||
1149 | r = kvm_mips_reset_vcpu(vcpu); | ||
1150 | break; | ||
1151 | case KVM_INTERRUPT: | 889 | case KVM_INTERRUPT: |
1152 | { | 890 | { |
1153 | struct kvm_mips_interrupt irq; | 891 | struct kvm_mips_interrupt irq; |
1154 | 892 | ||
1155 | r = -EFAULT; | ||
1156 | if (copy_from_user(&irq, argp, sizeof(irq))) | 893 | if (copy_from_user(&irq, argp, sizeof(irq))) |
1157 | goto out; | 894 | return -EFAULT; |
1158 | |||
1159 | kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, | 895 | kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, |
1160 | irq.irq); | 896 | irq.irq); |
1161 | 897 | ||
@@ -1165,56 +901,57 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, | |||
1165 | case KVM_ENABLE_CAP: { | 901 | case KVM_ENABLE_CAP: { |
1166 | struct kvm_enable_cap cap; | 902 | struct kvm_enable_cap cap; |
1167 | 903 | ||
1168 | r = -EFAULT; | ||
1169 | if (copy_from_user(&cap, argp, sizeof(cap))) | 904 | if (copy_from_user(&cap, argp, sizeof(cap))) |
1170 | goto out; | 905 | return -EFAULT; |
1171 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | 906 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
1172 | break; | 907 | break; |
1173 | } | 908 | } |
1174 | default: | 909 | default: |
1175 | r = -ENOIOCTLCMD; | 910 | r = -ENOIOCTLCMD; |
1176 | } | 911 | } |
1177 | |||
1178 | out: | ||
1179 | return r; | 912 | return r; |
1180 | } | 913 | } |
1181 | 914 | ||
1182 | /* Get (and clear) the dirty memory log for a memory slot. */ | 915 | /** |
916 | * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot | ||
917 | * @kvm: kvm instance | ||
918 | * @log: slot id and address to which we copy the log | ||
919 | * | ||
920 | * Steps 1-4 below provide a general overview of dirty page logging. See the | ||
921 | * kvm_get_dirty_log_protect() function description for additional details. | ||
922 | * | ||
923 | * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we | ||
924 | * always flush the TLB (step 4) even if a previous step failed and the dirty | ||
925 | * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging | ||
926 | * API does not preclude a subsequent dirty log read by user space. Flushing the | ||
927 | * TLB ensures writes will be marked dirty before the next log read. | ||
928 | * | ||
929 | * 1. Take a snapshot of the bit and clear it if needed. | ||
930 | * 2. Write protect the corresponding page. | ||
931 | * 3. Copy the snapshot to the userspace. | ||
932 | * 4. Flush TLB's if needed. | ||
933 | */ | ||
1183 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | 934 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
1184 | { | 935 | { |
1185 | struct kvm_memslots *slots; | 936 | struct kvm_memslots *slots; |
1186 | struct kvm_memory_slot *memslot; | 937 | struct kvm_memory_slot *memslot; |
1187 | unsigned long ga, ga_end; | 938 | bool is_dirty = false; |
1188 | int is_dirty = 0; | ||
1189 | int r; | 939 | int r; |
1190 | unsigned long n; | ||
1191 | 940 | ||
1192 | mutex_lock(&kvm->slots_lock); | 941 | mutex_lock(&kvm->slots_lock); |
1193 | 942 | ||
1194 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | 943 | r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); |
1195 | if (r) | ||
1196 | goto out; | ||
1197 | 944 | ||
1198 | /* If nothing is dirty, don't bother messing with page tables. */ | ||
1199 | if (is_dirty) { | 945 | if (is_dirty) { |
1200 | slots = kvm_memslots(kvm); | 946 | slots = kvm_memslots(kvm); |
1201 | memslot = id_to_memslot(slots, log->slot); | 947 | memslot = id_to_memslot(slots, log->slot); |
1202 | 948 | ||
1203 | ga = memslot->base_gfn << PAGE_SHIFT; | 949 | /* Let implementation handle TLB/GVA invalidation */ |
1204 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | 950 | kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot); |
1205 | |||
1206 | kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, | ||
1207 | ga_end); | ||
1208 | |||
1209 | n = kvm_dirty_bitmap_bytes(memslot); | ||
1210 | memset(memslot->dirty_bitmap, 0, n); | ||
1211 | } | 951 | } |
1212 | 952 | ||
1213 | r = 0; | ||
1214 | out: | ||
1215 | mutex_unlock(&kvm->slots_lock); | 953 | mutex_unlock(&kvm->slots_lock); |
1216 | return r; | 954 | return r; |
1217 | |||
1218 | } | 955 | } |
1219 | 956 | ||
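For reference, the consumer side of this ioctl is unchanged: a VMM that set KVM_MEM_LOG_DIRTY_PAGES on a slot periodically pulls the bitmap with KVM_GET_DIRTY_LOG. A hedged userspace sketch (vm_fd, slot number and bitmap sizing are assumed to come from earlier KVM_CREATE_VM / KVM_SET_USER_MEMORY_REGION setup):

#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Fetch (and atomically clear) the dirty bitmap for one memory slot.
 * bitmap must hold at least one bit per page in the slot, 64-bit aligned. */
int fetch_dirty_log(int vm_fd, uint32_t slot, uint64_t *bitmap)
{
        struct kvm_dirty_log log;

        memset(&log, 0, sizeof(log));
        log.slot = slot;
        log.dirty_bitmap = bitmap;

        /* Steps 1-4 from the comment above happen in the kernel; on return
         * each set bit marks a guest page written since the previous call. */
        return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}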
1220 | long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | 957 | long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) |
@@ -1282,11 +1019,19 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
1282 | switch (ext) { | 1019 | switch (ext) { |
1283 | case KVM_CAP_ONE_REG: | 1020 | case KVM_CAP_ONE_REG: |
1284 | case KVM_CAP_ENABLE_CAP: | 1021 | case KVM_CAP_ENABLE_CAP: |
1022 | case KVM_CAP_READONLY_MEM: | ||
1023 | case KVM_CAP_SYNC_MMU: | ||
1285 | r = 1; | 1024 | r = 1; |
1286 | break; | 1025 | break; |
1287 | case KVM_CAP_COALESCED_MMIO: | 1026 | case KVM_CAP_COALESCED_MMIO: |
1288 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 1027 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
1289 | break; | 1028 | break; |
1029 | case KVM_CAP_NR_VCPUS: | ||
1030 | r = num_online_cpus(); | ||
1031 | break; | ||
1032 | case KVM_CAP_MAX_VCPUS: | ||
1033 | r = KVM_MAX_VCPUS; | ||
1034 | break; | ||
1290 | case KVM_CAP_MIPS_FPU: | 1035 | case KVM_CAP_MIPS_FPU: |
1291 | /* We don't handle systems with inconsistent cpu_has_fpu */ | 1036 | /* We don't handle systems with inconsistent cpu_has_fpu */ |
1292 | r = !!raw_cpu_has_fpu; | 1037 | r = !!raw_cpu_has_fpu; |
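The new capability cases are visible to userspace through KVM_CHECK_EXTENSION, which works on both /dev/kvm and a VM fd. An illustrative probe of the capabilities this hunk starts advertising:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

void probe_mips_kvm_caps(int vm_fd)
{
        printf("SYNC_MMU:     %d\n", ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU));
        printf("READONLY_MEM: %d\n", ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_READONLY_MEM));
        printf("NR_VCPUS:     %d\n", ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS));
        printf("MAX_VCPUS:    %d\n", ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS));
}

Per the hunk above, NR_VCPUS reports a soft recommendation (the number of online CPUs), while MAX_VCPUS is the hard limit.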
@@ -1400,13 +1145,23 @@ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) | |||
1400 | 1145 | ||
1401 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 1146 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
1402 | { | 1147 | { |
1403 | kvm_mips_callbacks->vcpu_init(vcpu); | 1148 | int err; |
1149 | |||
1150 | err = kvm_mips_callbacks->vcpu_init(vcpu); | ||
1151 | if (err) | ||
1152 | return err; | ||
1153 | |||
1404 | hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, | 1154 | hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, |
1405 | HRTIMER_MODE_REL); | 1155 | HRTIMER_MODE_REL); |
1406 | vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; | 1156 | vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; |
1407 | return 0; | 1157 | return 0; |
1408 | } | 1158 | } |
1409 | 1159 | ||
1160 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
1161 | { | ||
1162 | kvm_mips_callbacks->vcpu_uninit(vcpu); | ||
1163 | } | ||
1164 | |||
1410 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | 1165 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1411 | struct kvm_translation *tr) | 1166 | struct kvm_translation *tr) |
1412 | { | 1167 | { |
@@ -1440,8 +1195,11 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1440 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | 1195 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
1441 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 1196 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
1442 | enum emulation_result er = EMULATE_DONE; | 1197 | enum emulation_result er = EMULATE_DONE; |
1198 | u32 inst; | ||
1443 | int ret = RESUME_GUEST; | 1199 | int ret = RESUME_GUEST; |
1444 | 1200 | ||
1201 | vcpu->mode = OUTSIDE_GUEST_MODE; | ||
1202 | |||
1445 | /* re-enable HTW before enabling interrupts */ | 1203 | /* re-enable HTW before enabling interrupts */ |
1446 | htw_start(); | 1204 | htw_start(); |
1447 | 1205 | ||
@@ -1564,8 +1322,12 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1564 | break; | 1322 | break; |
1565 | 1323 | ||
1566 | default: | 1324 | default: |
1325 | if (cause & CAUSEF_BD) | ||
1326 | opc += 1; | ||
1327 | inst = 0; | ||
1328 | kvm_get_badinstr(opc, vcpu, &inst); | ||
1567 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", | 1329 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", |
1568 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, | 1330 | exccode, opc, inst, badvaddr, |
1569 | kvm_read_c0_guest_status(vcpu->arch.cop0)); | 1331 | kvm_read_c0_guest_status(vcpu->arch.cop0)); |
1570 | kvm_arch_vcpu_dump_regs(vcpu); | 1332 | kvm_arch_vcpu_dump_regs(vcpu); |
1571 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 1333 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -1593,7 +1355,15 @@ skip_emul: | |||
1593 | if (ret == RESUME_GUEST) { | 1355 | if (ret == RESUME_GUEST) { |
1594 | trace_kvm_reenter(vcpu); | 1356 | trace_kvm_reenter(vcpu); |
1595 | 1357 | ||
1596 | kvm_mips_check_asids(vcpu); | 1358 | /* |
1359 | * Make sure the read of VCPU requests in vcpu_reenter() | ||
1360 | * callback is not reordered ahead of the write to vcpu->mode, | ||
1361 | * or we could miss a TLB flush request while the requester sees | ||
1362 | * the VCPU as outside of guest mode and not needing an IPI. | ||
1363 | */ | ||
1364 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); | ||
1365 | |||
1366 | kvm_mips_callbacks->vcpu_reenter(run, vcpu); | ||
1597 | 1367 | ||
1598 | /* | 1368 | /* |
1599 | * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context | 1369 | * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context |
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 3b677c851be0..cb0faade311e 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c | |||
@@ -11,86 +11,995 @@ | |||
11 | 11 | ||
12 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
13 | #include <linux/kvm_host.h> | 13 | #include <linux/kvm_host.h> |
14 | #include <linux/uaccess.h> | ||
14 | #include <asm/mmu_context.h> | 15 | #include <asm/mmu_context.h> |
16 | #include <asm/pgalloc.h> | ||
15 | 17 | ||
16 | static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | 18 | /* |
19 | * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels | ||
20 | * for which pages need to be cached. | ||
21 | */ | ||
22 | #if defined(__PAGETABLE_PMD_FOLDED) | ||
23 | #define KVM_MMU_CACHE_MIN_PAGES 1 | ||
24 | #else | ||
25 | #define KVM_MMU_CACHE_MIN_PAGES 2 | ||
26 | #endif | ||
27 | |||
28 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | ||
29 | int min, int max) | ||
17 | { | 30 | { |
18 | int cpu = smp_processor_id(); | 31 | void *page; |
32 | |||
33 | BUG_ON(max > KVM_NR_MEM_OBJS); | ||
34 | if (cache->nobjs >= min) | ||
35 | return 0; | ||
36 | while (cache->nobjs < max) { | ||
37 | page = (void *)__get_free_page(GFP_KERNEL); | ||
38 | if (!page) | ||
39 | return -ENOMEM; | ||
40 | cache->objects[cache->nobjs++] = page; | ||
41 | } | ||
42 | return 0; | ||
43 | } | ||
19 | 44 | ||
20 | return vcpu->arch.guest_kernel_asid[cpu] & | 45 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) |
21 | cpu_asid_mask(&cpu_data[cpu]); | 46 | { |
47 | while (mc->nobjs) | ||
48 | free_page((unsigned long)mc->objects[--mc->nobjs]); | ||
22 | } | 49 | } |
23 | 50 | ||
24 | static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) | 51 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) |
25 | { | 52 | { |
26 | int cpu = smp_processor_id(); | 53 | void *p; |
27 | 54 | ||
28 | return vcpu->arch.guest_user_asid[cpu] & | 55 | BUG_ON(!mc || !mc->nobjs); |
29 | cpu_asid_mask(&cpu_data[cpu]); | 56 | p = mc->objects[--mc->nobjs]; |
57 | return p; | ||
30 | } | 58 | } |
31 | 59 | ||
32 | static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) | 60 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
33 | { | 61 | { |
34 | int srcu_idx, err = 0; | 62 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); |
35 | kvm_pfn_t pfn; | 63 | } |
64 | |||
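The cache above exists so that page-table pages can be allocated while sleeping is still allowed and then consumed infallibly later, under kvm->mmu_lock, when building GPA page tables. A rough userspace model of the pattern, with calloc() standing in for __get_free_page() and the sizes purely illustrative:

#include <stdlib.h>

#define NR_MEM_OBJS     4       /* stand-in for KVM_NR_MEM_OBJS */
#define OBJ_SIZE        4096

struct mmu_memory_cache {
        int nobjs;
        void *objects[NR_MEM_OBJS];
};

/* Called from a context that may sleep: fill the stash up to max pages. */
int cache_topup(struct mmu_memory_cache *mc, int min, int max)
{
        while (mc->nobjs < max) {
                void *page = calloc(1, OBJ_SIZE);
                if (!page)
                        return mc->nobjs >= min ? 0 : -1;
                mc->objects[mc->nobjs++] = page;
        }
        return 0;
}

/* Called under the lock: never blocks, succeeds as long as topup did. */
void *cache_alloc(struct mmu_memory_cache *mc)
{
        return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}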
65 | /** | ||
66 | * kvm_pgd_init() - Initialise KVM GPA page directory. | ||
67 | * @page: Pointer to page directory (PGD) for KVM GPA. | ||
68 | * | ||
69 | * Initialise a KVM GPA page directory with pointers to the invalid table, i.e. | ||
70 | * representing no mappings. This is similar to pgd_init(), however it | ||
71 | * initialises all the page directory pointers, not just the ones corresponding | ||
72 | * to the userland address space (since it is for the guest physical address | ||
73 | * space rather than a virtual address space). | ||
74 | */ | ||
75 | static void kvm_pgd_init(void *page) | ||
76 | { | ||
77 | unsigned long *p, *end; | ||
78 | unsigned long entry; | ||
79 | |||
80 | #ifdef __PAGETABLE_PMD_FOLDED | ||
81 | entry = (unsigned long)invalid_pte_table; | ||
82 | #else | ||
83 | entry = (unsigned long)invalid_pmd_table; | ||
84 | #endif | ||
85 | |||
86 | p = (unsigned long *)page; | ||
87 | end = p + PTRS_PER_PGD; | ||
88 | |||
89 | do { | ||
90 | p[0] = entry; | ||
91 | p[1] = entry; | ||
92 | p[2] = entry; | ||
93 | p[3] = entry; | ||
94 | p[4] = entry; | ||
95 | p += 8; | ||
96 | p[-3] = entry; | ||
97 | p[-2] = entry; | ||
98 | p[-1] = entry; | ||
99 | } while (p != end); | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory. | ||
104 | * | ||
105 | * Allocate a blank KVM GPA page directory (PGD) for representing guest physical | ||
106 | * to host physical page mappings. | ||
107 | * | ||
108 | * Returns: Pointer to new KVM GPA page directory. | ||
109 | * NULL on allocation failure. | ||
110 | */ | ||
111 | pgd_t *kvm_pgd_alloc(void) | ||
112 | { | ||
113 | pgd_t *ret; | ||
114 | |||
115 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER); | ||
116 | if (ret) | ||
117 | kvm_pgd_init(ret); | ||
118 | |||
119 | return ret; | ||
120 | } | ||
121 | |||
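kvm_pgd_init() hand-unrolls its fill loop (eight stores per iteration, hence the p += 8 and the negative indices); functionally it just points every PGD slot, kernel half included, at the invalid lower-level table. The straightforward equivalent, with the entry count and invalid-table pointer passed in rather than taken from the pgtable headers:

/* Fill all nr_entries PGD slots with the "no mapping here" sentinel so a
 * page-table walk of an untouched GPA range terminates cleanly. */
void pgd_fill_invalid(unsigned long *pgd, int nr_entries,
                      unsigned long invalid_table)
{
        int i;

        for (i = 0; i < nr_entries; i++)
                pgd[i] = invalid_table;
}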
122 | /** | ||
123 | * kvm_mips_walk_pgd() - Walk page table with optional allocation. | ||
124 | * @pgd: Page directory pointer. | ||
125 | * @addr: Address to index page table using. | ||
126 | * @cache: MMU page cache to allocate new page tables from, or NULL. | ||
127 | * | ||
128 | * Walk the page tables pointed to by @pgd to find the PTE corresponding to the | ||
129 | * address @addr. If page tables don't exist for @addr, they will be created | ||
130 | * from the MMU cache if @cache is not NULL. | ||
131 | * | ||
132 | * Returns: Pointer to pte_t corresponding to @addr. | ||
133 | * NULL if a page table doesn't exist for @addr and !@cache. | ||
134 | * NULL if a page table allocation failed. | ||
135 | */ | ||
136 | static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, | ||
137 | unsigned long addr) | ||
138 | { | ||
139 | pud_t *pud; | ||
140 | pmd_t *pmd; | ||
141 | |||
142 | pgd += pgd_index(addr); | ||
143 | if (pgd_none(*pgd)) { | ||
144 | /* Not used on MIPS yet */ | ||
145 | BUG(); | ||
146 | return NULL; | ||
147 | } | ||
148 | pud = pud_offset(pgd, addr); | ||
149 | if (pud_none(*pud)) { | ||
150 | pmd_t *new_pmd; | ||
151 | |||
152 | if (!cache) | ||
153 | return NULL; | ||
154 | new_pmd = mmu_memory_cache_alloc(cache); | ||
155 | pmd_init((unsigned long)new_pmd, | ||
156 | (unsigned long)invalid_pte_table); | ||
157 | pud_populate(NULL, pud, new_pmd); | ||
158 | } | ||
159 | pmd = pmd_offset(pud, addr); | ||
160 | if (pmd_none(*pmd)) { | ||
161 | pte_t *new_pte; | ||
162 | |||
163 | if (!cache) | ||
164 | return NULL; | ||
165 | new_pte = mmu_memory_cache_alloc(cache); | ||
166 | clear_page(new_pte); | ||
167 | pmd_populate_kernel(NULL, pmd, new_pte); | ||
168 | } | ||
169 | return pte_offset(pmd, addr); | ||
170 | } | ||
171 | |||
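Each level of the walk above indexes its table with a different slice of the address (pgd_index(), __pud_offset(), and so on). As a toy illustration of that arithmetic, here is the index split for an assumed 4K-page, three-level layout with 512 pointer-sized entries per table; the real shift values come from the MIPS pgtable headers and depend on the kernel configuration:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PTRS_PER_TABLE  512                     /* 4K page / 8-byte entries */
#define PTE_SHIFT       PAGE_SHIFT              /* bits 12..20 index the PTE table */
#define PMD_SHIFT       (PTE_SHIFT + 9)         /* bits 21..29 index the PMD */
#define PGD_SHIFT       (PMD_SHIFT + 9)         /* bits 30..38 index the PGD */

int main(void)
{
        unsigned long gpa = 0x4789a123UL;

        printf("pgd %lu, pmd %lu, pte %lu, offset %lu\n",
               (gpa >> PGD_SHIFT) & (PTRS_PER_TABLE - 1),
               (gpa >> PMD_SHIFT) & (PTRS_PER_TABLE - 1),
               (gpa >> PTE_SHIFT) & (PTRS_PER_TABLE - 1),
               gpa & ((1UL << PAGE_SHIFT) - 1));
        return 0;
}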
172 | /* Caller must hold kvm->mmu_lock */ | ||
173 | static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm, | ||
174 | struct kvm_mmu_memory_cache *cache, | ||
175 | unsigned long addr) | ||
176 | { | ||
177 | return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}. | ||
182 | * Flush a range of guest physical address space from the VM's GPA page tables. | ||
183 | */ | ||
184 | |||
185 | static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa, | ||
186 | unsigned long end_gpa) | ||
187 | { | ||
188 | int i_min = __pte_offset(start_gpa); | ||
189 | int i_max = __pte_offset(end_gpa); | ||
190 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); | ||
191 | int i; | ||
192 | |||
193 | for (i = i_min; i <= i_max; ++i) { | ||
194 | if (!pte_present(pte[i])) | ||
195 | continue; | ||
196 | |||
197 | set_pte(pte + i, __pte(0)); | ||
198 | } | ||
199 | return safe_to_remove; | ||
200 | } | ||
201 | |||
202 | static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa, | ||
203 | unsigned long end_gpa) | ||
204 | { | ||
205 | pte_t *pte; | ||
206 | unsigned long end = ~0ul; | ||
207 | int i_min = __pmd_offset(start_gpa); | ||
208 | int i_max = __pmd_offset(end_gpa); | ||
209 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); | ||
210 | int i; | ||
211 | |||
212 | for (i = i_min; i <= i_max; ++i, start_gpa = 0) { | ||
213 | if (!pmd_present(pmd[i])) | ||
214 | continue; | ||
215 | |||
216 | pte = pte_offset(pmd + i, 0); | ||
217 | if (i == i_max) | ||
218 | end = end_gpa; | ||
219 | |||
220 | if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) { | ||
221 | pmd_clear(pmd + i); | ||
222 | pte_free_kernel(NULL, pte); | ||
223 | } else { | ||
224 | safe_to_remove = false; | ||
225 | } | ||
226 | } | ||
227 | return safe_to_remove; | ||
228 | } | ||
229 | |||
230 | static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa, | ||
231 | unsigned long end_gpa) | ||
232 | { | ||
233 | pmd_t *pmd; | ||
234 | unsigned long end = ~0ul; | ||
235 | int i_min = __pud_offset(start_gpa); | ||
236 | int i_max = __pud_offset(end_gpa); | ||
237 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); | ||
238 | int i; | ||
239 | |||
240 | for (i = i_min; i <= i_max; ++i, start_gpa = 0) { | ||
241 | if (!pud_present(pud[i])) | ||
242 | continue; | ||
243 | |||
244 | pmd = pmd_offset(pud + i, 0); | ||
245 | if (i == i_max) | ||
246 | end = end_gpa; | ||
247 | |||
248 | if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) { | ||
249 | pud_clear(pud + i); | ||
250 | pmd_free(NULL, pmd); | ||
251 | } else { | ||
252 | safe_to_remove = false; | ||
253 | } | ||
254 | } | ||
255 | return safe_to_remove; | ||
256 | } | ||
257 | |||
258 | static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa, | ||
259 | unsigned long end_gpa) | ||
260 | { | ||
261 | pud_t *pud; | ||
262 | unsigned long end = ~0ul; | ||
263 | int i_min = pgd_index(start_gpa); | ||
264 | int i_max = pgd_index(end_gpa); | ||
265 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); | ||
266 | int i; | ||
267 | |||
268 | for (i = i_min; i <= i_max; ++i, start_gpa = 0) { | ||
269 | if (!pgd_present(pgd[i])) | ||
270 | continue; | ||
271 | |||
272 | pud = pud_offset(pgd + i, 0); | ||
273 | if (i == i_max) | ||
274 | end = end_gpa; | ||
275 | |||
276 | if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) { | ||
277 | pgd_clear(pgd + i); | ||
278 | pud_free(NULL, pud); | ||
279 | } else { | ||
280 | safe_to_remove = false; | ||
281 | } | ||
282 | } | ||
283 | return safe_to_remove; | ||
284 | } | ||
285 | |||
286 | /** | ||
287 | * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses. | ||
288 | * @kvm: KVM pointer. | ||
289 | * @start_gfn: Guest frame number of first page in GPA range to flush. | ||
290 | * @end_gfn: Guest frame number of last page in GPA range to flush. | ||
291 | * | ||
292 | * Flushes a range of GPA mappings from the GPA page tables. | ||
293 | * | ||
294 | * The caller must hold the @kvm->mmu_lock spinlock. | ||
295 | * | ||
296 | * Returns: Whether it's safe to remove the top level page directory because | ||
297 | * all lower levels have been removed. | ||
298 | */ | ||
299 | bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) | ||
300 | { | ||
301 | return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd, | ||
302 | start_gfn << PAGE_SHIFT, | ||
303 | end_gfn << PAGE_SHIFT); | ||
304 | } | ||
305 | |||
306 | #define BUILD_PTE_RANGE_OP(name, op) \ | ||
307 | static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \ | ||
308 | unsigned long end) \ | ||
309 | { \ | ||
310 | int ret = 0; \ | ||
311 | int i_min = __pte_offset(start); \ | ||
312 | int i_max = __pte_offset(end); \ | ||
313 | int i; \ | ||
314 | pte_t old, new; \ | ||
315 | \ | ||
316 | for (i = i_min; i <= i_max; ++i) { \ | ||
317 | if (!pte_present(pte[i])) \ | ||
318 | continue; \ | ||
319 | \ | ||
320 | old = pte[i]; \ | ||
321 | new = op(old); \ | ||
322 | if (pte_val(new) == pte_val(old)) \ | ||
323 | continue; \ | ||
324 | set_pte(pte + i, new); \ | ||
325 | ret = 1; \ | ||
326 | } \ | ||
327 | return ret; \ | ||
328 | } \ | ||
329 | \ | ||
330 | /* returns true if anything was done */ \ | ||
331 | static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \ | ||
332 | unsigned long end) \ | ||
333 | { \ | ||
334 | int ret = 0; \ | ||
335 | pte_t *pte; \ | ||
336 | unsigned long cur_end = ~0ul; \ | ||
337 | int i_min = __pmd_offset(start); \ | ||
338 | int i_max = __pmd_offset(end); \ | ||
339 | int i; \ | ||
340 | \ | ||
341 | for (i = i_min; i <= i_max; ++i, start = 0) { \ | ||
342 | if (!pmd_present(pmd[i])) \ | ||
343 | continue; \ | ||
344 | \ | ||
345 | pte = pte_offset(pmd + i, 0); \ | ||
346 | if (i == i_max) \ | ||
347 | cur_end = end; \ | ||
348 | \ | ||
349 | ret |= kvm_mips_##name##_pte(pte, start, cur_end); \ | ||
350 | } \ | ||
351 | return ret; \ | ||
352 | } \ | ||
353 | \ | ||
354 | static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \ | ||
355 | unsigned long end) \ | ||
356 | { \ | ||
357 | int ret = 0; \ | ||
358 | pmd_t *pmd; \ | ||
359 | unsigned long cur_end = ~0ul; \ | ||
360 | int i_min = __pud_offset(start); \ | ||
361 | int i_max = __pud_offset(end); \ | ||
362 | int i; \ | ||
363 | \ | ||
364 | for (i = i_min; i <= i_max; ++i, start = 0) { \ | ||
365 | if (!pud_present(pud[i])) \ | ||
366 | continue; \ | ||
367 | \ | ||
368 | pmd = pmd_offset(pud + i, 0); \ | ||
369 | if (i == i_max) \ | ||
370 | cur_end = end; \ | ||
371 | \ | ||
372 | ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \ | ||
373 | } \ | ||
374 | return ret; \ | ||
375 | } \ | ||
376 | \ | ||
377 | static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \ | ||
378 | unsigned long end) \ | ||
379 | { \ | ||
380 | int ret = 0; \ | ||
381 | pud_t *pud; \ | ||
382 | unsigned long cur_end = ~0ul; \ | ||
383 | int i_min = pgd_index(start); \ | ||
384 | int i_max = pgd_index(end); \ | ||
385 | int i; \ | ||
386 | \ | ||
387 | for (i = i_min; i <= i_max; ++i, start = 0) { \ | ||
388 | if (!pgd_present(pgd[i])) \ | ||
389 | continue; \ | ||
390 | \ | ||
391 | pud = pud_offset(pgd + i, 0); \ | ||
392 | if (i == i_max) \ | ||
393 | cur_end = end; \ | ||
394 | \ | ||
395 | ret |= kvm_mips_##name##_pud(pud, start, cur_end); \ | ||
396 | } \ | ||
397 | return ret; \ | ||
398 | } | ||
399 | |||
400 | /* | ||
401 | * kvm_mips_mkclean_gpa_pt. | ||
402 | * Mark a range of guest physical address space clean (writes fault) in the VM's | ||
403 | * GPA page table to allow dirty page tracking. | ||
404 | */ | ||
36 | 405 | ||
37 | if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) | 406 | BUILD_PTE_RANGE_OP(mkclean, pte_mkclean) |
407 | |||
408 | /** | ||
409 | * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean. | ||
410 | * @kvm: KVM pointer. | ||
411 | * @start_gfn: Guest frame number of first page in GPA range to make clean. | ||
412 | * @end_gfn: Guest frame number of last page in GPA range to make clean. | ||
413 | * | ||
414 | * Make a range of GPA mappings clean so that guest writes will fault and | ||
415 | * trigger dirty page logging. | ||
416 | * | ||
417 | * The caller must hold the @kvm->mmu_lock spinlock. | ||
418 | * | ||
419 | * Returns: Whether any GPA mappings were modified, which would require | ||
420 | * derived mappings (GVA page tables & TLB entries) to be | ||
421 | * invalidated. | ||
422 | */ | ||
423 | int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) | ||
424 | { | ||
425 | return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd, | ||
426 | start_gfn << PAGE_SHIFT, | ||
427 | end_gfn << PAGE_SHIFT); | ||
428 | } | ||
429 | |||
430 | /** | ||
431 | * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages | ||
432 | * @kvm: The KVM pointer | ||
433 | * @slot: The memory slot associated with mask | ||
434 | * @gfn_offset: The gfn offset in memory slot | ||
435 | * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory | ||
436 | * slot to be write protected | ||
437 | * | ||
438 | * Walks the bits set in @mask and write protects the associated PTEs. The | ||
439 | * caller must hold @kvm->mmu_lock. | ||
440 | */ | ||
441 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, | ||
442 | struct kvm_memory_slot *slot, | ||
443 | gfn_t gfn_offset, unsigned long mask) | ||
444 | { | ||
445 | gfn_t base_gfn = slot->base_gfn + gfn_offset; | ||
446 | gfn_t start = base_gfn + __ffs(mask); | ||
447 | gfn_t end = base_gfn + __fls(mask); | ||
448 | |||
449 | kvm_mips_mkclean_gpa_pt(kvm, start, end); | ||
450 | } | ||
451 | |||
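The helper above narrows the work to the span of pages that are actually dirty: __ffs() yields the lowest set bit of the 64-bit mask and __fls() the highest, and only that gfn range is made clean again. The same computation, with GCC builtins standing in for the kernel helpers and an invented mask value:

#include <stdio.h>

int main(void)
{
        unsigned long long base_gfn = 0x1000;
        unsigned long long mask = 0x00f0000000000310ULL;   /* dirty bits 4, 8, 9, 52-55 */

        unsigned long long start = base_gfn + __builtin_ctzll(mask);        /* ~ __ffs() */
        unsigned long long end   = base_gfn + 63 - __builtin_clzll(mask);   /* ~ __fls() */

        printf("write-protect gfns %#llx..%#llx\n", start, end);
        return 0;
}

Pages inside that span that are already clean are left untouched by the PTE range op, since pte_mkclean() only changes dirty entries.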
452 | /* | ||
453 | * kvm_mips_mkold_gpa_pt. | ||
454 | * Mark a range of guest physical address space old (all accesses fault) in the | ||
455 | * VM's GPA page table to allow detection of commonly used pages. | ||
456 | */ | ||
457 | |||
458 | BUILD_PTE_RANGE_OP(mkold, pte_mkold) | ||
459 | |||
460 | static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn, | ||
461 | gfn_t end_gfn) | ||
462 | { | ||
463 | return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd, | ||
464 | start_gfn << PAGE_SHIFT, | ||
465 | end_gfn << PAGE_SHIFT); | ||
466 | } | ||
467 | |||
468 | static int handle_hva_to_gpa(struct kvm *kvm, | ||
469 | unsigned long start, | ||
470 | unsigned long end, | ||
471 | int (*handler)(struct kvm *kvm, gfn_t gfn, | ||
472 | gpa_t gfn_end, | ||
473 | struct kvm_memory_slot *memslot, | ||
474 | void *data), | ||
475 | void *data) | ||
476 | { | ||
477 | struct kvm_memslots *slots; | ||
478 | struct kvm_memory_slot *memslot; | ||
479 | int ret = 0; | ||
480 | |||
481 | slots = kvm_memslots(kvm); | ||
482 | |||
483 | /* we only care about the pages that the guest sees */ | ||
484 | kvm_for_each_memslot(memslot, slots) { | ||
485 | unsigned long hva_start, hva_end; | ||
486 | gfn_t gfn, gfn_end; | ||
487 | |||
488 | hva_start = max(start, memslot->userspace_addr); | ||
489 | hva_end = min(end, memslot->userspace_addr + | ||
490 | (memslot->npages << PAGE_SHIFT)); | ||
491 | if (hva_start >= hva_end) | ||
492 | continue; | ||
493 | |||
494 | /* | ||
495 | * {gfn(page) | page intersects with [hva_start, hva_end)} = | ||
496 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. | ||
497 | */ | ||
498 | gfn = hva_to_gfn_memslot(hva_start, memslot); | ||
499 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); | ||
500 | |||
501 | ret |= handler(kvm, gfn, gfn_end, memslot, data); | ||
502 | } | ||
503 | |||
504 | return ret; | ||
505 | } | ||
506 | |||
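handle_hva_to_gpa() clips the notifier's HVA range against each memslot and converts the surviving part to guest frame numbers, relying on the linear hva = userspace_addr + ((gfn - base_gfn) << PAGE_SHIFT) layout of a memslot. A small standalone sketch of that clamp-and-convert step (slot geometry and addresses invented for illustration):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

int main(void)
{
        /* One memslot: 256 pages of guest RAM at gfn 0x100, mapped at this hva. */
        uint64_t base_gfn = 0x100, npages = 256, userspace_addr = 0x7f0000000000ULL;

        /* Notifier range, e.g. from an madvise() on part of the slot. */
        uint64_t start = 0x7f0000003000ULL, end = 0x7f0000006800ULL;

        uint64_t hva_start = start > userspace_addr ? start : userspace_addr;
        uint64_t slot_end  = userspace_addr + (npages << PAGE_SHIFT);
        uint64_t hva_end   = end < slot_end ? end : slot_end;

        if (hva_start < hva_end) {
                uint64_t gfn     = base_gfn + ((hva_start - userspace_addr) >> PAGE_SHIFT);
                uint64_t gfn_end = base_gfn +
                        ((hva_end + PAGE_SIZE - 1 - userspace_addr) >> PAGE_SHIFT);

                printf("handler covers gfns 0x%" PRIx64 "..0x%" PRIx64 "\n",
                       gfn, gfn_end - 1);
        }
        return 0;
}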
507 | |||
508 | static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | ||
509 | struct kvm_memory_slot *memslot, void *data) | ||
510 | { | ||
511 | kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end); | ||
512 | return 1; | ||
513 | } | ||
514 | |||
515 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | ||
516 | { | ||
517 | unsigned long end = hva + PAGE_SIZE; | ||
518 | |||
519 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); | ||
520 | |||
521 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) | ||
526 | { | ||
527 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | ||
528 | |||
529 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | ||
534 | struct kvm_memory_slot *memslot, void *data) | ||
535 | { | ||
536 | gpa_t gpa = gfn << PAGE_SHIFT; | ||
537 | pte_t hva_pte = *(pte_t *)data; | ||
538 | pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); | ||
539 | pte_t old_pte; | ||
540 | |||
541 | if (!gpa_pte) | ||
542 | return 0; | ||
543 | |||
544 | /* Mapping may need adjusting depending on memslot flags */ | ||
545 | old_pte = *gpa_pte; | ||
546 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) | ||
547 | hva_pte = pte_mkclean(hva_pte); | ||
548 | else if (memslot->flags & KVM_MEM_READONLY) | ||
549 | hva_pte = pte_wrprotect(hva_pte); | ||
550 | |||
551 | set_pte(gpa_pte, hva_pte); | ||
552 | |||
553 | /* Replacing an absent or old page doesn't need flushes */ | ||
554 | if (!pte_present(old_pte) || !pte_young(old_pte)) | ||
38 | return 0; | 555 | return 0; |
39 | 556 | ||
557 | /* Pages swapped, aged, moved, or cleaned require flushes */ | ||
558 | return !pte_present(hva_pte) || | ||
559 | !pte_young(hva_pte) || | ||
560 | pte_pfn(old_pte) != pte_pfn(hva_pte) || | ||
561 | (pte_dirty(old_pte) && !pte_dirty(hva_pte)); | ||
562 | } | ||
563 | |||
564 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | ||
565 | { | ||
566 | unsigned long end = hva + PAGE_SIZE; | ||
567 | int ret; | ||
568 | |||
569 | ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte); | ||
570 | if (ret) | ||
571 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
572 | } | ||
573 | |||
574 | static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | ||
575 | struct kvm_memory_slot *memslot, void *data) | ||
576 | { | ||
577 | return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end); | ||
578 | } | ||
579 | |||
580 | static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | ||
581 | struct kvm_memory_slot *memslot, void *data) | ||
582 | { | ||
583 | gpa_t gpa = gfn << PAGE_SHIFT; | ||
584 | pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); | ||
585 | |||
586 | if (!gpa_pte) | ||
587 | return 0; | ||
588 | return pte_young(*gpa_pte); | ||
589 | } | ||
590 | |||
591 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) | ||
592 | { | ||
593 | return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); | ||
594 | } | ||
595 | |||
596 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | ||
597 | { | ||
598 | return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); | ||
599 | } | ||
600 | |||
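Together, these unmap/age/set_spte handlers are what makes KVM_CAP_SYNC_MMU real on MIPS: the host can change or drop mappings of guest RAM at any time and the GPA tables follow. One concrete consequence, sketched from the VMM side, is ballooning-style memory release with a plain madvise(), no KVM-specific call needed:

#include <sys/mman.h>

/* Give a range of guest RAM back to the host. kvm_unmap_hva_range() runs
 * via the MMU notifier and drops the stale GPA->HPA entries; the guest's
 * next access to the range simply refaults fresh zero pages. */
int release_guest_ram(void *hva, size_t len)
{
        return madvise(hva, len, MADV_DONTNEED);
}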
601 | /** | ||
602 | * _kvm_mips_map_page_fast() - Fast path GPA fault handler. | ||
603 | * @vcpu: VCPU pointer. | ||
604 | * @gpa: Guest physical address of fault. | ||
605 | * @write_fault: Whether the fault was due to a write. | ||
606 | * @out_entry: New PTE for @gpa (written on success unless NULL). | ||
607 | * @out_buddy: New PTE for @gpa's buddy (written on success unless | ||
608 | * NULL). | ||
609 | * | ||
610 | * Perform fast path GPA fault handling, doing all that can be done without | ||
611 | * calling into KVM. This handles marking old pages young (for idle page | ||
612 | * tracking), and dirtying of clean pages (for dirty page logging). | ||
613 | * | ||
614 | * Returns: 0 on success, in which case we can update derived mappings and | ||
615 | * resume guest execution. | ||
616 | * -EFAULT on failure due to absent GPA mapping or write to | ||
617 | * read-only page, in which case KVM must be consulted. | ||
618 | */ | ||
619 | static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, | ||
620 | bool write_fault, | ||
621 | pte_t *out_entry, pte_t *out_buddy) | ||
622 | { | ||
623 | struct kvm *kvm = vcpu->kvm; | ||
624 | gfn_t gfn = gpa >> PAGE_SHIFT; | ||
625 | pte_t *ptep; | ||
626 | kvm_pfn_t pfn = 0; /* silence bogus GCC warning */ | ||
627 | bool pfn_valid = false; | ||
628 | int ret = 0; | ||
629 | |||
630 | spin_lock(&kvm->mmu_lock); | ||
631 | |||
632 | /* Fast path - just check GPA page table for an existing entry */ | ||
633 | ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa); | ||
634 | if (!ptep || !pte_present(*ptep)) { | ||
635 | ret = -EFAULT; | ||
636 | goto out; | ||
637 | } | ||
638 | |||
639 | /* Track access to pages marked old */ | ||
640 | if (!pte_young(*ptep)) { | ||
641 | set_pte(ptep, pte_mkyoung(*ptep)); | ||
642 | pfn = pte_pfn(*ptep); | ||
643 | pfn_valid = true; | ||
644 | /* call kvm_set_pfn_accessed() after unlock */ | ||
645 | } | ||
646 | if (write_fault && !pte_dirty(*ptep)) { | ||
647 | if (!pte_write(*ptep)) { | ||
648 | ret = -EFAULT; | ||
649 | goto out; | ||
650 | } | ||
651 | |||
652 | /* Track dirtying of writeable pages */ | ||
653 | set_pte(ptep, pte_mkdirty(*ptep)); | ||
654 | pfn = pte_pfn(*ptep); | ||
655 | mark_page_dirty(kvm, gfn); | ||
656 | kvm_set_pfn_dirty(pfn); | ||
657 | } | ||
658 | |||
659 | if (out_entry) | ||
660 | *out_entry = *ptep; | ||
661 | if (out_buddy) | ||
662 | *out_buddy = *ptep_buddy(ptep); | ||
663 | |||
664 | out: | ||
665 | spin_unlock(&kvm->mmu_lock); | ||
666 | if (pfn_valid) | ||
667 | kvm_set_pfn_accessed(pfn); | ||
668 | return ret; | ||
669 | } | ||
670 | |||
671 | /** | ||
672 | * kvm_mips_map_page() - Map a guest physical page. | ||
673 | * @vcpu: VCPU pointer. | ||
674 | * @gpa: Guest physical address of fault. | ||
675 | * @write_fault: Whether the fault was due to a write. | ||
676 | * @out_entry: New PTE for @gpa (written on success unless NULL). | ||
677 | * @out_buddy: New PTE for @gpa's buddy (written on success unless | ||
678 | * NULL). | ||
679 | * | ||
680 | * Handle GPA faults by creating a new GPA mapping (or updating an existing | ||
681 | * one). | ||
682 | * | ||
683 | * This takes care of marking pages young or dirty (idle/dirty page tracking), | ||
684 | * asking KVM for the corresponding PFN, and creating a mapping in the GPA page | ||
685 | * tables. Derived mappings (GVA page tables and TLBs) must be handled by the | ||
686 | * caller. | ||
687 | * | ||
688 | * Returns: 0 on success, in which case the caller may use the @out_entry | ||
689 | * and @out_buddy PTEs to update derived mappings and resume guest | ||
690 | * execution. | ||
691 | * -EFAULT if there is no memory region at @gpa or a write was | ||
692 | * attempted to a read-only memory region. This is usually handled | ||
693 | * as an MMIO access. | ||
694 | */ | ||
695 | static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, | ||
696 | bool write_fault, | ||
697 | pte_t *out_entry, pte_t *out_buddy) | ||
698 | { | ||
699 | struct kvm *kvm = vcpu->kvm; | ||
700 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; | ||
701 | gfn_t gfn = gpa >> PAGE_SHIFT; | ||
702 | int srcu_idx, err; | ||
703 | kvm_pfn_t pfn; | ||
704 | pte_t *ptep, entry, old_pte; | ||
705 | bool writeable; | ||
706 | unsigned long prot_bits; | ||
707 | unsigned long mmu_seq; | ||
708 | |||
709 | /* Try the fast path to handle old / clean pages */ | ||
40 | srcu_idx = srcu_read_lock(&kvm->srcu); | 710 | srcu_idx = srcu_read_lock(&kvm->srcu); |
41 | pfn = gfn_to_pfn(kvm, gfn); | 711 | err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry, |
712 | out_buddy); | ||
713 | if (!err) | ||
714 | goto out; | ||
42 | 715 | ||
716 | /* We need a minimum of cached pages ready for page table creation */ | ||
717 | err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, | ||
718 | KVM_NR_MEM_OBJS); | ||
719 | if (err) | ||
720 | goto out; | ||
721 | |||
722 | retry: | ||
723 | /* | ||
724 | * Used to check for invalidations in progress, of the pfn that is | ||
725 | * returned by gfn_to_pfn_prot() below. | ||
726 | */ | ||
727 | mmu_seq = kvm->mmu_notifier_seq; | ||
728 | /* | ||
729 | * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in | ||
730 | * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't | ||
731 | * risk the page we get a reference to getting unmapped before we have a | ||
732 | * chance to grab the mmu_lock without mmu_notifier_retry() noticing. | ||
733 | * | ||
734 | * This smp_rmb() pairs with the effective smp_wmb() of the combination | ||
735 | * of the pte_unmap_unlock() after the PTE is zapped, and the | ||
736 | * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before | ||
737 | * mmu_notifier_seq is incremented. | ||
738 | */ | ||
739 | smp_rmb(); | ||
740 | |||
741 | /* Slow path - ask KVM core whether we can access this GPA */ | ||
742 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable); | ||
43 | if (is_error_noslot_pfn(pfn)) { | 743 | if (is_error_noslot_pfn(pfn)) { |
44 | kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn); | ||
45 | err = -EFAULT; | 744 | err = -EFAULT; |
46 | goto out; | 745 | goto out; |
47 | } | 746 | } |
48 | 747 | ||
49 | kvm->arch.guest_pmap[gfn] = pfn; | 748 | spin_lock(&kvm->mmu_lock); |
749 | /* Check if an invalidation has taken place since we got pfn */ | ||
750 | if (mmu_notifier_retry(kvm, mmu_seq)) { | ||
751 | /* | ||
752 | * This can happen when mappings are changed asynchronously, but | ||
753 | * also synchronously if a COW is triggered by | ||
754 | * gfn_to_pfn_prot(). | ||
755 | */ | ||
756 | spin_unlock(&kvm->mmu_lock); | ||
757 | kvm_release_pfn_clean(pfn); | ||
758 | goto retry; | ||
759 | } | ||
760 | |||
761 | /* Ensure page tables are allocated */ | ||
762 | ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa); | ||
763 | |||
764 | /* Set up the PTE */ | ||
765 | prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default; | ||
766 | if (writeable) { | ||
767 | prot_bits |= _PAGE_WRITE; | ||
768 | if (write_fault) { | ||
769 | prot_bits |= __WRITEABLE; | ||
770 | mark_page_dirty(kvm, gfn); | ||
771 | kvm_set_pfn_dirty(pfn); | ||
772 | } | ||
773 | } | ||
774 | entry = pfn_pte(pfn, __pgprot(prot_bits)); | ||
775 | |||
776 | /* Write the PTE */ | ||
777 | old_pte = *ptep; | ||
778 | set_pte(ptep, entry); | ||
779 | |||
780 | err = 0; | ||
781 | if (out_entry) | ||
782 | *out_entry = *ptep; | ||
783 | if (out_buddy) | ||
784 | *out_buddy = *ptep_buddy(ptep); | ||
785 | |||
786 | spin_unlock(&kvm->mmu_lock); | ||
787 | kvm_release_pfn_clean(pfn); | ||
788 | kvm_set_pfn_accessed(pfn); | ||
50 | out: | 789 | out: |
51 | srcu_read_unlock(&kvm->srcu, srcu_idx); | 790 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
52 | return err; | 791 | return err; |
53 | } | 792 | } |
54 | 793 | ||
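The mmu_seq / mmu_notifier_retry() dance above is the standard KVM pattern for racing against concurrent invalidations: sample the sequence count, do the sleepable gfn_to_pfn_prot() lookup outside the lock, then retry if the count moved before mmu_lock was taken. A stripped-down, single-lock model of the idea (the names and the stub lookup are illustrative; the invalidation side is assumed to bump the counter under the same lock):

#include <stdatomic.h>
#include <pthread.h>

static _Atomic unsigned long notifier_seq;              /* bumped by invalidations */
static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for the sleepable lookup (gfn_to_pfn_prot() in the kernel). */
static unsigned long lookup_pfn(unsigned long gfn) { return gfn + 0x1000; }

unsigned long map_gfn(unsigned long gfn)
{
        unsigned long seq, pfn;

retry:
        seq = atomic_load(&notifier_seq);
        atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */

        pfn = lookup_pfn(gfn);                          /* may sleep, may race */

        pthread_mutex_lock(&mmu_lock);
        if (atomic_load(&notifier_seq) != seq) {        /* ~ mmu_notifier_retry() */
                pthread_mutex_unlock(&mmu_lock);
                goto retry;                             /* lookup went stale, redo it */
        }
        /* ...install pfn into the page table here, still under the lock... */
        pthread_mutex_unlock(&mmu_lock);

        return pfn;
}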
55 | /* Translate guest KSEG0 addresses to Host PA */ | 794 | static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu, |
56 | unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, | 795 | unsigned long addr) |
57 | unsigned long gva) | ||
58 | { | 796 | { |
59 | gfn_t gfn; | 797 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
60 | unsigned long offset = gva & ~PAGE_MASK; | 798 | pgd_t *pgdp; |
61 | struct kvm *kvm = vcpu->kvm; | 799 | int ret; |
800 | |||
801 | /* We need a minimum of cached pages ready for page table creation */ | ||
802 | ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, | ||
803 | KVM_NR_MEM_OBJS); | ||
804 | if (ret) | ||
805 | return NULL; | ||
806 | |||
807 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | ||
808 | pgdp = vcpu->arch.guest_kernel_mm.pgd; | ||
809 | else | ||
810 | pgdp = vcpu->arch.guest_user_mm.pgd; | ||
811 | |||
812 | return kvm_mips_walk_pgd(pgdp, memcache, addr); | ||
813 | } | ||
62 | 814 | ||
63 | if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) { | 815 | void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, |
64 | kvm_err("%s/%p: Invalid gva: %#lx\n", __func__, | 816 | bool user) |
65 | __builtin_return_address(0), gva); | 817 | { |
66 | return KVM_INVALID_PAGE; | 818 | pgd_t *pgdp; |
819 | pte_t *ptep; | ||
820 | |||
821 | addr &= PAGE_MASK << 1; | ||
822 | |||
823 | pgdp = vcpu->arch.guest_kernel_mm.pgd; | ||
824 | ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); | ||
825 | if (ptep) { | ||
826 | ptep[0] = pfn_pte(0, __pgprot(0)); | ||
827 | ptep[1] = pfn_pte(0, __pgprot(0)); | ||
828 | } | ||
829 | |||
830 | if (user) { | ||
831 | pgdp = vcpu->arch.guest_user_mm.pgd; | ||
832 | ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); | ||
833 | if (ptep) { | ||
834 | ptep[0] = pfn_pte(0, __pgprot(0)); | ||
835 | ptep[1] = pfn_pte(0, __pgprot(0)); | ||
836 | } | ||
67 | } | 837 | } |
838 | } | ||
68 | 839 | ||
69 | gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT); | 840 | /* |
841 | * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}. | ||
842 | * Flush a range of guest virtual address space from the VM's GVA page tables. | ||
843 | */ | ||
70 | 844 | ||
71 | if (gfn >= kvm->arch.guest_pmap_npages) { | 845 | static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva, |
72 | kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn, | 846 | unsigned long end_gva) |
73 | gva); | 847 | { |
74 | return KVM_INVALID_PAGE; | 848 | int i_min = __pte_offset(start_gva); |
849 | int i_max = __pte_offset(end_gva); | ||
850 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); | ||
851 | int i; | ||
852 | |||
853 | /* | ||
854 | * There's no freeing to do, so there's no point clearing individual | ||
855 | * entries unless only part of the last level page table needs flushing. | ||
856 | */ | ||
857 | if (safe_to_remove) | ||
858 | return true; | ||
859 | |||
860 | for (i = i_min; i <= i_max; ++i) { | ||
861 | if (!pte_present(pte[i])) | ||
862 | continue; | ||
863 | |||
864 | set_pte(pte + i, __pte(0)); | ||
75 | } | 865 | } |
866 | return false; | ||
867 | } | ||
76 | 868 | ||
77 | if (kvm_mips_map_page(vcpu->kvm, gfn) < 0) | 869 | static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva, |
78 | return KVM_INVALID_ADDR; | 870 | unsigned long end_gva) |
871 | { | ||
872 | pte_t *pte; | ||
873 | unsigned long end = ~0ul; | ||
874 | int i_min = __pmd_offset(start_gva); | ||
875 | int i_max = __pmd_offset(end_gva); | ||
876 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); | ||
877 | int i; | ||
878 | |||
879 | for (i = i_min; i <= i_max; ++i, start_gva = 0) { | ||
880 | if (!pmd_present(pmd[i])) | ||
881 | continue; | ||
882 | |||
883 | pte = pte_offset(pmd + i, 0); | ||
884 | if (i == i_max) | ||
885 | end = end_gva; | ||
886 | |||
887 | if (kvm_mips_flush_gva_pte(pte, start_gva, end)) { | ||
888 | pmd_clear(pmd + i); | ||
889 | pte_free_kernel(NULL, pte); | ||
890 | } else { | ||
891 | safe_to_remove = false; | ||
892 | } | ||
893 | } | ||
894 | return safe_to_remove; | ||
895 | } | ||
79 | 896 | ||
80 | return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; | 897 | static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva, |
898 | unsigned long end_gva) | ||
899 | { | ||
900 | pmd_t *pmd; | ||
901 | unsigned long end = ~0ul; | ||
902 | int i_min = __pud_offset(start_gva); | ||
903 | int i_max = __pud_offset(end_gva); | ||
904 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); | ||
905 | int i; | ||
906 | |||
907 | for (i = i_min; i <= i_max; ++i, start_gva = 0) { | ||
908 | if (!pud_present(pud[i])) | ||
909 | continue; | ||
910 | |||
911 | pmd = pmd_offset(pud + i, 0); | ||
912 | if (i == i_max) | ||
913 | end = end_gva; | ||
914 | |||
915 | if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) { | ||
916 | pud_clear(pud + i); | ||
917 | pmd_free(NULL, pmd); | ||
918 | } else { | ||
919 | safe_to_remove = false; | ||
920 | } | ||
921 | } | ||
922 | return safe_to_remove; | ||
923 | } | ||
924 | |||
925 | static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva, | ||
926 | unsigned long end_gva) | ||
927 | { | ||
928 | pud_t *pud; | ||
929 | unsigned long end = ~0ul; | ||
930 | int i_min = pgd_index(start_gva); | ||
931 | int i_max = pgd_index(end_gva); | ||
932 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); | ||
933 | int i; | ||
934 | |||
935 | for (i = i_min; i <= i_max; ++i, start_gva = 0) { | ||
936 | if (!pgd_present(pgd[i])) | ||
937 | continue; | ||
938 | |||
939 | pud = pud_offset(pgd + i, 0); | ||
940 | if (i == i_max) | ||
941 | end = end_gva; | ||
942 | |||
943 | if (kvm_mips_flush_gva_pud(pud, start_gva, end)) { | ||
944 | pgd_clear(pgd + i); | ||
945 | pud_free(NULL, pud); | ||
946 | } else { | ||
947 | safe_to_remove = false; | ||
948 | } | ||
949 | } | ||
950 | return safe_to_remove; | ||
951 | } | ||
952 | |||
953 | void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags) | ||
954 | { | ||
955 | if (flags & KMF_GPA) { | ||
956 | /* all of guest virtual address space could be affected */ | ||
957 | if (flags & KMF_KERN) | ||
958 | /* useg, kseg0, seg2/3 */ | ||
959 | kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff); | ||
960 | else | ||
961 | /* useg */ | ||
962 | kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); | ||
963 | } else { | ||
964 | /* useg */ | ||
965 | kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); | ||
966 | |||
967 | /* kseg2/3 */ | ||
968 | if (flags & KMF_KERN) | ||
969 | kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff); | ||
970 | } | ||
971 | } | ||
972 | |||
973 | static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte) | ||
974 | { | ||
975 | /* | ||
976 | * Don't leak writeable but clean entries from GPA page tables. We don't | ||
977 | * want the normal Linux tlbmod handler to handle dirtying when KVM | ||
978 | * accesses guest memory. | ||
979 | */ | ||
980 | if (!pte_dirty(pte)) | ||
981 | pte = pte_wrprotect(pte); | ||
982 | |||
983 | return pte; | ||
984 | } | ||
985 | |||
986 | static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo) | ||
987 | { | ||
988 | /* Guest EntryLo overrides host EntryLo */ | ||
989 | if (!(entrylo & ENTRYLO_D)) | ||
990 | pte = pte_mkclean(pte); | ||
991 | |||
992 | return kvm_mips_gpa_pte_to_gva_unmapped(pte); | ||
81 | } | 993 | } |
82 | 994 | ||
83 | /* XXXKYMA: Must be called with interrupts disabled */ | 995 | /* XXXKYMA: Must be called with interrupts disabled */ |
84 | int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | 996 | int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, |
85 | struct kvm_vcpu *vcpu) | 997 | struct kvm_vcpu *vcpu, |
998 | bool write_fault) | ||
86 | { | 999 | { |
87 | gfn_t gfn; | 1000 | unsigned long gpa; |
88 | kvm_pfn_t pfn0, pfn1; | 1001 | pte_t pte_gpa[2], *ptep_gva; |
89 | unsigned long vaddr = 0; | 1002 | int idx; |
90 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; | ||
91 | struct kvm *kvm = vcpu->kvm; | ||
92 | const int flush_dcache_mask = 0; | ||
93 | int ret; | ||
94 | 1003 | ||
95 | if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { | 1004 | if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { |
96 | kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); | 1005 | kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); |
@@ -98,49 +1007,39 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | |||
98 | return -1; | 1007 | return -1; |
99 | } | 1008 | } |
100 | 1009 | ||
101 | gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); | 1010 | /* Get the GPA page table entry */ |
102 | if ((gfn | 1) >= kvm->arch.guest_pmap_npages) { | 1011 | gpa = KVM_GUEST_CPHYSADDR(badvaddr); |
103 | kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, | 1012 | idx = (badvaddr >> PAGE_SHIFT) & 1; |
104 | gfn, badvaddr); | 1013 | if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx], |
105 | kvm_mips_dump_host_tlbs(); | 1014 | &pte_gpa[!idx]) < 0) |
106 | return -1; | 1015 | return -1; |
107 | } | ||
108 | vaddr = badvaddr & (PAGE_MASK << 1); | ||
109 | 1016 | ||
110 | if (kvm_mips_map_page(vcpu->kvm, gfn) < 0) | 1017 | /* Get the GVA page table entry */ |
1018 | ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE); | ||
1019 | if (!ptep_gva) { | ||
1020 | kvm_err("No ptep for gva %lx\n", badvaddr); | ||
111 | return -1; | 1021 | return -1; |
1022 | } | ||
112 | 1023 | ||
113 | if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0) | 1024 | /* Copy a pair of entries from GPA page table to GVA page table */ |
114 | return -1; | 1025 | ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]); |
115 | 1026 | ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]); | |
116 | pfn0 = kvm->arch.guest_pmap[gfn & ~0x1]; | ||
117 | pfn1 = kvm->arch.guest_pmap[gfn | 0x1]; | ||
118 | |||
119 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | | ||
120 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | | ||
121 | ENTRYLO_D | ENTRYLO_V; | ||
122 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | | ||
123 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | | ||
124 | ENTRYLO_D | ENTRYLO_V; | ||
125 | |||
126 | preempt_disable(); | ||
127 | entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); | ||
128 | ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, | ||
129 | flush_dcache_mask); | ||
130 | preempt_enable(); | ||
131 | 1027 | ||
132 | return ret; | 1028 | /* Invalidate this entry in the TLB, guest kernel ASID only */ |
1029 | kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); | ||
1030 | return 0; | ||
133 | } | 1031 | } |
134 | 1032 | ||
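A MIPS TLB entry always maps an even/odd pair of pages, which is why the KSEG0 handler above fills both GVA PTEs of the pair and uses idx to mark which half actually faulted. The pairing arithmetic in isolation, with an invented faulting address:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long badvaddr = 0x40005234UL;
        int idx = (badvaddr >> PAGE_SHIFT) & 1;         /* which half of the pair faulted */
        unsigned long even_page = badvaddr & ~(PAGE_SIZE | (PAGE_SIZE - 1));

        printf("pair %#lx/%#lx, faulting half %d\n",
               even_page, even_page + PAGE_SIZE, idx);
        return 0;
}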
135 | int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | 1033 | int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, |
136 | struct kvm_mips_tlb *tlb) | 1034 | struct kvm_mips_tlb *tlb, |
1035 | unsigned long gva, | ||
1036 | bool write_fault) | ||
137 | { | 1037 | { |
138 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; | ||
139 | struct kvm *kvm = vcpu->kvm; | 1038 | struct kvm *kvm = vcpu->kvm; |
140 | kvm_pfn_t pfn0, pfn1; | ||
141 | gfn_t gfn0, gfn1; | ||
142 | long tlb_lo[2]; | 1039 | long tlb_lo[2]; |
143 | int ret; | 1040 | pte_t pte_gpa[2], *ptep_buddy, *ptep_gva; |
1041 | unsigned int idx = TLB_LO_IDX(*tlb, gva); | ||
1042 | bool kernel = KVM_GUEST_KERNEL_MODE(vcpu); | ||
144 | 1043 | ||
145 | tlb_lo[0] = tlb->tlb_lo[0]; | 1044 | tlb_lo[0] = tlb->tlb_lo[0]; |
146 | tlb_lo[1] = tlb->tlb_lo[1]; | 1045 | tlb_lo[1] = tlb->tlb_lo[1]; |
@@ -149,70 +1048,64 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | |||
149 | * The commpage address must not be mapped to anything else if the guest | 1048 | * The commpage address must not be mapped to anything else if the guest |
150 | * TLB contains entries nearby, or commpage accesses will break. | 1049 | * TLB contains entries nearby, or commpage accesses will break. |
151 | */ | 1050 | */ |
152 | if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) & | 1051 | if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1))) |
153 | VPN2_MASK & (PAGE_MASK << 1))) | 1052 | tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0; |
154 | tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0; | ||
155 | |||
156 | gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT; | ||
157 | gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT; | ||
158 | if (gfn0 >= kvm->arch.guest_pmap_npages || | ||
159 | gfn1 >= kvm->arch.guest_pmap_npages) { | ||
160 | kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n", | ||
161 | __func__, gfn0, gfn1, tlb->tlb_hi); | ||
162 | kvm_mips_dump_guest_tlbs(vcpu); | ||
163 | return -1; | ||
164 | } | ||
165 | 1053 | ||
166 | if (kvm_mips_map_page(kvm, gfn0) < 0) | 1054 | /* Get the GPA page table entry */ |
1055 | if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]), | ||
1056 | write_fault, &pte_gpa[idx], NULL) < 0) | ||
167 | return -1; | 1057 | return -1; |
168 | 1058 | ||
169 | if (kvm_mips_map_page(kvm, gfn1) < 0) | 1059 | /* And its GVA buddy's GPA page table entry if it also exists */ |
1060 | pte_gpa[!idx] = pfn_pte(0, __pgprot(0)); | ||
1061 | if (tlb_lo[!idx] & ENTRYLO_V) { | ||
1062 | spin_lock(&kvm->mmu_lock); | ||
1063 | ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL, | ||
1064 | mips3_tlbpfn_to_paddr(tlb_lo[!idx])); | ||
1065 | if (ptep_buddy) | ||
1066 | pte_gpa[!idx] = *ptep_buddy; | ||
1067 | spin_unlock(&kvm->mmu_lock); | ||
1068 | } | ||
1069 | |||
1070 | /* Get the GVA page table entry pair */ | ||
1071 | ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE); | ||
1072 | if (!ptep_gva) { | ||
1073 | kvm_err("No ptep for gva %lx\n", gva); | ||
170 | return -1; | 1074 | return -1; |
1075 | } | ||
171 | 1076 | ||
172 | pfn0 = kvm->arch.guest_pmap[gfn0]; | 1077 | /* Copy a pair of entries from GPA page table to GVA page table */ |
173 | pfn1 = kvm->arch.guest_pmap[gfn1]; | 1078 | ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]); |
1079 | ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]); | ||
174 | 1080 | ||
175 | /* Get attributes from the Guest TLB */ | 1081 | /* Invalidate this entry in the TLB, current guest mode ASID only */ |
176 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | | 1082 | kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel); |
177 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | | ||
178 | (tlb_lo[0] & ENTRYLO_D) | | ||
179 | (tlb_lo[0] & ENTRYLO_V); | ||
180 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | | ||
181 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | | ||
182 | (tlb_lo[1] & ENTRYLO_D) | | ||
183 | (tlb_lo[1] & ENTRYLO_V); | ||
184 | 1083 | ||
185 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, | 1084 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, |
186 | tlb->tlb_lo[0], tlb->tlb_lo[1]); | 1085 | tlb->tlb_lo[0], tlb->tlb_lo[1]); |
187 | 1086 | ||
188 | preempt_disable(); | 1087 | return 0; |
189 | entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? | ||
190 | kvm_mips_get_kernel_asid(vcpu) : | ||
191 | kvm_mips_get_user_asid(vcpu)); | ||
192 | ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, | ||
193 | tlb->tlb_mask); | ||
194 | preempt_enable(); | ||
195 | |||
196 | return ret; | ||
197 | } | 1088 | } |
198 | 1089 | ||
199 | void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, | 1090 | int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, |
200 | struct kvm_vcpu *vcpu) | 1091 | struct kvm_vcpu *vcpu) |
201 | { | 1092 | { |
202 | unsigned long asid = asid_cache(cpu); | 1093 | kvm_pfn_t pfn; |
203 | 1094 | pte_t *ptep; | |
204 | asid += cpu_asid_inc(); | ||
205 | if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) { | ||
206 | if (cpu_has_vtag_icache) | ||
207 | flush_icache_all(); | ||
208 | |||
209 | kvm_local_flush_tlb_all(); /* start new asid cycle */ | ||
210 | 1095 | ||
211 | if (!asid) /* fix version if needed */ | 1096 | ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr); |
212 | asid = asid_first_version(cpu); | 1097 | if (!ptep) { |
1098 | kvm_err("No ptep for commpage %lx\n", badvaddr); | ||
1099 | return -1; | ||
213 | } | 1100 | } |
214 | 1101 | ||
215 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; | 1102 | pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage)); |
1103 | /* Also set valid and dirty, so refill handler doesn't have to */ | ||
1104 | *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED))); | ||
1105 | |||
1106 | /* Invalidate this entry in the TLB, guest kernel ASID only */ | ||
1107 | kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); | ||
1108 | return 0; | ||
216 | } | 1109 | } |
217 | 1110 | ||
218 | /** | 1111 | /** |
@@ -235,42 +1128,13 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu) | |||
235 | /* Restore ASID once we are scheduled back after preemption */ | 1128 | /* Restore ASID once we are scheduled back after preemption */ |
236 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 1129 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
237 | { | 1130 | { |
238 | unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]); | ||
239 | unsigned long flags; | 1131 | unsigned long flags; |
240 | int newasid = 0; | ||
241 | 1132 | ||
242 | kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); | 1133 | kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); |
243 | 1134 | ||
244 | /* Allocate new kernel and user ASIDs if needed */ | ||
245 | |||
246 | local_irq_save(flags); | 1135 | local_irq_save(flags); |
247 | 1136 | ||
248 | if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) & | 1137 | vcpu->cpu = cpu; |
249 | asid_version_mask(cpu)) { | ||
250 | kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu); | ||
251 | vcpu->arch.guest_kernel_asid[cpu] = | ||
252 | vcpu->arch.guest_kernel_mm.context.asid[cpu]; | ||
253 | newasid++; | ||
254 | |||
255 | kvm_debug("[%d]: cpu_context: %#lx\n", cpu, | ||
256 | cpu_context(cpu, current->mm)); | ||
257 | kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n", | ||
258 | cpu, vcpu->arch.guest_kernel_asid[cpu]); | ||
259 | } | ||
260 | |||
261 | if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) & | ||
262 | asid_version_mask(cpu)) { | ||
263 | kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); | ||
264 | vcpu->arch.guest_user_asid[cpu] = | ||
265 | vcpu->arch.guest_user_mm.context.asid[cpu]; | ||
266 | newasid++; | ||
267 | |||
268 | kvm_debug("[%d]: cpu_context: %#lx\n", cpu, | ||
269 | cpu_context(cpu, current->mm)); | ||
270 | kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu, | ||
271 | vcpu->arch.guest_user_asid[cpu]); | ||
272 | } | ||
273 | |||
274 | if (vcpu->arch.last_sched_cpu != cpu) { | 1138 | if (vcpu->arch.last_sched_cpu != cpu) { |
275 | kvm_debug("[%d->%d]KVM VCPU[%d] switch\n", | 1139 | kvm_debug("[%d->%d]KVM VCPU[%d] switch\n", |
276 | vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); | 1140 | vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); |
@@ -282,42 +1146,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
282 | kvm_mips_migrate_count(vcpu); | 1146 | kvm_mips_migrate_count(vcpu); |
283 | } | 1147 | } |
284 | 1148 | ||
285 | if (!newasid) { | ||
286 | /* | ||
287 | * If we preempted while the guest was executing, then reload | ||
288 | * the pre-empted ASID | ||
289 | */ | ||
290 | if (current->flags & PF_VCPU) { | ||
291 | write_c0_entryhi(vcpu->arch. | ||
292 | preempt_entryhi & asid_mask); | ||
293 | ehb(); | ||
294 | } | ||
295 | } else { | ||
296 | /* New ASIDs were allocated for the VM */ | ||
297 | |||
298 | /* | ||
299 | * Were we in guest context? If so then the pre-empted ASID is | ||
300 | * no longer valid, we need to set it to what it should be based | ||
301 | * on the mode of the Guest (Kernel/User) | ||
302 | */ | ||
303 | if (current->flags & PF_VCPU) { | ||
304 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | ||
305 | write_c0_entryhi(vcpu->arch. | ||
306 | guest_kernel_asid[cpu] & | ||
307 | asid_mask); | ||
308 | else | ||
309 | write_c0_entryhi(vcpu->arch. | ||
310 | guest_user_asid[cpu] & | ||
311 | asid_mask); | ||
312 | ehb(); | ||
313 | } | ||
314 | } | ||
315 | |||
316 | /* restore guest state to registers */ | 1149 | /* restore guest state to registers */ |
317 | kvm_mips_callbacks->vcpu_set_regs(vcpu); | 1150 | kvm_mips_callbacks->vcpu_load(vcpu, cpu); |
318 | 1151 | ||
319 | local_irq_restore(flags); | 1152 | local_irq_restore(flags); |
320 | |||
321 | } | 1153 | } |
322 | 1154 | ||
323 | /* ASID can change if another task is scheduled during preemption */ | 1155 | /* ASID can change if another task is scheduled during preemption */ |
@@ -329,75 +1161,90 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
329 | local_irq_save(flags); | 1161 | local_irq_save(flags); |
330 | 1162 | ||
331 | cpu = smp_processor_id(); | 1163 | cpu = smp_processor_id(); |
332 | |||
333 | vcpu->arch.preempt_entryhi = read_c0_entryhi(); | ||
334 | vcpu->arch.last_sched_cpu = cpu; | 1164 | vcpu->arch.last_sched_cpu = cpu; |
1165 | vcpu->cpu = -1; | ||
335 | 1166 | ||
336 | /* save guest state in registers */ | 1167 | /* save guest state in registers */ |
337 | kvm_mips_callbacks->vcpu_get_regs(vcpu); | 1168 | kvm_mips_callbacks->vcpu_put(vcpu, cpu); |
338 | |||
339 | if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & | ||
340 | asid_version_mask(cpu))) { | ||
341 | kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, | ||
342 | cpu_context(cpu, current->mm)); | ||
343 | drop_mmu_context(current->mm, cpu); | ||
344 | } | ||
345 | write_c0_entryhi(cpu_asid(cpu, current->mm)); | ||
346 | ehb(); | ||
347 | 1169 | ||
348 | local_irq_restore(flags); | 1170 | local_irq_restore(flags); |
349 | } | 1171 | } |
350 | 1172 | ||
351 | u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu) | 1173 | /** |
1174 | * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault. | ||
1175 | * @vcpu: Virtual CPU. | ||
1176 | * @gva: Guest virtual address to be accessed. | ||
1177 | * @write: True if write attempted (must be dirtied and made writable). | ||
1178 | * | ||
1179 | * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and | ||
1180 | * dirtying the page if @write so that guest instructions can be modified. | ||
1181 | * | ||
1182 | * Returns: KVM_MIPS_MAPPED on success. | ||
1183 | * KVM_MIPS_GVA if bad guest virtual address. | ||
1184 | * KVM_MIPS_GPA if bad guest physical address. | ||
1185 | * KVM_MIPS_TLB if guest TLB not present. | ||
1186 | * KVM_MIPS_TLBINV if guest TLB present but not valid. | ||
1187 | * KVM_MIPS_TLBMOD if guest TLB read only. | ||
1188 | */ | ||
1189 | enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, | ||
1190 | unsigned long gva, | ||
1191 | bool write) | ||
352 | { | 1192 | { |
353 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1193 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
354 | unsigned long paddr, flags, vpn2, asid; | 1194 | struct kvm_mips_tlb *tlb; |
355 | unsigned long va = (unsigned long)opc; | ||
356 | void *vaddr; | ||
357 | u32 inst; | ||
358 | int index; | 1195 | int index; |
359 | 1196 | ||
360 | if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 || | 1197 | if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) { |
361 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { | 1198 | if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0) |
362 | local_irq_save(flags); | 1199 | return KVM_MIPS_GPA; |
363 | index = kvm_mips_host_tlb_lookup(vcpu, va); | 1200 | } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) || |
364 | if (index >= 0) { | 1201 | KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) { |
365 | inst = *(opc); | 1202 | /* Address should be in the guest TLB */ |
366 | } else { | 1203 | index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) | |
367 | vpn2 = va & VPN2_MASK; | 1204 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID)); |
368 | asid = kvm_read_c0_guest_entryhi(cop0) & | 1205 | if (index < 0) |
369 | KVM_ENTRYHI_ASID; | 1206 | return KVM_MIPS_TLB; |
370 | index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid); | 1207 | tlb = &vcpu->arch.guest_tlb[index]; |
371 | if (index < 0) { | 1208 | |
372 | kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", | 1209 | /* Entry should be valid, and dirty for writes */ |
373 | __func__, opc, vcpu, read_c0_entryhi()); | 1210 | if (!TLB_IS_VALID(*tlb, gva)) |
374 | kvm_mips_dump_host_tlbs(); | 1211 | return KVM_MIPS_TLBINV; |
375 | kvm_mips_dump_guest_tlbs(vcpu); | 1212 | if (write && !TLB_IS_DIRTY(*tlb, gva)) |
376 | local_irq_restore(flags); | 1213 | return KVM_MIPS_TLBMOD; |
377 | return KVM_INVALID_INST; | 1214 | |
378 | } | 1215 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write)) |
379 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, | 1216 | return KVM_MIPS_GPA; |
380 | &vcpu->arch.guest_tlb[index])) { | ||
381 | kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n", | ||
382 | __func__, opc, index, vcpu, | ||
383 | read_c0_entryhi()); | ||
384 | kvm_mips_dump_guest_tlbs(vcpu); | ||
385 | local_irq_restore(flags); | ||
386 | return KVM_INVALID_INST; | ||
387 | } | ||
388 | inst = *(opc); | ||
389 | } | ||
390 | local_irq_restore(flags); | ||
391 | } else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { | ||
392 | paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va); | ||
393 | vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr))); | ||
394 | vaddr += paddr & ~PAGE_MASK; | ||
395 | inst = *(u32 *)vaddr; | ||
396 | kunmap_atomic(vaddr); | ||
397 | } else { | 1217 | } else { |
398 | kvm_err("%s: illegal address: %p\n", __func__, opc); | 1218 | return KVM_MIPS_GVA; |
399 | return KVM_INVALID_INST; | ||
400 | } | 1219 | } |
401 | 1220 | ||
402 | return inst; | 1221 | return KVM_MIPS_MAPPED; |
1222 | } | ||
1223 | |||
1224 | int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) | ||
1225 | { | ||
1226 | int err; | ||
1227 | |||
1228 | retry: | ||
1229 | kvm_trap_emul_gva_lockless_begin(vcpu); | ||
1230 | err = get_user(*out, opc); | ||
1231 | kvm_trap_emul_gva_lockless_end(vcpu); | ||
1232 | |||
1233 | if (unlikely(err)) { | ||
1234 | /* | ||
1235 | * Try to handle the fault, maybe we just raced with a GVA | ||
1236 | * invalidation. | ||
1237 | */ | ||
1238 | err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc, | ||
1239 | false); | ||
1240 | if (unlikely(err)) { | ||
1241 | kvm_err("%s: illegal address: %p\n", | ||
1242 | __func__, opc); | ||
1243 | return -EFAULT; | ||
1244 | } | ||
1245 | |||
1246 | /* Hopefully it'll work now */ | ||
1247 | goto retry; | ||
1248 | } | ||
1249 | return 0; | ||
403 | } | 1250 | } |
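The kernel-doc above for kvm_trap_emul_gva_fault() spells out the access pattern the new GVA page tables enable: touch guest memory through the ordinary uaccess helpers, and only on failure call the fault handler and retry, exactly as the new kvm_get_inst() does. A minimal sketch of the same pattern applied to an arbitrary guest data word, as it would sit alongside kvm_get_inst() in mmu.c (kvm_read_guest_word() is a hypothetical wrapper for illustration, not part of this series):

/* Hypothetical helper, modelled on kvm_get_inst() above. */
static int kvm_read_guest_word(u32 __user *gva, struct kvm_vcpu *vcpu, u32 *out)
{
	int err;

retry:
	/* Keep the guest GVA mappings live while touching guest memory. */
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, gva);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/* Map the GVA (or detect a genuinely bad address), then retry. */
		if (kvm_trap_emul_gva_fault(vcpu, (unsigned long)gva, false) !=
		    KVM_MIPS_MAPPED)
			return -EFAULT;
		goto retry;
	}
	return 0;
}

The write path is analogous with write == true, which per the kernel-doc also ensures the underlying page is dirtied and made writable before the access is retried.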
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 254377d8e0b9..2819eb793345 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c | |||
@@ -33,28 +33,20 @@ | |||
33 | #define KVM_GUEST_PC_TLB 0 | 33 | #define KVM_GUEST_PC_TLB 0 |
34 | #define KVM_GUEST_SP_TLB 1 | 34 | #define KVM_GUEST_SP_TLB 1 |
35 | 35 | ||
36 | atomic_t kvm_mips_instance; | ||
37 | EXPORT_SYMBOL_GPL(kvm_mips_instance); | ||
38 | |||
39 | static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | 36 | static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) |
40 | { | 37 | { |
38 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
41 | int cpu = smp_processor_id(); | 39 | int cpu = smp_processor_id(); |
42 | 40 | ||
43 | return vcpu->arch.guest_kernel_asid[cpu] & | 41 | return cpu_asid(cpu, kern_mm); |
44 | cpu_asid_mask(&cpu_data[cpu]); | ||
45 | } | 42 | } |
46 | 43 | ||
47 | static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) | 44 | static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) |
48 | { | 45 | { |
46 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
49 | int cpu = smp_processor_id(); | 47 | int cpu = smp_processor_id(); |
50 | 48 | ||
51 | return vcpu->arch.guest_user_asid[cpu] & | 49 | return cpu_asid(cpu, user_mm); |
52 | cpu_asid_mask(&cpu_data[cpu]); | ||
53 | } | ||
54 | |||
55 | inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu) | ||
56 | { | ||
57 | return vcpu->kvm->arch.commpage_tlb; | ||
58 | } | 50 | } |
59 | 51 | ||
60 | /* Structure defining a TLB entry data set. */ | 52 |
@@ -104,109 +96,6 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) | |||
104 | } | 96 | } |
105 | EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs); | 97 | EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs); |
106 | 98 | ||
107 | /* XXXKYMA: Must be called with interrupts disabled */ | ||
108 | /* set flush_dcache_mask == 0 if no dcache flush required */ | ||
109 | int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | ||
110 | unsigned long entrylo0, unsigned long entrylo1, | ||
111 | int flush_dcache_mask) | ||
112 | { | ||
113 | unsigned long flags; | ||
114 | unsigned long old_entryhi; | ||
115 | int idx; | ||
116 | |||
117 | local_irq_save(flags); | ||
118 | |||
119 | old_entryhi = read_c0_entryhi(); | ||
120 | write_c0_entryhi(entryhi); | ||
121 | mtc0_tlbw_hazard(); | ||
122 | |||
123 | tlb_probe(); | ||
124 | tlb_probe_hazard(); | ||
125 | idx = read_c0_index(); | ||
126 | |||
127 | if (idx > current_cpu_data.tlbsize) { | ||
128 | kvm_err("%s: Invalid Index: %d\n", __func__, idx); | ||
129 | kvm_mips_dump_host_tlbs(); | ||
130 | local_irq_restore(flags); | ||
131 | return -1; | ||
132 | } | ||
133 | |||
134 | write_c0_entrylo0(entrylo0); | ||
135 | write_c0_entrylo1(entrylo1); | ||
136 | mtc0_tlbw_hazard(); | ||
137 | |||
138 | if (idx < 0) | ||
139 | tlb_write_random(); | ||
140 | else | ||
141 | tlb_write_indexed(); | ||
142 | tlbw_use_hazard(); | ||
143 | |||
144 | kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n", | ||
145 | vcpu->arch.pc, idx, read_c0_entryhi(), | ||
146 | read_c0_entrylo0(), read_c0_entrylo1()); | ||
147 | |||
148 | /* Flush D-cache */ | ||
149 | if (flush_dcache_mask) { | ||
150 | if (entrylo0 & ENTRYLO_V) { | ||
151 | ++vcpu->stat.flush_dcache_exits; | ||
152 | flush_data_cache_page((entryhi & VPN2_MASK) & | ||
153 | ~flush_dcache_mask); | ||
154 | } | ||
155 | if (entrylo1 & ENTRYLO_V) { | ||
156 | ++vcpu->stat.flush_dcache_exits; | ||
157 | flush_data_cache_page(((entryhi & VPN2_MASK) & | ||
158 | ~flush_dcache_mask) | | ||
159 | (0x1 << PAGE_SHIFT)); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | /* Restore old ASID */ | ||
164 | write_c0_entryhi(old_entryhi); | ||
165 | mtc0_tlbw_hazard(); | ||
166 | local_irq_restore(flags); | ||
167 | return 0; | ||
168 | } | ||
169 | EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write); | ||
170 | |||
171 | int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | ||
172 | struct kvm_vcpu *vcpu) | ||
173 | { | ||
174 | kvm_pfn_t pfn; | ||
175 | unsigned long flags, old_entryhi = 0, vaddr = 0; | ||
176 | unsigned long entrylo[2] = { 0, 0 }; | ||
177 | unsigned int pair_idx; | ||
178 | |||
179 | pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage)); | ||
180 | pair_idx = (badvaddr >> PAGE_SHIFT) & 1; | ||
181 | entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) | | ||
182 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | | ||
183 | ENTRYLO_D | ENTRYLO_V; | ||
184 | |||
185 | local_irq_save(flags); | ||
186 | |||
187 | old_entryhi = read_c0_entryhi(); | ||
188 | vaddr = badvaddr & (PAGE_MASK << 1); | ||
189 | write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu)); | ||
190 | write_c0_entrylo0(entrylo[0]); | ||
191 | write_c0_entrylo1(entrylo[1]); | ||
192 | write_c0_index(kvm_mips_get_commpage_asid(vcpu)); | ||
193 | mtc0_tlbw_hazard(); | ||
194 | tlb_write_indexed(); | ||
195 | tlbw_use_hazard(); | ||
196 | |||
197 | kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", | ||
198 | vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), | ||
199 | read_c0_entrylo0(), read_c0_entrylo1()); | ||
200 | |||
201 | /* Restore old ASID */ | ||
202 | write_c0_entryhi(old_entryhi); | ||
203 | mtc0_tlbw_hazard(); | ||
204 | local_irq_restore(flags); | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault); | ||
209 | |||
210 | int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | 99 | int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) |
211 | { | 100 | { |
212 | int i; | 101 | int i; |
@@ -228,51 +117,11 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | |||
228 | } | 117 | } |
229 | EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup); | 118 | EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup); |
230 | 119 | ||
231 | int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) | 120 | static int _kvm_mips_host_tlb_inv(unsigned long entryhi) |
232 | { | ||
233 | unsigned long old_entryhi, flags; | ||
234 | int idx; | ||
235 | |||
236 | local_irq_save(flags); | ||
237 | |||
238 | old_entryhi = read_c0_entryhi(); | ||
239 | |||
240 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | ||
241 | write_c0_entryhi((vaddr & VPN2_MASK) | | ||
242 | kvm_mips_get_kernel_asid(vcpu)); | ||
243 | else { | ||
244 | write_c0_entryhi((vaddr & VPN2_MASK) | | ||
245 | kvm_mips_get_user_asid(vcpu)); | ||
246 | } | ||
247 | |||
248 | mtc0_tlbw_hazard(); | ||
249 | |||
250 | tlb_probe(); | ||
251 | tlb_probe_hazard(); | ||
252 | idx = read_c0_index(); | ||
253 | |||
254 | /* Restore old ASID */ | ||
255 | write_c0_entryhi(old_entryhi); | ||
256 | mtc0_tlbw_hazard(); | ||
257 | |||
258 | local_irq_restore(flags); | ||
259 | |||
260 | kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx); | ||
261 | |||
262 | return idx; | ||
263 | } | ||
264 | EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup); | ||
265 | |||
266 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | ||
267 | { | 121 | { |
268 | int idx; | 122 | int idx; |
269 | unsigned long flags, old_entryhi; | ||
270 | |||
271 | local_irq_save(flags); | ||
272 | |||
273 | old_entryhi = read_c0_entryhi(); | ||
274 | 123 | ||
275 | write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); | 124 | write_c0_entryhi(entryhi); |
276 | mtc0_tlbw_hazard(); | 125 | mtc0_tlbw_hazard(); |
277 | 126 | ||
278 | tlb_probe(); | 127 | tlb_probe(); |
@@ -282,7 +131,7 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | |||
282 | if (idx >= current_cpu_data.tlbsize) | 131 | if (idx >= current_cpu_data.tlbsize) |
283 | BUG(); | 132 | BUG(); |
284 | 133 | ||
285 | if (idx > 0) { | 134 | if (idx >= 0) { |
286 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); | 135 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
287 | write_c0_entrylo0(0); | 136 | write_c0_entrylo0(0); |
288 | write_c0_entrylo1(0); | 137 | write_c0_entrylo1(0); |
@@ -292,93 +141,75 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | |||
292 | tlbw_use_hazard(); | 141 | tlbw_use_hazard(); |
293 | } | 142 | } |
294 | 143 | ||
295 | write_c0_entryhi(old_entryhi); | 144 | return idx; |
296 | mtc0_tlbw_hazard(); | ||
297 | |||
298 | local_irq_restore(flags); | ||
299 | |||
300 | if (idx > 0) | ||
301 | kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__, | ||
302 | (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx); | ||
303 | |||
304 | return 0; | ||
305 | } | 145 | } |
306 | EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); | ||
307 | 146 | ||
308 | void kvm_mips_flush_host_tlb(int skip_kseg0) | 147 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, |
148 | bool user, bool kernel) | ||
309 | { | 149 | { |
310 | unsigned long flags; | 150 | int idx_user, idx_kernel; |
311 | unsigned long old_entryhi, entryhi; | 151 | unsigned long flags, old_entryhi; |
312 | unsigned long old_pagemask; | ||
313 | int entry = 0; | ||
314 | int maxentry = current_cpu_data.tlbsize; | ||
315 | 152 | ||
316 | local_irq_save(flags); | 153 | local_irq_save(flags); |
317 | 154 | ||
318 | old_entryhi = read_c0_entryhi(); | 155 | old_entryhi = read_c0_entryhi(); |
319 | old_pagemask = read_c0_pagemask(); | ||
320 | |||
321 | /* Blast 'em all away. */ | ||
322 | for (entry = 0; entry < maxentry; entry++) { | ||
323 | write_c0_index(entry); | ||
324 | |||
325 | if (skip_kseg0) { | ||
326 | mtc0_tlbr_hazard(); | ||
327 | tlb_read(); | ||
328 | tlb_read_hazard(); | ||
329 | |||
330 | entryhi = read_c0_entryhi(); | ||
331 | 156 | ||
332 | /* Don't blow away guest kernel entries */ | 157 | if (user) |
333 | if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) | 158 | idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | |
334 | continue; | 159 | kvm_mips_get_user_asid(vcpu)); |
335 | 160 | if (kernel) | |
336 | write_c0_pagemask(old_pagemask); | 161 | idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | |
337 | } | 162 | kvm_mips_get_kernel_asid(vcpu)); |
338 | |||
339 | /* Make sure all entries differ. */ | ||
340 | write_c0_entryhi(UNIQUE_ENTRYHI(entry)); | ||
341 | write_c0_entrylo0(0); | ||
342 | write_c0_entrylo1(0); | ||
343 | mtc0_tlbw_hazard(); | ||
344 | |||
345 | tlb_write_indexed(); | ||
346 | tlbw_use_hazard(); | ||
347 | } | ||
348 | 163 | ||
349 | write_c0_entryhi(old_entryhi); | 164 | write_c0_entryhi(old_entryhi); |
350 | write_c0_pagemask(old_pagemask); | ||
351 | mtc0_tlbw_hazard(); | 165 | mtc0_tlbw_hazard(); |
352 | 166 | ||
353 | local_irq_restore(flags); | 167 | local_irq_restore(flags); |
168 | |||
169 | if (user && idx_user >= 0) | ||
170 | kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n", | ||
171 | __func__, (va & VPN2_MASK) | | ||
172 | kvm_mips_get_user_asid(vcpu), idx_user); | ||
173 | if (kernel && idx_kernel >= 0) | ||
174 | kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n", | ||
175 | __func__, (va & VPN2_MASK) | | ||
176 | kvm_mips_get_kernel_asid(vcpu), idx_kernel); | ||
177 | |||
178 | return 0; | ||
354 | } | 179 | } |
355 | EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb); | 180 | EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); |
356 | 181 | ||
357 | void kvm_local_flush_tlb_all(void) | 182 | /** |
183 | * kvm_mips_suspend_mm() - Suspend the active mm. | ||
184 | * @cpu The CPU we're running on. | ||
185 | * | ||
186 | * Suspend the active_mm, ready for a switch to a KVM guest virtual address | ||
187 | * space. This is left active for the duration of guest context, including time | ||
188 | * with interrupts enabled, so we need to be careful not to confuse e.g. cache | ||
189 | * management IPIs. | ||
190 | * | ||
191 | * kvm_mips_resume_mm() should be called before context switching to a different | ||
192 | * process so we don't need to worry about reference counting. | ||
193 | * | ||
194 | * This needs to be in static kernel code to avoid exporting init_mm. | ||
195 | */ | ||
196 | void kvm_mips_suspend_mm(int cpu) | ||
358 | { | 197 | { |
359 | unsigned long flags; | 198 | cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm)); |
360 | unsigned long old_ctx; | 199 | current->active_mm = &init_mm; |
361 | int entry = 0; | 200 | } |
362 | 201 | EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm); | |
363 | local_irq_save(flags); | ||
364 | /* Save old context and create impossible VPN2 value */ | ||
365 | old_ctx = read_c0_entryhi(); | ||
366 | write_c0_entrylo0(0); | ||
367 | write_c0_entrylo1(0); | ||
368 | |||
369 | /* Blast 'em all away. */ | ||
370 | while (entry < current_cpu_data.tlbsize) { | ||
371 | /* Make sure all entries differ. */ | ||
372 | write_c0_entryhi(UNIQUE_ENTRYHI(entry)); | ||
373 | write_c0_index(entry); | ||
374 | mtc0_tlbw_hazard(); | ||
375 | tlb_write_indexed(); | ||
376 | tlbw_use_hazard(); | ||
377 | entry++; | ||
378 | } | ||
379 | write_c0_entryhi(old_ctx); | ||
380 | mtc0_tlbw_hazard(); | ||
381 | 202 | ||
382 | local_irq_restore(flags); | 203 | /** |
204 | * kvm_mips_resume_mm() - Resume the current process mm. | ||
205 | * @cpu The CPU we're running on. | ||
206 | * | ||
207 | * Resume the mm of the current process, after a switch back from a KVM guest | ||
208 | * virtual address space (see kvm_mips_suspend_mm()). | ||
209 | */ | ||
210 | void kvm_mips_resume_mm(int cpu) | ||
211 | { | ||
212 | cpumask_set_cpu(cpu, mm_cpumask(current->mm)); | ||
213 | current->active_mm = current->mm; | ||
383 | } | 214 | } |
384 | EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all); | 215 | EXPORT_SYMBOL_GPL(kvm_mips_resume_mm); |
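kvm_mips_suspend_mm() and kvm_mips_resume_mm() only make sense as a pair bracketing guest GVA context, as their kernel-doc notes. A condensed sketch of that pairing, based on the kvm_trap_emul_vcpu_load()/kvm_trap_emul_vcpu_put() callbacks in the trap_emul.c hunks below; enter_guest_gva()/leave_guest_gva() are illustrative names only, and the ASID staleness check is omitted:

/* Illustrative only: callers run with interrupts disabled, as in vcpu_load. */
static void enter_guest_gva(struct mm_struct *gva_mm, int cpu)
{
	/* Point the hardware ASID and TLB refill PGD at the guest GVA space. */
	write_c0_entryhi(cpu_asid(cpu, gva_mm));
	TLBMISS_HANDLER_SETUP_PGD(gva_mm->pgd);
	/* Detach current->active_mm so e.g. cache management IPIs for that mm
	 * no longer target this CPU. */
	kvm_mips_suspend_mm(cpu);
	ehb();
}

static void leave_guest_gva(int cpu)
{
	/* Restore the normal Linux process memory map. */
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);
	ehb();
}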
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index 3b20441f2beb..b1fa53b252ea 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c | |||
@@ -11,9 +11,11 @@ | |||
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/vmalloc.h> | ||
15 | |||
16 | #include <linux/kvm_host.h> | 14 | #include <linux/kvm_host.h> |
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <asm/mmu_context.h> | ||
18 | #include <asm/pgalloc.h> | ||
17 | 19 | ||
18 | #include "interrupt.h" | 20 | #include "interrupt.h" |
19 | 21 | ||
@@ -21,9 +23,12 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | |||
21 | { | 23 | { |
22 | gpa_t gpa; | 24 | gpa_t gpa; |
23 | gva_t kseg = KSEGX(gva); | 25 | gva_t kseg = KSEGX(gva); |
26 | gva_t gkseg = KVM_GUEST_KSEGX(gva); | ||
24 | 27 | ||
25 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) | 28 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) |
26 | gpa = CPHYSADDR(gva); | 29 | gpa = CPHYSADDR(gva); |
30 | else if (gkseg == KVM_GUEST_KSEG0) | ||
31 | gpa = KVM_GUEST_CPHYSADDR(gva); | ||
27 | else { | 32 | else { |
28 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); | 33 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); |
29 | kvm_mips_dump_host_tlbs(); | 34 | kvm_mips_dump_host_tlbs(); |
@@ -83,48 +88,134 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) | |||
83 | return ret; | 88 | return ret; |
84 | } | 89 | } |
85 | 90 | ||
91 | static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run, | ||
92 | struct kvm_vcpu *vcpu) | ||
93 | { | ||
94 | enum emulation_result er; | ||
95 | union mips_instruction inst; | ||
96 | int err; | ||
97 | |||
98 | /* A code fetch fault doesn't count as an MMIO */ | ||
99 | if (kvm_is_ifetch_fault(&vcpu->arch)) { | ||
100 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
101 | return RESUME_HOST; | ||
102 | } | ||
103 | |||
104 | /* Fetch the instruction. */ | ||
105 | if (cause & CAUSEF_BD) | ||
106 | opc += 1; | ||
107 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
108 | if (err) { | ||
109 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
110 | return RESUME_HOST; | ||
111 | } | ||
112 | |||
113 | /* Emulate the load */ | ||
114 | er = kvm_mips_emulate_load(inst, cause, run, vcpu); | ||
115 | if (er == EMULATE_FAIL) { | ||
116 | kvm_err("Emulate load from MMIO space failed\n"); | ||
117 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
118 | } else { | ||
119 | run->exit_reason = KVM_EXIT_MMIO; | ||
120 | } | ||
121 | return RESUME_HOST; | ||
122 | } | ||
123 | |||
124 | static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run, | ||
125 | struct kvm_vcpu *vcpu) | ||
126 | { | ||
127 | enum emulation_result er; | ||
128 | union mips_instruction inst; | ||
129 | int err; | ||
130 | |||
131 | /* Fetch the instruction. */ | ||
132 | if (cause & CAUSEF_BD) | ||
133 | opc += 1; | ||
134 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
135 | if (err) { | ||
136 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
137 | return RESUME_HOST; | ||
138 | } | ||
139 | |||
140 | /* Emulate the store */ | ||
141 | er = kvm_mips_emulate_store(inst, cause, run, vcpu); | ||
142 | if (er == EMULATE_FAIL) { | ||
143 | kvm_err("Emulate store to MMIO space failed\n"); | ||
144 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
145 | } else { | ||
146 | run->exit_reason = KVM_EXIT_MMIO; | ||
147 | } | ||
148 | return RESUME_HOST; | ||
149 | } | ||
150 | |||
151 | static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run, | ||
152 | struct kvm_vcpu *vcpu, bool store) | ||
153 | { | ||
154 | if (store) | ||
155 | return kvm_mips_bad_store(cause, opc, run, vcpu); | ||
156 | else | ||
157 | return kvm_mips_bad_load(cause, opc, run, vcpu); | ||
158 | } | ||
159 | |||
86 | static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | 160 | static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) |
87 | { | 161 | { |
162 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
88 | struct kvm_run *run = vcpu->run; | 163 | struct kvm_run *run = vcpu->run; |
89 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | 164 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
90 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 165 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
91 | u32 cause = vcpu->arch.host_cp0_cause; | 166 | u32 cause = vcpu->arch.host_cp0_cause; |
92 | enum emulation_result er = EMULATE_DONE; | 167 | struct kvm_mips_tlb *tlb; |
93 | int ret = RESUME_GUEST; | 168 | unsigned long entryhi; |
169 | int index; | ||
94 | 170 | ||
95 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | 171 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 |
96 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | 172 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { |
97 | kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", | 173 | /* |
98 | cause, opc, badvaddr); | 174 | * First find the mapping in the guest TLB. If the failure to |
99 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); | 175 | * write was due to the guest TLB, it should be up to the guest |
176 | * to handle it. | ||
177 | */ | ||
178 | entryhi = (badvaddr & VPN2_MASK) | | ||
179 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); | ||
180 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | ||
100 | 181 | ||
101 | if (er == EMULATE_DONE) | 182 | /* |
102 | ret = RESUME_GUEST; | 183 | * These should never happen. |
103 | else { | 184 | * They would indicate stale host TLB entries. |
185 | */ | ||
186 | if (unlikely(index < 0)) { | ||
104 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 187 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
105 | ret = RESUME_HOST; | 188 | return RESUME_HOST; |
106 | } | 189 | } |
107 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | 190 | tlb = vcpu->arch.guest_tlb + index; |
191 | if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) { | ||
192 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
193 | return RESUME_HOST; | ||
194 | } | ||
195 | |||
108 | /* | 196 | /* |
109 | * XXXKYMA: The guest kernel does not expect to get this fault | 197 | * Guest entry not dirty? That would explain the TLB modified |
110 | * when we are not using HIGHMEM. Need to address this in a | 198 | * exception. Relay that on to the guest so it can handle it. |
111 | * HIGHMEM kernel | ||
112 | */ | 199 | */ |
113 | kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n", | 200 | if (!TLB_IS_DIRTY(*tlb, badvaddr)) { |
114 | cause, opc, badvaddr); | 201 | kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); |
115 | kvm_mips_dump_host_tlbs(); | 202 | return RESUME_GUEST; |
116 | kvm_arch_vcpu_dump_regs(vcpu); | 203 | } |
117 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 204 | |
118 | ret = RESUME_HOST; | 205 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr, |
206 | true)) | ||
207 | /* Not writable, needs handling as MMIO */ | ||
208 | return kvm_mips_bad_store(cause, opc, run, vcpu); | ||
209 | return RESUME_GUEST; | ||
210 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | ||
211 | if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0) | ||
212 | /* Not writable, needs handling as MMIO */ | ||
213 | return kvm_mips_bad_store(cause, opc, run, vcpu); | ||
214 | return RESUME_GUEST; | ||
119 | } else { | 215 | } else { |
120 | kvm_err("Illegal TLB Mod fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", | 216 | /* host kernel addresses are all handled as MMIO */ |
121 | cause, opc, badvaddr); | 217 | return kvm_mips_bad_store(cause, opc, run, vcpu); |
122 | kvm_mips_dump_host_tlbs(); | ||
123 | kvm_arch_vcpu_dump_regs(vcpu); | ||
124 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
125 | ret = RESUME_HOST; | ||
126 | } | 218 | } |
127 | return ret; | ||
128 | } | 219 | } |
129 | 220 | ||
130 | static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) | 221 | static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) |
@@ -157,7 +248,7 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) | |||
157 | * into the shadow host TLB | 248 | * into the shadow host TLB |
158 | */ | 249 | */ |
159 | 250 | ||
160 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | 251 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store); |
161 | if (er == EMULATE_DONE) | 252 | if (er == EMULATE_DONE) |
162 | ret = RESUME_GUEST; | 253 | ret = RESUME_GUEST; |
163 | else { | 254 | else { |
@@ -169,29 +260,15 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) | |||
169 | * All KSEG0 faults are handled by KVM, as the guest kernel does | 260 | * All KSEG0 faults are handled by KVM, as the guest kernel does |
170 | * not expect to ever get them | 261 | * not expect to ever get them |
171 | */ | 262 | */ |
172 | if (kvm_mips_handle_kseg0_tlb_fault | 263 | if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0) |
173 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { | 264 | ret = kvm_mips_bad_access(cause, opc, run, vcpu, store); |
174 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
175 | ret = RESUME_HOST; | ||
176 | } | ||
177 | } else if (KVM_GUEST_KERNEL_MODE(vcpu) | 265 | } else if (KVM_GUEST_KERNEL_MODE(vcpu) |
178 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { | 266 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { |
179 | /* | 267 | /* |
180 | * With EVA we may get a TLB exception instead of an address | 268 | * With EVA we may get a TLB exception instead of an address |
181 | * error when the guest performs MMIO to KSeg1 addresses. | 269 | * error when the guest performs MMIO to KSeg1 addresses. |
182 | */ | 270 | */ |
183 | kvm_debug("Emulate %s MMIO space\n", | 271 | ret = kvm_mips_bad_access(cause, opc, run, vcpu, store); |
184 | store ? "Store to" : "Load from"); | ||
185 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | ||
186 | if (er == EMULATE_FAIL) { | ||
187 | kvm_err("Emulate %s MMIO space failed\n", | ||
188 | store ? "Store to" : "Load from"); | ||
189 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
190 | ret = RESUME_HOST; | ||
191 | } else { | ||
192 | run->exit_reason = KVM_EXIT_MMIO; | ||
193 | ret = RESUME_HOST; | ||
194 | } | ||
195 | } else { | 272 | } else { |
196 | kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", | 273 | kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", |
197 | store ? "ST" : "LD", cause, opc, badvaddr); | 274 | store ? "ST" : "LD", cause, opc, badvaddr); |
@@ -219,21 +296,11 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) | |||
219 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | 296 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
220 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 297 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
221 | u32 cause = vcpu->arch.host_cp0_cause; | 298 | u32 cause = vcpu->arch.host_cp0_cause; |
222 | enum emulation_result er = EMULATE_DONE; | ||
223 | int ret = RESUME_GUEST; | 299 | int ret = RESUME_GUEST; |
224 | 300 | ||
225 | if (KVM_GUEST_KERNEL_MODE(vcpu) | 301 | if (KVM_GUEST_KERNEL_MODE(vcpu) |
226 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { | 302 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { |
227 | kvm_debug("Emulate Store to MMIO space\n"); | 303 | ret = kvm_mips_bad_store(cause, opc, run, vcpu); |
228 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | ||
229 | if (er == EMULATE_FAIL) { | ||
230 | kvm_err("Emulate Store to MMIO space failed\n"); | ||
231 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
232 | ret = RESUME_HOST; | ||
233 | } else { | ||
234 | run->exit_reason = KVM_EXIT_MMIO; | ||
235 | ret = RESUME_HOST; | ||
236 | } | ||
237 | } else { | 304 | } else { |
238 | kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n", | 305 | kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n", |
239 | cause, opc, badvaddr); | 306 | cause, opc, badvaddr); |
@@ -249,26 +316,15 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | |||
249 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | 316 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
250 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 317 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
251 | u32 cause = vcpu->arch.host_cp0_cause; | 318 | u32 cause = vcpu->arch.host_cp0_cause; |
252 | enum emulation_result er = EMULATE_DONE; | ||
253 | int ret = RESUME_GUEST; | 319 | int ret = RESUME_GUEST; |
254 | 320 | ||
255 | if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { | 321 | if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { |
256 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); | 322 | ret = kvm_mips_bad_load(cause, opc, run, vcpu); |
257 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | ||
258 | if (er == EMULATE_FAIL) { | ||
259 | kvm_err("Emulate Load from MMIO space failed\n"); | ||
260 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
261 | ret = RESUME_HOST; | ||
262 | } else { | ||
263 | run->exit_reason = KVM_EXIT_MMIO; | ||
264 | ret = RESUME_HOST; | ||
265 | } | ||
266 | } else { | 323 | } else { |
267 | kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n", | 324 | kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n", |
268 | cause, opc, badvaddr); | 325 | cause, opc, badvaddr); |
269 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 326 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
270 | ret = RESUME_HOST; | 327 | ret = RESUME_HOST; |
271 | er = EMULATE_FAIL; | ||
272 | } | 328 | } |
273 | return ret; | 329 | return ret; |
274 | } | 330 | } |
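All of these handlers now funnel MMIO loads and stores into kvm_mips_bad_load()/kvm_mips_bad_store(), which emulate the access and exit to userspace with KVM_EXIT_MMIO. For reference, a minimal userspace run-loop fragment that completes such an exit could look like the following sketch; the one-register device model is purely a stand-in, and error handling is trimmed:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Stand-in device model: a single 32-bit scratch register. */
static uint32_t scratch_reg;

static void device_write(uint64_t addr, const void *data, uint32_t len)
{
	memcpy(&scratch_reg, data, len < 4 ? len : 4);
}

static void device_read(uint64_t addr, void *data, uint32_t len)
{
	memcpy(data, &scratch_reg, len < 4 ? len : 4);
}

/* run points at the mmap()ed struct kvm_run of this VCPU. */
static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);

		if (run->exit_reason == KVM_EXIT_MMIO) {
			if (run->mmio.is_write)
				device_write(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			else
				/* Read data is picked up on the next KVM_RUN. */
				device_read(run->mmio.phys_addr,
					    run->mmio.data, run->mmio.len);
			continue;
		}
		break;	/* Hand anything else to the rest of the VMM. */
	}
}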
@@ -428,16 +484,75 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) | |||
428 | return ret; | 484 | return ret; |
429 | } | 485 | } |
430 | 486 | ||
431 | static int kvm_trap_emul_vm_init(struct kvm *kvm) | 487 | static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) |
432 | { | 488 | { |
489 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
490 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
491 | |||
492 | /* | ||
493 | * Allocate GVA -> HPA page tables. | ||
494 | * MIPS doesn't use the mm_struct pointer argument. | ||
495 | */ | ||
496 | kern_mm->pgd = pgd_alloc(kern_mm); | ||
497 | if (!kern_mm->pgd) | ||
498 | return -ENOMEM; | ||
499 | |||
500 | user_mm->pgd = pgd_alloc(user_mm); | ||
501 | if (!user_mm->pgd) { | ||
502 | pgd_free(kern_mm, kern_mm->pgd); | ||
503 | return -ENOMEM; | ||
504 | } | ||
505 | |||
433 | return 0; | 506 | return 0; |
434 | } | 507 | } |
435 | 508 | ||
436 | static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) | 509 | static void kvm_mips_emul_free_gva_pt(pgd_t *pgd) |
437 | { | 510 | { |
438 | vcpu->arch.kscratch_enabled = 0xfc; | 511 | /* Don't free host kernel page tables copied from init_mm.pgd */ |
512 | const unsigned long end = 0x80000000; | ||
513 | unsigned long pgd_va, pud_va, pmd_va; | ||
514 | pud_t *pud; | ||
515 | pmd_t *pmd; | ||
516 | pte_t *pte; | ||
517 | int i, j, k; | ||
518 | |||
519 | for (i = 0; i < USER_PTRS_PER_PGD; i++) { | ||
520 | if (pgd_none(pgd[i])) | ||
521 | continue; | ||
522 | |||
523 | pgd_va = (unsigned long)i << PGDIR_SHIFT; | ||
524 | if (pgd_va >= end) | ||
525 | break; | ||
526 | pud = pud_offset(pgd + i, 0); | ||
527 | for (j = 0; j < PTRS_PER_PUD; j++) { | ||
528 | if (pud_none(pud[j])) | ||
529 | continue; | ||
530 | |||
531 | pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT); | ||
532 | if (pud_va >= end) | ||
533 | break; | ||
534 | pmd = pmd_offset(pud + j, 0); | ||
535 | for (k = 0; k < PTRS_PER_PMD; k++) { | ||
536 | if (pmd_none(pmd[k])) | ||
537 | continue; | ||
538 | |||
539 | pmd_va = pud_va | (k << PMD_SHIFT); | ||
540 | if (pmd_va >= end) | ||
541 | break; | ||
542 | pte = pte_offset(pmd + k, 0); | ||
543 | pte_free_kernel(NULL, pte); | ||
544 | } | ||
545 | pmd_free(NULL, pmd); | ||
546 | } | ||
547 | pud_free(NULL, pud); | ||
548 | } | ||
549 | pgd_free(NULL, pgd); | ||
550 | } | ||
439 | 551 | ||
440 | return 0; | 552 | static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu) |
553 | { | ||
554 | kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd); | ||
555 | kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd); | ||
441 | } | 556 | } |
442 | 557 | ||
443 | static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | 558 | static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) |
@@ -499,6 +614,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |||
499 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ | 614 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ |
500 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); | 615 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); |
501 | 616 | ||
617 | /* Status */ | ||
618 | kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL); | ||
619 | |||
502 | /* | 620 | /* |
503 | * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) | 621 | * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) |
504 | */ | 622 | */ |
@@ -508,17 +626,76 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |||
508 | kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | | 626 | kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | |
509 | (vcpu_id & MIPS_EBASE_CPUNUM)); | 627 | (vcpu_id & MIPS_EBASE_CPUNUM)); |
510 | 628 | ||
629 | /* Put PC at guest reset vector */ | ||
630 | vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000); | ||
631 | |||
511 | return 0; | 632 | return 0; |
512 | } | 633 | } |
513 | 634 | ||
635 | static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm) | ||
636 | { | ||
637 | /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */ | ||
638 | kvm_flush_remote_tlbs(kvm); | ||
639 | } | ||
640 | |||
641 | static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm, | ||
642 | const struct kvm_memory_slot *slot) | ||
643 | { | ||
644 | kvm_trap_emul_flush_shadow_all(kvm); | ||
645 | } | ||
646 | |||
647 | static u64 kvm_trap_emul_get_one_regs[] = { | ||
648 | KVM_REG_MIPS_CP0_INDEX, | ||
649 | KVM_REG_MIPS_CP0_ENTRYLO0, | ||
650 | KVM_REG_MIPS_CP0_ENTRYLO1, | ||
651 | KVM_REG_MIPS_CP0_CONTEXT, | ||
652 | KVM_REG_MIPS_CP0_USERLOCAL, | ||
653 | KVM_REG_MIPS_CP0_PAGEMASK, | ||
654 | KVM_REG_MIPS_CP0_WIRED, | ||
655 | KVM_REG_MIPS_CP0_HWRENA, | ||
656 | KVM_REG_MIPS_CP0_BADVADDR, | ||
657 | KVM_REG_MIPS_CP0_COUNT, | ||
658 | KVM_REG_MIPS_CP0_ENTRYHI, | ||
659 | KVM_REG_MIPS_CP0_COMPARE, | ||
660 | KVM_REG_MIPS_CP0_STATUS, | ||
661 | KVM_REG_MIPS_CP0_INTCTL, | ||
662 | KVM_REG_MIPS_CP0_CAUSE, | ||
663 | KVM_REG_MIPS_CP0_EPC, | ||
664 | KVM_REG_MIPS_CP0_PRID, | ||
665 | KVM_REG_MIPS_CP0_EBASE, | ||
666 | KVM_REG_MIPS_CP0_CONFIG, | ||
667 | KVM_REG_MIPS_CP0_CONFIG1, | ||
668 | KVM_REG_MIPS_CP0_CONFIG2, | ||
669 | KVM_REG_MIPS_CP0_CONFIG3, | ||
670 | KVM_REG_MIPS_CP0_CONFIG4, | ||
671 | KVM_REG_MIPS_CP0_CONFIG5, | ||
672 | KVM_REG_MIPS_CP0_CONFIG7, | ||
673 | KVM_REG_MIPS_CP0_ERROREPC, | ||
674 | KVM_REG_MIPS_CP0_KSCRATCH1, | ||
675 | KVM_REG_MIPS_CP0_KSCRATCH2, | ||
676 | KVM_REG_MIPS_CP0_KSCRATCH3, | ||
677 | KVM_REG_MIPS_CP0_KSCRATCH4, | ||
678 | KVM_REG_MIPS_CP0_KSCRATCH5, | ||
679 | KVM_REG_MIPS_CP0_KSCRATCH6, | ||
680 | |||
681 | KVM_REG_MIPS_COUNT_CTL, | ||
682 | KVM_REG_MIPS_COUNT_RESUME, | ||
683 | KVM_REG_MIPS_COUNT_HZ, | ||
684 | }; | ||
685 | |||
514 | static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu) | 686 | static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu) |
515 | { | 687 | { |
516 | return 0; | 688 | return ARRAY_SIZE(kvm_trap_emul_get_one_regs); |
517 | } | 689 | } |
518 | 690 | ||
519 | static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu, | 691 | static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu, |
520 | u64 __user *indices) | 692 | u64 __user *indices) |
521 | { | 693 | { |
694 | if (copy_to_user(indices, kvm_trap_emul_get_one_regs, | ||
695 | sizeof(kvm_trap_emul_get_one_regs))) | ||
696 | return -EFAULT; | ||
697 | indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs); | ||
698 | |||
522 | return 0; | 699 | return 0; |
523 | } | 700 | } |
524 | 701 | ||
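With kvm_trap_emul_get_one_regs[] now reported by kvm_trap_emul_num_regs() and copied out by kvm_trap_emul_copy_reg_indices(), userspace can enumerate and read these CP0 registers through the standard ONE_REG interface. A small userspace sketch, assuming vcpu_fd was obtained through the usual /dev/kvm, KVM_CREATE_VM and KVM_CREATE_VCPU sequence:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t read_one_reg(int vcpu_fd, uint64_t id)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)&val };

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		perror("KVM_GET_ONE_REG");
	return val;
}

static void list_regs(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 }, *list;
	uint64_t i;

	/* First call is expected to fail with E2BIG but writes back the count. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
		for (i = 0; i < list->n; i++)
			printf("reg %#llx = %#llx\n",
			       (unsigned long long)list->reg[i],
			       (unsigned long long)read_one_reg(vcpu_fd,
								list->reg[i]));
	free(list);
}

The KVM_REG_MIPS_CP0_* identifiers listed above come from the MIPS uapi <asm/kvm.h>, which this series also extends.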
@@ -526,7 +703,81 @@ static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, | |||
526 | const struct kvm_one_reg *reg, | 703 | const struct kvm_one_reg *reg, |
527 | s64 *v) | 704 | s64 *v) |
528 | { | 705 | { |
706 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
707 | |||
529 | switch (reg->id) { | 708 | switch (reg->id) { |
709 | case KVM_REG_MIPS_CP0_INDEX: | ||
710 | *v = (long)kvm_read_c0_guest_index(cop0); | ||
711 | break; | ||
712 | case KVM_REG_MIPS_CP0_ENTRYLO0: | ||
713 | *v = kvm_read_c0_guest_entrylo0(cop0); | ||
714 | break; | ||
715 | case KVM_REG_MIPS_CP0_ENTRYLO1: | ||
716 | *v = kvm_read_c0_guest_entrylo1(cop0); | ||
717 | break; | ||
718 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
719 | *v = (long)kvm_read_c0_guest_context(cop0); | ||
720 | break; | ||
721 | case KVM_REG_MIPS_CP0_USERLOCAL: | ||
722 | *v = (long)kvm_read_c0_guest_userlocal(cop0); | ||
723 | break; | ||
724 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
725 | *v = (long)kvm_read_c0_guest_pagemask(cop0); | ||
726 | break; | ||
727 | case KVM_REG_MIPS_CP0_WIRED: | ||
728 | *v = (long)kvm_read_c0_guest_wired(cop0); | ||
729 | break; | ||
730 | case KVM_REG_MIPS_CP0_HWRENA: | ||
731 | *v = (long)kvm_read_c0_guest_hwrena(cop0); | ||
732 | break; | ||
733 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
734 | *v = (long)kvm_read_c0_guest_badvaddr(cop0); | ||
735 | break; | ||
736 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
737 | *v = (long)kvm_read_c0_guest_entryhi(cop0); | ||
738 | break; | ||
739 | case KVM_REG_MIPS_CP0_COMPARE: | ||
740 | *v = (long)kvm_read_c0_guest_compare(cop0); | ||
741 | break; | ||
742 | case KVM_REG_MIPS_CP0_STATUS: | ||
743 | *v = (long)kvm_read_c0_guest_status(cop0); | ||
744 | break; | ||
745 | case KVM_REG_MIPS_CP0_INTCTL: | ||
746 | *v = (long)kvm_read_c0_guest_intctl(cop0); | ||
747 | break; | ||
748 | case KVM_REG_MIPS_CP0_CAUSE: | ||
749 | *v = (long)kvm_read_c0_guest_cause(cop0); | ||
750 | break; | ||
751 | case KVM_REG_MIPS_CP0_EPC: | ||
752 | *v = (long)kvm_read_c0_guest_epc(cop0); | ||
753 | break; | ||
754 | case KVM_REG_MIPS_CP0_PRID: | ||
755 | *v = (long)kvm_read_c0_guest_prid(cop0); | ||
756 | break; | ||
757 | case KVM_REG_MIPS_CP0_EBASE: | ||
758 | *v = (long)kvm_read_c0_guest_ebase(cop0); | ||
759 | break; | ||
760 | case KVM_REG_MIPS_CP0_CONFIG: | ||
761 | *v = (long)kvm_read_c0_guest_config(cop0); | ||
762 | break; | ||
763 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
764 | *v = (long)kvm_read_c0_guest_config1(cop0); | ||
765 | break; | ||
766 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
767 | *v = (long)kvm_read_c0_guest_config2(cop0); | ||
768 | break; | ||
769 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
770 | *v = (long)kvm_read_c0_guest_config3(cop0); | ||
771 | break; | ||
772 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
773 | *v = (long)kvm_read_c0_guest_config4(cop0); | ||
774 | break; | ||
775 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
776 | *v = (long)kvm_read_c0_guest_config5(cop0); | ||
777 | break; | ||
778 | case KVM_REG_MIPS_CP0_CONFIG7: | ||
779 | *v = (long)kvm_read_c0_guest_config7(cop0); | ||
780 | break; | ||
530 | case KVM_REG_MIPS_CP0_COUNT: | 781 | case KVM_REG_MIPS_CP0_COUNT: |
531 | *v = kvm_mips_read_count(vcpu); | 782 | *v = kvm_mips_read_count(vcpu); |
532 | break; | 783 | break; |
@@ -539,6 +790,27 @@ static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, | |||
539 | case KVM_REG_MIPS_COUNT_HZ: | 790 | case KVM_REG_MIPS_COUNT_HZ: |
540 | *v = vcpu->arch.count_hz; | 791 | *v = vcpu->arch.count_hz; |
541 | break; | 792 | break; |
793 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
794 | *v = (long)kvm_read_c0_guest_errorepc(cop0); | ||
795 | break; | ||
796 | case KVM_REG_MIPS_CP0_KSCRATCH1: | ||
797 | *v = (long)kvm_read_c0_guest_kscratch1(cop0); | ||
798 | break; | ||
799 | case KVM_REG_MIPS_CP0_KSCRATCH2: | ||
800 | *v = (long)kvm_read_c0_guest_kscratch2(cop0); | ||
801 | break; | ||
802 | case KVM_REG_MIPS_CP0_KSCRATCH3: | ||
803 | *v = (long)kvm_read_c0_guest_kscratch3(cop0); | ||
804 | break; | ||
805 | case KVM_REG_MIPS_CP0_KSCRATCH4: | ||
806 | *v = (long)kvm_read_c0_guest_kscratch4(cop0); | ||
807 | break; | ||
808 | case KVM_REG_MIPS_CP0_KSCRATCH5: | ||
809 | *v = (long)kvm_read_c0_guest_kscratch5(cop0); | ||
810 | break; | ||
811 | case KVM_REG_MIPS_CP0_KSCRATCH6: | ||
812 | *v = (long)kvm_read_c0_guest_kscratch6(cop0); | ||
813 | break; | ||
542 | default: | 814 | default: |
543 | return -EINVAL; | 815 | return -EINVAL; |
544 | } | 816 | } |
@@ -554,6 +826,56 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | |||
554 | unsigned int cur, change; | 826 | unsigned int cur, change; |
555 | 827 | ||
556 | switch (reg->id) { | 828 | switch (reg->id) { |
829 | case KVM_REG_MIPS_CP0_INDEX: | ||
830 | kvm_write_c0_guest_index(cop0, v); | ||
831 | break; | ||
832 | case KVM_REG_MIPS_CP0_ENTRYLO0: | ||
833 | kvm_write_c0_guest_entrylo0(cop0, v); | ||
834 | break; | ||
835 | case KVM_REG_MIPS_CP0_ENTRYLO1: | ||
836 | kvm_write_c0_guest_entrylo1(cop0, v); | ||
837 | break; | ||
838 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
839 | kvm_write_c0_guest_context(cop0, v); | ||
840 | break; | ||
841 | case KVM_REG_MIPS_CP0_USERLOCAL: | ||
842 | kvm_write_c0_guest_userlocal(cop0, v); | ||
843 | break; | ||
844 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
845 | kvm_write_c0_guest_pagemask(cop0, v); | ||
846 | break; | ||
847 | case KVM_REG_MIPS_CP0_WIRED: | ||
848 | kvm_write_c0_guest_wired(cop0, v); | ||
849 | break; | ||
850 | case KVM_REG_MIPS_CP0_HWRENA: | ||
851 | kvm_write_c0_guest_hwrena(cop0, v); | ||
852 | break; | ||
853 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
854 | kvm_write_c0_guest_badvaddr(cop0, v); | ||
855 | break; | ||
856 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
857 | kvm_write_c0_guest_entryhi(cop0, v); | ||
858 | break; | ||
859 | case KVM_REG_MIPS_CP0_STATUS: | ||
860 | kvm_write_c0_guest_status(cop0, v); | ||
861 | break; | ||
862 | case KVM_REG_MIPS_CP0_INTCTL: | ||
863 | /* No VInt, so no VS, read-only for now */ | ||
864 | break; | ||
865 | case KVM_REG_MIPS_CP0_EPC: | ||
866 | kvm_write_c0_guest_epc(cop0, v); | ||
867 | break; | ||
868 | case KVM_REG_MIPS_CP0_PRID: | ||
869 | kvm_write_c0_guest_prid(cop0, v); | ||
870 | break; | ||
871 | case KVM_REG_MIPS_CP0_EBASE: | ||
872 | /* | ||
873 | * Allow core number to be written, but the exception base must | ||
874 | * remain in guest KSeg0. | ||
875 | */ | ||
876 | kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM, | ||
877 | v); | ||
878 | break; | ||
557 | case KVM_REG_MIPS_CP0_COUNT: | 879 | case KVM_REG_MIPS_CP0_COUNT: |
558 | kvm_mips_write_count(vcpu, v); | 880 | kvm_mips_write_count(vcpu, v); |
559 | break; | 881 | break; |
@@ -618,6 +940,9 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | |||
618 | kvm_write_c0_guest_config5(cop0, v); | 940 | kvm_write_c0_guest_config5(cop0, v); |
619 | } | 941 | } |
620 | break; | 942 | break; |
943 | case KVM_REG_MIPS_CP0_CONFIG7: | ||
944 | /* writes ignored */ | ||
945 | break; | ||
621 | case KVM_REG_MIPS_COUNT_CTL: | 946 | case KVM_REG_MIPS_COUNT_CTL: |
622 | ret = kvm_mips_set_count_ctl(vcpu, v); | 947 | ret = kvm_mips_set_count_ctl(vcpu, v); |
623 | break; | 948 | break; |
@@ -627,24 +952,269 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | |||
627 | case KVM_REG_MIPS_COUNT_HZ: | 952 | case KVM_REG_MIPS_COUNT_HZ: |
628 | ret = kvm_mips_set_count_hz(vcpu, v); | 953 | ret = kvm_mips_set_count_hz(vcpu, v); |
629 | break; | 954 | break; |
955 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
956 | kvm_write_c0_guest_errorepc(cop0, v); | ||
957 | break; | ||
958 | case KVM_REG_MIPS_CP0_KSCRATCH1: | ||
959 | kvm_write_c0_guest_kscratch1(cop0, v); | ||
960 | break; | ||
961 | case KVM_REG_MIPS_CP0_KSCRATCH2: | ||
962 | kvm_write_c0_guest_kscratch2(cop0, v); | ||
963 | break; | ||
964 | case KVM_REG_MIPS_CP0_KSCRATCH3: | ||
965 | kvm_write_c0_guest_kscratch3(cop0, v); | ||
966 | break; | ||
967 | case KVM_REG_MIPS_CP0_KSCRATCH4: | ||
968 | kvm_write_c0_guest_kscratch4(cop0, v); | ||
969 | break; | ||
970 | case KVM_REG_MIPS_CP0_KSCRATCH5: | ||
971 | kvm_write_c0_guest_kscratch5(cop0, v); | ||
972 | break; | ||
973 | case KVM_REG_MIPS_CP0_KSCRATCH6: | ||
974 | kvm_write_c0_guest_kscratch6(cop0, v); | ||
975 | break; | ||
630 | default: | 976 | default: |
631 | return -EINVAL; | 977 | return -EINVAL; |
632 | } | 978 | } |
633 | return ret; | 979 | return ret; |
634 | } | 980 | } |
635 | 981 | ||
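For context on how these new cases are reached: userspace sets any register handled in the switch above through KVM's generic one-reg interface, passing a 64-bit register ID and a pointer to the value. A minimal userspace sketch, using KVM_REG_MIPS_PC purely as a stand-in for the CP0 IDs documented in api.txt (the vcpu fd and error handling are assumed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_mips_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = id,
		.addr = (uintptr_t)&val,	/* kernel copies the value from here */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* e.g. set_mips_reg(vcpu_fd, KVM_REG_MIPS_PC, 0xffffffff80100000ull); */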
636 | static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu) | 982 | static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
637 | { | 983 | { |
638 | kvm_lose_fpu(vcpu); | 984 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; |
985 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
986 | struct mm_struct *mm; | ||
987 | |||
988 | /* | ||
989 | * Were we in guest context? If so, restore the appropriate ASID based | ||
990 | * on the mode of the Guest (Kernel/User). | ||
991 | */ | ||
992 | if (current->flags & PF_VCPU) { | ||
993 | mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; | ||
994 | if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & | ||
995 | asid_version_mask(cpu)) | ||
996 | get_new_mmu_context(mm, cpu); | ||
997 | write_c0_entryhi(cpu_asid(cpu, mm)); | ||
998 | TLBMISS_HANDLER_SETUP_PGD(mm->pgd); | ||
999 | kvm_mips_suspend_mm(cpu); | ||
1000 | ehb(); | ||
1001 | } | ||
639 | 1002 | ||
640 | return 0; | 1003 | return 0; |
641 | } | 1004 | } |
642 | 1005 | ||
643 | static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu) | 1006 | static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu) |
644 | { | 1007 | { |
1008 | kvm_lose_fpu(vcpu); | ||
1009 | |||
1010 | if (current->flags & PF_VCPU) { | ||
1011 | /* Restore normal Linux process memory map */ | ||
1012 | if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & | ||
1013 | asid_version_mask(cpu))) | ||
1014 | get_new_mmu_context(current->mm, cpu); | ||
1015 | write_c0_entryhi(cpu_asid(cpu, current->mm)); | ||
1016 | TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd); | ||
1017 | kvm_mips_resume_mm(cpu); | ||
1018 | ehb(); | ||
1019 | } | ||
1020 | |||
645 | return 0; | 1021 | return 0; |
646 | } | 1022 | } |
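The same staleness test recurs in vcpu_load(), vcpu_put() and vcpu_run() below: an mm's per-CPU context word carries an ASID generation ("version") in its upper bits, and once that generation no longer matches the CPU's asid_cache() the ASID may have been recycled and must be regenerated before it is written to EntryHi. A sketch of what the expression encodes, assuming <asm/mmu_context.h>; the helper name is illustrative, the real code open-codes it:

static inline bool guest_asid_is_stale(int cpu, struct mm_struct *mm)
{
	/* upper bits = ASID generation; a mismatch means "regenerate first" */
	return (cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	       asid_version_mask(cpu);
}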
647 | 1023 | ||
1024 | static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu, | ||
1025 | bool reload_asid) | ||
1026 | { | ||
1027 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
1028 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
1029 | struct mm_struct *mm; | ||
1030 | int i; | ||
1031 | |||
1032 | if (likely(!vcpu->requests)) | ||
1033 | return; | ||
1034 | |||
1035 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { | ||
1036 | /* | ||
1037 | * Both kernel & user GVA mappings must be invalidated. The | ||
1038 | * caller is just about to check whether the ASID is stale | ||
1039 | * anyway so no need to reload it here. | ||
1040 | */ | ||
1041 | kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN); | ||
1042 | kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER); | ||
1043 | for_each_possible_cpu(i) { | ||
1044 | cpu_context(i, kern_mm) = 0; | ||
1045 | cpu_context(i, user_mm) = 0; | ||
1046 | } | ||
1047 | |||
1048 | /* Generate new ASID for current mode */ | ||
1049 | if (reload_asid) { | ||
1050 | mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; | ||
1051 | get_new_mmu_context(mm, cpu); | ||
1052 | htw_stop(); | ||
1053 | write_c0_entryhi(cpu_asid(cpu, mm)); | ||
1054 | TLBMISS_HANDLER_SETUP_PGD(mm->pgd); | ||
1055 | htw_start(); | ||
1056 | } | ||
1057 | } | ||
1058 | } | ||
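kvm_check_request() above is the consuming half of KVM's request mechanism; the producing half lives wherever a GPA mapping changes (MMU notifiers, dirty logging, memslot flushes). A hedged sketch of how such a flush is typically raised against a VCPU (the function name is illustrative):

static void request_gva_flush(struct kvm_vcpu *vcpu)
{
	/* set the request bit for the target VCPU... */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	/* ...and kick it so a running VCPU exits guest mode and notices */
	kvm_vcpu_kick(vcpu);
}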
1059 | |||
1060 | /** | ||
1061 | * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space. | ||
1062 | * @vcpu: VCPU pointer. | ||
1063 | * | ||
1064 | * Call before a GVA space access outside of guest mode, to ensure that | ||
1065 | * asynchronous TLB flush requests are handled or delayed until completion of | ||
1066 | * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()). | ||
1067 | * | ||
1068 | * Should be called with IRQs already enabled. | ||
1069 | */ | ||
1070 | void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu) | ||
1071 | { | ||
1072 | /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */ | ||
1073 | WARN_ON_ONCE(irqs_disabled()); | ||
1074 | |||
1075 | /* | ||
1076 | * The caller is about to access the GVA space, so we set the mode to | ||
1077 | * force TLB flush requests to send an IPI, and also disable IRQs to | ||
1078 | * delay IPI handling until kvm_trap_emul_gva_lockless_end(). | ||
1079 | */ | ||
1080 | local_irq_disable(); | ||
1081 | |||
1082 | /* | ||
1083 | * Make sure the read of VCPU requests is not reordered ahead of the | ||
1084 | * write to vcpu->mode, or we could miss a TLB flush request while | ||
1085 | * the requester sees the VCPU as outside of guest mode and not needing | ||
1086 | * an IPI. | ||
1087 | */ | ||
1088 | smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); | ||
1089 | |||
1090 | /* | ||
1091 | * If a TLB flush has been requested (potentially while | ||
1092 | * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it | ||
1093 | * before accessing the GVA space, and be sure to reload the ASID if | ||
1094 | * necessary as it'll be immediately used. | ||
1095 | * | ||
1096 | * TLB flush requests after this check will trigger an IPI due to the | ||
1097 | * mode change above, which will be delayed due to IRQs disabled. | ||
1098 | */ | ||
1099 | kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true); | ||
1100 | } | ||
1101 | |||
1102 | /** | ||
1103 | * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space. | ||
1104 | * @vcpu: VCPU pointer. | ||
1105 | * | ||
1106 | * Called after a GVA space access outside of guest mode. Should have a matching | ||
1107 | * call to kvm_trap_emul_gva_lockless_begin(). | ||
1108 | */ | ||
1109 | void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu) | ||
1110 | { | ||
1111 | /* | ||
1112 | * Make sure the write to vcpu->mode is not reordered in front of GVA | ||
1113 | * accesses, or a TLB flush requester may not think it necessary to send | ||
1114 | * an IPI. | ||
1115 | */ | ||
1116 | smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); | ||
1117 | |||
1118 | /* | ||
1119 | * Now that the access to GVA space is complete, it's safe for pending | ||
1120 | * TLB flush request IPIs to be handled (which indicates completion). | ||
1121 | */ | ||
1122 | local_irq_enable(); | ||
1123 | } | ||
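Together, the two helpers bracket any uaccess-based read or write of guest virtual memory performed from the host, which is how this series replaces manual guest TLB walks. A usage sketch under those assumptions (the helper name is hypothetical; the real callers live in the emulation and dyntrans code):

static int read_guest_gva_word(struct kvm_vcpu *vcpu, u32 __user *gva, u32 *out)
{
	int err;

	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = __get_user(*out, gva);	/* faults resolved via the GVA page tables */
	kvm_trap_emul_gva_lockless_end(vcpu);

	return err;			/* 0 on success, -EFAULT on an unhandled fault */
}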
1124 | |||
1125 | static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run, | ||
1126 | struct kvm_vcpu *vcpu) | ||
1127 | { | ||
1128 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
1129 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
1130 | struct mm_struct *mm; | ||
1131 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1132 | int i, cpu = smp_processor_id(); | ||
1133 | unsigned int gasid; | ||
1134 | |||
1135 | /* | ||
1136 | * No need to reload ASID, IRQs are disabled already so there's no rush, | ||
1137 | * and we'll check if we need to regenerate below anyway before | ||
1138 | * re-entering the guest. | ||
1139 | */ | ||
1140 | kvm_trap_emul_check_requests(vcpu, cpu, false); | ||
1141 | |||
1142 | if (KVM_GUEST_KERNEL_MODE(vcpu)) { | ||
1143 | mm = kern_mm; | ||
1144 | } else { | ||
1145 | mm = user_mm; | ||
1146 | |||
1147 | /* | ||
1148 | * Lazy host ASID regeneration / PT flush for guest user mode. | ||
1149 | * If the guest ASID has changed since the last guest usermode | ||
1150 | * execution, invalidate the stale TLB entries and flush GVA PT | ||
1151 | * entries too. | ||
1152 | */ | ||
1153 | gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; | ||
1154 | if (gasid != vcpu->arch.last_user_gasid) { | ||
1155 | kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER); | ||
1156 | for_each_possible_cpu(i) | ||
1157 | cpu_context(i, user_mm) = 0; | ||
1158 | vcpu->arch.last_user_gasid = gasid; | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | /* | ||
1163 | * Check if ASID is stale. This may happen due to a TLB flush request or | ||
1164 | * a lazy user MM invalidation. | ||
1165 | */ | ||
1166 | if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & | ||
1167 | asid_version_mask(cpu)) | ||
1168 | get_new_mmu_context(mm, cpu); | ||
1169 | } | ||
1170 | |||
1171 | static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
1172 | { | ||
1173 | int cpu = smp_processor_id(); | ||
1174 | int r; | ||
1175 | |||
1176 | /* Check if we have any exceptions/interrupts pending */ | ||
1177 | kvm_mips_deliver_interrupts(vcpu, | ||
1178 | kvm_read_c0_guest_cause(vcpu->arch.cop0)); | ||
1179 | |||
1180 | kvm_trap_emul_vcpu_reenter(run, vcpu); | ||
1181 | |||
1182 | /* | ||
1183 | * We use user accessors to access guest memory, but we don't want to | ||
1184 | * invoke Linux page faulting. | ||
1185 | */ | ||
1186 | pagefault_disable(); | ||
1187 | |||
1188 | /* Disable hardware page table walking while in guest */ | ||
1189 | htw_stop(); | ||
1190 | |||
1191 | /* | ||
1192 | * While in guest context we're in the guest's address space, not the | ||
1193 | * host process address space, so we need to be careful not to confuse | ||
1194 | * e.g. cache management IPIs. | ||
1195 | */ | ||
1196 | kvm_mips_suspend_mm(cpu); | ||
1197 | |||
1198 | r = vcpu->arch.vcpu_run(run, vcpu); | ||
1199 | |||
1200 | /* We may have migrated while handling guest exits */ | ||
1201 | cpu = smp_processor_id(); | ||
1202 | |||
1203 | /* Restore normal Linux process memory map */ | ||
1204 | if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & | ||
1205 | asid_version_mask(cpu))) | ||
1206 | get_new_mmu_context(current->mm, cpu); | ||
1207 | write_c0_entryhi(cpu_asid(cpu, current->mm)); | ||
1208 | TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd); | ||
1209 | kvm_mips_resume_mm(cpu); | ||
1210 | |||
1211 | htw_start(); | ||
1212 | |||
1213 | pagefault_enable(); | ||
1214 | |||
1215 | return r; | ||
1216 | } | ||
1217 | |||
648 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | 1218 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { |
649 | /* exit handlers */ | 1219 | /* exit handlers */ |
650 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, | 1220 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, |
@@ -661,9 +1231,11 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | |||
661 | .handle_fpe = kvm_trap_emul_handle_fpe, | 1231 | .handle_fpe = kvm_trap_emul_handle_fpe, |
662 | .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, | 1232 | .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, |
663 | 1233 | ||
664 | .vm_init = kvm_trap_emul_vm_init, | ||
665 | .vcpu_init = kvm_trap_emul_vcpu_init, | 1234 | .vcpu_init = kvm_trap_emul_vcpu_init, |
1235 | .vcpu_uninit = kvm_trap_emul_vcpu_uninit, | ||
666 | .vcpu_setup = kvm_trap_emul_vcpu_setup, | 1236 | .vcpu_setup = kvm_trap_emul_vcpu_setup, |
1237 | .flush_shadow_all = kvm_trap_emul_flush_shadow_all, | ||
1238 | .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot, | ||
667 | .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, | 1239 | .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, |
668 | .queue_timer_int = kvm_mips_queue_timer_int_cb, | 1240 | .queue_timer_int = kvm_mips_queue_timer_int_cb, |
669 | .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, | 1241 | .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, |
@@ -675,8 +1247,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | |||
675 | .copy_reg_indices = kvm_trap_emul_copy_reg_indices, | 1247 | .copy_reg_indices = kvm_trap_emul_copy_reg_indices, |
676 | .get_one_reg = kvm_trap_emul_get_one_reg, | 1248 | .get_one_reg = kvm_trap_emul_get_one_reg, |
677 | .set_one_reg = kvm_trap_emul_set_one_reg, | 1249 | .set_one_reg = kvm_trap_emul_set_one_reg, |
678 | .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, | 1250 | .vcpu_load = kvm_trap_emul_vcpu_load, |
679 | .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs, | 1251 | .vcpu_put = kvm_trap_emul_vcpu_put, |
1252 | .vcpu_run = kvm_trap_emul_vcpu_run, | ||
1253 | .vcpu_reenter = kvm_trap_emul_vcpu_reenter, | ||
680 | }; | 1254 | }; |
681 | 1255 | ||
682 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) | 1256 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) |
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index b4c64bd3f723..b4cc8811a664 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | obj-y += cache.o dma-default.o extable.o fault.o \ | 5 | obj-y += cache.o dma-default.o extable.o fault.o \ |
6 | gup.o init.o mmap.o page.o page-funcs.o \ | 6 | gup.o init.o mmap.o page.o page-funcs.o \ |
7 | tlbex.o tlbex-fault.o tlb-funcs.o | 7 | pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o |
8 | 8 | ||
9 | ifdef CONFIG_CPU_MICROMIPS | 9 | ifdef CONFIG_CPU_MICROMIPS |
10 | obj-y += uasm-micromips.o | 10 | obj-y += uasm-micromips.o |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index e86ebcf5c071..653569bc0da7 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -538,5 +538,6 @@ unsigned long pgd_current[NR_CPUS]; | |||
538 | pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir); | 538 | pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir); |
539 | #ifndef __PAGETABLE_PMD_FOLDED | 539 | #ifndef __PAGETABLE_PMD_FOLDED |
540 | pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss; | 540 | pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss; |
541 | EXPORT_SYMBOL_GPL(invalid_pmd_table); | ||
541 | #endif | 542 | #endif |
542 | pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; | 543 | pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; |
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c index ce4473e7c0d2..0ae7b28b4db5 100644 --- a/arch/mips/mm/pgtable-64.c +++ b/arch/mips/mm/pgtable-64.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 1999, 2000 by Silicon Graphics | 6 | * Copyright (C) 1999, 2000 by Silicon Graphics |
7 | * Copyright (C) 2003 by Ralf Baechle | 7 | * Copyright (C) 2003 by Ralf Baechle |
8 | */ | 8 | */ |
9 | #include <linux/export.h> | ||
9 | #include <linux/init.h> | 10 | #include <linux/init.h> |
10 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
11 | #include <asm/fixmap.h> | 12 | #include <asm/fixmap.h> |
@@ -60,6 +61,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable) | |||
60 | p[-1] = pagetable; | 61 | p[-1] = pagetable; |
61 | } while (p != end); | 62 | } while (p != end); |
62 | } | 63 | } |
64 | EXPORT_SYMBOL_GPL(pmd_init); | ||
63 | #endif | 65 | #endif |
64 | 66 | ||
65 | pmd_t mk_pmd(struct page *page, pgprot_t prot) | 67 | pmd_t mk_pmd(struct page *page, pgprot_t prot) |
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c new file mode 100644 index 000000000000..05560b042d82 --- /dev/null +++ b/arch/mips/mm/pgtable.c | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | */ | ||
6 | #include <linux/export.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <linux/string.h> | ||
9 | #include <asm/pgalloc.h> | ||
10 | |||
11 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
12 | { | ||
13 | pgd_t *ret, *init; | ||
14 | |||
15 | ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); | ||
16 | if (ret) { | ||
17 | init = pgd_offset(&init_mm, 0UL); | ||
18 | pgd_init((unsigned long)ret); | ||
19 | memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, | ||
20 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | ||
21 | } | ||
22 | |||
23 | return ret; | ||
24 | } | ||
25 | EXPORT_SYMBOL_GPL(pgd_alloc); | ||
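pgd_alloc() is split into its own file and exported because trap-and-emulate KVM now keeps separate GVA page tables for guest kernel and guest user mode, one hanging off each dummy mm_struct. A hedged sketch of that use (field names follow this series; error handling is trimmed):

static int alloc_guest_gva_tables(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	kern_mm->pgd = pgd_alloc(kern_mm);	/* GVA->HPA table, guest kernel mode */
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);	/* GVA->HPA table, guest user mode */
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}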
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 55ce39606cb8..2465f83c79c3 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -22,6 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
25 | #include <linux/export.h> | ||
25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
26 | #include <linux/types.h> | 27 | #include <linux/types.h> |
27 | #include <linux/smp.h> | 28 | #include <linux/smp.h> |
@@ -34,6 +35,7 @@ | |||
34 | #include <asm/war.h> | 35 | #include <asm/war.h> |
35 | #include <asm/uasm.h> | 36 | #include <asm/uasm.h> |
36 | #include <asm/setup.h> | 37 | #include <asm/setup.h> |
38 | #include <asm/tlbex.h> | ||
37 | 39 | ||
38 | static int mips_xpa_disabled; | 40 | static int mips_xpa_disabled; |
39 | 41 | ||
@@ -344,7 +346,8 @@ static int allocate_kscratch(void) | |||
344 | } | 346 | } |
345 | 347 | ||
346 | static int scratch_reg; | 348 | static int scratch_reg; |
347 | static int pgd_reg; | 349 | int pgd_reg; |
350 | EXPORT_SYMBOL_GPL(pgd_reg); | ||
348 | enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; | 351 | enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; |
349 | 352 | ||
350 | static struct work_registers build_get_work_registers(u32 **p) | 353 | static struct work_registers build_get_work_registers(u32 **p) |
@@ -496,15 +499,9 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p) | |||
496 | } | 499 | } |
497 | } | 500 | } |
498 | 501 | ||
499 | /* | 502 | void build_tlb_write_entry(u32 **p, struct uasm_label **l, |
500 | * Write random or indexed TLB entry, and care about the hazards from | 503 | struct uasm_reloc **r, |
501 | * the preceding mtc0 and for the following eret. | 504 | enum tlb_write_entry wmode) |
502 | */ | ||
503 | enum tlb_write_entry { tlb_random, tlb_indexed }; | ||
504 | |||
505 | static void build_tlb_write_entry(u32 **p, struct uasm_label **l, | ||
506 | struct uasm_reloc **r, | ||
507 | enum tlb_write_entry wmode) | ||
508 | { | 505 | { |
509 | void(*tlbw)(u32 **) = NULL; | 506 | void(*tlbw)(u32 **) = NULL; |
510 | 507 | ||
@@ -627,6 +624,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l, | |||
627 | break; | 624 | break; |
628 | } | 625 | } |
629 | } | 626 | } |
627 | EXPORT_SYMBOL_GPL(build_tlb_write_entry); | ||
630 | 628 | ||
631 | static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, | 629 | static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, |
632 | unsigned int reg) | 630 | unsigned int reg) |
@@ -781,9 +779,8 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, | |||
781 | * TMP and PTR are scratch. | 779 | * TMP and PTR are scratch. |
782 | * TMP will be clobbered, PTR will hold the pmd entry. | 780 | * TMP will be clobbered, PTR will hold the pmd entry. |
783 | */ | 781 | */ |
784 | static void | 782 | void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, |
785 | build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | 783 | unsigned int tmp, unsigned int ptr) |
786 | unsigned int tmp, unsigned int ptr) | ||
787 | { | 784 | { |
788 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 785 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
789 | long pgdc = (long)pgd_current; | 786 | long pgdc = (long)pgd_current; |
@@ -859,6 +856,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | |||
859 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ | 856 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ |
860 | #endif | 857 | #endif |
861 | } | 858 | } |
859 | EXPORT_SYMBOL_GPL(build_get_pmde64); | ||
862 | 860 | ||
863 | /* | 861 | /* |
864 | * BVADDR is the faulting address, PTR is scratch. | 862 | * BVADDR is the faulting address, PTR is scratch. |
@@ -934,8 +932,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | |||
934 | * TMP and PTR are scratch. | 932 | * TMP and PTR are scratch. |
935 | * TMP will be clobbered, PTR will hold the pgd entry. | 933 | * TMP will be clobbered, PTR will hold the pgd entry. |
936 | */ | 934 | */ |
937 | static void __maybe_unused | 935 | void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) |
938 | build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) | ||
939 | { | 936 | { |
940 | if (pgd_reg != -1) { | 937 | if (pgd_reg != -1) { |
941 | /* pgd is in pgd_reg */ | 938 | /* pgd is in pgd_reg */ |
@@ -960,6 +957,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) | |||
960 | uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); | 957 | uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); |
961 | uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ | 958 | uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ |
962 | } | 959 | } |
960 | EXPORT_SYMBOL_GPL(build_get_pgde32); | ||
963 | 961 | ||
964 | #endif /* !CONFIG_64BIT */ | 962 | #endif /* !CONFIG_64BIT */ |
965 | 963 | ||
@@ -989,7 +987,7 @@ static void build_adjust_context(u32 **p, unsigned int ctx) | |||
989 | uasm_i_andi(p, ctx, ctx, mask); | 987 | uasm_i_andi(p, ctx, ctx, mask); |
990 | } | 988 | } |
991 | 989 | ||
992 | static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) | 990 | void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) |
993 | { | 991 | { |
994 | /* | 992 | /* |
995 | * Bug workaround for the Nevada. It seems as if under certain | 993 | * Bug workaround for the Nevada. It seems as if under certain |
@@ -1013,8 +1011,9 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) | |||
1013 | build_adjust_context(p, tmp); | 1011 | build_adjust_context(p, tmp); |
1014 | UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ | 1012 | UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ |
1015 | } | 1013 | } |
1014 | EXPORT_SYMBOL_GPL(build_get_ptep); | ||
1016 | 1015 | ||
1017 | static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) | 1016 | void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) |
1018 | { | 1017 | { |
1019 | int pte_off_even = 0; | 1018 | int pte_off_even = 0; |
1020 | int pte_off_odd = sizeof(pte_t); | 1019 | int pte_off_odd = sizeof(pte_t); |
@@ -1063,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) | |||
1063 | UASM_i_MTC0(p, 0, C0_ENTRYLO1); | 1062 | UASM_i_MTC0(p, 0, C0_ENTRYLO1); |
1064 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ | 1063 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ |
1065 | } | 1064 | } |
1065 | EXPORT_SYMBOL_GPL(build_update_entries); | ||
1066 | 1066 | ||
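Making these builders non-static gives KVM the pieces it needs to assemble its own GVA TLB refill fast path at runtime with uasm, using the signatures shown above. A hedged sketch of how they compose (register numbers 26/27 are the conventional $k0/$k1 scratch registers; label/relocation bookkeeping and the final icache flush are trimmed):

static u32 *emit_gva_refill(u32 *p, struct uasm_label **l, struct uasm_reloc **r)
{
	const unsigned int k0 = 26, k1 = 27;		/* $k0/$k1 scratch */

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, l, r, k0, k1);		/* k1 := pmd entry for BadVAddr */
#else
	build_get_pgde32(&p, k0, k1);			/* k1 := pgd entry for BadVAddr */
#endif
	build_get_ptep(&p, k0, k1);			/* k1 := address of the even/odd PTE pair */
	build_update_entries(&p, k0, k1);		/* load EntryLo0/EntryLo1 from the PTEs */
	build_tlb_write_entry(&p, l, r, tlb_random);	/* tlbwr, with hazard handling */
	uasm_i_eret(&p);				/* resume the guest */

	return p;
}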
1067 | struct mips_huge_tlb_info { | 1067 | struct mips_huge_tlb_info { |
1068 | int huge_pte; | 1068 | int huge_pte; |
@@ -1536,7 +1536,9 @@ static void build_loongson3_tlb_refill_handler(void) | |||
1536 | extern u32 handle_tlbl[], handle_tlbl_end[]; | 1536 | extern u32 handle_tlbl[], handle_tlbl_end[]; |
1537 | extern u32 handle_tlbs[], handle_tlbs_end[]; | 1537 | extern u32 handle_tlbs[], handle_tlbs_end[]; |
1538 | extern u32 handle_tlbm[], handle_tlbm_end[]; | 1538 | extern u32 handle_tlbm[], handle_tlbm_end[]; |
1539 | extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[]; | 1539 | extern u32 tlbmiss_handler_setup_pgd_start[]; |
1540 | extern u32 tlbmiss_handler_setup_pgd[]; | ||
1541 | EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd); | ||
1540 | extern u32 tlbmiss_handler_setup_pgd_end[]; | 1542 | extern u32 tlbmiss_handler_setup_pgd_end[]; |
1541 | 1543 | ||
1542 | static void build_setup_pgd(void) | 1544 | static void build_setup_pgd(void) |