 arch/powerpc/include/asm/kvm_host.h |  3 +++
 arch/powerpc/include/asm/kvm_ppc.h  |  3 +++
 arch/powerpc/kernel/asm-offsets.c   |  1 +
 arch/powerpc/kvm/44x_tlb.c          |  9 ++++++++-
 arch/powerpc/kvm/booke_interrupts.S | 51 ++++++++++++++++++++++++++++++++++-----------------
 arch/powerpc/kvm/powerpc.c          | 15 +++++++++++++++
 6 files changed, 64 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index dc3a7562bae4..4338b03da8f9 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -82,6 +82,9 @@ struct kvm_vcpu_arch {
 	/* Pages which are referenced in the shadow TLB. */
 	struct page *shadow_pages[PPC44x_TLB_SIZE];
 
+	/* Track which TLB entries we've modified in the current exit. */
+	u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
+
 	u32 host_stack;
 	u32 host_pid;
 	u32 host_dbcr0;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a8b068792260..8e7e42959903 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -65,6 +65,9 @@ extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                   gva_t eend, u32 asid);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 
+/* XXX Book E specific */
+extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
+
 extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
 
 static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 594064953951..1631d670b9ed 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -357,6 +357,7 @@ int main(void)
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+	DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
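
The asm-offsets.c hunk is what lets the assembly in booke_interrupts.S address the new array: DEFINE() emits each offsetof() result as an assembler-visible constant (here VCPU_SHADOW_MOD), generated into asm-offsets.h at build time. Below is a minimal standalone sketch of that mechanism; the struct is a hypothetical stand-in, not the kernel's kvm_vcpu layout.

/* sketch of the asm-offsets idea; struct layout is hypothetical */
#include <stddef.h>
#include <stdio.h>

#define SKETCH_TLB_SIZE 64

struct vcpu_arch_sketch {
	unsigned int shadow_tlb[SKETCH_TLB_SIZE * 4];	/* 4 words per entry */
	unsigned char shadow_tlb_mod[SKETCH_TLB_SIZE];
};

int main(void)
{
	/* The kernel's DEFINE() macro embeds these values in generated
	 * assembly, which is post-processed into asm-offsets.h so that
	 * booke_interrupts.S can write "addi r5, r4, VCPU_SHADOW_MOD". */
	printf("#define VCPU_SHADOW_TLB %zu\n",
	       offsetof(struct vcpu_arch_sketch, shadow_tlb));
	printf("#define VCPU_SHADOW_MOD %zu\n",
	       offsetof(struct vcpu_arch_sketch, shadow_tlb_mod));
	return 0;
}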
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index a207d16b9dbb..06a5fcfc4d33 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -125,6 +125,11 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 	}
 }
 
+void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
+{
+	vcpu->arch.shadow_tlb_mod[i] = 1;
+}
+
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
  * the shadow TLB. */
 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
@@ -172,10 +177,10 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 	 * use host large pages in the future. */
 	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
 	               | PPC44x_TLB_4K;
-
 	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
 	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
 	                                            vcpu->arch.msr & MSR_PR);
+	kvmppc_tlbe_set_modified(vcpu, victim);
 
 	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
 	            stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
@@ -209,6 +214,7 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 		kvmppc_44x_shadow_release(vcpu, i);
 		stlbe->word0 = 0;
+		kvmppc_tlbe_set_modified(vcpu, i);
 		KVMTRACE_5D(STLB_INVAL, vcpu, i,
 		            stlbe->tid, stlbe->word0, stlbe->word1,
 		            stlbe->word2, handler);
@@ -229,6 +235,7 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 
 		kvmppc_44x_shadow_release(vcpu, i);
 		stlbe->word0 = 0;
+		kvmppc_tlbe_set_modified(vcpu, i);
 		KVMTRACE_5D(STLB_INVAL, vcpu, i,
 		            stlbe->tid, stlbe->word0, stlbe->word1,
 		            stlbe->word2, handler);
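
The pattern these three 44x_tlb.c call sites establish: every store to a shadow TLB entry, whether a new mapping or an invalidation, is paired with kvmppc_tlbe_set_modified(), so the exit path can trust the bitmap. A compilable sketch of that invariant follows; the types and names are hypothetical stand-ins for the kernel's structures.

/* sketch of the mark-on-write invariant; names are hypothetical */
#include <stdio.h>

#define SKETCH_TLB_SIZE 64

struct tlbe_sketch {
	unsigned int tid, word0, word1, word2;
};

static struct tlbe_sketch shadow_tlb[SKETCH_TLB_SIZE];
static unsigned char shadow_tlb_mod[SKETCH_TLB_SIZE];

static void set_modified(unsigned int i)
{
	shadow_tlb_mod[i] = 1;	/* mirrors kvmppc_tlbe_set_modified() */
}

/* Mirrors kvmppc_mmu_map(): fill the victim entry, then mark it. */
static void map_entry(unsigned int victim, struct tlbe_sketch e)
{
	shadow_tlb[victim] = e;
	set_modified(victim);
}

/* Mirrors the invalidate paths: clearing word0 clears the valid bit, and
 * marking the entry ensures the stale hardware copy is overwritten too. */
static void invalidate_entry(unsigned int i)
{
	shadow_tlb[i].word0 = 0;
	set_modified(i);
}

int main(void)
{
	struct tlbe_sketch e = { .tid = 1, .word0 = 0x1234 };

	map_entry(5, e);
	invalidate_entry(5);
	printf("entry 5 modified: %d\n", shadow_tlb_mod[5]);
	return 0;
}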
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 3e88dfa1dbe4..564ea32ecbac 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -335,7 +335,7 @@ lightweight_exit:
 	lwz	r3, VCPU_PID(r4)
 	mtspr	SPRN_PID, r3
 
-	/* Prevent all TLB updates. */
+	/* Prevent all asynchronous TLB updates. */
 	mfmsr	r5
 	lis	r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
 	ori	r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
@@ -344,28 +344,45 @@ lightweight_exit:
 
 	/* Load the guest mappings, leaving the host's "pinned" kernel mappings
 	 * in place. */
-	/* XXX optimization: load only modified guest entries. */
 	mfspr	r10, SPRN_MMUCR			/* Save host MMUCR. */
-	lis	r8, tlb_44x_hwater@ha
-	lwz	r8, tlb_44x_hwater@l(r8)
-	addi	r9, r4, VCPU_SHADOW_TLB - 4
-	li	r6, 0
+	li	r5, PPC44x_TLB_SIZE
+	lis	r5, tlb_44x_hwater@ha
+	lwz	r5, tlb_44x_hwater@l(r5)
+	mtctr	r5
+	addi	r9, r4, VCPU_SHADOW_TLB
+	addi	r5, r4, VCPU_SHADOW_MOD
+	li	r3, 0
 1:
+	lbzx	r7, r3, r5
+	cmpwi	r7, 0
+	beq	3f
+
 	/* Load guest entry. */
-	lwzu	r7, 4(r9)
+	mulli	r11, r3, TLBE_BYTES
+	add	r11, r11, r9
+	lwz	r7, 0(r11)
 	mtspr	SPRN_MMUCR, r7
-	lwzu	r7, 4(r9)
-	tlbwe	r7, r6, PPC44x_TLB_PAGEID
-	lwzu	r7, 4(r9)
-	tlbwe	r7, r6, PPC44x_TLB_XLAT
-	lwzu	r7, 4(r9)
-	tlbwe	r7, r6, PPC44x_TLB_ATTRIB
-	/* Increment index. */
-	addi	r6, r6, 1
-	cmpw	r6, r8
-	blt	1b
+	lwz	r7, 4(r11)
+	tlbwe	r7, r3, PPC44x_TLB_PAGEID
+	lwz	r7, 8(r11)
+	tlbwe	r7, r3, PPC44x_TLB_XLAT
+	lwz	r7, 12(r11)
+	tlbwe	r7, r3, PPC44x_TLB_ATTRIB
+3:
+	addi	r3, r3, 1			/* Increment index. */
+	bdnz	1b
+
 	mtspr	SPRN_MMUCR, r10			/* Restore host MMUCR. */
 
+	/* Clear bitmap of modified TLB entries */
+	li	r5, PPC44x_TLB_SIZE>>2
+	mtctr	r5
+	addi	r5, r4, VCPU_SHADOW_MOD - 4
+	li	r6, 0
+1:
+	stwu	r6, 4(r5)
+	bdnz	1b
+
 	iccci	0, 0 /* XXX hack */
 
 	/* Load some guest volatiles. */
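
A C rendering of the rewritten lightweight_exit loop may help here. This is a sketch of the control flow only; tlbwe_sketch() and the globals are hypothetical stand-ins, not code that exists in the kernel. Entries above the tlb_44x_hwater watermark hold the host's pinned kernel mappings and are never touched.

/* sketch of the modified-entries reload loop; names are hypothetical */
#include <string.h>

#define SKETCH_TLB_SIZE 64

struct tlbe_sketch {
	unsigned int mmucr, pageid, xlat, attrib;	/* one TLBE_BYTES record */
};

static struct tlbe_sketch shadow_tlb[SKETCH_TLB_SIZE];
static unsigned char shadow_tlb_mod[SKETCH_TLB_SIZE];
static unsigned int hwater_sketch = 60;	/* stands in for tlb_44x_hwater */

/* Stand-in for the mtspr/tlbwe sequence that writes one hardware entry. */
static void tlbwe_sketch(unsigned int index, const struct tlbe_sketch *e)
{
	(void)index;
	(void)e;
}

static void reload_modified_entries(void)
{
	unsigned int i;

	for (i = 0; i < hwater_sketch; i++) {
		if (!shadow_tlb_mod[i])
			continue;	/* the "beq 3f" skip in the asm */
		tlbwe_sketch(i, &shadow_tlb[i]);
	}

	/* The asm clears the bitmap one word at a time (stwu in a ctr
	 * loop); memset expresses the same post-condition. */
	memset(shadow_tlb_mod, 0, sizeof(shadow_tlb_mod));
}

int main(void)
{
	shadow_tlb_mod[3] = 1;
	reload_modified_entries();
	return 0;
}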
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index b75607180ddb..90a6fc422b23 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -27,6 +27,7 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
+#include <asm/tlbflush.h>
 
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -307,14 +308,28 @@ static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int i;
+
 	if (vcpu->guest_debug.enabled)
 		kvmppc_load_guest_debug_registers(vcpu);
+
+	/* Mark every guest entry in the shadow TLB modified, so that they
+	 * will all be reloaded on the next vcpu run (instead of being
+	 * demand-faulted). */
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_tlbe_set_modified(vcpu, i);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_debug.enabled)
 		kvmppc_restore_host_debug_state(vcpu);
+
+	/* Don't leave guest TLB entries resident when being de-scheduled. */
+	/* XXX It would be nice to differentiate between heavyweight exit and
+	 * sched_out here, since we could avoid the TLB flush for heavyweight
+	 * exits. */
+	_tlbia();
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
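
The two powerpc.c hooks work as a pair: kvm_arch_vcpu_put() flushes the hardware TLB, so no guest translation survives de-scheduling, and kvm_arch_vcpu_load() compensates by marking every shadow entry modified, so the next lightweight exit rewrites them all in one pass rather than demand-faulting them back one at a time. A sketch of that handshake, with hypothetical stand-ins for _tlbia() and the per-vcpu state:

/* sketch of the vcpu load/put interplay; names are hypothetical */
#define SKETCH_TLB_SIZE 64

static unsigned char shadow_tlb_mod[SKETCH_TLB_SIZE];
static unsigned int hwater_sketch = 60;	/* stands in for tlb_44x_hwater */

static void tlbia_sketch(void)
{
	/* Stand-in for _tlbia(): invalidate the whole hardware TLB. */
}

/* Mirrors kvm_arch_vcpu_put(): guest translations must not stay resident
 * while another task runs on this cpu. */
static void vcpu_put_sketch(void)
{
	tlbia_sketch();
}

/* Mirrors kvm_arch_vcpu_load(): after the flush above, the bitmap no longer
 * reflects what is in hardware, so mark everything for reload. */
static void vcpu_load_sketch(void)
{
	unsigned int i;

	for (i = 0; i <= hwater_sketch; i++)
		shadow_tlb_mod[i] = 1;
}

int main(void)
{
	vcpu_load_sketch();
	vcpu_put_sketch();
	return 0;
}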