about summary refs log tree commit diff stats
path: root/arch/mips/kvm
diff options
context:
space:
mode:
author: Linus Torvalds <torvalds@linux-foundation.org> 2016-05-19 14:27:09 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2016-05-19 14:27:09 -0400
commit: 7beaa24ba49717419e24d1f6321e8b3c265a719c (patch)
tree: a5c5433d3c7bfc4c23e67174463ccf519c8406f0 /arch/mips/kvm
parent: 07b75260ebc2c789724c594d7eaf0194fa47b3be (diff)
parent: 9842df62004f366b9fed2423e24df10542ee0dc5 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini: "Small release overall. x86: - miscellaneous fixes - AVIC support (local APIC virtualization, AMD version) s390: - polling for interrupts after a VCPU goes to halted state is now enabled for s390 - use hardware provided information about facility bits that do not need any hypervisor activity, and other fixes for cpu models and facilities - improve perf output - floating interrupt controller improvements. MIPS: - miscellaneous fixes PPC: - bugfixes only ARM: - 16K page size support - generic firmware probing layer for timer and GIC Christoffer Dall (KVM-ARM maintainer) says: "There are a few changes in this pull request touching things outside KVM, but they should all carry the necessary acks and it made the merge process much easier to do it this way." though actually the irqchip maintainers' acks didn't make it into the patches. Marc Zyngier, who is both irqchip and KVM-ARM maintainer, later acked at http://mid.gmane.org/573351D1.4060303@arm.com ('more formally and for documentation purposes')" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (82 commits) KVM: MTRR: remove MSR 0x2f8 KVM: x86: make hwapic_isr_update and hwapic_irr_update look the same svm: Manage vcpu load/unload when enable AVIC svm: Do not intercept CR8 when enable AVIC svm: Do not expose x2APIC when enable AVIC KVM: x86: Introducing kvm_x86_ops.apicv_post_state_restore svm: Add VMEXIT handlers for AVIC svm: Add interrupt injection via AVIC KVM: x86: Detect and Initialize AVIC support svm: Introduce new AVIC VMCB registers KVM: split kvm_vcpu_wake_up from kvm_vcpu_kick KVM: x86: Introducing kvm_x86_ops VCPU blocking/unblocking hooks KVM: x86: Introducing kvm_x86_ops VM init/destroy hooks KVM: x86: Rename kvm_apic_get_reg to kvm_lapic_get_reg KVM: x86: Misc LAPIC changes to expose helper functions KVM: shrink halt polling even more for invalid wakeups KVM: s390: set halt polling to 80 microseconds KVM: halt_polling: provide a way to qualify 
wakeups during poll KVM: PPC: Book3S HV: Re-enable XICS fast path for irqfd-generated interrupts kvm: Conditionally register IRQ bypass consumer ...
Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/emulate.c    89
-rw-r--r--  arch/mips/kvm/mips.c        9
-rw-r--r--  arch/mips/kvm/tlb.c        26
-rw-r--r--  arch/mips/kvm/trap_emul.c   2
4 files changed, 76 insertions, 50 deletions
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 8e945e866a73..396df6eb0a12 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
302 */ 302 */
303static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) 303static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
304{ 304{
305 ktime_t expires; 305 struct mips_coproc *cop0 = vcpu->arch.cop0;
306 ktime_t expires, threshold;
307 uint32_t count, compare;
306 int running; 308 int running;
307 309
308 /* Is the hrtimer pending? */ 310 /* Calculate the biased and scaled guest CP0_Count */
311 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
312 compare = kvm_read_c0_guest_compare(cop0);
313
314 /*
315 * Find whether CP0_Count has reached the closest timer interrupt. If
316 * not, we shouldn't inject it.
317 */
318 if ((int32_t)(count - compare) < 0)
319 return count;
320
321 /*
322 * The CP0_Count we're going to return has already reached the closest
323 * timer interrupt. Quickly check if it really is a new interrupt by
324 * looking at whether the interval until the hrtimer expiry time is
325 * less than 1/4 of the timer period.
326 */
309 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); 327 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
310 if (ktime_compare(now, expires) >= 0) { 328 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
329 if (ktime_before(expires, threshold)) {
311 /* 330 /*
312 * Cancel it while we handle it so there's no chance of 331 * Cancel it while we handle it so there's no chance of
313 * interference with the timeout handler. 332 * interference with the timeout handler.
@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
329 } 348 }
330 } 349 }
331 350
332 /* Return the biased and scaled guest CP0_Count */ 351 return count;
333 return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
334} 352}
335 353
336/** 354/**
@@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
420} 438}
421 439
422/** 440/**
423 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
424 * @vcpu: Virtual CPU.
425 *
426 * Recalculates and updates the expiry time of the hrtimer. This can be used
427 * after timer parameters have been altered which do not depend on the time that
428 * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
429 * kvm_mips_resume_hrtimer() are used directly).
430 *
431 * It is guaranteed that no timer interrupts will be lost in the process.
432 *
433 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
434 */
435static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
436{
437 ktime_t now;
438 uint32_t count;
439
440 /*
441 * freeze_hrtimer takes care of a timer interrupts <= count, and
442 * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
443 */
444 now = kvm_mips_freeze_hrtimer(vcpu, &count);
445 kvm_mips_resume_hrtimer(vcpu, now, count);
446}
447
448/**
449 * kvm_mips_write_count() - Modify the count and update timer. 441 * kvm_mips_write_count() - Modify the count and update timer.
450 * @vcpu: Virtual CPU. 442 * @vcpu: Virtual CPU.
451 * @count: Guest CP0_Count value to set. 443 * @count: Guest CP0_Count value to set.
@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
540 * kvm_mips_write_compare() - Modify compare and update timer. 532 * kvm_mips_write_compare() - Modify compare and update timer.
541 * @vcpu: Virtual CPU. 533 * @vcpu: Virtual CPU.
542 * @compare: New CP0_Compare value. 534 * @compare: New CP0_Compare value.
535 * @ack: Whether to acknowledge timer interrupt.
543 * 536 *
544 * Update CP0_Compare to a new value and update the timeout. 537 * Update CP0_Compare to a new value and update the timeout.
538 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
539 * any pending timer interrupt is preserved.
545 */ 540 */
546void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare) 541void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
547{ 542{
548 struct mips_coproc *cop0 = vcpu->arch.cop0; 543 struct mips_coproc *cop0 = vcpu->arch.cop0;
544 int dc;
545 u32 old_compare = kvm_read_c0_guest_compare(cop0);
546 ktime_t now;
547 uint32_t count;
549 548
550 /* if unchanged, must just be an ack */ 549 /* if unchanged, must just be an ack */
551 if (kvm_read_c0_guest_compare(cop0) == compare) 550 if (old_compare == compare) {
551 if (!ack)
552 return;
553 kvm_mips_callbacks->dequeue_timer_int(vcpu);
554 kvm_write_c0_guest_compare(cop0, compare);
552 return; 555 return;
556 }
557
558 /* freeze_hrtimer() takes care of timer interrupts <= count */
559 dc = kvm_mips_count_disabled(vcpu);
560 if (!dc)
561 now = kvm_mips_freeze_hrtimer(vcpu, &count);
562
563 if (ack)
564 kvm_mips_callbacks->dequeue_timer_int(vcpu);
553 565
554 /* Update compare */
555 kvm_write_c0_guest_compare(cop0, compare); 566 kvm_write_c0_guest_compare(cop0, compare);
556 567
557 /* Update timeout if count enabled */ 568 /* resume_hrtimer() takes care of timer interrupts > count */
558 if (!kvm_mips_count_disabled(vcpu)) 569 if (!dc)
559 kvm_mips_update_hrtimer(vcpu); 570 kvm_mips_resume_hrtimer(vcpu, now, count);
560} 571}
561 572
562/** 573/**
@@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
1095 1106
1096 /* If we are writing to COMPARE */ 1107 /* If we are writing to COMPARE */
1097 /* Clear pending timer interrupt, if any */ 1108 /* Clear pending timer interrupt, if any */
1098 kvm_mips_callbacks->dequeue_timer_int(vcpu);
1099 kvm_mips_write_compare(vcpu, 1109 kvm_mips_write_compare(vcpu,
1100 vcpu->arch.gprs[rt]); 1110 vcpu->arch.gprs[rt],
1111 true);
1101 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { 1112 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1102 unsigned int old_val, val, change; 1113 unsigned int old_val, val, change;
1103 1114
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 70ef1a43c114..dc052fb5c7a2 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -56,6 +56,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
56 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, 56 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
57 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, 57 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
58 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU }, 58 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
59 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
59 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, 60 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
60 {NULL} 61 {NULL}
61}; 62};
@@ -1079,7 +1080,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1079 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 1080 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1080 break; 1081 break;
1081 case KVM_CAP_MIPS_FPU: 1082 case KVM_CAP_MIPS_FPU:
1082 r = !!cpu_has_fpu; 1083 /* We don't handle systems with inconsistent cpu_has_fpu */
1084 r = !!raw_cpu_has_fpu;
1083 break; 1085 break;
1084 case KVM_CAP_MIPS_MSA: 1086 case KVM_CAP_MIPS_MSA:
1085 /* 1087 /*
@@ -1555,8 +1557,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1555 1557
1556 /* Disable MSA & FPU */ 1558 /* Disable MSA & FPU */
1557 disable_msa(); 1559 disable_msa();
1558 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) 1560 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1559 clear_c0_status(ST0_CU1 | ST0_FR); 1561 clear_c0_status(ST0_CU1 | ST0_FR);
1562 disable_fpu_hazard();
1563 }
1560 vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); 1564 vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
1561 } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { 1565 } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
1562 set_c0_status(ST0_CU1); 1566 set_c0_status(ST0_CU1);
@@ -1567,6 +1571,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1567 1571
1568 /* Disable FPU */ 1572 /* Disable FPU */
1569 clear_c0_status(ST0_CU1 | ST0_FR); 1573 clear_c0_status(ST0_CU1 | ST0_FR);
1574 disable_fpu_hazard();
1570 } 1575 }
1571 preempt_enable(); 1576 preempt_enable();
1572} 1577}
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index b9c52c1d35d6..ed021ae7867a 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -275,6 +275,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
275 int even; 275 int even;
276 struct kvm *kvm = vcpu->kvm; 276 struct kvm *kvm = vcpu->kvm;
277 const int flush_dcache_mask = 0; 277 const int flush_dcache_mask = 0;
278 int ret;
278 279
279 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { 280 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
280 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); 281 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
@@ -306,14 +307,18 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
306 pfn1 = kvm->arch.guest_pmap[gfn]; 307 pfn1 = kvm->arch.guest_pmap[gfn];
307 } 308 }
308 309
309 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
310 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | 310 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
311 (1 << 2) | (0x1 << 1); 311 (1 << 2) | (0x1 << 1);
312 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | 312 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
313 (1 << 2) | (0x1 << 1); 313 (1 << 2) | (0x1 << 1);
314 314
315 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 315 preempt_disable();
316 flush_dcache_mask); 316 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
317 ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
318 flush_dcache_mask);
319 preempt_enable();
320
321 return ret;
317} 322}
318EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault); 323EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
319 324
@@ -368,6 +373,7 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
368 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; 373 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
369 struct kvm *kvm = vcpu->kvm; 374 struct kvm *kvm = vcpu->kvm;
370 kvm_pfn_t pfn0, pfn1; 375 kvm_pfn_t pfn0, pfn1;
376 int ret;
371 377
372 if ((tlb->tlb_hi & VPN2_MASK) == 0) { 378 if ((tlb->tlb_hi & VPN2_MASK) == 0) {
373 pfn0 = 0; 379 pfn0 = 0;
@@ -394,9 +400,6 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
394 *hpa1 = pfn1 << PAGE_SHIFT; 400 *hpa1 = pfn1 << PAGE_SHIFT;
395 401
396 /* Get attributes from the Guest TLB */ 402 /* Get attributes from the Guest TLB */
397 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
398 kvm_mips_get_kernel_asid(vcpu) :
399 kvm_mips_get_user_asid(vcpu));
400 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | 403 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
401 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); 404 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
402 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | 405 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
@@ -405,8 +408,15 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
405 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, 408 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
406 tlb->tlb_lo0, tlb->tlb_lo1); 409 tlb->tlb_lo0, tlb->tlb_lo1);
407 410
408 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 411 preempt_disable();
409 tlb->tlb_mask); 412 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
413 kvm_mips_get_kernel_asid(vcpu) :
414 kvm_mips_get_user_asid(vcpu));
415 ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
416 tlb->tlb_mask);
417 preempt_enable();
418
419 return ret;
410} 420}
411EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault); 421EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
412 422
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index fd43f0afdb9f..6ba0fafcecbc 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -547,7 +547,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
547 kvm_mips_write_count(vcpu, v); 547 kvm_mips_write_count(vcpu, v);
548 break; 548 break;
549 case KVM_REG_MIPS_CP0_COMPARE: 549 case KVM_REG_MIPS_CP0_COMPARE:
550 kvm_mips_write_compare(vcpu, v); 550 kvm_mips_write_compare(vcpu, v, false);
551 break; 551 break;
552 case KVM_REG_MIPS_CP0_CAUSE: 552 case KVM_REG_MIPS_CP0_CAUSE:
553 /* 553 /*