author     Denis Plotnikov <dplotnikov@virtuozzo.com>    2017-04-07 05:09:52 -0400
committer  Radim Krčmář <rkrcmar@redhat.com>             2017-04-12 14:17:15 -0400
commit     c5e8ec8e9beffb9481dba27ab5048c03c8585d7d
tree       05586fdc5b1af75fd56476339aff0c12f3345aa7
parent     900ab14ca93dce3f95fdb2bd9c4b654d4d5f2571
KVM: x86: remaster kvm_write_tsc code
Reuse existing code instead of inline asm. Make the TSC
synchronization part of the code more concise and clear.
Signed-off-by: Denis Plotnikov <dplotnikov@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
-rw-r--r--   arch/x86/kvm/x86.c   52
1 file changed, 13 insertions(+), 39 deletions(-)
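For context before reading the diff: the inline asm being dropped existed only
because the old check worked in microseconds, and converting the TSC delta to
microseconds needs a signed 64-bit division, which do_div() cannot do, so
32-bit builds open-coded an idivl with an exception fixup for overflow. A
minimal standalone sketch of that removed logic follows; the helper name,
plain C types, and parameters are illustrative, not kernel code.

#include <stdbool.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000LL

/*
 * Sketch of the check the patch removes: convert the difference between the
 * newly written TSC value and the last one to microseconds, subtract the
 * elapsed wall-clock time (also in microseconds), and treat the write as
 * "synchronizing" if the absolute difference is under one second.  The
 * cycles-to-microseconds step is the signed division that forced the idivl
 * inline asm on 32-bit x86.  The caller is assumed to guarantee tsc_khz != 0.
 */
static bool old_usec_window_check(uint64_t data, uint64_t last_tsc_write,
                                  uint64_t elapsed_ns, uint32_t tsc_khz)
{
        int64_t usdiff = (int64_t)(data - last_tsc_write);

        usdiff = usdiff * 1000 / tsc_khz;        /* TSC cycles -> microseconds */
        usdiff -= (int64_t)(elapsed_ns / 1000);  /* nanoseconds -> microseconds */
        if (usdiff < 0)
                usdiff = -usdiff;

        return usdiff < USEC_PER_SEC;
}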
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3046b7f278ae..422f803fa365 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1443,10 +1443,10 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
         struct kvm *kvm = vcpu->kvm;
         u64 offset, ns, elapsed;
         unsigned long flags;
-        s64 usdiff;
         bool matched;
         bool already_matched;
         u64 data = msr->data;
+        bool synchronizing = false;
 
         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
         offset = kvm_compute_tsc_offset(vcpu, data);
@@ -1454,51 +1454,25 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
         elapsed = ns - kvm->arch.last_tsc_nsec;
 
         if (vcpu->arch.virtual_tsc_khz) {
-                int faulted = 0;
-
-                /* n.b - signed multiplication and division required */
-                usdiff = data - kvm->arch.last_tsc_write;
-#ifdef CONFIG_X86_64
-                usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
-#else
-                /* do_div() only does unsigned */
-                asm("1: idivl %[divisor]\n"
-                    "2: xor %%edx, %%edx\n"
-                    "   movl $0, %[faulted]\n"
-                    "3:\n"
-                    ".section .fixup,\"ax\"\n"
-                    "4: movl $1, %[faulted]\n"
-                    "   jmp 3b\n"
-                    ".previous\n"
-
-                    _ASM_EXTABLE(1b, 4b)
-
-                    : "=A"(usdiff), [faulted] "=r" (faulted)
-                    : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
-
-#endif
-                do_div(elapsed, 1000);
-                usdiff -= elapsed;
-                if (usdiff < 0)
-                        usdiff = -usdiff;
-
-                /* idivl overflow => difference is larger than USEC_PER_SEC */
-                if (faulted)
-                        usdiff = USEC_PER_SEC;
-        } else
-                usdiff = USEC_PER_SEC; /* disable TSC match window below */
+                u64 tsc_exp = kvm->arch.last_tsc_write +
+                                        nsec_to_cycles(vcpu, elapsed);
+                u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
+                /*
+                 * Special case: TSC write with a small delta (1 second)
+                 * of virtual cycle time against real time is
+                 * interpreted as an attempt to synchronize the CPU.
+                 */
+                synchronizing = data < tsc_exp + tsc_hz &&
+                                data + tsc_hz > tsc_exp;
+        }
 
         /*
-         * Special case: TSC write with a small delta (1 second) of virtual
-         * cycle time against real time is interpreted as an attempt to
-         * synchronize the CPU.
-         *
          * For a reliable TSC, we can match TSC offsets, and for an unstable
          * TSC, we add elapsed time in this computation.  We could let the
          * compensation code attempt to catch up if we fall behind, but
          * it's better to try to match offsets from the beginning.
          */
-        if (usdiff < USEC_PER_SEC &&
+        if (synchronizing &&
             vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
                 if (!check_tsc_unstable()) {
                         offset = kvm->arch.cur_tsc_offset;
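A note on the replacement test: with tsc_exp the TSC value the guest would be
expected to write now (the last written value plus the elapsed time converted
by the existing nsec_to_cycles() helper) and tsc_hz one second's worth of TSC
cycles, the pair of unsigned comparisons

        data < tsc_exp + tsc_hz && data + tsc_hz > tsc_exp

is, ignoring 64-bit wraparound, just |data - tsc_exp| < tsc_hz: the same
one-second window expressed in cycles instead of microseconds, with no signed
arithmetic or division needed.  A minimal standalone sketch, with illustrative
names and plain C types rather than kernel code:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the new check: a TSC write is treated as an attempt to
 * synchronize when it lands within one second's worth of cycles of the
 * expected value.  The two unsigned comparisons implement
 * |data - tsc_exp| < tsc_hz without signed subtraction or division.
 */
static bool new_cycle_window_check(uint64_t data, uint64_t tsc_exp,
                                   uint32_t tsc_khz)
{
        uint64_t tsc_hz = (uint64_t)tsc_khz * 1000;     /* cycles per second */

        return data < tsc_exp + tsc_hz && data + tsc_hz > tsc_exp;
}

Computing the window in the cycle domain is what lets the patch reuse
nsec_to_cycles() instead of the open-coded unit conversion, which is the
"existing code" the commit message refers to.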