-rw-r--r--	arch/x86/kernel/asm-offsets_64.c	9
-rw-r--r--	arch/x86/kernel/kvm.c	24
2 files changed, 33 insertions, 0 deletions
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 210927ee2e74..99332f550c48 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -13,6 +13,10 @@ static char syscalls_ia32[] = {
 #include <asm/syscalls_32.h>
 };
 
+#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#include <asm/kvm_para.h>
+#endif
+
 int main(void)
 {
 #ifdef CONFIG_PARAVIRT
@@ -22,6 +26,11 @@ int main(void)
 	BLANK();
 #endif
 
+#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+	OFFSET(KVM_STEAL_TIME_preempted, kvm_steal_time, preempted);
+	BLANK();
+#endif
+
 #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
 	ENTRY(bx);
 	ENTRY(cx);
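
As context for the OFFSET() line added above: a minimal sketch of how the asm-offsets mechanism turns a C offsetof() into a constant that hand-written assembly can name. This is a simplified illustration, not the kernel's exact definitions; the real DEFINE()/OFFSET() macros live in include/linux/kbuild.h, and the struct below is a stand-in whose layout merely mimics the real kvm_steal_time ABI struct.

#include <stddef.h>

/* Simplified stand-ins; assumptions for illustration only. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

struct kvm_steal_time {		/* stand-in layout, not the real header */
	unsigned long steal;	/* 8 bytes */
	unsigned int version;	/* 4 bytes */
	unsigned int flags;	/* 4 bytes */
	unsigned char preempted;	/* the byte the x86-64 thunk tests */
};

int main(void)
{
	/* Emits a marker like ".ascii \"->KVM_STEAL_TIME_preempted $16\""
	 * into the compiler's assembly output; the build then greps these
	 * markers and writes "#define KVM_STEAL_TIME_preempted 16" into
	 * the generated asm-offsets.h for assembly code to #include. */
	OFFSET(KVM_STEAL_TIME_preempted, kvm_steal_time, preempted);
	return 0;
}

The point of the indirection is that assembly sources cannot evaluate offsetof(), so the compiler is made to compute the offset once, at build time, and publish it as a plain numeric macro.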
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 334173d2665a..d05797be2f64 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -589,6 +589,7 @@ out:
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_X86_32
 __visible bool __kvm_vcpu_is_preempted(long cpu)
 {
 	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
@@ -597,6 +598,29 @@ __visible bool __kvm_vcpu_is_preempted(long cpu)
 }
 PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
 
+#else
+
+#include <asm/asm-offsets.h>
+
+extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
+
+/*
+ * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
+ * restoring to/from the stack.
+ */
+asm(
+".pushsection .text;"
+".global __raw_callee_save___kvm_vcpu_is_preempted;"
+".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
+"__raw_callee_save___kvm_vcpu_is_preempted:"
+"movq __per_cpu_offset(,%rdi,8), %rax;"
+"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+"setne %al;"
+"ret;"
+".popsection");
+
+#endif
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
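
To make the hand-written thunk easier to follow, here is a plain-C sketch of the same computation. The dummy definitions below are assumptions for illustration only; in the kernel, __per_cpu_offset[] is the real per-CPU base-offset table, steal_time is a per-CPU variable populated by the KVM host, and KVM_STEAL_TIME_preempted comes from the generated asm-offsets.h (16 in the layout current at the time of this patch). Under the x86-64 SysV ABI the cpu argument arrives in %rdi, so "movq __per_cpu_offset(,%rdi,8), %rax" is an array index, and "cmpb/setne" tests the preempted byte and returns its truth value in %al.

#include <stdbool.h>

/* Dummy stand-ins so the sketch is self-contained; the kernel's
 * symbols of the same names are assumptions mirrored for clarity. */
struct kvm_steal_time { unsigned char raw[64]; };	/* opaque stand-in */
static struct kvm_steal_time steal_time;		/* per-CPU in the kernel */
static unsigned long __per_cpu_offset[64];		/* per-CPU base offsets */
#define KVM_STEAL_TIME_preempted 16			/* from asm-offsets.h */

static bool vcpu_is_preempted_sketch(long cpu)
{
	/* movq __per_cpu_offset(,%rdi,8), %rax */
	unsigned long base = __per_cpu_offset[cpu];

	/* cmpb $0, KVM_STEAL_TIME_preempted+steal_time(%rax); setne %al */
	const unsigned char *preempted =
		(const unsigned char *)&steal_time
		+ base + KVM_STEAL_TIME_preempted;
	return *preempted != 0;
}

int main(void)
{
	/* With zeroed dummy data, no vCPU appears preempted. */
	return vcpu_is_preempted_sketch(0) ? 1 : 0;
}

The reason for writing the real version in assembly rather than C is visible in the #ifdef structure above: on the C path, PV_CALLEE_SAVE_REGS_THUNK() wraps the function in a thunk that pushes and pops all caller-saved registers, which is wasteful around a three-instruction byte test, so the x86-64 side supplies the callee-save-named symbol directly.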