path: root/arch/x86/include/asm
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-13 18:31:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-13 18:31:08 -0500
commit	66cdd0ceaf65a18996f561b770eedde1d123b019 (patch)
tree	4892eaa422d366fce5d1e866ff1fe0988af95569 /arch/x86/include/asm
parent	896ea17d3da5f44b2625c9cda9874d7dfe447393 (diff)
parent	58b7825bc324da55415034a9f6ca5d716b8fd898 (diff)
Merge tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Marcelo Tosatti:
 "Considerable KVM/PPC work, x86 kvmclock vsyscall support,
  IA32_TSC_ADJUST MSR emulation, amongst others."

Fix up trivial conflict in kernel/sched/core.c due to cross-cpu migration
notifier added next to rq migration call-back.

* tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (156 commits)
  KVM: emulator: fix real mode segment checks in address linearization
  VMX: remove unneeded enable_unrestricted_guest check
  KVM: VMX: fix DPL during entry to protected mode
  x86/kexec: crash_vmclear_local_vmcss needs __rcu
  kvm: Fix irqfd resampler list walk
  KVM: VMX: provide the vmclear function and a bitmap to support VMCLEAR in kdump
  x86/kexec: VMCLEAR VMCSs loaded on all cpus if necessary
  KVM: MMU: optimize for set_spte
  KVM: PPC: booke: Get/set guest EPCR register using ONE_REG interface
  KVM: PPC: bookehv: Add EPCR support in mtspr/mfspr emulation
  KVM: PPC: bookehv: Add guest computation mode for irq delivery
  KVM: PPC: Make EPCR a valid field for booke64 and bookehv
  KVM: PPC: booke: Extend MAS2 EPN mask for 64-bit
  KVM: PPC: e500: Mask MAS2 EPN high 32-bits in 32/64 tlbwe emulation
  KVM: PPC: Mask ea's high 32-bits in 32/64 instr emulation
  KVM: PPC: e500: Add emulation helper for getting instruction ea
  KVM: PPC: bookehv64: Add support for interrupt handling
  KVM: PPC: bookehv: Remove GET_VCPU macro from exception handler
  KVM: PPC: booke: Fix get_tb() compile error on 64-bit
  KVM: PPC: e500: Silence bogus GCC warning in tlb code
  ...
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--	arch/x86/include/asm/clocksource.h	1
-rw-r--r--	arch/x86/include/asm/cpufeature.h	1
-rw-r--r--	arch/x86/include/asm/fixmap.h	5
-rw-r--r--	arch/x86/include/asm/kexec.h	3
-rw-r--r--	arch/x86/include/asm/kvm_guest.h	6
-rw-r--r--	arch/x86/include/asm/kvm_host.h	24
-rw-r--r--	arch/x86/include/asm/msr-index.h	1
-rw-r--r--	arch/x86/include/asm/pvclock.h	47
-rw-r--r--	arch/x86/include/asm/vmx.h	3
-rw-r--r--	arch/x86/include/asm/vsyscall.h	20
10 files changed, 105 insertions, 6 deletions
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index 0bdbbb3b9ce7..16a57f4ed64d 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -8,6 +8,7 @@
 #define VCLOCK_NONE 0	/* No vDSO clock available.	*/
 #define VCLOCK_TSC  1	/* vDSO should use vread_tsc.	*/
 #define VCLOCK_HPET 2	/* vDSO should use vread_hpet.	*/
+#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
 
 struct arch_clocksource_data {
 	int vclock_mode;
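
A hedged sketch of how a paravirtual clocksource might advertise the new mode through arch_clocksource_data; the clocksource name and the empty read callback are illustrative only, not the driver code in this merge:

#include <linux/clocksource.h>
#include <asm/clocksource.h>
#include <asm/pvclock.h>

static cycle_t example_pv_read(struct clocksource *cs)
{
	/* A real driver would call pvclock_clocksource_read() on its
	 * per-CPU pvclock_vcpu_time_info here. */
	return 0;
}

static struct clocksource example_pv_clocksource = {
	.name	= "pvclock-example",		/* hypothetical name */
	.read	= example_pv_read,
	.rating	= 400,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	/* Tell the vDSO to use the pvclock fast path. */
	.archdata = { .vclock_mode = VCLOCK_PVCLOCK },
};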
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index da40b1e2228e..2d9075e863a0 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -202,6 +202,7 @@
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST	(9*32+ 1) /* TSC adjustment MSR 0x3b */
 #define X86_FEATURE_BMI1	(9*32+ 3) /* 1st group bit manipulation extensions */
 #define X86_FEATURE_HLE		(9*32+ 4) /* Hardware Lock Elision */
 #define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4da3c0c4c974..a09c28571064 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -19,6 +19,7 @@
 #include <asm/acpi.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
+#include <asm/pvclock.h>
 #ifdef CONFIG_X86_32
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
@@ -81,6 +82,10 @@ enum fixed_addresses {
 	VVAR_PAGE,
 	VSYSCALL_HPET,
 #endif
+#ifdef CONFIG_PARAVIRT_CLOCK
+	PVCLOCK_FIXMAP_BEGIN,
+	PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
+#endif
 	FIX_DBGP_BASE,
 	FIX_EARLYCON_MEM_BASE,
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 317ff1703d0b..6080d2694bad 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -163,6 +163,9 @@ struct kimage_arch {
 };
 #endif
 
+typedef void crash_vmclear_fn(void);
+extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_KEXEC_H */
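
A hedged sketch of how this RCU-protected hook is meant to be used: a hypervisor module publishes a callback that VMCLEARs its loaded VMCSs, and the kexec crash path invokes it if present. Only the exported symbol and typedef come from the diff above; the function names are illustrative:

#include <linux/rcupdate.h>
#include <asm/kexec.h>

/* In the VMX module (name illustrative): clear VMCSs loaded on this CPU
 * so the kdump kernel sees consistent memory. */
static void example_vmclear_local_vmcss(void)
{
	/* VMCLEAR every VMCS currently loaded on this CPU. */
}

static void example_register_crash_hook(void)
{
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   example_vmclear_local_vmcss);
}

/* In the crash path: call the handler only if one is registered. */
static void example_crash_vmclear(void)
{
	crash_vmclear_fn *do_vmclear;

	rcu_read_lock();
	do_vmclear = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear)
		do_vmclear();
	rcu_read_unlock();
}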
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
new file mode 100644
index 000000000000..a92b1763c419
--- /dev/null
+++ b/arch/x86/include/asm/kvm_guest.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_KVM_GUEST_H
+#define _ASM_X86_KVM_GUEST_H
+
+int kvm_setup_vsyscall_timeinfo(void);
+
+#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b2e11f452435..dc87b65e9c3a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -22,6 +22,8 @@
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
 #include <linux/perf_event.h>
+#include <linux/pvclock_gtod.h>
+#include <linux/clocksource.h>
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
@@ -442,6 +444,7 @@ struct kvm_vcpu_arch {
 	s8  virtual_tsc_shift;
 	u32 virtual_tsc_mult;
 	u32 virtual_tsc_khz;
+	s64 ia32_tsc_adjust_msr;
 
 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
 	unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -559,6 +562,12 @@ struct kvm_arch {
 	u64 cur_tsc_write;
 	u64 cur_tsc_offset;
 	u8  cur_tsc_generation;
+	int nr_vcpus_matched_tsc;
+
+	spinlock_t pvclock_gtod_sync_lock;
+	bool use_master_clock;
+	u64 master_kernel_ns;
+	cycle_t master_cycle_now;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
@@ -612,6 +621,12 @@ struct kvm_vcpu_stat {
 
 struct x86_instruction_info;
 
+struct msr_data {
+	bool host_initiated;
+	u32 index;
+	u64 data;
+};
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -634,7 +649,7 @@ struct kvm_x86_ops {
 
 	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
-	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
@@ -697,10 +712,11 @@ struct kvm_x86_ops {
 	bool (*has_wbinvd_exit)(void);
 
 	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
+	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
-	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);
+	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
@@ -785,7 +801,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
 
@@ -812,7 +828,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
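
A hedged sketch of what the set_msr signature change means for callers: the MSR index and value are now wrapped in struct msr_data, so backends can distinguish host-initiated writes from guest writes. The wrapper function below is illustrative, not code from this merge:

/* Sketch: forwarding a guest WRMSR through the new interface. */
static int example_guest_wrmsr(struct kvm_vcpu *vcpu, u32 index, u64 value)
{
	struct msr_data msr = {
		.host_initiated = false,	/* write comes from the guest */
		.index = index,
		.data  = value,
	};

	return kvm_set_msr(vcpu, &msr);
}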
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e400cdb2dd65..6e930b218724 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -236,6 +236,7 @@
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
 #define MSR_EBC_FREQUENCY_ID		0x0000002c
 #define MSR_IA32_FEATURE_CONTROL        0x0000003a
+#define MSR_IA32_TSC_ADJUST             0x0000003b
 
 #define FEATURE_CONTROL_LOCKED				(1<<0)
 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX	(1<<1)
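
A hedged sketch of how the new MSR constant pairs with the X86_FEATURE_TSC_ADJUST bit added earlier in this series: writes to IA32_TSC_ADJUST shift the TSC by the written delta. The helper below is illustrative only:

#include <asm/msr.h>
#include <asm/cpufeature.h>

/* Sketch: nudge the local TSC via IA32_TSC_ADJUST when supported. */
static void example_tsc_adjust(s64 delta)
{
	u64 adj;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	rdmsrl(MSR_IA32_TSC_ADJUST, adj);
	wrmsrl(MSR_IA32_TSC_ADJUST, adj + delta);
}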
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index c59cc97fe6c1..109a9dd5d454 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -6,6 +6,7 @@
 
 /* some helper functions for xen and kvm pv clock sources */
 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
 void pvclock_set_flags(u8 flags);
 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
 void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
@@ -56,4 +57,50 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	return product;
 }
 
+static __always_inline
+u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
+{
+	u64 delta = __native_read_tsc() - src->tsc_timestamp;
+	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
+				   src->tsc_shift);
+}
+
+static __always_inline
+unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+			       cycle_t *cycles, u8 *flags)
+{
+	unsigned version;
+	cycle_t ret, offset;
+	u8 ret_flags;
+
+	version = src->version;
+	/* Note: emulated platforms which do not advertise SSE2 support
+	 * result in kvmclock not using the necessary RDTSC barriers.
+	 * Without barriers, it is possible that RDTSC instruction reads from
+	 * the time stamp counter outside rdtsc_barrier protected section
+	 * below, resulting in violation of monotonicity.
+	 */
+	rdtsc_barrier();
+	offset = pvclock_get_nsec_offset(src);
+	ret = src->system_time + offset;
+	ret_flags = src->flags;
+	rdtsc_barrier();
+
+	*cycles = ret;
+	*flags = ret_flags;
+	return version;
+}
+
+struct pvclock_vsyscall_time_info {
+	struct pvclock_vcpu_time_info pvti;
+	u32 migrate_count;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
+#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
+
+int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
+				 int size);
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
+
 #endif /* _ASM_X86_PVCLOCK_H */
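
Because __pvclock_read_cycles() samples the time info only once and returns the version it saw, callers are expected to retry while the hypervisor is mid-update (odd version) or the version changed underneath them. A hedged sketch of that loop; the wrapper name is illustrative and the real reader in pvclock.c may differ in detail:

/* Sketch: lock-free pvclock read with the usual seqcount-style retry. */
static cycle_t example_pvclock_read(struct pvclock_vcpu_time_info *src,
				    u8 *flags)
{
	unsigned version;
	cycle_t ret;

	do {
		version = __pvclock_read_cycles(src, &ret, flags);
		/* Odd or changed version: an update raced us, try again. */
	} while ((src->version & 1) || version != src->version);

	return ret;
}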
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 36ec21c36d68..c2d56b34830d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -445,8 +445,7 @@ enum vmcs_field {
 #define VMX_EPTP_WB_BIT				(1ull << 14)
 #define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
 #define VMX_EPT_1GB_PAGE_BIT			(1ull << 17)
 #define VMX_EPT_AD_BIT				(1ull << 21)
-#define VMX_EPT_EXTENT_INDIVIDUAL_BIT		(1ull << 24)
 #define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
 #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
 
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index eaea1d31f753..80f80955cfd8 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -33,6 +33,26 @@ extern void map_vsyscall(void);
  */
 extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
 
+#ifdef CONFIG_X86_64
+
+#define VGETCPU_CPU_MASK 0xfff
+
+static inline unsigned int __getcpu(void)
+{
+	unsigned int p;
+
+	if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
+		/* Load per CPU data from RDTSCP */
+		native_read_tscp(&p);
+	} else {
+		/* Load per CPU data from GDT */
+		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+	}
+
+	return p;
+}
+#endif /* CONFIG_X86_64 */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_VSYSCALL_H */
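
A hedged sketch of how a getcpu implementation could decode the value __getcpu() returns, using the VGETCPU_CPU_MASK defined above; the node shift and the function name are illustrative, and the canonical decoding lives in the vDSO/vsyscall getcpu code:

/* Sketch: split the RDTSCP/LSL encoding into CPU and NUMA node. */
static void example_decode_getcpu(unsigned *cpu, unsigned *node)
{
	unsigned int p = __getcpu();

	if (cpu)
		*cpu = p & VGETCPU_CPU_MASK;	/* low 12 bits: CPU number */
	if (node)
		*node = p >> 12;		/* upper bits: node (assumed layout) */
}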