author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-27 13:13:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-27 13:13:52 -0400
commit	42cadc86008aae0fd9ff31642dc01ed50723cf32 (patch)
tree	b05d4c8f0561bad5a0183a89fb23ce4c8ee1653c /include/asm-x86
parent	fba5c1af5c4fd6645fe62ea84ccde0981282cf66 (diff)
parent	66c0b394f08fd89236515c1c84485ea712a157be (diff)
Merge branch 'kvm-updates-2.6.26' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates-2.6.26' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (147 commits)
  KVM: kill file->f_count abuse in kvm
  KVM: MMU: kvm_pv_mmu_op should not take mmap_sem
  KVM: SVM: remove selective CR0 comment
  KVM: SVM: remove now obsolete FIXME comment
  KVM: SVM: disable CR8 intercept when tpr is not masking interrupts
  KVM: SVM: sync V_TPR with LAPIC.TPR if CR8 write intercept is disabled
  KVM: export kvm_lapic_set_tpr() to modules
  KVM: SVM: sync TPR value to V_TPR field in the VMCB
  KVM: ppc: PowerPC 440 KVM implementation
  KVM: Add MAINTAINERS entry for PowerPC KVM
  KVM: ppc: Add DCR access information to struct kvm_run
  ppc: Export tlb_44x_hwater for KVM
  KVM: Rename debugfs_dir to kvm_debugfs_dir
  KVM: x86 emulator: fix lea to really get the effective address
  KVM: x86 emulator: fix smsw and lmsw with a memory operand
  KVM: x86 emulator: initialize src.val and dst.val for register operands
  KVM: SVM: force a new asid when initializing the vmcb
  KVM: fix kvm_vcpu_kick vs __vcpu_run race
  KVM: add ioctls to save/store mpstate
  KVM: Rename VCPU_MP_STATE_* to KVM_MP_STATE_*
  ...
Diffstat (limited to 'include/asm-x86')
-rw-r--r--	include/asm-x86/kvm.h	41
-rw-r--r--	include/asm-x86/kvm_host.h	99
-rw-r--r--	include/asm-x86/kvm_para.h	55
-rw-r--r--	include/asm-x86/reboot.h	2
4 files changed, 183 insertions(+), 14 deletions(-)
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 7a71120426a3..80eefef2cc76 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -188,4 +188,45 @@ struct kvm_cpuid2 {
 	struct kvm_cpuid_entry2 entries[0];
 };
 
+/* for KVM_GET_PIT and KVM_SET_PIT */
+struct kvm_pit_channel_state {
+	__u32 count; /* can be 65536 */
+	__u16 latched_count;
+	__u8 count_latched;
+	__u8 status_latched;
+	__u8 status;
+	__u8 read_state;
+	__u8 write_state;
+	__u8 write_latch;
+	__u8 rw_mode;
+	__u8 mode;
+	__u8 bcd;
+	__u8 gate;
+	__s64 count_load_time;
+};
+
+struct kvm_pit_state {
+	struct kvm_pit_channel_state channels[3];
+};
+
+#define KVM_TRC_INJ_VIRQ         (KVM_TRC_HANDLER + 0x02)
+#define KVM_TRC_REDELIVER_EVT    (KVM_TRC_HANDLER + 0x03)
+#define KVM_TRC_PEND_INTR        (KVM_TRC_HANDLER + 0x04)
+#define KVM_TRC_IO_READ          (KVM_TRC_HANDLER + 0x05)
+#define KVM_TRC_IO_WRITE         (KVM_TRC_HANDLER + 0x06)
+#define KVM_TRC_CR_READ          (KVM_TRC_HANDLER + 0x07)
+#define KVM_TRC_CR_WRITE         (KVM_TRC_HANDLER + 0x08)
+#define KVM_TRC_DR_READ          (KVM_TRC_HANDLER + 0x09)
+#define KVM_TRC_DR_WRITE         (KVM_TRC_HANDLER + 0x0A)
+#define KVM_TRC_MSR_READ         (KVM_TRC_HANDLER + 0x0B)
+#define KVM_TRC_MSR_WRITE        (KVM_TRC_HANDLER + 0x0C)
+#define KVM_TRC_CPUID            (KVM_TRC_HANDLER + 0x0D)
+#define KVM_TRC_INTR             (KVM_TRC_HANDLER + 0x0E)
+#define KVM_TRC_NMI              (KVM_TRC_HANDLER + 0x0F)
+#define KVM_TRC_VMMCALL          (KVM_TRC_HANDLER + 0x10)
+#define KVM_TRC_HLT              (KVM_TRC_HANDLER + 0x11)
+#define KVM_TRC_CLTS             (KVM_TRC_HANDLER + 0x12)
+#define KVM_TRC_LMSW             (KVM_TRC_HANDLER + 0x13)
+#define KVM_TRC_APIC_ACCESS      (KVM_TRC_HANDLER + 0x14)
+
 #endif
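
The kvm_pit_channel_state/kvm_pit_state pair above is the userspace payload for the KVM_GET_PIT and KVM_SET_PIT vm ioctls named in the comment. A minimal sketch of reading it back, assuming vm_fd is a KVM VM file descriptor on which KVM_CREATE_PIT has already succeeded (illustrative snippet, not part of this patch):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Dump channel 0, the channel that drives the legacy timer tick. */
	static int dump_pit_channel0(int vm_fd)
	{
		struct kvm_pit_state pit;

		if (ioctl(vm_fd, KVM_GET_PIT, &pit) < 0)
			return -1;
		printf("ch0: count=%u mode=%u gate=%u\n",
		       pit.channels[0].count,
		       (unsigned)pit.channels[0].mode,
		       (unsigned)pit.channels[0].gate);
		return 0;
	}

The same struct is written back with KVM_SET_PIT, which is what lets a VMM save and restore in-kernel PIT state across migration.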
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 68ee390b2844..9d963cd6533c 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -20,6 +20,13 @@
 
 #include <asm/desc.h>
 
+#define KVM_MAX_VCPUS 16
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that does not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+#define KVM_PIO_PAGE_OFFSET 1
+
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
@@ -39,6 +46,13 @@
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
+/* shadow tables are PAE even on non-PAE hosts */
+#define KVM_HPAGE_SHIFT 21
+#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
+#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
+
+#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+
 #define DE_VECTOR 0
 #define UD_VECTOR 6
 #define NM_VECTOR 7
@@ -48,6 +62,7 @@
 #define SS_VECTOR 12
 #define GP_VECTOR 13
 #define PF_VECTOR 14
+#define MC_VECTOR 18
 
 #define SELECTOR_TI_MASK (1 << 2)
 #define SELECTOR_RPL_MASK 0x03
@@ -58,7 +73,8 @@
 
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
-#define KVM_NUM_MMU_PAGES 1024
+#define KVM_MMU_HASH_SHIFT 10
+#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
@@ -106,6 +122,12 @@ enum {
 
 #define KVM_NR_MEM_OBJS 40
 
+struct kvm_guest_debug {
+	int enabled;
+	unsigned long bp[4];
+	int singlestep;
+};
+
 /*
  * We don't want allocation failures within the mmu code, so we preallocate
  * enough memory for a single page fault in a cache.
@@ -140,6 +162,7 @@ union kvm_mmu_page_role {
 		unsigned pad_for_nice_hex_output:6;
 		unsigned metaphysical:1;
 		unsigned access:3;
+		unsigned invalid:1;
 	};
 };
 
@@ -204,11 +227,6 @@ struct kvm_vcpu_arch {
 	u64 shadow_efer;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE 0
-#define VCPU_MP_STATE_UNINITIALIZED 1
-#define VCPU_MP_STATE_INIT_RECEIVED 2
-#define VCPU_MP_STATE_SIPI_RECEIVED 3
-#define VCPU_MP_STATE_HALTED 4
 	int mp_state;
 	int sipi_vector;
 	u64 ia32_misc_enable_msr;
@@ -226,8 +244,9 @@ struct kvm_vcpu_arch {
 	u64 *last_pte_updated;
 
 	struct {
 		gfn_t gfn;	/* presumed gfn during guest pte update */
-		struct page *page;	/* page corresponding to that gfn */
+		pfn_t pfn;		/* pfn corresponding to that gfn */
+		int largepage;
 	} update_pte;
 
 	struct i387_fxsave_struct host_fx_image;
@@ -261,6 +280,11 @@ struct kvm_vcpu_arch {
 	/* emulate context */
 
 	struct x86_emulate_ctxt emulate_ctxt;
+
+	gpa_t time;
+	struct kvm_vcpu_time_info hv_clock;
+	unsigned int time_offset;
+	struct page *time_page;
 };
 
 struct kvm_mem_alias {
@@ -283,10 +307,13 @@ struct kvm_arch{
 	struct list_head active_mmu_pages;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
+	struct kvm_pit *vpit;
 
 	int round_robin_prev_vcpu;
 	unsigned int tss_addr;
 	struct page *apic_access_page;
+
+	gpa_t wall_clock;
 };
 
 struct kvm_vm_stat {
@@ -298,6 +325,7 @@ struct kvm_vm_stat {
 	u32 mmu_recycled;
 	u32 mmu_cache_miss;
 	u32 remote_tlb_flush;
+	u32 lpages;
 };
 
 struct kvm_vcpu_stat {
@@ -320,6 +348,7 @@ struct kvm_vcpu_stat {
 	u32 fpu_reload;
 	u32 insn_emulation;
 	u32 insn_emulation_fail;
+	u32 hypercalls;
 };
 
 struct descriptor_table {
@@ -355,6 +384,7 @@ struct kvm_x86_ops {
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
+	int (*get_cpl)(struct kvm_vcpu *vcpu);
 	void (*set_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -410,6 +440,15 @@ void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+			const void *val, int bytes);
+int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
+		  gpa_t addr, unsigned long *ret);
+
+extern bool tdp_enabled;
+
 enum emulation_result {
 	EMULATE_DONE,       /* no further processing */
 	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
@@ -429,6 +468,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
 		     unsigned long *rflags);
+void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
 
@@ -448,12 +488,14 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long value);
 
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
-unsigned long get_cr8(struct kvm_vcpu *vcpu);
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
@@ -491,6 +533,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
 
+void kvm_enable_tdp(void);
+
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
 
@@ -600,6 +644,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 #define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
 #define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
 #define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_INVVPID           ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
 
 #define MSR_IA32_TIME_STAMP_COUNTER	0x010
 
@@ -610,4 +655,30 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 #define RMODE_TSS_SIZE							\
 	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
+enum {
+	TASK_SWITCH_CALL = 0,
+	TASK_SWITCH_IRET = 1,
+	TASK_SWITCH_JMP = 2,
+	TASK_SWITCH_GATE = 3,
+};
+
+#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
+	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+						vcpu, 5, d1, d2, d3, d4, d5)
+#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
+	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+						vcpu, 4, d1, d2, d3, d4, 0)
+#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
+	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+						vcpu, 3, d1, d2, d3, 0, 0)
+#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
+	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+						vcpu, 2, d1, d2, 0, 0, 0)
+#define KVMTRACE_1D(evt, vcpu, d1, name) \
+	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+						vcpu, 1, d1, 0, 0, 0, 0)
+#define KVMTRACE_0D(evt, vcpu, name) \
+	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+						vcpu, 0, 0, 0, 0, 0, 0)
+
 #endif
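
The KVMTRACE_*D macros above fan out to a single trace_mark() marker per call site, encoding an event id from the KVM_TRC_* table in kvm.h plus up to five 32-bit payload words. An illustrative call site, assuming the kvm_trace_handler marker naming used by the arch exit handlers (a sketch, not copied from this patch):

	/* Record a CR read exit: event KVM_TRC_CR_READ, payload = CR number
	 * and the low 32 bits of the value read. */
	KVMTRACE_2D(CR_READ, vcpu, (u32)cr, (u32)val, handler);

The name argument is token-pasted into the marker name (kvm_trace_handler here), so a tracing consumer can subscribe per call-site class rather than per individual event.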
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index c6f3fd8d8c53..509845942070 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -10,10 +10,65 @@
  * paravirtualization, the appropriate feature bit should be checked.
  */
 #define KVM_CPUID_FEATURES	0x40000001
+#define KVM_FEATURE_CLOCKSOURCE		0
+#define KVM_FEATURE_NOP_IO_DELAY	1
+#define KVM_FEATURE_MMU_OP		2
+
+#define MSR_KVM_WALL_CLOCK  0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#define KVM_MAX_MMU_OP_BATCH	32
+
+/* Operations for KVM_HC_MMU_OP */
+#define KVM_MMU_OP_WRITE_PTE	1
+#define KVM_MMU_OP_FLUSH_TLB	2
+#define KVM_MMU_OP_RELEASE_PT	3
+
+/* Payload for KVM_HC_MMU_OP */
+struct kvm_mmu_op_header {
+	__u32 op;
+	__u32 pad;
+};
+
+struct kvm_mmu_op_write_pte {
+	struct kvm_mmu_op_header header;
+	__u64 pte_phys;
+	__u64 pte_val;
+};
+
+struct kvm_mmu_op_flush_tlb {
+	struct kvm_mmu_op_header header;
+};
+
+struct kvm_mmu_op_release_pt {
+	struct kvm_mmu_op_header header;
+	__u64 pt_phys;
+};
 
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
+/* xen binary-compatible interface. See xen headers for details */
+struct kvm_vcpu_time_info {
+	uint32_t version;
+	uint32_t pad0;
+	uint64_t tsc_timestamp;
+	uint64_t system_time;
+	uint32_t tsc_to_system_mul;
+	int8_t   tsc_shift;
+	int8_t   pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct kvm_wall_clock {
+	uint32_t wc_version;
+	uint32_t wc_sec;
+	uint32_t wc_nsec;
+} __attribute__((__packed__));
+
+
+extern void kvmclock_init(void);
+
+
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
  * trap that we will then rewrite to the appropriate instruction.
  */
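
kvm_vcpu_time_info mirrors the Xen vcpu_time_info layout, so guest time is recovered the same way: take the TSC delta since tsc_timestamp, pre-scale it by tsc_shift, multiply by the 32.32 fixed-point tsc_to_system_mul, and add the result to system_time. A sketch of that conversion under those assumptions (read_tsc() is a hypothetical stand-in for rdtsc, and the kernel uses its own 64x32-bit multiply helpers rather than __uint128_t):

	#include <stdint.h>

	extern uint64_t read_tsc(void); /* stand-in for rdtsc */

	static uint64_t guest_time_ns(const struct kvm_vcpu_time_info *ti)
	{
		uint64_t delta = read_tsc() - ti->tsc_timestamp;

		/* tsc_shift pre-scales the delta; negative values shift right */
		if (ti->tsc_shift >= 0)
			delta <<= ti->tsc_shift;
		else
			delta >>= -ti->tsc_shift;

		/* tsc_to_system_mul is a 32.32 fixed-point ns-per-cycle factor */
		return ti->system_time +
		       (uint64_t)(((__uint128_t)delta * ti->tsc_to_system_mul) >> 32);
	}

As in the Xen protocol, version is even while the record is stable; a reader that observes an odd version, or a version change across the read, should retry.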
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index 6b5233b4f84b..e63741f19392 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -15,5 +15,7 @@ struct machine_ops {
 extern struct machine_ops machine_ops;
 
 void machine_real_restart(unsigned char *code, int length);
+void native_machine_crash_shutdown(struct pt_regs *regs);
+void native_machine_shutdown(void);
 
 #endif	/* _ASM_REBOOT_H */