Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/debugreg.h     | 67
-rw-r--r--  arch/x86/include/asm/kgdb.h         | 10
-rw-r--r--  arch/x86/include/asm/kvm.h          |  4
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h  |  3
-rw-r--r--  arch/x86/include/asm/kvm_host.h     | 63
-rw-r--r--  arch/x86/include/asm/paravirt.h     |  1
-rw-r--r--  arch/x86/include/asm/perf_event.h   |  1
-rw-r--r--  arch/x86/include/asm/processor.h    | 63
-rw-r--r--  arch/x86/include/asm/tsc.h          |  4
-rw-r--r--  arch/x86/include/asm/x86_init.h     |  6
10 files changed, 135 insertions(+), 87 deletions(-)
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b903d5ea394..2d91580bf22 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -78,8 +78,75 @@
  */
 #ifdef __KERNEL__
 
+#include <linux/bug.h>
+
 DECLARE_PER_CPU(unsigned long, cpu_dr7);
 
+#ifndef CONFIG_PARAVIRT
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register)				\
+	(var) = native_get_debugreg(register)
+#define set_debugreg(value, register)				\
+	native_set_debugreg(register, value)
+#endif
+
+static inline unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0;	/* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("mov %%db0, %0" :"=r" (val));
+		break;
+	case 1:
+		asm("mov %%db1, %0" :"=r" (val));
+		break;
+	case 2:
+		asm("mov %%db2, %0" :"=r" (val));
+		break;
+	case 3:
+		asm("mov %%db3, %0" :"=r" (val));
+		break;
+	case 6:
+		asm("mov %%db6, %0" :"=r" (val));
+		break;
+	case 7:
+		asm("mov %%db7, %0" :"=r" (val));
+		break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static inline void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("mov %0, %%db0" ::"r" (value));
+		break;
+	case 1:
+		asm("mov %0, %%db1" ::"r" (value));
+		break;
+	case 2:
+		asm("mov %0, %%db2" ::"r" (value));
+		break;
+	case 3:
+		asm("mov %0, %%db3" ::"r" (value));
+		break;
+	case 6:
+		asm("mov %0, %%db6" ::"r" (value));
+		break;
+	case 7:
+		asm("mov %0, %%db7" ::"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
+
 static inline void hw_breakpoint_disable(void)
 {
 	/* Zero the control register for HW Breakpoint */
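
The native_{get,set}_debugreg() helpers move here from processor.h (see
the removals below), so debugreg.h is now self-contained. As a minimal
usage sketch — the caller and the DR7 bit chosen are illustrative
assumptions, not code from this patch:

	unsigned long dr7;

	get_debugreg(dr7, 7);		/* read DR7 into dr7 */
	dr7 |= 0x1;			/* assumed: set L0 to enable breakpoint 0 */
	set_debugreg(dr7, 7);		/* note: value first, register second */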
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 77e95f54570..332f98c9111 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -64,11 +64,15 @@ enum regnames {
 	GDB_PS,			/* 17 */
 	GDB_CS,			/* 18 */
 	GDB_SS,			/* 19 */
+	GDB_DS,			/* 20 */
+	GDB_ES,			/* 21 */
+	GDB_FS,			/* 22 */
+	GDB_GS,			/* 23 */
 };
 #define GDB_ORIG_AX		57
-#define DBG_MAX_REG_NUM		20
-/* 17 64 bit regs and 3 32 bit regs */
-#define NUMREGBYTES		((17 * 8) + (3 * 4))
+#define DBG_MAX_REG_NUM		24
+/* 17 64 bit regs and 5 32 bit regs */
+#define NUMREGBYTES		((17 * 8) + (5 * 4))
 #endif /* ! CONFIG_X86_32 */
 
 static inline void arch_kgdb_breakpoint(void)
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 4d8dcbdfc12..e7d1c194d27 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -321,4 +321,8 @@ struct kvm_xcrs {
 	__u64 padding[16];
 };
 
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
 #endif /* _ASM_X86_KVM_H */
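
kvm_sync_regs is an empty placeholder for registers mirrored between
kernel and userspace through the kvm_run area; architectures fill it in
as they opt into sync-reg support. A sketch of how such a struct is
typically embedded — the union and the padding size are assumptions for
illustration, not the actual kvm_run layout:

	union {
		struct kvm_sync_regs regs;
		char padding[1024];	/* reserve ABI space for future regs */
	} s;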
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7b9cfc4878a..c222e1a1b12 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -176,6 +176,7 @@ struct x86_emulate_ops {
 	void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
 	ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
 	int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
+	void (*set_rflags)(struct x86_emulate_ctxt *ctxt, ulong val);
 	int (*cpl)(struct x86_emulate_ctxt *ctxt);
 	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
 	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
@@ -388,7 +389,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_INTERCEPTED 2
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
-			 u16 tss_selector, int reason,
+			 u16 tss_selector, int idt_index, int reason,
 			 bool has_error_code, u32 error_code);
 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
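
The new idt_index parameter lets the emulator recheck the IDT gate when
a task switch originates from an interrupt or software int, rather than
a direct jmp/call to a TSS. A hedged sketch of the updated call —
assuming, as the KVM callers appear to, that -1 marks "no IDT entry
involved":

	rc = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
				  has_error_code, error_code);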
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 52d6640a5ca..e216ba066e7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -29,7 +29,7 @@
 #include <asm/msr-index.h>
 
 #define KVM_MAX_VCPUS 254
-#define KVM_SOFT_MAX_VCPUS 64
+#define KVM_SOFT_MAX_VCPUS 160
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
@@ -181,13 +181,6 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
-#define NR_PTE_CHAIN_ENTRIES 5
-
-struct kvm_pte_chain {
-	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
-	struct hlist_node link;
-};
-
 /*
  * kvm_mmu_page_role, below, is defined as:
  *
@@ -427,12 +420,16 @@ struct kvm_vcpu_arch {
 
 	u64 last_guest_tsc;
 	u64 last_kernel_ns;
-	u64 last_tsc_nsec;
-	u64 last_tsc_write;
-	u32 virtual_tsc_khz;
+	u64 last_host_tsc;
+	u64 tsc_offset_adjustment;
+	u64 this_tsc_nsec;
+	u64 this_tsc_write;
+	u8  this_tsc_generation;
 	bool tsc_catchup;
-	u32 tsc_catchup_mult;
-	s8 tsc_catchup_shift;
+	bool tsc_always_catchup;
+	s8 virtual_tsc_shift;
+	u32 virtual_tsc_mult;
+	u32 virtual_tsc_khz;
 
 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
 	unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -478,6 +475,21 @@ struct kvm_vcpu_arch {
 		u32 id;
 		bool send_user_only;
 	} apf;
+
+	/* OSVW MSRs (AMD only) */
+	struct {
+		u64 length;
+		u64 status;
+	} osvw;
+};
+
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
+struct kvm_arch_memory_slot {
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 };
 
 struct kvm_arch {
@@ -511,8 +523,12 @@ struct kvm_arch {
 	s64 kvmclock_offset;
 	raw_spinlock_t tsc_write_lock;
 	u64 last_tsc_nsec;
-	u64 last_tsc_offset;
 	u64 last_tsc_write;
+	u32 last_tsc_khz;
+	u64 cur_tsc_nsec;
+	u64 cur_tsc_write;
+	u64 cur_tsc_offset;
+	u8  cur_tsc_generation;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
@@ -644,7 +660,7 @@ struct kvm_x86_ops {
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
-	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -652,7 +668,7 @@ struct kvm_x86_ops {
 
 	bool (*has_wbinvd_exit)(void);
 
-	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
+	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
@@ -674,6 +690,17 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+					   s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
+}
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
@@ -741,8 +768,8 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
-		    bool has_error_code, u32 error_code);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
+		    int reason, bool has_error_code, u32 error_code);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
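
The guest/host wrapper pair maps onto the new bool argument of
adjust_tsc_offset(): guest-initiated adjustments count as guest TSC
writes, while host-initiated ones (e.g. compensating for time lost
across suspend) must not. A sketch of the intended host-side use,
assuming a resume path that consumes the new tsc_offset_adjustment
field:

	static void example_restore_tsc(struct kvm_vcpu *vcpu)
	{
		/* host correction: must not look like a guest TSC write */
		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
		vcpu->arch.tsc_offset_adjustment = 0;
	}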
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c0180fd372d..aa0f9130836 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -10,6 +10,7 @@
 #include <asm/paravirt_types.h>
 
 #ifndef __ASSEMBLY__
+#include <linux/bug.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index e8fb2c7a5f4..2291895b183 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -23,6 +23,7 @@
 #define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
 #define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
 #define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
 #define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
 #define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
 #define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
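
PIN_CONTROL fills the gap between EDGE (bit 18) and INT (bit 20) in the
architectural event-select layout. A sketch of composing a raw event
select with it — the event code 0x3c (unhalted core cycles) and the
counter-0 MSR are illustrative choices, not part of this patch:

	u64 evtsel = 0x3c			/* event code (example) */
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_PIN_CONTROL
		| ARCH_PERFMON_EVENTSEL_ENABLE;

	wrmsrl(MSR_P6_EVNTSEL0, evtsel);	/* assumed counter-0 MSR */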
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 78e30ea492b..a19542c1685 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -484,61 +484,6 @@ struct thread_struct {
 	unsigned		io_bitmap_max;
 };
 
-static inline unsigned long native_get_debugreg(int regno)
-{
-	unsigned long val = 0;	/* Damn you, gcc! */
-
-	switch (regno) {
-	case 0:
-		asm("mov %%db0, %0" :"=r" (val));
-		break;
-	case 1:
-		asm("mov %%db1, %0" :"=r" (val));
-		break;
-	case 2:
-		asm("mov %%db2, %0" :"=r" (val));
-		break;
-	case 3:
-		asm("mov %%db3, %0" :"=r" (val));
-		break;
-	case 6:
-		asm("mov %%db6, %0" :"=r" (val));
-		break;
-	case 7:
-		asm("mov %%db7, %0" :"=r" (val));
-		break;
-	default:
-		BUG();
-	}
-	return val;
-}
-
-static inline void native_set_debugreg(int regno, unsigned long value)
-{
-	switch (regno) {
-	case 0:
-		asm("mov %0, %%db0" ::"r" (value));
-		break;
-	case 1:
-		asm("mov %0, %%db1" ::"r" (value));
-		break;
-	case 2:
-		asm("mov %0, %%db2" ::"r" (value));
-		break;
-	case 3:
-		asm("mov %0, %%db3" ::"r" (value));
-		break;
-	case 6:
-		asm("mov %0, %%db6" ::"r" (value));
-		break;
-	case 7:
-		asm("mov %0, %%db7" ::"r" (value));
-		break;
-	default:
-		BUG();
-	}
-}
-
 /*
  * Set IOPL bits in EFLAGS from given mask
  */
@@ -584,14 +529,6 @@ static inline void native_swapgs(void)
 #define __cpuid			native_cpuid
 #define paravirt_enabled()	0
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register)				\
-	(var) = native_get_debugreg(register)
-#define set_debugreg(value, register)				\
-	native_set_debugreg(register, value)
-
 static inline void load_sp0(struct tss_struct *tss,
 			    struct thread_struct *thread)
 {
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 15d99153a96..c91e8b9d588 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -61,7 +61,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
-extern void save_sched_clock_state(void);
-extern void restore_sched_clock_state(void);
+extern void tsc_save_sched_clock_state(void);
+extern void tsc_restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 517d4767ffd..baaca8defec 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -145,9 +145,11 @@ struct x86_init_ops {
 /**
  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
  * @setup_percpu_clockev:	set up the per cpu clock event device
+ * @early_percpu_clock_init:	early init of the per cpu clock event device
  */
 struct x86_cpuinit_ops {
 	void (*setup_percpu_clockev)(void);
+	void (*early_percpu_clock_init)(void);
 	void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
 };
 
@@ -160,6 +162,8 @@ struct x86_cpuinit_ops {
  * @is_untracked_pat_range	exclude from PAT logic
  * @nmi_init			enable NMI on cpus
  * @i8042_detect		pre-detect if i8042 controller exists
+ * @save_sched_clock_state:	save state for sched_clock() on suspend
+ * @restore_sched_clock_state:	restore state for sched_clock() on resume
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
@@ -171,6 +175,8 @@ struct x86_platform_ops {
 	void (*nmi_init)(void);
 	unsigned char (*get_nmi_reason)(void);
 	int (*i8042_detect)(void);
+	void (*save_sched_clock_state)(void);
+	void (*restore_sched_clock_state)(void);
 };
 
 struct pci_dev;
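
Together with the tsc.h rename above, these hooks let a platform (e.g.
a paravirt guest) override what sched_clock() saves and restores across
suspend/resume. A sketch of the expected default wiring — member names
come from this header, the renamed helpers from asm/tsc.h, and the
remaining fields are elided:

	struct x86_platform_ops x86_platform = {
		/* ... other members elided ... */
		.save_sched_clock_state		= tsc_save_sched_clock_state,
		.restore_sched_clock_state	= tsc_restore_sched_clock_state,
	};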