Diffstat (limited to 'arch/x86/include')
-rw-r--r--   arch/x86/include/asm/acpi.h            |  2
-rw-r--r--   arch/x86/include/asm/cache.h           |  2
-rw-r--r--   arch/x86/include/asm/cpufeature.h      |  7
-rw-r--r--   arch/x86/include/asm/inst.h            | 96
-rw-r--r--   arch/x86/include/asm/intel_scu_ipc.h   | 55
-rw-r--r--   arch/x86/include/asm/kvm.h             | 17
-rw-r--r--   arch/x86/include/asm/kvm_emulate.h     | 46
-rw-r--r--   arch/x86/include/asm/kvm_host.h        | 80
-rw-r--r--   arch/x86/include/asm/kvm_para.h        | 13
-rw-r--r--   arch/x86/include/asm/mce.h             |  8
-rw-r--r--   arch/x86/include/asm/msr-index.h       |  8
-rw-r--r--   arch/x86/include/asm/pci_x86.h         |  4
-rw-r--r--   arch/x86/include/asm/perf_event_p4.h   |  3
-rw-r--r--   arch/x86/include/asm/pvclock-abi.h     |  4
-rw-r--r--   arch/x86/include/asm/pvclock.h         |  1
-rw-r--r--   arch/x86/include/asm/rdc321x_defs.h    | 12
-rw-r--r--   arch/x86/include/asm/scatterlist.h     |  5
-rw-r--r--   arch/x86/include/asm/suspend_32.h      |  2
-rw-r--r--   arch/x86/include/asm/suspend_64.h      |  2
-rw-r--r--   arch/x86/include/asm/svm.h             |  9
-rw-r--r--   arch/x86/include/asm/thread_info.h     |  4
-rw-r--r--   arch/x86/include/asm/topology.h        | 26
-rw-r--r--   arch/x86/include/asm/vmx.h             | 12
23 files changed, 316 insertions, 102 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 56f462cf22d2..aa2c39d968fc 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -85,7 +85,6 @@ extern int acpi_ioapic;
 extern int acpi_noirq;
 extern int acpi_strict;
 extern int acpi_disabled;
-extern int acpi_ht;
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
@@ -97,7 +96,6 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
 static inline void disable_acpi(void)
 {
 	acpi_disabled = 1;
-	acpi_ht = 0;
 	acpi_pci_disabled = 1;
 	acpi_noirq = 1;
 }
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 2f9047cfaaca..48f99f15452e 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,7 +7,7 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
 #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index dca9c545f44e..468145914389 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -332,6 +332,7 @@ static __always_inline __pure bool __static_cpu_has(u8 bit)
 #endif
 }
 
+#if __GNUC__ >= 4
 #define static_cpu_has(bit) \
 ( \
 	__builtin_constant_p(boot_cpu_has(bit)) ? \
@@ -340,6 +341,12 @@ static __always_inline __pure bool __static_cpu_has(u8 bit)
 		__static_cpu_has(bit) : \
 		boot_cpu_has(bit) \
 )
+#else
+/*
+ * gcc 3.x is too stupid to do the static test; fall back to dynamic.
+ */
+#define static_cpu_has(bit) boot_cpu_has(bit)
+#endif
 
 #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 
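As a usage illustration (not part of the patch): a caller tests a feature bit with static_cpu_has() exactly as with boot_cpu_has(); when the bit is a compile-time constant and gcc >= 4 is used, the test is resolved statically, otherwise it falls back to the dynamic check. The two copy helpers below are hypothetical; only static_cpu_has() and X86_FEATURE_XMM2 come from this header.

/* Sketch only: static test when possible, dynamic boot_cpu_has() otherwise. */
static void copy_page_example(void *dst, const void *src)
{
	if (static_cpu_has(X86_FEATURE_XMM2))
		sse2_copy_page(dst, src);	/* hypothetical helper */
	else
		generic_copy_page(dst, src);	/* hypothetical helper */
}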
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 14cf526091f9..280bf7fb6aba 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -7,7 +7,66 @@
 
 #ifdef __ASSEMBLY__
 
+#define REG_NUM_INVALID		100
+
+#define REG_TYPE_R64		0
+#define REG_TYPE_XMM		1
+#define REG_TYPE_INVALID	100
+
+	.macro R64_NUM opd r64
+	\opd = REG_NUM_INVALID
+	.ifc \r64,%rax
+	\opd = 0
+	.endif
+	.ifc \r64,%rcx
+	\opd = 1
+	.endif
+	.ifc \r64,%rdx
+	\opd = 2
+	.endif
+	.ifc \r64,%rbx
+	\opd = 3
+	.endif
+	.ifc \r64,%rsp
+	\opd = 4
+	.endif
+	.ifc \r64,%rbp
+	\opd = 5
+	.endif
+	.ifc \r64,%rsi
+	\opd = 6
+	.endif
+	.ifc \r64,%rdi
+	\opd = 7
+	.endif
+	.ifc \r64,%r8
+	\opd = 8
+	.endif
+	.ifc \r64,%r9
+	\opd = 9
+	.endif
+	.ifc \r64,%r10
+	\opd = 10
+	.endif
+	.ifc \r64,%r11
+	\opd = 11
+	.endif
+	.ifc \r64,%r12
+	\opd = 12
+	.endif
+	.ifc \r64,%r13
+	\opd = 13
+	.endif
+	.ifc \r64,%r14
+	\opd = 14
+	.endif
+	.ifc \r64,%r15
+	\opd = 15
+	.endif
+	.endm
+
 	.macro XMM_NUM opd xmm
+	\opd = REG_NUM_INVALID
 	.ifc \xmm,%xmm0
 	\opd = 0
 	.endif
@@ -58,13 +117,25 @@
 	.endif
 	.endm
 
+	.macro REG_TYPE type reg
+	R64_NUM reg_type_r64 \reg
+	XMM_NUM reg_type_xmm \reg
+	.if reg_type_r64 <> REG_NUM_INVALID
+	\type = REG_TYPE_R64
+	.elseif reg_type_xmm <> REG_NUM_INVALID
+	\type = REG_TYPE_XMM
+	.else
+	\type = REG_TYPE_INVALID
+	.endif
+	.endm
+
 	.macro PFX_OPD_SIZE
 	.byte 0x66
 	.endm
 
-	.macro PFX_REX opd1 opd2
-	.if (\opd1 | \opd2) & 8
-	.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1)
+	.macro PFX_REX opd1 opd2 W=0
+	.if ((\opd1 | \opd2) & 8) || \W
+	.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
 	.endif
 	.endm
 
@@ -145,6 +216,25 @@
 	.byte 0x0f, 0x38, 0xdf
 	MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
 	.endm
+
+	.macro MOVQ_R64_XMM opd1 opd2
+	REG_TYPE movq_r64_xmm_opd1_type \opd1
+	.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+	XMM_NUM movq_r64_xmm_opd1 \opd1
+	R64_NUM movq_r64_xmm_opd2 \opd2
+	.else
+	R64_NUM movq_r64_xmm_opd1 \opd1
+	XMM_NUM movq_r64_xmm_opd2 \opd2
+	.endif
+	PFX_OPD_SIZE
+	PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
+	.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+	.byte 0x0f, 0x7e
+	.else
+	.byte 0x0f, 0x6e
+	.endif
+	MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
+	.endm
 #endif
 
 #endif
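For orientation, the REX prefix byte that the PFX_REX macro above emits can be expressed in C as below. This is only an illustration of the encoding arithmetic visible in the macro, not code from the patch.

/* Mirror of PFX_REX: opd1/opd2 are the register numbers produced by
 * R64_NUM/XMM_NUM, w selects a 64-bit operand size (REX.W). */
static unsigned char rex_byte(unsigned int opd1, unsigned int opd2,
			      unsigned int w)
{
	return 0x40 | ((opd1 & 8) >> 3) | ((opd2 & 8) >> 1) | (w << 3);
}

/* Example: MOVQ_R64_XMM with opd1 = 2 (%xmm2) and opd2 = 9 (%r9), W = 1:
 * 0x40 | 0 | 4 | 8 = 0x4c. */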
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
new file mode 100644
index 000000000000..4470c9ad4a3e
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_X86_INTEL_SCU_IPC_H_
+#define _ASM_X86_INTEL_SCU_IPC_H_
+
+/* Read single register */
+int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+
+/* Read two sequential registers */
+int intel_scu_ipc_ioread16(u16 addr, u16 *data);
+
+/* Read four sequential registers */
+int intel_scu_ipc_ioread32(u16 addr, u32 *data);
+
+/* Read a vector */
+int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
+
+/* Write single register */
+int intel_scu_ipc_iowrite8(u16 addr, u8 data);
+
+/* Write two sequential registers */
+int intel_scu_ipc_iowrite16(u16 addr, u16 data);
+
+/* Write four sequential registers */
+int intel_scu_ipc_iowrite32(u16 addr, u32 data);
+
+/* Write a vector */
+int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+
+/* Update single register based on the mask */
+int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+
+/*
+ * Indirect register read
+ * Can be used when SCCB(System Controller Configuration Block) register
+ * HRIM(Honor Restricted IPC Messages) is set (bit 23)
+ */
+int intel_scu_ipc_register_read(u32 addr, u32 *data);
+
+/*
+ * Indirect register write
+ * Can be used when SCCB(System Controller Configuration Block) register
+ * HRIM(Honor Restricted IPC Messages) is set (bit 23)
+ */
+int intel_scu_ipc_register_write(u32 addr, u32 data);
+
+/* Issue commands to the SCU with or without data */
+int intel_scu_ipc_simple_command(int cmd, int sub);
+int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
+			  u32 *out, int outlen);
+/* I2C control api */
+int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
+
+/* Update FW version */
+int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
+
+#endif
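A minimal sketch of how a driver might consume the interface declared above. Only the two accessors are from this header; the register address 0x3b and the bit mask are made-up illustration values, not from the patch.

/* Read a register over SCU IPC, then set bit 0 without disturbing
 * the other bits (0x3b is a placeholder address). */
static int scu_ipc_example(void)
{
	u8 val;
	int err;

	err = intel_scu_ipc_ioread8(0x3b, &val);
	if (err)
		return err;
	if (val & 0x01)		/* already set */
		return 0;

	return intel_scu_ipc_update_register(0x3b, 0x01, 0x01);
}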
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index f46b79f6c16c..ff90055c7f0b 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -21,6 +21,7 @@
 #define __KVM_HAVE_PIT_STATE2
 #define __KVM_HAVE_XEN_HVM
 #define __KVM_HAVE_VCPU_EVENTS
+#define __KVM_HAVE_DEBUGREGS
 
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
@@ -257,6 +258,11 @@ struct kvm_reinject_control {
 /* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
 #define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
+#define KVM_VCPUEVENT_VALID_SHADOW	0x00000004
+
+/* Interrupt shadow states */
+#define KVM_X86_SHADOW_INT_MOV_SS	0x01
+#define KVM_X86_SHADOW_INT_STI		0x02
 
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
@@ -271,7 +277,7 @@ struct kvm_vcpu_events {
 		__u8 injected;
 		__u8 nr;
 		__u8 soft;
-		__u8 pad;
+		__u8 shadow;
 	} interrupt;
 	struct {
 		__u8 injected;
@@ -284,4 +290,13 @@ struct kvm_vcpu_events {
 	__u32 reserved[10];
 };
 
+/* for KVM_GET/SET_DEBUGREGS */
+struct kvm_debugregs {
+	__u64 db[4];
+	__u64 dr6;
+	__u64 dr7;
+	__u64 flags;
+	__u64 reserved[9];
+};
+
 #endif /* _ASM_X86_KVM_H */
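For context, a userspace sketch of how the new debug-register state block is meant to be fetched. It assumes the KVM_GET_DEBUGREGS vcpu ioctl that the "for KVM_GET/SET_DEBUGREGS" comment refers to, an already-created vcpu file descriptor, and <stdio.h>, <sys/ioctl.h> and <linux/kvm.h>; none of that setup is part of this patch.

static void example_dump_debugregs(int vcpu_fd)
{
	struct kvm_debugregs dbg;

	if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg) == 0)
		printf("dr6=%llx dr7=%llx flags=%llx\n",
		       (unsigned long long)dbg.dr6,
		       (unsigned long long)dbg.dr7,
		       (unsigned long long)dbg.flags);
}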
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7a6f54fa13ba..0b2729bf2070 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -11,6 +11,8 @@
 #ifndef _ASM_X86_KVM_X86_EMULATE_H
 #define _ASM_X86_KVM_X86_EMULATE_H
 
+#include <asm/desc_defs.h>
+
 struct x86_emulate_ctxt;
 
 /*
@@ -63,6 +65,15 @@ struct x86_emulate_ops {
 			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
 
 	/*
+	 * write_std: Write bytes of standard (non-emulated/special) memory.
+	 *            Used for descriptor writing.
+	 *  @addr:  [IN ] Linear address to which to write.
+	 *  @val:   [OUT] Value write to memory, zero-extended to 'u_long'.
+	 *  @bytes: [IN ] Number of bytes to write to memory.
+	 */
+	int (*write_std)(unsigned long addr, void *val,
+			 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+	/*
 	 * fetch: Read bytes of standard (non-emulated/special) memory.
 	 *        Used for instruction fetch.
 	 *  @addr:  [IN ] Linear address from which to read.
@@ -109,6 +120,23 @@ struct x86_emulate_ops {
 				unsigned int bytes,
 				struct kvm_vcpu *vcpu);
 
+	int (*pio_in_emulated)(int size, unsigned short port, void *val,
+			       unsigned int count, struct kvm_vcpu *vcpu);
+
+	int (*pio_out_emulated)(int size, unsigned short port, const void *val,
+				unsigned int count, struct kvm_vcpu *vcpu);
+
+	bool (*get_cached_descriptor)(struct desc_struct *desc,
+				      int seg, struct kvm_vcpu *vcpu);
+	void (*set_cached_descriptor)(struct desc_struct *desc,
+				      int seg, struct kvm_vcpu *vcpu);
+	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
+	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
+	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
+	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
+	void (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
+	int (*cpl)(struct kvm_vcpu *vcpu);
+	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
 };
 
 /* Type, address-of, and value of an instruction's operand. */
@@ -124,6 +152,12 @@ struct fetch_cache {
 	unsigned long end;
 };
 
+struct read_cache {
+	u8 data[1024];
+	unsigned long pos;
+	unsigned long end;
+};
+
 struct decode_cache {
 	u8 twobyte;
 	u8 b;
@@ -139,7 +173,7 @@ struct decode_cache {
 	u8 seg_override;
 	unsigned int d;
 	unsigned long regs[NR_VCPU_REGS];
-	unsigned long eip, eip_orig;
+	unsigned long eip;
 	/* modrm */
 	u8 modrm;
 	u8 modrm_mod;
@@ -151,16 +185,15 @@ struct decode_cache {
 	void *modrm_ptr;
 	unsigned long modrm_val;
 	struct fetch_cache fetch;
+	struct read_cache io_read;
 };
 
-#define X86_SHADOW_INT_MOV_SS  1
-#define X86_SHADOW_INT_STI     2
-
 struct x86_emulate_ctxt {
 	/* Register state before/after emulation. */
 	struct kvm_vcpu *vcpu;
 
 	unsigned long eflags;
+	unsigned long eip; /* eip before instruction emulation */
 	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
 	int mode;
 	u32 cs_base;
@@ -168,6 +201,7 @@ struct x86_emulate_ctxt {
 	/* interruptibility state, as a result of execution of STI or MOV SS */
 	int interruptibility;
 
+	bool restart; /* restart string instruction after writeback */
 	/* decode cache */
 	struct decode_cache decode;
 };
@@ -194,5 +228,9 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
 		    struct x86_emulate_ops *ops);
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
 		     struct x86_emulate_ops *ops);
+int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+			 struct x86_emulate_ops *ops,
+			 u16 tss_selector, int reason,
+			 bool has_error_code, u32 error_code);
 
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06d9e79ca37d..76f5483cffec 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -171,15 +171,15 @@ struct kvm_pte_chain {
 union kvm_mmu_page_role {
 	unsigned word;
 	struct {
-		unsigned glevels:4;
 		unsigned level:4;
+		unsigned cr4_pae:1;
 		unsigned quadrant:2;
 		unsigned pad_for_nice_hex_output:6;
 		unsigned direct:1;
 		unsigned access:3;
 		unsigned invalid:1;
-		unsigned cr4_pge:1;
 		unsigned nxe:1;
+		unsigned cr0_wp:1;
 	};
 };
 
@@ -187,8 +187,6 @@ struct kvm_mmu_page {
 	struct list_head link;
 	struct hlist_node hash_link;
 
-	struct list_head oos_link;
-
 	/*
 	 * The following two entries are used to key the shadow page in the
 	 * hash table.
@@ -204,9 +202,9 @@ struct kvm_mmu_page {
 	 * in this shadow page.
 	 */
 	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
-	int multimapped;         /* More than one parent_pte? */
-	int root_count;          /* Currently serving as active root */
+	bool multimapped;         /* More than one parent_pte? */
 	bool unsync;
+	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
 	union {
 		u64 *parent_pte;   /* !multimapped */
@@ -224,14 +222,9 @@ struct kvm_pv_mmu_op_buffer {
 
 struct kvm_pio_request {
 	unsigned long count;
-	int cur_count;
-	gva_t guest_gva;
 	int in;
 	int port;
 	int size;
-	int string;
-	int down;
-	int rep;
 };
 
 /*
@@ -320,6 +313,7 @@ struct kvm_vcpu_arch {
 	struct kvm_queued_exception {
 		bool pending;
 		bool has_error_code;
+		bool reinject;
 		u8 nr;
 		u32 error_code;
 	} exception;
@@ -362,8 +356,8 @@ struct kvm_vcpu_arch {
 	u64 *mce_banks;
 
 	/* used for guest single stepping over the given code position */
-	u16 singlestep_cs;
 	unsigned long singlestep_rip;
+
 	/* fields used by HYPER-V emulation */
 	u64 hv_vapic;
 };
@@ -389,6 +383,7 @@ struct kvm_arch {
 	unsigned int n_free_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_alloc_mmu_pages;
+	atomic_t invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.
@@ -461,11 +456,6 @@ struct kvm_vcpu_stat {
 	u32 nmi_injections;
 };
 
-struct descriptor_table {
-	u16 limit;
-	unsigned long base;
-} __attribute__((packed));
-
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -503,12 +493,11 @@ struct kvm_x86_ops {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
-	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-	int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
-	int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
+	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -527,7 +516,8 @@ struct kvm_x86_ops {
 	void (*set_irq)(struct kvm_vcpu *vcpu);
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code);
+				bool has_error_code, u32 error_code,
+				bool reinject);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -541,6 +531,8 @@ struct kvm_x86_ops {
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
 
+	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
@@ -587,23 +579,14 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
-void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
-		   unsigned long *rflags);
 
-unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
-void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
-		     unsigned long *rflags);
 void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
 
 struct x86_emulate_ctxt;
 
-int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in,
-		    int size, unsigned port);
-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
-			   int size, unsigned long count, int down,
-			   gva_t address, int rep, unsigned port);
+int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
@@ -616,12 +599,15 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+		    bool has_error_code, u32 error_code);
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -634,6 +620,8 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
 			   u32 error_code);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
@@ -649,8 +637,6 @@ int emulator_write_emulated(unsigned long addr,
 			    unsigned int bytes,
 			    struct kvm_vcpu *vcpu);
 
-unsigned long segment_base(u16 selector);
-
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes,
@@ -675,7 +661,6 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_enable_tdp(void);
 void kvm_disable_tdp(void);
 
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
@@ -724,23 +709,6 @@ static inline void kvm_load_ldt(u16 sel)
 	asm("lldt %0" : : "rm"(sel));
 }
 
-static inline void kvm_get_idt(struct descriptor_table *table)
-{
-	asm("sidt %0" : "=m"(*table));
-}
-
-static inline void kvm_get_gdt(struct descriptor_table *table)
-{
-	asm("sgdt %0" : "=m"(*table));
-}
-
-static inline unsigned long kvm_read_tr_base(void)
-{
-	u16 tr;
-	asm("str %0" : "=g"(tr));
-	return segment_base(tr);
-}
-
 #ifdef CONFIG_X86_64
 static inline unsigned long read_msr(unsigned long msr)
 {
@@ -826,4 +794,6 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_define_shared_msr(unsigned index, u32 msr);
 void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index ffae1420e7d7..05eba5e9a8e8 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -16,10 +16,23 @@
 #define KVM_FEATURE_CLOCKSOURCE		0
 #define KVM_FEATURE_NOP_IO_DELAY	1
 #define KVM_FEATURE_MMU_OP		2
+/* This indicates that the new set of kvmclock msrs
+ * are available. The use of 0x11 and 0x12 is deprecated
+ */
+#define KVM_FEATURE_CLOCKSOURCE2	3
+
+/* The last 8 bits are used to indicate how to interpret the flags field
+ * in pvclock structure. If no bits are set, all flags are ignored.
+ */
+#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT	24
 
 #define MSR_KVM_WALL_CLOCK  0x11
 #define MSR_KVM_SYSTEM_TIME 0x12
 
+/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
+#define MSR_KVM_WALL_CLOCK_NEW  0x4b564d00
+#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
+
 #define KVM_MAX_MMU_OP_BATCH           32
 
 /* Operations for KVM_HC_MMU_OP */
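A guest-side sketch of how the new definitions are meant to be used: keep the old MSR numbers as the default and switch to the new pair only when the host advertises KVM_FEATURE_CLOCKSOURCE2. Here "features" stands for the KVM paravirt feature bits (obtained via CPUID, outside this header); the function itself is illustrative, not part of the patch.

static void example_select_clock_msrs(unsigned int features,
				      u32 *system_time_msr,
				      u32 *wall_clock_msr)
{
	*system_time_msr = MSR_KVM_SYSTEM_TIME;
	*wall_clock_msr  = MSR_KVM_WALL_CLOCK;

	if (features & (1 << KVM_FEATURE_CLOCKSOURCE2)) {
		/* 0x11/0x12 are deprecated once the new MSRs exist */
		*system_time_msr = MSR_KVM_SYSTEM_TIME_NEW;
		*wall_clock_msr  = MSR_KVM_WALL_CLOCK_NEW;
	}
}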
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6c3fdd631ed3..f32a4301c4d4 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -225,5 +225,13 @@ extern void mcheck_intel_therm_init(void);
 static inline void mcheck_intel_therm_init(void) { }
 #endif
 
+/*
+ * Used by APEI to report memory error via /dev/mcelog
+ */
+
+struct cper_sec_mem_err;
+extern void apei_mce_report_mem_error(int corrected,
+				      struct cper_sec_mem_err *mem_err);
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index bc473acfa7f9..8c7ae4318629 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -110,6 +110,7 @@
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
+#define MSR_AMD64_DC_CFG		0xc0011022
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
@@ -202,8 +203,9 @@
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
 #define MSR_IA32_FEATURE_CONTROL	0x0000003a
 
-#define FEATURE_CONTROL_LOCKED		(1<<0)
-#define FEATURE_CONTROL_VMXON_ENABLED	(1<<2)
+#define FEATURE_CONTROL_LOCKED				(1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX	(1<<1)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX	(1<<2)
 
 #define MSR_IA32_APICBASE		0x0000001b
 #define MSR_IA32_APICBASE_BSP		(1<<8)
@@ -235,6 +237,8 @@
 
 #define MSR_IA32_MISC_ENABLE		0x000001a0
 
+#define MSR_IA32_TEMPERATURE_TARGET	0x000001a2
+
 /* MISC_ENABLE bits: architectural */
 #define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
 #define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
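For illustration of the renamed/added FEATURE_CONTROL bits: VMX code typically checks whether firmware has locked the MSR with VMXON forbidden outside SMX operation before trying to enable VMX. This is a sketch in the usual kernel style (rdmsrl is the standard MSR accessor), not code from this patch.

static int example_vmx_allowed(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	if ((msr & FEATURE_CONTROL_LOCKED) &&
	    !(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX))
		return -ENODEV;	/* locked by firmware with VMX off */
	return 0;
}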
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 1a0422348d6d..cd2a31dc5fb8 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -53,6 +53,8 @@ extern int pcibios_last_bus;
 extern struct pci_bus *pci_root_bus;
 extern struct pci_ops pci_root_ops;
 
+void pcibios_scan_specific_bus(int busn);
+
 /* pci-irq.c */
 
 struct irq_info {
@@ -83,7 +85,7 @@ struct irq_routing_table {
 
 extern unsigned int pcibios_irq_mask;
 
-extern spinlock_t pci_config_lock;
+extern raw_spinlock_t pci_config_lock;
 
 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index b05400a542ff..64a8ebff06fc 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -89,7 +89,8 @@
 		P4_CCCR_ENABLE)
 
 /* HT mask */
-#define P4_CCCR_MASK_HT	(P4_CCCR_MASK | P4_CCCR_THREAD_ANY)
+#define P4_CCCR_MASK_HT				\
+	(P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
 
 #define P4_GEN_ESCR_EMASK(class, name, bit)	\
 	class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 6d93508f2626..35f2d1948ada 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -29,7 +29,8 @@ struct pvclock_vcpu_time_info {
 	u64   system_time;
 	u32   tsc_to_system_mul;
 	s8    tsc_shift;
-	u8    pad[3];
+	u8    flags;
+	u8    pad[2];
 } __attribute__((__packed__)); /* 32 bytes */
 
 struct pvclock_wall_clock {
@@ -38,5 +39,6 @@ struct pvclock_wall_clock {
 	u32   nsec;
 } __attribute__((__packed__));
 
+#define PVCLOCK_TSC_STABLE_BIT	(1 << 0)
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PVCLOCK_ABI_H */
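A small sketch of how the new flags byte is intended to be consumed by guest code; only the flags field and PVCLOCK_TSC_STABLE_BIT come from this header, the helper itself is hypothetical.

static bool example_clock_is_stable(const struct pvclock_vcpu_time_info *src)
{
	/* set by the hypervisor to mark its paravirt clock as stable */
	return src->flags & PVCLOCK_TSC_STABLE_BIT;
}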
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 53235fd5f8ce..cd02f324aa6b 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -6,6 +6,7 @@
 
 /* some helper functions for xen and kvm pv clock sources */
 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+void pvclock_set_flags(u8 flags);
 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
 void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
 			    struct pvclock_vcpu_time_info *vcpu,
diff --git a/arch/x86/include/asm/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
deleted file mode 100644
index c8e9c8bed3d0..000000000000
--- a/arch/x86/include/asm/rdc321x_defs.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#define PFX	"rdc321x: "
-
-/* General purpose configuration and data registers */
-#define RDC3210_CFGREG_ADDR     0x0CF8
-#define RDC3210_CFGREG_DATA     0x0CFC
-
-#define RDC321X_GPIO_CTRL_REG1	0x48
-#define RDC321X_GPIO_CTRL_REG2	0x84
-#define RDC321X_GPIO_DATA_REG1	0x4c
-#define RDC321X_GPIO_DATA_REG2	0x88
-
-#define RDC321X_MAX_GPIO	58
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 75af592677ec..fb0b1874396f 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
 #ifndef _ASM_X86_SCATTERLIST_H
 #define _ASM_X86_SCATTERLIST_H
 
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
 #include <asm-generic/scatterlist.h>
 
+#define ISA_DMA_THRESHOLD (0x00ffffff)
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 48dcfa62ea07..fd921c3a6841 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,6 +15,8 @@ static inline int arch_prepare_suspend(void) { return 0; }
 struct saved_context {
 	u16 es, fs, gs, ss;
 	unsigned long cr0, cr2, cr3, cr4;
+	u64 misc_enable;
+	bool misc_enable_saved;
 	struct desc_ptr gdt;
 	struct desc_ptr idt;
 	u16 ldt;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 06284f42b759..8d942afae681 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -27,6 +27,8 @@ struct saved_context {
 	u16 ds, es, fs, gs, ss;
 	unsigned long gs_base, gs_kernel_base, fs_base;
 	unsigned long cr0, cr2, cr3, cr4, cr8;
+	u64 misc_enable;
+	bool misc_enable_saved;
 	unsigned long efer;
 	u16 gdt_pad;
 	u16 gdt_limit;
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 38638cd2fa4c..0e831059ac5a 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 event_inj_err;
 	u64 nested_cr3;
 	u64 lbr_ctl;
-	u8 reserved_5[832];
+	u64 reserved_5;
+	u64 next_rip;
+	u8 reserved_6[816];
 };
 
 
@@ -115,6 +117,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
 #define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
 
+#define SVM_VM_CR_VALID_MASK	0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK  0x0010ULL
+
 struct __attribute__ ((__packed__)) vmcb_seg {
 	u16 selector;
 	u16 attrib;
@@ -238,6 +244,7 @@ struct __attribute__ ((__packed__)) vmcb {
 
 #define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
 #define SVM_EXIT_READ_CR0	0x000
 #define SVM_EXIT_READ_CR3	0x003
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 62ba9400cc43..f0b6e5dbc5a0 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -239,8 +239,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TS_USEDFPU		0x0001	/* FPU was used by this task
 					   this quantum (SMP) */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
-#define TS_POLLING		0x0004	/* true if in idle loop
					   and not sleeping */
+#define TS_POLLING		0x0004	/* idle task polling need_resched,
					   skip sending interrupt */
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c5087d796587..21899cc31e52 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -53,33 +53,29 @@
 extern int cpu_to_node_map[];
 
 /* Returns the number of the node containing CPU 'cpu' */
-static inline int cpu_to_node(int cpu)
+static inline int __cpu_to_node(int cpu)
 {
 	return cpu_to_node_map[cpu];
 }
-#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
+#define early_cpu_to_node __cpu_to_node
+#define cpu_to_node __cpu_to_node
 
 #else /* CONFIG_X86_64 */
 
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
-/* Returns the number of the current Node. */
-DECLARE_PER_CPU(int, node_number);
-#define numa_node_id()		percpu_read(node_number)
-
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
-extern int cpu_to_node(int cpu);
+/*
+ * override generic percpu implementation of cpu_to_node
+ */
+extern int __cpu_to_node(int cpu);
+#define cpu_to_node __cpu_to_node
+
 extern int early_cpu_to_node(int cpu);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-/* Returns the number of the node containing CPU 'cpu' */
-static inline int cpu_to_node(int cpu)
-{
-	return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
 /* Same function but used if called before per_cpu areas are setup */
 static inline int early_cpu_to_node(int cpu)
 {
@@ -170,6 +166,10 @@ static inline int numa_node_id(void)
 {
 	return 0;
 }
+/*
+ * indicate override:
+ */
+#define numa_node_id numa_node_id
 
 static inline int early_cpu_to_node(int cpu)
 {
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index fb9a080740ec..9e6779f7cf2d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,8 @@
  *
  */
 
+#include <linux/types.h>
+
 /*
  * Definitions of Primary Processor-Based VM-Execution Controls.
  */
@@ -120,6 +122,8 @@ enum vmcs_field {
 	GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
 	GUEST_IA32_PAT			= 0x00002804,
 	GUEST_IA32_PAT_HIGH		= 0x00002805,
+	GUEST_IA32_EFER			= 0x00002806,
+	GUEST_IA32_EFER_HIGH		= 0x00002807,
 	GUEST_PDPTR0                    = 0x0000280a,
 	GUEST_PDPTR0_HIGH               = 0x0000280b,
 	GUEST_PDPTR1                    = 0x0000280c,
@@ -130,6 +134,8 @@ enum vmcs_field {
 	GUEST_PDPTR3_HIGH               = 0x00002811,
 	HOST_IA32_PAT			= 0x00002c00,
 	HOST_IA32_PAT_HIGH		= 0x00002c01,
+	HOST_IA32_EFER			= 0x00002c02,
+	HOST_IA32_EFER_HIGH		= 0x00002c03,
 	PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
 	CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
 	EXCEPTION_BITMAP                = 0x00004004,
@@ -394,6 +400,10 @@ enum vmcs_field {
 #define ASM_VMX_INVEPT		  ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
 #define ASM_VMX_INVVPID		  ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
 
-
+struct vmx_msr_entry {
+	u32 index;
+	u32 reserved;
+	u64 value;
+} __aligned(16);
 
 #endif