Diffstat (limited to 'arch/x86/include')
33 files changed, 1469 insertions, 195 deletions
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ac302a2fa339..95c8cd9d22b5 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -190,16 +190,23 @@
 /* FIXME: move this macro to <linux/pci.h> */
 #define PCI_BUS(x) (((x) >> 8) & 0xff)
 
+/* Protection domain flags */
+#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
+#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
+                                      domain for an IOMMU */
+
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
  */
 struct protection_domain {
 	spinlock_t lock; /* mostly used to lock the page table*/
 	u16 id; /* the domain id written to the device table */
 	int mode; /* paging mode (0-6 levels) */
 	u64 *pt_root; /* page table root pointer */
-	void *priv; /* private data */
+	unsigned long flags; /* flags to find out type of domain */
+	unsigned dev_cnt; /* devices assigned to this domain */
+	void *priv; /* private data */
 };
 
 /*
@@ -295,7 +302,7 @@ struct amd_iommu {
 	bool int_enabled;
 
 	/* if one, we need to send a completion wait command */
-	int need_sync;
+	bool need_sync;
 
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
@@ -374,7 +381,7 @@ extern struct protection_domain **amd_iommu_pd_table;
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
 /* will be 1 if device isolation is enabled */
-extern int amd_iommu_isolate;
+extern bool amd_iommu_isolate;
 
 /*
  * If true, the addresses will be flushed on unmap time, not when
@@ -382,18 +389,6 @@ extern int amd_iommu_isolate;
  */
 extern bool amd_iommu_unmap_flush;
 
-/* takes a PCI device id and prints it out in a readable form */
-static inline void print_devid(u16 devid, int nl)
-{
-	int bus = devid >> 8;
-	int dev = devid >> 3 & 0x1f;
-	int fn = devid & 0x07;
-
-	printk("%02x:%02x.%x", bus, dev, fn);
-	if (nl)
-		printk("\n");
-}
-
 /* takes bus and device/function and returns the device id
  * FIXME: should that be in generic PCI code? */
 static inline u16 calc_devid(u8 bus, u8 devfn)
@@ -401,4 +396,32 @@ static inline u16 calc_devid(u8 bus, u8 devfn)
 	return (((u16)bus) << 8) | devfn;
 }
 
+#ifdef CONFIG_AMD_IOMMU_STATS
+
+struct __iommu_counter {
+	char *name;
+	struct dentry *dent;
+	u64 value;
+};
+
+#define DECLARE_STATS_COUNTER(nm) \
+	static struct __iommu_counter nm = { \
+		.name = #nm, \
+	}
+
+#define INC_STATS_COUNTER(name) name.value += 1
+#define ADD_STATS_COUNTER(name, x) name.value += (x)
+#define SUB_STATS_COUNTER(name, x) name.value -= (x)
+
+#else /* CONFIG_AMD_IOMMU_STATS */
+
+#define DECLARE_STATS_COUNTER(name)
+#define INC_STATS_COUNTER(name)
+#define ADD_STATS_COUNTER(name, x)
+#define SUB_STATS_COUNTER(name, x)
+
+static inline void amd_iommu_stats_init(void) { }
+
+#endif /* CONFIG_AMD_IOMMU_STATS */
+
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
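The counters these macros declare are plain file-scoped structs, so each .c file owns the counters it declares, and every accessor compiles to nothing when CONFIG_AMD_IOMMU_STATS is off. A minimal usage sketch (the counter names and the surrounding function are illustrative assumptions, not code from this commit):

	/* Hypothetical driver file consuming the macros above. */
	DECLARE_STATS_COUNTER(cnt_map_single);	/* expands to a static struct __iommu_counter */
	DECLARE_STATS_COUNTER(bytes_mapped);

	static void account_map(size_t size)
	{
		INC_STATS_COUNTER(cnt_map_single);	/* one more mapping */
		ADD_STATS_COUNTER(bytes_mapped, size);	/* running byte total */
	}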
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 25caa0738af5..ab1d51a8855e 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -54,7 +54,6 @@ extern int disable_apic;
 extern int is_vsmp_box(void);
 extern void xapic_wait_icr_idle(void);
 extern u32 safe_xapic_wait_icr_idle(void);
-extern u64 xapic_icr_read(void);
 extern void xapic_icr_write(u32, u32);
 extern int setup_profiling_timer(unsigned int);
 
@@ -93,7 +92,7 @@ static inline u32 native_apic_msr_read(u32 reg)
 }
 
 #ifndef CONFIG_X86_32
-extern int x2apic, x2apic_preenabled;
+extern int x2apic;
 extern void check_x2apic(void);
 extern void enable_x2apic(void);
 extern void enable_IR_x2apic(void);
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
index ce547f24a1cd..d8dd9f537911 100644
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ b/arch/x86/include/asm/bigsmp/apic.h
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
 
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
 	return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return cpu_physical_id(cpu);
 }
@@ -119,16 +119,34 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 	int apicid;
 
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	if (cpu < nr_cpu_ids)
+		return cpu_to_logical_apicid(cpu);
+
+	return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
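The new cpu_mask_to_apicid_and() picks the first online CPU in the intersection of two masks, returning BAD_APICID when the intersection is empty. The selection logic can be sanity-checked with a standalone word-sized analogue (plain C; all mask values are made up for the example):

	#include <stdio.h>

	/* Analogue of the and-mask scan: first bit set in all three masks, else -1. */
	static int first_cpu_and(unsigned cpumask, unsigned andmask, unsigned online)
	{
		unsigned both = cpumask & andmask & online;

		for (int cpu = 0; cpu < 32; cpu++)
			if (both & (1u << cpu))
				return cpu;
		return -1;	/* the kernel version returns BAD_APICID here */
	}

	int main(void)
	{
		/* IRQ mask {1,3}, affinity {3,4}, online {0,3,4} -> CPU 3 */
		printf("%d\n", first_cpu_and(0x0a, 0x18, 0x19));
		return 0;
	}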
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
index 9404c535b7ec..27fcd01b3ae6 100644
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ b/arch/x86/include/asm/bigsmp/ipi.h
@@ -1,25 +1,22 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e6b82b17b072..dc27705f5443 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -320,16 +320,14 @@ static inline void set_intr_gate(unsigned int n, void *addr)
 	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
 }
 
-#define SYS_VECTOR_FREE 0
-#define SYS_VECTOR_ALLOCED 1
-
 extern int first_system_vector;
-extern char system_vectors[];
+/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
+extern unsigned long used_vectors[];
 
 static inline void alloc_system_vector(int vector)
 {
-	if (system_vectors[vector] == SYS_VECTOR_FREE) {
-		system_vectors[vector] = SYS_VECTOR_ALLOCED;
+	if (!test_bit(vector, used_vectors)) {
+		set_bit(vector, used_vectors);
 		if (first_system_vector > vector)
 			first_system_vector = vector;
 	} else
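Switching from the system_vectors[] char array to the used_vectors bitmap stores one bit per vector instead of one byte and reuses the kernel's generic test_bit()/set_bit() helpers. A standalone sketch of the same claim-if-free pattern (plain C; the in-kernel bit helpers additionally come in atomic flavors):

	#include <limits.h>

	#define NR_VECTORS 256
	#define LONG_BITS (sizeof(long) * CHAR_BIT)

	static unsigned long used_vectors[(NR_VECTORS + LONG_BITS - 1) / LONG_BITS];

	/* Returns 1 if the vector was free and has now been marked used. */
	static int claim_vector(int vector)
	{
		unsigned long bit = 1UL << (vector % LONG_BITS);
		unsigned long *word = &used_vectors[vector / LONG_BITS];

		if (*word & bit)
			return 0;	/* already allocated */
		*word |= bit;
		return 1;
	}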
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index a2e545c91c35..ca5ffb2856b6 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
 
 #endif /* CONFIG_X86_32 */
 
+extern int add_efi_memmap;
 extern void efi_reserve_early(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index e24ef876915f..bc53d5ef1386 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return cpumask_of_cpu(smp_processor_id());
+	return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+		"Physical Cluster" : "Logical Cluster",
+		nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (!mps_cpu)
 		return boot_cpu_physical_apicid;
-	else if (mps_cpu < NR_CPUS)
+	else if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
 #endif
@@ -146,25 +147,26 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -179,25 +181,25 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return cpu_to_logical_apicid(0);
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -212,6 +214,24 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	return apicid;
 }
 
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
+						  const struct cpumask *andmask)
+{
+	int apicid = cpu_to_logical_apicid(0);
+	cpumask_var_t cpumask;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return apicid;
+
+	cpumask_and(cpumask, inmask, andmask);
+	cpumask_and(cpumask, cpumask, cpu_online_mask);
+	apicid = cpu_mask_to_apicid(cpumask);
+
+	free_cpumask_var(cpumask);
+	return apicid;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
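Unlike the word-sized masks in mach-default, es7000's and-variant builds the intersection in a cpumask_var_t temporary (heap-allocated on CONFIG_CPUMASK_OFFSTACK builds): with large CONFIG_NR_CPUS, an on-stack cpumask_t is exactly what this whole commit is trying to eliminate. The cost it avoids is easy to compute (standalone C; 4096 picked as a plausible large NR_CPUS):

	#include <stdio.h>

	int main(void)
	{
		int nr_cpus = 4096;	/* a large CONFIG_NR_CPUS build */

		/* one bit per CPU -> 512 bytes, too fat for a kernel stack frame */
		printf("cpumask_t would be %d bytes\n", nr_cpus / 8);
		return 0;
	}

Hence the alloc_cpumask_var()/free_cpumask_var() pair with GFP_ATOMIC, and a conservative fallback (CPU 0's logical APIC ID) when the allocation fails.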
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
index 632a955fcc0a..7e8ed24d4b8a 100644
--- a/arch/x86/include/asm/es7000/ipi.h
+++ b/arch/x86/include/asm/es7000/ipi.h
@@ -1,24 +1,22 @@
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 0ac17d33a8c7..746f37a7963a 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -24,7 +24,7 @@ struct genapic {
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -57,12 +57,16 @@ struct genapic {
 
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
@@ -114,6 +118,7 @@ struct genapic {
 	APICFUNC(get_apic_id) \
 	.apic_id_mask = APIC_ID_MASK, \
 	APICFUNC(cpu_mask_to_apicid) \
+	APICFUNC(cpu_mask_to_apicid_and) \
 	APICFUNC(vector_allocation_domain) \
 	APICFUNC(acpi_madt_oem_check) \
 	IPIFUNC(send_IPI_mask) \
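APICFUNC() in this initializer template expands (in this era's genapic_32.h) to a designated initializer of the form .x = x, so adding APICFUNC(cpu_mask_to_apicid_and) here is what obliges every subarch header in this commit to supply a same-named helper. The trick in miniature (standalone C with an invented struct and functions):

	/* Sketch of the designated-initializer wiring used by the genapic table. */
	struct ops {
		int (*probe)(void);
		int (*setup)(void);
	};

	#define OPSFUNC(x) .x = x,

	static int probe(void) { return 0; }
	static int setup(void) { return 1; }

	static struct ops my_ops = {
		OPSFUNC(probe)
		OPSFUNC(setup)
	};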
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
index 2cae011668b7..adf32fb56aa6 100644
--- a/arch/x86/include/asm/genapic_64.h
+++ b/arch/x86/include/asm/genapic_64.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_GENAPIC_64_H
 #define _ASM_X86_GENAPIC_64_H
 
+#include <linux/cpumask.h>
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -18,16 +20,20 @@ struct genapic {
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	const struct cpumask *(*target_cpus)(void);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
 	unsigned int (*phys_pkg_id)(int index_msb);
 	unsigned int (*get_apic_id)(unsigned long x);
 	unsigned long (*set_apic_id)(unsigned int id);
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index f89dffb28aa9..c745a306f7d3 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+					  int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -128,11 +129,29 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, mask) {
+	for_each_cpu(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */
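send_IPI_mask_allbutself() folds "all but self" into the walk itself: rather than copying the online map and clearing the local CPU (the pattern just deleted from the subarch ipi.h headers), it iterates the caller's mask and skips smp_processor_id(). The same shape as a standalone sketch (send_one() is a made-up stand-in for __send_IPI_dest_field(); 64-bit mask words assumed):

	/* Walk a bitmap of CPUs, skipping the calling CPU; no mask copy needed. */
	static void send_allbutself(const unsigned long *mask, int nbits,
				    int this_cpu, void (*send_one)(int cpu))
	{
		for (int cpu = 0; cpu < nbits; cpu++)
			if (cpu != this_cpu && (mask[cpu / 64] >> (cpu % 64)) & 1)
				send_one(cpu);
	}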
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 28e409fc73f3..592688ed04d3 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -33,7 +33,7 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(void);
 #endif
 
 extern unsigned int do_IRQ(struct pt_regs *regs);
@@ -42,5 +42,6 @@ extern void native_init_IRQ(void);
 
 /* Interrupt vector management */
 extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
+extern int vector_used_by_percpu_irq(unsigned int vector);
 
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8346be87cfa1..730843d1d2fb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -21,6 +21,7 @@
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
+#include <asm/mtrr.h>
 
 #define KVM_MAX_VCPUS 16
 #define KVM_MEMORY_SLOTS 32
@@ -86,6 +87,7 @@
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
 extern spinlock_t kvm_lock;
@@ -180,6 +182,8 @@ struct kvm_mmu_page {
 	struct list_head link;
 	struct hlist_node hash_link;
 
+	struct list_head oos_link;
+
 	/*
 	 * The following two entries are used to key the shadow page in the
 	 * hash table.
@@ -190,13 +194,16 @@ struct kvm_mmu_page {
 	u64 *spt;
 	/* hold the gfn of each spte inside spt */
 	gfn_t *gfns;
-	unsigned long slot_bitmap; /* One bit set per slot which has memory
-				    * in this shadow page.
-				    */
+	/*
+	 * One bit set per slot which has memory
+	 * in this shadow page.
+	 */
+	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	int multimapped; /* More than one parent_pte? */
 	int root_count; /* Currently serving as active root */
 	bool unsync;
-	bool unsync_children;
+	bool global;
+	unsigned int unsync_children;
 	union {
 		u64 *parent_pte; /* !multimapped */
 		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
@@ -327,8 +334,10 @@ struct kvm_vcpu_arch {
 
 	bool nmi_pending;
 	bool nmi_injected;
+	bool nmi_window_open;
 
-	u64 mtrr[0x100];
+	struct mtrr_state_type mtrr_state;
+	u32 pat;
 };
 
 struct kvm_mem_alias {
@@ -350,11 +359,13 @@ struct kvm_arch{
 	 */
 	struct list_head active_mmu_pages;
 	struct list_head assigned_dev_head;
-	struct dmar_domain *intel_iommu_domain;
+	struct list_head oos_global_pages;
+	struct iommu_domain *iommu_domain;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
 	struct hlist_head irq_ack_notifier_list;
+	int vapics_in_nmi_mode;
 
 	int round_robin_prev_vcpu;
 	unsigned int tss_addr;
@@ -378,6 +389,7 @@ struct kvm_vm_stat {
 	u32 mmu_recycled;
 	u32 mmu_cache_miss;
 	u32 mmu_unsync;
+	u32 mmu_unsync_global;
 	u32 remote_tlb_flush;
 	u32 lpages;
 };
@@ -397,6 +409,7 @@ struct kvm_vcpu_stat {
 	u32 halt_exits;
 	u32 halt_wakeup;
 	u32 request_irq_exits;
+	u32 request_nmi_exits;
 	u32 irq_exits;
 	u32 host_state_reload;
 	u32 efer_reload;
@@ -405,6 +418,7 @@ struct kvm_vcpu_stat {
 	u32 insn_emulation_fail;
 	u32 hypercalls;
 	u32 irq_injections;
+	u32 nmi_injections;
 };
 
 struct descriptor_table {
@@ -477,6 +491,7 @@ struct kvm_x86_ops {
 
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
+	int (*get_mt_mask_shift)(void);
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -490,7 +505,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask);
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
@@ -587,12 +602,14 @@ unsigned long segment_base(u16 selector);
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-		       const u8 *new, int bytes);
+		       const u8 *new, int bytes,
+		       bool guest_initiated);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_global(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
@@ -607,6 +624,8 @@ void kvm_disable_tdp(void);
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
 
+struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
+
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -702,18 +721,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
 }
 
-#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
-#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
-#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
-#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
-#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
-#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
-#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
-#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
-
 #define MSR_IA32_TIME_STAMP_COUNTER 0x010
 
 #define TSS_IOPB_BASE_OFFSET 0x66
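The slot_bitmap change is the same bitmap idiom as elsewhere in this series: DECLARE_BITMAP() is just an array-of-unsigned-long declaration sized to hold the requested number of bits, so the per-shadow-page slot tracking no longer caps silently at BITS_PER_LONG memory slots. Its expansion is roughly:

	/* Approximately what <linux/types.h> provides: */
	#define BITS_PER_LONG (sizeof(long) * 8)
	#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
	#define DECLARE_BITMAP(name, bits) \
		unsigned long name[BITS_TO_LONGS(bits)]

	/* KVM_MEMORY_SLOTS is 32 in this header; with KVM_PRIVATE_MEM_SLOTS
	 * added, the total may still fit one 64-bit word, but the code no
	 * longer depends on that. */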
diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h
index 25179a29f208..6a159732881a 100644
--- a/arch/x86/include/asm/kvm_x86_emulate.h
+++ b/arch/x86/include/asm/kvm_x86_emulate.h
@@ -123,6 +123,7 @@ struct decode_cache {
 	u8 ad_bytes;
 	u8 rex_prefix;
 	struct operand src;
+	struct operand src2;
 	struct operand dst;
 	bool has_seg_override;
 	u8 seg_override;
@@ -146,22 +147,18 @@ struct x86_emulate_ctxt {
 	/* Register state before/after emulation. */
 	struct kvm_vcpu *vcpu;
 
-	/* Linear faulting address (if emulating a page-faulting instruction) */
 	unsigned long eflags;
-
 	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
 	int mode;
-
 	u32 cs_base;
 
 	/* decode cache */
-
 	struct decode_cache decode;
 };
 
 /* Repeat String Operation Prefix */
 #define REPE_PREFIX 1
 #define REPNE_PREFIX 2
 
 /* Execution mode, passed to the emulator. */
 #define X86EMUL_MODE_REAL 0 /* Real mode. */
@@ -170,7 +167,7 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
 
 /* Host execution mode. */
-#if defined(__i386__)
+#if defined(CONFIG_X86_32)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
 #elif defined(CONFIG_X86_64)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index d28a507cef39..1caf57628b9c 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -15,7 +15,7 @@
 #define SHARED_SWITCHER_PAGES \
 	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
 /* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
+#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
 
 /* We map at -4M for ease of mapping into the guest (one PTE page). */
 #define SWITCHER_ADDR 0xFFC00000
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index 6cb3a467e067..cc09cbbee27e 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return cpu_online_mask;
 #else
-	return cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -28,6 +28,7 @@ static inline cpumask_t target_cpus(void)
 #define apic_id_registered (genapic->apic_id_registered)
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define phys_pkg_id (genapic->phys_pkg_id)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
@@ -61,9 +62,19 @@ static inline int apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(cpumask)[0];
+	return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	unsigned long mask1 = cpumask_bits(cpumask)[0];
+	unsigned long mask2 = cpumask_bits(andmask)[0];
+	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+	return (unsigned int)(mask1 & mask2 & mask3);
 }
 
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +99,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }
 
-static inline cpumask_t vector_allocation_domain(int cpu)
+static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -98,8 +109,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
 }
 #endif
 
@@ -131,7 +141,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
+	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
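In flat mode the logical destination really is the low word of the cpumask, so cpu_mask_to_apicid_and() degenerates to a three-way bitwise AND. A worked example (standalone C; all three mask values invented for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned long irq_mask = 0x0f;	/* IRQ may target CPUs 0-3 */
		unsigned long affinity = 0x0c;	/* user pinned it to CPUs 2,3 */
		unsigned long online   = 0x07;	/* only CPUs 0-2 are online */

		/* same computation as the helper above: result 0x4, i.e. CPU 2 */
		printf("dest = %#lx\n", irq_mask & affinity & online);
		return 0;
	}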
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
index fabca01ebacf..191312d155da 100644
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ b/arch/x86/include/asm/mach-default/mach_ipi.h
@@ -4,7 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
 #ifdef CONFIG_X86_64
 #include <asm/genapic.h>
 #define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 #endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast || vector == NMI_VECTOR) {
-		cpumask_t mask = cpu_online_map;
-
-		cpu_clear(smp_processor_id(), mask);
-		send_IPI_mask(mask, vector);
-	} else
+	if (no_broadcast || vector == NMI_VECTOR)
+		send_IPI_mask_allbutself(cpu_online_mask, vector);
+	else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
 
 static inline void __local_send_IPI_all(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask(cpu_online_map, vector);
+		send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
index e430f47df667..48553e958ad5 100644
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ b/arch/x86/include/asm/mach-generic/mach_apic.h
@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 91885c28f66b..62d14ce3cd00 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -6,13 +6,13 @@
 #include <asm/mpspec_def.h>
 
 extern int apic_version[MAX_APICS];
+extern int pic_mode;
 
 #ifdef CONFIG_X86_32
 #include <mach_mpspec.h>
 
 extern unsigned int def_to_bigsmp;
 extern u8 apicid_2_node[];
-extern int pic_mode;
 
 #ifdef CONFIG_X86_NUMAQ
 extern int mp_bus_id_to_node[MAX_MP_BUSSES];
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 7c1e4258b31e..cb988aab716d 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -57,6 +57,31 @@ struct mtrr_gentry {
 };
 #endif /* !__i386__ */
 
+struct mtrr_var_range {
+	u32 base_lo;
+	u32 base_hi;
+	u32 mask_lo;
+	u32 mask_hi;
+};
+
+/* In the Intel processor's MTRR interface, the MTRR type is always held in
+   an 8 bit field: */
+typedef u8 mtrr_type;
+
+#define MTRR_NUM_FIXED_RANGES 88
+#define MTRR_MAX_VAR_RANGES 256
+
+struct mtrr_state_type {
+	struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
+	mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
+	unsigned char enabled;
+	unsigned char have_fixed;
+	mtrr_type def_type;
+};
+
+#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
+#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
+
 /* These are the various ioctls */
 #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
 #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
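The two new MSR macros encode the variable-range MTRR layout: each range owns an adjacent (base, mask) MSR pair starting at 0x200, so range 0 is 0x200/0x201 and range 7 is 0x20e/0x20f. This can be verified mechanically (standalone C using the macros exactly as defined above):

	#include <stdio.h>

	#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
	#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

	int main(void)
	{
		for (int reg = 0; reg < 8; reg++)	/* KVM_NR_VAR_MTRR is 8 */
			printf("range %d: base MSR %#x, mask MSR %#x\n",
			       reg, MTRRphysBase_MSR(reg), MTRRphysMask_MSR(reg));
		return 0;
	}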
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index 0bf2a06b7a4e..bf37bc49bd8e 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -7,9 +7,9 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
 #define NO_BALANCE_IRQ (1)
@@ -63,8 +63,8 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
-		return BAD_APICID;
+	if (cpu >= nr_cpu_ids)
+		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 }
 
@@ -122,7 +122,13 @@ static inline void enable_apic_mode(void)
 * We use physical apicids here, not logical, so just return the default
 * physical broadcast to stop people from breaking us
 */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+	return (int) 0xF;
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
 {
 	return (int) 0xF;
 }
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h index 935588d286cf..a8374c652778 100644 --- a/arch/x86/include/asm/numaq/ipi.h +++ b/arch/x86/include/asm/numaq/ipi.h | |||
@@ -1,25 +1,22 @@ | |||
1 | #ifndef __ASM_NUMAQ_IPI_H | 1 | #ifndef __ASM_NUMAQ_IPI_H |
2 | #define __ASM_NUMAQ_IPI_H | 2 | #define __ASM_NUMAQ_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t, int vector); | 4 | void send_IPI_mask_sequence(const struct cpumask *mask, int vector); |
5 | void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); | ||
5 | 6 | ||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 7 | static inline void send_IPI_mask(const struct cpumask *mask, int vector) |
7 | { | 8 | { |
8 | send_IPI_mask_sequence(mask, vector); | 9 | send_IPI_mask_sequence(mask, vector); |
9 | } | 10 | } |
10 | 11 | ||
11 | static inline void send_IPI_allbutself(int vector) | 12 | static inline void send_IPI_allbutself(int vector) |
12 | { | 13 | { |
13 | cpumask_t mask = cpu_online_map; | 14 | send_IPI_mask_allbutself(cpu_online_mask, vector); |
14 | cpu_clear(smp_processor_id(), mask); | ||
15 | |||
16 | if (!cpus_empty(mask)) | ||
17 | send_IPI_mask(mask, vector); | ||
18 | } | 15 | } |
19 | 16 | ||
20 | static inline void send_IPI_all(int vector) | 17 | static inline void send_IPI_all(int vector) |
21 | { | 18 | { |
22 | send_IPI_mask(cpu_online_map, vector); | 19 | send_IPI_mask(cpu_online_mask, vector); |
23 | } | 20 | } |
24 | 21 | ||
25 | #endif /* __ASM_NUMAQ_IPI_H */ | 22 | #endif /* __ASM_NUMAQ_IPI_H */ |
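The rewritten send_IPI_allbutself() above drops the on-stack cpumask_t copy, which grows with NR_CPUS. A side-by-side sketch of the two idioms (not part of the header itself; function names are illustrative):

static void allbutself_old_style(int vector)
{
	cpumask_t mask = cpu_online_map;	/* NR_CPUS bits on the stack */

	cpu_clear(smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(&mask, vector);
}

static void allbutself_new_style(int vector)
{
	/* no copy; the helper itself skips the calling CPU */
	send_IPI_mask_allbutself(cpu_online_mask, vector);
}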
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 66834c41c049..a977de23cb4d 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -102,9 +102,9 @@ extern void pci_iommu_alloc(void); | |||
102 | 102 | ||
103 | #ifdef CONFIG_NUMA | 103 | #ifdef CONFIG_NUMA |
104 | /* Returns the node based on pci bus */ | 104 | /* Returns the node based on pci bus */ |
105 | static inline int __pcibus_to_node(struct pci_bus *bus) | 105 | static inline int __pcibus_to_node(const struct pci_bus *bus) |
106 | { | 106 | { |
107 | struct pci_sysdata *sd = bus->sysdata; | 107 | const struct pci_sysdata *sd = bus->sysdata; |
108 | 108 | ||
109 | return sd->node; | 109 | return sd->node; |
110 | } | 110 | } |
@@ -113,6 +113,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) | |||
113 | { | 113 | { |
114 | return node_to_cpumask(__pcibus_to_node(bus)); | 114 | return node_to_cpumask(__pcibus_to_node(bus)); |
115 | } | 115 | } |
116 | |||
117 | static inline const struct cpumask * | ||
118 | cpumask_of_pcibus(const struct pci_bus *bus) | ||
119 | { | ||
120 | return cpumask_of_node(__pcibus_to_node(bus)); | ||
121 | } | ||
116 | #endif | 122 | #endif |
117 | 123 | ||
118 | #endif /* _ASM_X86_PCI_H */ | 124 | #endif /* _ASM_X86_PCI_H */ |
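cpumask_of_pcibus() returns a pointer, so callers can pass it directly wherever a const struct cpumask * is expected. A hedged sketch of a typical consumer, binding a task near its device (set_cpus_allowed_ptr() already exists in this tree; the wrapper name here is hypothetical):

static void bind_task_near_device(struct task_struct *tsk,
				  struct pci_dev *pdev)
{
	/* on non-NUMA configurations this degenerates to all online CPUs */
	set_cpus_allowed_ptr(tsk, cpumask_of_pcibus(pdev->bus));
}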
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h new file mode 100644 index 000000000000..e60fd3e14bdf --- /dev/null +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * Low-Level PCI Access for i386 machines. | ||
3 | * | ||
4 | * (c) 1999 Martin Mares <mj@ucw.cz> | ||
5 | */ | ||
6 | |||
7 | #undef DEBUG | ||
8 | |||
9 | #ifdef DEBUG | ||
10 | #define DBG(x...) printk(x) | ||
11 | #else | ||
12 | #define DBG(x...) | ||
13 | #endif | ||
14 | |||
15 | #define PCI_PROBE_BIOS 0x0001 | ||
16 | #define PCI_PROBE_CONF1 0x0002 | ||
17 | #define PCI_PROBE_CONF2 0x0004 | ||
18 | #define PCI_PROBE_MMCONF 0x0008 | ||
19 | #define PCI_PROBE_MASK 0x000f | ||
20 | #define PCI_PROBE_NOEARLY 0x0010 | ||
21 | |||
22 | #define PCI_NO_CHECKS 0x0400 | ||
23 | #define PCI_USE_PIRQ_MASK 0x0800 | ||
24 | #define PCI_ASSIGN_ROMS 0x1000 | ||
25 | #define PCI_BIOS_IRQ_SCAN 0x2000 | ||
26 | #define PCI_ASSIGN_ALL_BUSSES 0x4000 | ||
27 | #define PCI_CAN_SKIP_ISA_ALIGN 0x8000 | ||
28 | #define PCI_USE__CRS 0x10000 | ||
29 | #define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 | ||
30 | #define PCI_HAS_IO_ECS 0x40000 | ||
31 | #define PCI_NOASSIGN_ROMS 0x80000 | ||
32 | |||
33 | extern unsigned int pci_probe; | ||
34 | extern unsigned long pirq_table_addr; | ||
35 | |||
36 | enum pci_bf_sort_state { | ||
37 | pci_bf_sort_default, | ||
38 | pci_force_nobf, | ||
39 | pci_force_bf, | ||
40 | pci_dmi_bf, | ||
41 | }; | ||
42 | |||
43 | /* pci-i386.c */ | ||
44 | |||
45 | extern unsigned int pcibios_max_latency; | ||
46 | |||
47 | void pcibios_resource_survey(void); | ||
48 | |||
49 | /* pci-pc.c */ | ||
50 | |||
51 | extern int pcibios_last_bus; | ||
52 | extern struct pci_bus *pci_root_bus; | ||
53 | extern struct pci_ops pci_root_ops; | ||
54 | |||
55 | /* pci-irq.c */ | ||
56 | |||
57 | struct irq_info { | ||
58 | u8 bus, devfn; /* Bus, device and function */ | ||
59 | struct { | ||
60 | u8 link; /* IRQ line ID, chipset dependent, | ||
61 | 0 = not routed */ | ||
62 | u16 bitmap; /* Available IRQs */ | ||
63 | } __attribute__((packed)) irq[4]; | ||
64 | u8 slot; /* Slot number, 0=onboard */ | ||
65 | u8 rfu; | ||
66 | } __attribute__((packed)); | ||
67 | |||
68 | struct irq_routing_table { | ||
69 | u32 signature; /* PIRQ_SIGNATURE should be here */ | ||
70 | u16 version; /* PIRQ_VERSION */ | ||
71 | u16 size; /* Table size in bytes */ | ||
72 | u8 rtr_bus, rtr_devfn; /* Where the interrupt router lies */ | ||
73 | u16 exclusive_irqs; /* IRQs devoted exclusively to | ||
74 | PCI usage */ | ||
75 | u16 rtr_vendor, rtr_device; /* Vendor and device ID of | ||
76 | interrupt router */ | ||
77 | u32 miniport_data; /* Crap */ | ||
78 | u8 rfu[11]; | ||
79 | u8 checksum; /* Modulo 256 checksum must give 0 */ | ||
80 | struct irq_info slots[0]; | ||
81 | } __attribute__((packed)); | ||
82 | |||
83 | extern unsigned int pcibios_irq_mask; | ||
84 | |||
85 | extern int pcibios_scanned; | ||
86 | extern spinlock_t pci_config_lock; | ||
87 | |||
88 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); | ||
89 | extern void (*pcibios_disable_irq)(struct pci_dev *dev); | ||
90 | |||
91 | struct pci_raw_ops { | ||
92 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, | ||
93 | int reg, int len, u32 *val); | ||
94 | int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, | ||
95 | int reg, int len, u32 val); | ||
96 | }; | ||
97 | |||
98 | extern struct pci_raw_ops *raw_pci_ops; | ||
99 | extern struct pci_raw_ops *raw_pci_ext_ops; | ||
100 | |||
101 | extern struct pci_raw_ops pci_direct_conf1; | ||
102 | extern bool port_cf9_safe; | ||
103 | |||
104 | /* arch_initcall level */ | ||
105 | extern int pci_direct_probe(void); | ||
106 | extern void pci_direct_init(int type); | ||
107 | extern void pci_pcbios_init(void); | ||
108 | extern int pci_olpc_init(void); | ||
109 | extern void __init dmi_check_pciprobe(void); | ||
110 | extern void __init dmi_check_skip_isa_align(void); | ||
111 | |||
112 | /* some common used subsys_initcalls */ | ||
113 | extern int __init pci_acpi_init(void); | ||
114 | extern int __init pcibios_irq_init(void); | ||
115 | extern int __init pci_visws_init(void); | ||
116 | extern int __init pci_numaq_init(void); | ||
117 | extern int __init pcibios_init(void); | ||
118 | |||
119 | /* pci-mmconfig.c */ | ||
120 | |||
121 | extern int __init pci_mmcfg_arch_init(void); | ||
122 | extern void __init pci_mmcfg_arch_free(void); | ||
123 | |||
124 | /* | ||
125 | * AMD Fam10h CPUs are buggy, and cannot access MMIO config space | ||
126 | * on their northbridge except through the %eax register. As such, you MUST | ||
127 | * NOT use normal IOMEM accesses; use only the magic mmio-config | ||
128 | * accessor functions. | ||
129 | * In fact just use pci_config_*, nothing else please. | ||
130 | */ | ||
131 | static inline unsigned char mmio_config_readb(void __iomem *pos) | ||
132 | { | ||
133 | u8 val; | ||
134 | asm volatile("movb (%1),%%al" : "=a" (val) : "r" (pos)); | ||
135 | return val; | ||
136 | } | ||
137 | |||
138 | static inline unsigned short mmio_config_readw(void __iomem *pos) | ||
139 | { | ||
140 | u16 val; | ||
141 | asm volatile("movw (%1),%%ax" : "=a" (val) : "r" (pos)); | ||
142 | return val; | ||
143 | } | ||
144 | |||
145 | static inline unsigned int mmio_config_readl(void __iomem *pos) | ||
146 | { | ||
147 | u32 val; | ||
148 | asm volatile("movl (%1),%%eax" : "=a" (val) : "r" (pos)); | ||
149 | return val; | ||
150 | } | ||
151 | |||
152 | static inline void mmio_config_writeb(void __iomem *pos, u8 val) | ||
153 | { | ||
154 | asm volatile("movb %%al,(%1)" : : "a" (val), "r" (pos) : "memory"); | ||
155 | } | ||
156 | |||
157 | static inline void mmio_config_writew(void __iomem *pos, u16 val) | ||
158 | { | ||
159 | asm volatile("movw %%ax,(%1)" : : "a" (val), "r" (pos) : "memory"); | ||
160 | } | ||
161 | |||
162 | static inline void mmio_config_writel(void __iomem *pos, u32 val) | ||
163 | { | ||
164 | asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); | ||
165 | } | ||
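The accessors above force every access through %eax/%ax/%al, as the Fam10h erratum requires. A sketch of how a config read would use them, assuming mmcfg already points into an ioremap()ed MMCONFIG window; the helper is hypothetical, and the offset math follows the standard ECAM encoding (bus << 20 | devfn << 12):

static u32 mmcfg_read_id(void __iomem *mmcfg,
			 unsigned int bus, unsigned int devfn)
{
	/* each function owns a 4K config window inside the MMCONFIG area */
	void __iomem *pos = mmcfg + ((bus << 20) | (devfn << 12));

	return mmio_config_readl(pos);	/* vendor/device ID at offset 0 */
}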
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index d12811ce51d9..830b9fcb6427 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -60,7 +60,7 @@ struct smp_ops { | |||
60 | void (*cpu_die)(unsigned int cpu); | 60 | void (*cpu_die)(unsigned int cpu); |
61 | void (*play_dead)(void); | 61 | void (*play_dead)(void); |
62 | 62 | ||
63 | void (*send_call_func_ipi)(cpumask_t mask); | 63 | void (*send_call_func_ipi)(const struct cpumask *mask); |
64 | void (*send_call_func_single_ipi)(int cpu); | 64 | void (*send_call_func_single_ipi)(int cpu); |
65 | }; | 65 | }; |
66 | 66 | ||
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu) | |||
125 | 125 | ||
126 | static inline void arch_send_call_function_ipi(cpumask_t mask) | 126 | static inline void arch_send_call_function_ipi(cpumask_t mask) |
127 | { | 127 | { |
128 | smp_ops.send_call_func_ipi(mask); | 128 | smp_ops.send_call_func_ipi(&mask); |
129 | } | 129 | } |
130 | 130 | ||
131 | void cpu_disable_common(void); | 131 | void cpu_disable_common(void); |
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu); | |||
138 | void native_play_dead(void); | 138 | void native_play_dead(void); |
139 | void play_dead_common(void); | 139 | void play_dead_common(void); |
140 | 140 | ||
141 | void native_send_call_func_ipi(cpumask_t mask); | 141 | void native_send_call_func_ipi(const struct cpumask *mask); |
142 | void native_send_call_func_single_ipi(int cpu); | 142 | void native_send_call_func_single_ipi(int cpu); |
143 | 143 | ||
144 | extern void prefill_possible_map(void); | 144 | extern void prefill_possible_map(void); |
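The smp_ops hook now takes a pointer, while the arch_send_call_function_ipi() wrapper still accepts cpumask_t by value and forwards &mask, a transitional shape. A sketch of the native wiring these declarations imply (the real initializer in arch/x86/kernel/smp.c carries more hooks):

struct smp_ops smp_ops = {
	/* ... cpu bringup/teardown hooks elided ... */
	.send_call_func_ipi		= native_send_call_func_ipi,
	.send_call_func_single_ipi	= native_send_call_func_single_ipi,
};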
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 9b3070f1c2ac..4bb5fb34f030 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h | |||
@@ -14,13 +14,13 @@ | |||
14 | 14 | ||
15 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 15 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
16 | 16 | ||
17 | static inline cpumask_t target_cpus(void) | 17 | static inline const cpumask_t *target_cpus(void) |
18 | { | 18 | { |
19 | /* CPU_MASK_ALL (0xff) has undefined behaviour with | 19 | /* CPU_MASK_ALL (0xff) has undefined behaviour with |
20 | * dest_LowestPrio mode logical clustered apic interrupt routing. | 20 | * dest_LowestPrio mode logical clustered apic interrupt routing. |
21 | * Just start on cpu 0. IRQ balancing will spread load | 21 | * Just start on cpu 0. IRQ balancing will spread load |
22 | */ | 22 | */ |
23 | return cpumask_of_cpu(0); | 23 | return &cpumask_of_cpu(0); |
24 | } | 24 | } |
25 | 25 | ||
26 | #define INT_DELIVERY_MODE (dest_LowestPrio) | 26 | #define INT_DELIVERY_MODE (dest_LowestPrio) |
@@ -52,7 +52,7 @@ static inline void init_apic_ldr(void) | |||
52 | int i; | 52 | int i; |
53 | 53 | ||
54 | /* Create logical APIC IDs by counting CPUs already in cluster. */ | 54 | /* Create logical APIC IDs by counting CPUs already in cluster. */ |
55 | for (count = 0, i = NR_CPUS; --i >= 0; ) { | 55 | for (count = 0, i = nr_cpu_ids; --i >= 0; ) { |
56 | lid = cpu_2_logical_apicid[i]; | 56 | lid = cpu_2_logical_apicid[i]; |
57 | if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) | 57 | if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) |
58 | ++count; | 58 | ++count; |
@@ -97,8 +97,8 @@ static inline int apicid_to_node(int logical_apicid) | |||
97 | static inline int cpu_to_logical_apicid(int cpu) | 97 | static inline int cpu_to_logical_apicid(int cpu) |
98 | { | 98 | { |
99 | #ifdef CONFIG_SMP | 99 | #ifdef CONFIG_SMP |
100 | if (cpu >= NR_CPUS) | 100 | if (cpu >= nr_cpu_ids) |
101 | return BAD_APICID; | 101 | return BAD_APICID; |
102 | return (int)cpu_2_logical_apicid[cpu]; | 102 | return (int)cpu_2_logical_apicid[cpu]; |
103 | #else | 103 | #else |
104 | return logical_smp_processor_id(); | 104 | return logical_smp_processor_id(); |
@@ -107,7 +107,7 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
107 | 107 | ||
108 | static inline int cpu_present_to_apicid(int mps_cpu) | 108 | static inline int cpu_present_to_apicid(int mps_cpu) |
109 | { | 109 | { |
110 | if (mps_cpu < NR_CPUS) | 110 | if (mps_cpu < nr_cpu_ids) |
111 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); | 111 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); |
112 | else | 112 | else |
113 | return BAD_APICID; | 113 | return BAD_APICID; |
@@ -137,25 +137,25 @@ static inline void enable_apic_mode(void) | |||
137 | { | 137 | { |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | 140 | static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) |
141 | { | 141 | { |
142 | int num_bits_set; | 142 | int num_bits_set; |
143 | int cpus_found = 0; | 143 | int cpus_found = 0; |
144 | int cpu; | 144 | int cpu; |
145 | int apicid; | 145 | int apicid; |
146 | 146 | ||
147 | num_bits_set = cpus_weight(cpumask); | 147 | num_bits_set = cpus_weight(*cpumask); |
148 | /* Return id to all */ | 148 | /* Return id to all */ |
149 | if (num_bits_set == NR_CPUS) | 149 | if (num_bits_set >= nr_cpu_ids) |
150 | return (int) 0xFF; | 150 | return (int) 0xFF; |
151 | /* | 151 | /* |
152 | * The cpus in the mask must all be on the apic cluster. If they are | 152 | * The cpus in the mask must all be on the apic cluster. If they are |
153 | * not on the same apicid cluster, return the default value of TARGET_CPUS. | 153 | * not on the same apicid cluster, return the default value of TARGET_CPUS. |
154 | */ | 154 | */ |
155 | cpu = first_cpu(cpumask); | 155 | cpu = first_cpu(*cpumask); |
156 | apicid = cpu_to_logical_apicid(cpu); | 156 | apicid = cpu_to_logical_apicid(cpu); |
157 | while (cpus_found < num_bits_set) { | 157 | while (cpus_found < num_bits_set) { |
158 | if (cpu_isset(cpu, cpumask)) { | 158 | if (cpu_isset(cpu, *cpumask)) { |
159 | int new_apicid = cpu_to_logical_apicid(cpu); | 159 | int new_apicid = cpu_to_logical_apicid(cpu); |
160 | if (apicid_cluster(apicid) != | 160 | if (apicid_cluster(apicid) != |
161 | apicid_cluster(new_apicid)){ | 161 | apicid_cluster(new_apicid)){ |
@@ -170,6 +170,23 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
170 | return apicid; | 170 | return apicid; |
171 | } | 171 | } |
172 | 172 | ||
173 | static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, | ||
174 | const struct cpumask *andmask) | ||
175 | { | ||
176 | int apicid = cpu_to_logical_apicid(0); | ||
177 | cpumask_var_t cpumask; | ||
178 | |||
179 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) | ||
180 | return apicid; | ||
181 | |||
182 | cpumask_and(cpumask, inmask, andmask); | ||
183 | cpumask_and(cpumask, cpumask, cpu_online_mask); | ||
184 | apicid = cpu_mask_to_apicid(cpumask); | ||
185 | |||
186 | free_cpumask_var(cpumask); | ||
187 | return apicid; | ||
188 | } | ||
189 | |||
173 | /* cpuid returns the value latched in the HW at reset, not the APIC ID | 190 | /* cpuid returns the value latched in the HW at reset, not the APIC ID |
174 | * register's value. For any box whose BIOS changes APIC IDs, like | 191 | * register's value. For any box whose BIOS changes APIC IDs, like |
175 | * clustered APIC systems, we must use hard_smp_processor_id. | 192 | * clustered APIC systems, we must use hard_smp_processor_id. |
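A design note on the cpumask_var_t used in cpu_mask_to_apicid_and() above: with CONFIG_CPUMASK_OFFSTACK the temporary lives on the heap rather than the stack, and GFP_ATOMIC keeps the allocation safe in the interrupt-disabled paths that compute IRQ destinations. The same alloc/and/free pattern as a standalone sketch (function name hypothetical):

static int count_online_in(const struct cpumask *in)
{
	cpumask_var_t tmp;
	int n = 0;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return 0;	/* fall back gracefully on allocation failure */

	cpumask_and(tmp, in, cpu_online_mask);
	n = cpumask_weight(tmp);
	free_cpumask_var(tmp);
	return n;
}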
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h index 53bd1e7bd7b4..a8a2c24f50cc 100644 --- a/arch/x86/include/asm/summit/ipi.h +++ b/arch/x86/include/asm/summit/ipi.h | |||
@@ -1,9 +1,10 @@ | |||
1 | #ifndef __ASM_SUMMIT_IPI_H | 1 | #ifndef __ASM_SUMMIT_IPI_H |
2 | #define __ASM_SUMMIT_IPI_H | 2 | #define __ASM_SUMMIT_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(const cpumask_t *mask, int vector); |
5 | void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); | ||
5 | 6 | ||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 7 | static inline void send_IPI_mask(const cpumask_t *mask, int vector) |
7 | { | 8 | { |
8 | send_IPI_mask_sequence(mask, vector); | 9 | send_IPI_mask_sequence(mask, vector); |
9 | } | 10 | } |
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector) | |||
14 | cpu_clear(smp_processor_id(), mask); | 15 | cpu_clear(smp_processor_id(), mask); |
15 | 16 | ||
16 | if (!cpus_empty(mask)) | 17 | if (!cpus_empty(mask)) |
17 | send_IPI_mask(mask, vector); | 18 | send_IPI_mask(&mask, vector); |
18 | } | 19 | } |
19 | 20 | ||
20 | static inline void send_IPI_all(int vector) | 21 | static inline void send_IPI_all(int vector) |
21 | { | 22 | { |
22 | send_IPI_mask(cpu_online_map, vector); | 23 | send_IPI_mask(&cpu_online_map, vector); |
23 | } | 24 | } |
24 | 25 | ||
25 | #endif /* __ASM_SUMMIT_IPI_H */ | 26 | #endif /* __ASM_SUMMIT_IPI_H */ |
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h new file mode 100644 index 000000000000..1b8afa78e869 --- /dev/null +++ b/arch/x86/include/asm/svm.h | |||
@@ -0,0 +1,328 @@ | |||
1 | #ifndef __SVM_H | ||
2 | #define __SVM_H | ||
3 | |||
4 | enum { | ||
5 | INTERCEPT_INTR, | ||
6 | INTERCEPT_NMI, | ||
7 | INTERCEPT_SMI, | ||
8 | INTERCEPT_INIT, | ||
9 | INTERCEPT_VINTR, | ||
10 | INTERCEPT_SELECTIVE_CR0, | ||
11 | INTERCEPT_STORE_IDTR, | ||
12 | INTERCEPT_STORE_GDTR, | ||
13 | INTERCEPT_STORE_LDTR, | ||
14 | INTERCEPT_STORE_TR, | ||
15 | INTERCEPT_LOAD_IDTR, | ||
16 | INTERCEPT_LOAD_GDTR, | ||
17 | INTERCEPT_LOAD_LDTR, | ||
18 | INTERCEPT_LOAD_TR, | ||
19 | INTERCEPT_RDTSC, | ||
20 | INTERCEPT_RDPMC, | ||
21 | INTERCEPT_PUSHF, | ||
22 | INTERCEPT_POPF, | ||
23 | INTERCEPT_CPUID, | ||
24 | INTERCEPT_RSM, | ||
25 | INTERCEPT_IRET, | ||
26 | INTERCEPT_INTn, | ||
27 | INTERCEPT_INVD, | ||
28 | INTERCEPT_PAUSE, | ||
29 | INTERCEPT_HLT, | ||
30 | INTERCEPT_INVLPG, | ||
31 | INTERCEPT_INVLPGA, | ||
32 | INTERCEPT_IOIO_PROT, | ||
33 | INTERCEPT_MSR_PROT, | ||
34 | INTERCEPT_TASK_SWITCH, | ||
35 | INTERCEPT_FERR_FREEZE, | ||
36 | INTERCEPT_SHUTDOWN, | ||
37 | INTERCEPT_VMRUN, | ||
38 | INTERCEPT_VMMCALL, | ||
39 | INTERCEPT_VMLOAD, | ||
40 | INTERCEPT_VMSAVE, | ||
41 | INTERCEPT_STGI, | ||
42 | INTERCEPT_CLGI, | ||
43 | INTERCEPT_SKINIT, | ||
44 | INTERCEPT_RDTSCP, | ||
45 | INTERCEPT_ICEBP, | ||
46 | INTERCEPT_WBINVD, | ||
47 | INTERCEPT_MONITOR, | ||
48 | INTERCEPT_MWAIT, | ||
49 | INTERCEPT_MWAIT_COND, | ||
50 | }; | ||
51 | |||
52 | |||
53 | struct __attribute__ ((__packed__)) vmcb_control_area { | ||
54 | u16 intercept_cr_read; | ||
55 | u16 intercept_cr_write; | ||
56 | u16 intercept_dr_read; | ||
57 | u16 intercept_dr_write; | ||
58 | u32 intercept_exceptions; | ||
59 | u64 intercept; | ||
60 | u8 reserved_1[44]; | ||
61 | u64 iopm_base_pa; | ||
62 | u64 msrpm_base_pa; | ||
63 | u64 tsc_offset; | ||
64 | u32 asid; | ||
65 | u8 tlb_ctl; | ||
66 | u8 reserved_2[3]; | ||
67 | u32 int_ctl; | ||
68 | u32 int_vector; | ||
69 | u32 int_state; | ||
70 | u8 reserved_3[4]; | ||
71 | u32 exit_code; | ||
72 | u32 exit_code_hi; | ||
73 | u64 exit_info_1; | ||
74 | u64 exit_info_2; | ||
75 | u32 exit_int_info; | ||
76 | u32 exit_int_info_err; | ||
77 | u64 nested_ctl; | ||
78 | u8 reserved_4[16]; | ||
79 | u32 event_inj; | ||
80 | u32 event_inj_err; | ||
81 | u64 nested_cr3; | ||
82 | u64 lbr_ctl; | ||
83 | u8 reserved_5[832]; | ||
84 | }; | ||
85 | |||
86 | |||
87 | #define TLB_CONTROL_DO_NOTHING 0 | ||
88 | #define TLB_CONTROL_FLUSH_ALL_ASID 1 | ||
89 | |||
90 | #define V_TPR_MASK 0x0f | ||
91 | |||
92 | #define V_IRQ_SHIFT 8 | ||
93 | #define V_IRQ_MASK (1 << V_IRQ_SHIFT) | ||
94 | |||
95 | #define V_INTR_PRIO_SHIFT 16 | ||
96 | #define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) | ||
97 | |||
98 | #define V_IGN_TPR_SHIFT 20 | ||
99 | #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) | ||
100 | |||
101 | #define V_INTR_MASKING_SHIFT 24 | ||
102 | #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) | ||
103 | |||
104 | #define SVM_INTERRUPT_SHADOW_MASK 1 | ||
105 | |||
106 | #define SVM_IOIO_STR_SHIFT 2 | ||
107 | #define SVM_IOIO_REP_SHIFT 3 | ||
108 | #define SVM_IOIO_SIZE_SHIFT 4 | ||
109 | #define SVM_IOIO_ASIZE_SHIFT 7 | ||
110 | |||
111 | #define SVM_IOIO_TYPE_MASK 1 | ||
112 | #define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT) | ||
113 | #define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT) | ||
114 | #define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT) | ||
115 | #define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT) | ||
116 | |||
117 | struct __attribute__ ((__packed__)) vmcb_seg { | ||
118 | u16 selector; | ||
119 | u16 attrib; | ||
120 | u32 limit; | ||
121 | u64 base; | ||
122 | }; | ||
123 | |||
124 | struct __attribute__ ((__packed__)) vmcb_save_area { | ||
125 | struct vmcb_seg es; | ||
126 | struct vmcb_seg cs; | ||
127 | struct vmcb_seg ss; | ||
128 | struct vmcb_seg ds; | ||
129 | struct vmcb_seg fs; | ||
130 | struct vmcb_seg gs; | ||
131 | struct vmcb_seg gdtr; | ||
132 | struct vmcb_seg ldtr; | ||
133 | struct vmcb_seg idtr; | ||
134 | struct vmcb_seg tr; | ||
135 | u8 reserved_1[43]; | ||
136 | u8 cpl; | ||
137 | u8 reserved_2[4]; | ||
138 | u64 efer; | ||
139 | u8 reserved_3[112]; | ||
140 | u64 cr4; | ||
141 | u64 cr3; | ||
142 | u64 cr0; | ||
143 | u64 dr7; | ||
144 | u64 dr6; | ||
145 | u64 rflags; | ||
146 | u64 rip; | ||
147 | u8 reserved_4[88]; | ||
148 | u64 rsp; | ||
149 | u8 reserved_5[24]; | ||
150 | u64 rax; | ||
151 | u64 star; | ||
152 | u64 lstar; | ||
153 | u64 cstar; | ||
154 | u64 sfmask; | ||
155 | u64 kernel_gs_base; | ||
156 | u64 sysenter_cs; | ||
157 | u64 sysenter_esp; | ||
158 | u64 sysenter_eip; | ||
159 | u64 cr2; | ||
160 | u8 reserved_6[32]; | ||
161 | u64 g_pat; | ||
162 | u64 dbgctl; | ||
163 | u64 br_from; | ||
164 | u64 br_to; | ||
165 | u64 last_excp_from; | ||
166 | u64 last_excp_to; | ||
167 | }; | ||
168 | |||
169 | struct __attribute__ ((__packed__)) vmcb { | ||
170 | struct vmcb_control_area control; | ||
171 | struct vmcb_save_area save; | ||
172 | }; | ||
173 | |||
174 | #define SVM_CPUID_FEATURE_SHIFT 2 | ||
175 | #define SVM_CPUID_FUNC 0x8000000a | ||
176 | |||
177 | #define MSR_EFER_SVME_MASK (1ULL << 12) | ||
178 | #define MSR_VM_CR 0xc0010114 | ||
179 | #define MSR_VM_HSAVE_PA 0xc0010117ULL | ||
180 | |||
181 | #define SVM_VM_CR_SVM_DISABLE 4 | ||
182 | |||
183 | #define SVM_SELECTOR_S_SHIFT 4 | ||
184 | #define SVM_SELECTOR_DPL_SHIFT 5 | ||
185 | #define SVM_SELECTOR_P_SHIFT 7 | ||
186 | #define SVM_SELECTOR_AVL_SHIFT 8 | ||
187 | #define SVM_SELECTOR_L_SHIFT 9 | ||
188 | #define SVM_SELECTOR_DB_SHIFT 10 | ||
189 | #define SVM_SELECTOR_G_SHIFT 11 | ||
190 | |||
191 | #define SVM_SELECTOR_TYPE_MASK (0xf) | ||
192 | #define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT) | ||
193 | #define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT) | ||
194 | #define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT) | ||
195 | #define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT) | ||
196 | #define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT) | ||
197 | #define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT) | ||
198 | #define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT) | ||
199 | |||
200 | #define SVM_SELECTOR_WRITE_MASK (1 << 1) | ||
201 | #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK | ||
202 | #define SVM_SELECTOR_CODE_MASK (1 << 3) | ||
203 | |||
204 | #define INTERCEPT_CR0_MASK 1 | ||
205 | #define INTERCEPT_CR3_MASK (1 << 3) | ||
206 | #define INTERCEPT_CR4_MASK (1 << 4) | ||
207 | #define INTERCEPT_CR8_MASK (1 << 8) | ||
208 | |||
209 | #define INTERCEPT_DR0_MASK 1 | ||
210 | #define INTERCEPT_DR1_MASK (1 << 1) | ||
211 | #define INTERCEPT_DR2_MASK (1 << 2) | ||
212 | #define INTERCEPT_DR3_MASK (1 << 3) | ||
213 | #define INTERCEPT_DR4_MASK (1 << 4) | ||
214 | #define INTERCEPT_DR5_MASK (1 << 5) | ||
215 | #define INTERCEPT_DR6_MASK (1 << 6) | ||
216 | #define INTERCEPT_DR7_MASK (1 << 7) | ||
217 | |||
218 | #define SVM_EVTINJ_VEC_MASK 0xff | ||
219 | |||
220 | #define SVM_EVTINJ_TYPE_SHIFT 8 | ||
221 | #define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT) | ||
222 | |||
223 | #define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT) | ||
224 | #define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT) | ||
225 | #define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT) | ||
226 | #define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT) | ||
227 | |||
228 | #define SVM_EVTINJ_VALID (1 << 31) | ||
229 | #define SVM_EVTINJ_VALID_ERR (1 << 11) | ||
230 | |||
231 | #define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK | ||
232 | |||
233 | #define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR | ||
234 | #define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI | ||
235 | #define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT | ||
236 | #define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT | ||
237 | |||
238 | #define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID | ||
239 | #define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR | ||
240 | |||
241 | #define SVM_EXITINFOSHIFT_TS_REASON_IRET 36 | ||
242 | #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38 | ||
243 | |||
244 | #define SVM_EXIT_READ_CR0 0x000 | ||
245 | #define SVM_EXIT_READ_CR3 0x003 | ||
246 | #define SVM_EXIT_READ_CR4 0x004 | ||
247 | #define SVM_EXIT_READ_CR8 0x008 | ||
248 | #define SVM_EXIT_WRITE_CR0 0x010 | ||
249 | #define SVM_EXIT_WRITE_CR3 0x013 | ||
250 | #define SVM_EXIT_WRITE_CR4 0x014 | ||
251 | #define SVM_EXIT_WRITE_CR8 0x018 | ||
252 | #define SVM_EXIT_READ_DR0 0x020 | ||
253 | #define SVM_EXIT_READ_DR1 0x021 | ||
254 | #define SVM_EXIT_READ_DR2 0x022 | ||
255 | #define SVM_EXIT_READ_DR3 0x023 | ||
256 | #define SVM_EXIT_READ_DR4 0x024 | ||
257 | #define SVM_EXIT_READ_DR5 0x025 | ||
258 | #define SVM_EXIT_READ_DR6 0x026 | ||
259 | #define SVM_EXIT_READ_DR7 0x027 | ||
260 | #define SVM_EXIT_WRITE_DR0 0x030 | ||
261 | #define SVM_EXIT_WRITE_DR1 0x031 | ||
262 | #define SVM_EXIT_WRITE_DR2 0x032 | ||
263 | #define SVM_EXIT_WRITE_DR3 0x033 | ||
264 | #define SVM_EXIT_WRITE_DR4 0x034 | ||
265 | #define SVM_EXIT_WRITE_DR5 0x035 | ||
266 | #define SVM_EXIT_WRITE_DR6 0x036 | ||
267 | #define SVM_EXIT_WRITE_DR7 0x037 | ||
268 | #define SVM_EXIT_EXCP_BASE 0x040 | ||
269 | #define SVM_EXIT_INTR 0x060 | ||
270 | #define SVM_EXIT_NMI 0x061 | ||
271 | #define SVM_EXIT_SMI 0x062 | ||
272 | #define SVM_EXIT_INIT 0x063 | ||
273 | #define SVM_EXIT_VINTR 0x064 | ||
274 | #define SVM_EXIT_CR0_SEL_WRITE 0x065 | ||
275 | #define SVM_EXIT_IDTR_READ 0x066 | ||
276 | #define SVM_EXIT_GDTR_READ 0x067 | ||
277 | #define SVM_EXIT_LDTR_READ 0x068 | ||
278 | #define SVM_EXIT_TR_READ 0x069 | ||
279 | #define SVM_EXIT_IDTR_WRITE 0x06a | ||
280 | #define SVM_EXIT_GDTR_WRITE 0x06b | ||
281 | #define SVM_EXIT_LDTR_WRITE 0x06c | ||
282 | #define SVM_EXIT_TR_WRITE 0x06d | ||
283 | #define SVM_EXIT_RDTSC 0x06e | ||
284 | #define SVM_EXIT_RDPMC 0x06f | ||
285 | #define SVM_EXIT_PUSHF 0x070 | ||
286 | #define SVM_EXIT_POPF 0x071 | ||
287 | #define SVM_EXIT_CPUID 0x072 | ||
288 | #define SVM_EXIT_RSM 0x073 | ||
289 | #define SVM_EXIT_IRET 0x074 | ||
290 | #define SVM_EXIT_SWINT 0x075 | ||
291 | #define SVM_EXIT_INVD 0x076 | ||
292 | #define SVM_EXIT_PAUSE 0x077 | ||
293 | #define SVM_EXIT_HLT 0x078 | ||
294 | #define SVM_EXIT_INVLPG 0x079 | ||
295 | #define SVM_EXIT_INVLPGA 0x07a | ||
296 | #define SVM_EXIT_IOIO 0x07b | ||
297 | #define SVM_EXIT_MSR 0x07c | ||
298 | #define SVM_EXIT_TASK_SWITCH 0x07d | ||
299 | #define SVM_EXIT_FERR_FREEZE 0x07e | ||
300 | #define SVM_EXIT_SHUTDOWN 0x07f | ||
301 | #define SVM_EXIT_VMRUN 0x080 | ||
302 | #define SVM_EXIT_VMMCALL 0x081 | ||
303 | #define SVM_EXIT_VMLOAD 0x082 | ||
304 | #define SVM_EXIT_VMSAVE 0x083 | ||
305 | #define SVM_EXIT_STGI 0x084 | ||
306 | #define SVM_EXIT_CLGI 0x085 | ||
307 | #define SVM_EXIT_SKINIT 0x086 | ||
308 | #define SVM_EXIT_RDTSCP 0x087 | ||
309 | #define SVM_EXIT_ICEBP 0x088 | ||
310 | #define SVM_EXIT_WBINVD 0x089 | ||
311 | #define SVM_EXIT_MONITOR 0x08a | ||
312 | #define SVM_EXIT_MWAIT 0x08b | ||
313 | #define SVM_EXIT_MWAIT_COND 0x08c | ||
314 | #define SVM_EXIT_NPF 0x400 | ||
315 | |||
316 | #define SVM_EXIT_ERR -1 | ||
317 | |||
318 | #define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ | ||
319 | |||
320 | #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" | ||
321 | #define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" | ||
322 | #define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb" | ||
323 | #define SVM_CLGI ".byte 0x0f, 0x01, 0xdd" | ||
324 | #define SVM_STGI ".byte 0x0f, 0x01, 0xdc" | ||
325 | #define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf" | ||
326 | |||
327 | #endif | ||
328 | |||
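The enum at the top of this header gives bit positions within vmcb_control_area.intercept. A hedged sketch of how a hypervisor would fill a minimal control area; the vmcb pointer is assumed to come from a zeroed, page-aligned allocation, and intercepting VMRUN is architecturally mandatory:

static void svm_setup_minimal_intercepts(struct vmcb *vmcb)
{
	struct vmcb_control_area *c = &vmcb->control;

	c->intercept = (1ULL << INTERCEPT_INTR) |
		       (1ULL << INTERCEPT_NMI)  |
		       (1ULL << INTERCEPT_VMRUN);	/* required by the architecture */
	c->intercept_cr_write = INTERCEPT_CR0_MASK;	/* trap guest CR0 writes */
	c->asid = 1;					/* ASID 0 belongs to the host */
}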
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h new file mode 100644 index 000000000000..ffb08be2a530 --- /dev/null +++ b/arch/x86/include/asm/sys_ia32.h | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * sys_ia32.h - Linux ia32 syscall interfaces | ||
3 | * | ||
4 | * Copyright (c) 2008 Jaswinder Singh Rajput | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | * See the file COPYING for more details. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_X86_SYS_IA32_H | ||
11 | #define _ASM_X86_SYS_IA32_H | ||
12 | |||
13 | #include <linux/compiler.h> | ||
14 | #include <linux/linkage.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/signal.h> | ||
17 | #include <asm/compat.h> | ||
18 | #include <asm/ia32.h> | ||
19 | |||
20 | /* ia32/sys_ia32.c */ | ||
21 | asmlinkage long sys32_truncate64(char __user *, unsigned long, unsigned long); | ||
22 | asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); | ||
23 | |||
24 | asmlinkage long sys32_stat64(char __user *, struct stat64 __user *); | ||
25 | asmlinkage long sys32_lstat64(char __user *, struct stat64 __user *); | ||
26 | asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); | ||
27 | asmlinkage long sys32_fstatat(unsigned int, char __user *, | ||
28 | struct stat64 __user *, int); | ||
29 | struct mmap_arg_struct; | ||
30 | asmlinkage long sys32_mmap(struct mmap_arg_struct __user *); | ||
31 | asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long); | ||
32 | |||
33 | asmlinkage long sys32_pipe(int __user *); | ||
34 | struct sigaction32; | ||
35 | struct old_sigaction32; | ||
36 | asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *, | ||
37 | struct sigaction32 __user *, unsigned int); | ||
38 | asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, | ||
39 | struct old_sigaction32 __user *); | ||
40 | asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *, | ||
41 | compat_sigset_t __user *, unsigned int); | ||
42 | asmlinkage long sys32_alarm(unsigned int); | ||
43 | |||
44 | struct sel_arg_struct; | ||
45 | asmlinkage long sys32_old_select(struct sel_arg_struct __user *); | ||
46 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); | ||
47 | asmlinkage long sys32_sysfs(int, u32, u32); | ||
48 | |||
49 | asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, | ||
50 | struct compat_timespec __user *); | ||
51 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); | ||
52 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); | ||
53 | |||
54 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
55 | struct sysctl_ia32; | ||
56 | asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *); | ||
57 | #endif | ||
58 | |||
59 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); | ||
60 | asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); | ||
61 | |||
62 | asmlinkage long sys32_personality(unsigned long); | ||
63 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); | ||
64 | |||
65 | asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long, | ||
66 | unsigned long, unsigned long, unsigned long); | ||
67 | |||
68 | struct oldold_utsname; | ||
69 | struct old_utsname; | ||
70 | asmlinkage long sys32_olduname(struct oldold_utsname __user *); | ||
71 | long sys32_uname(struct old_utsname __user *); | ||
72 | |||
73 | long sys32_ustat(unsigned, struct ustat32 __user *); | ||
74 | |||
75 | asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, | ||
76 | compat_uptr_t __user *, struct pt_regs *); | ||
77 | asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); | ||
78 | |||
79 | long sys32_lseek(unsigned int, int, unsigned int); | ||
80 | long sys32_kill(int, int); | ||
81 | long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); | ||
82 | long sys32_vm86_warning(void); | ||
83 | long sys32_lookup_dcookie(u32, u32, char __user *, size_t); | ||
84 | |||
85 | asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); | ||
86 | asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, | ||
87 | unsigned, unsigned, int); | ||
88 | asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); | ||
89 | asmlinkage long sys32_fallocate(int, int, unsigned, | ||
90 | unsigned, unsigned, unsigned); | ||
91 | |||
92 | /* ia32/ia32_signal.c */ | ||
93 | asmlinkage long sys32_sigsuspend(int, int, old_sigset_t); | ||
94 | asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *, | ||
95 | stack_ia32_t __user *, struct pt_regs *); | ||
96 | asmlinkage long sys32_sigreturn(struct pt_regs *); | ||
97 | asmlinkage long sys32_rt_sigreturn(struct pt_regs *); | ||
98 | |||
99 | /* ia32/ipc32.c */ | ||
100 | asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32); | ||
101 | #endif /* _ASM_X86_SYS_IA32_H */ | ||
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index ff386ff50ed7..4e2f2e0aab27 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu) | |||
61 | * | 61 | * |
62 | * Side note: this function creates the returned cpumask on the stack | 62 | * Side note: this function creates the returned cpumask on the stack |
63 | * so with a high NR_CPUS count, excessive stack space is used. The | 63 | * so with a high NR_CPUS count, excessive stack space is used. The |
64 | * node_to_cpumask_ptr function should be used whenever possible. | 64 | * cpumask_of_node function should be used whenever possible. |
65 | */ | 65 | */ |
66 | static inline cpumask_t node_to_cpumask(int node) | 66 | static inline cpumask_t node_to_cpumask(int node) |
67 | { | 67 | { |
68 | return node_to_cpumask_map[node]; | 68 | return node_to_cpumask_map[node]; |
69 | } | 69 | } |
70 | 70 | ||
71 | /* Returns a bitmask of CPUs on Node 'node'. */ | ||
72 | static inline const struct cpumask *cpumask_of_node(int node) | ||
73 | { | ||
74 | return &node_to_cpumask_map[node]; | ||
75 | } | ||
76 | |||
71 | #else /* CONFIG_X86_64 */ | 77 | #else /* CONFIG_X86_64 */ |
72 | 78 | ||
73 | /* Mappings between node number and cpus on that node. */ | 79 | /* Mappings between node number and cpus on that node. */ |
@@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); | |||
82 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | 88 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
83 | extern int cpu_to_node(int cpu); | 89 | extern int cpu_to_node(int cpu); |
84 | extern int early_cpu_to_node(int cpu); | 90 | extern int early_cpu_to_node(int cpu); |
85 | extern const cpumask_t *_node_to_cpumask_ptr(int node); | 91 | extern const cpumask_t *cpumask_of_node(int node); |
86 | extern cpumask_t node_to_cpumask(int node); | 92 | extern cpumask_t node_to_cpumask(int node); |
87 | 93 | ||
88 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 94 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
@@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu) | |||
103 | } | 109 | } |
104 | 110 | ||
105 | /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ | 111 | /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ |
106 | static inline const cpumask_t *_node_to_cpumask_ptr(int node) | 112 | static inline const cpumask_t *cpumask_of_node(int node) |
107 | { | 113 | { |
108 | return &node_to_cpumask_map[node]; | 114 | return &node_to_cpumask_map[node]; |
109 | } | 115 | } |
@@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node) | |||
116 | 122 | ||
117 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 123 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
118 | 124 | ||
119 | /* Replace default node_to_cpumask_ptr with optimized version */ | 125 | /* |
126 | * Replace default node_to_cpumask_ptr with optimized version | ||
127 | * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" | ||
128 | */ | ||
120 | #define node_to_cpumask_ptr(v, node) \ | 129 | #define node_to_cpumask_ptr(v, node) \ |
121 | const cpumask_t *v = _node_to_cpumask_ptr(node) | 130 | const cpumask_t *v = cpumask_of_node(node) |
122 | 131 | ||
123 | #define node_to_cpumask_ptr_next(v, node) \ | 132 | #define node_to_cpumask_ptr_next(v, node) \ |
124 | v = _node_to_cpumask_ptr(node) | 133 | v = cpumask_of_node(node) |
125 | 134 | ||
126 | #endif /* CONFIG_X86_64 */ | 135 | #endif /* CONFIG_X86_64 */ |
127 | 136 | ||
@@ -187,7 +196,7 @@ extern int __node_distance(int, int); | |||
187 | #define cpu_to_node(cpu) 0 | 196 | #define cpu_to_node(cpu) 0 |
188 | #define early_cpu_to_node(cpu) 0 | 197 | #define early_cpu_to_node(cpu) 0 |
189 | 198 | ||
190 | static inline const cpumask_t *_node_to_cpumask_ptr(int node) | 199 | static inline const cpumask_t *cpumask_of_node(int node) |
191 | { | 200 | { |
192 | return &cpu_online_map; | 201 | return &cpu_online_map; |
193 | } | 202 | } |
@@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node) | |||
200 | return first_cpu(cpu_online_map); | 209 | return first_cpu(cpu_online_map); |
201 | } | 210 | } |
202 | 211 | ||
203 | /* Replace default node_to_cpumask_ptr with optimized version */ | 212 | /* |
213 | * Replace default node_to_cpumask_ptr with optimized version | ||
214 | * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" | ||
215 | */ | ||
204 | #define node_to_cpumask_ptr(v, node) \ | 216 | #define node_to_cpumask_ptr(v, node) \ |
205 | const cpumask_t *v = _node_to_cpumask_ptr(node) | 217 | const cpumask_t *v = cpumask_of_node(node) |
206 | 218 | ||
207 | #define node_to_cpumask_ptr_next(v, node) \ | 219 | #define node_to_cpumask_ptr_next(v, node) \ |
208 | v = _node_to_cpumask_ptr(node) | 220 | v = cpumask_of_node(node) |
209 | #endif | 221 | #endif |
210 | 222 | ||
211 | #include <asm-generic/topology.h> | 223 | #include <asm-generic/topology.h> |
@@ -214,18 +226,20 @@ static inline int node_to_first_cpu(int node) | |||
214 | /* Returns the number of the first CPU on Node 'node'. */ | 226 | /* Returns the number of the first CPU on Node 'node'. */ |
215 | static inline int node_to_first_cpu(int node) | 227 | static inline int node_to_first_cpu(int node) |
216 | { | 228 | { |
217 | node_to_cpumask_ptr(mask, node); | 229 | return cpumask_first(cpumask_of_node(node)); |
218 | return first_cpu(*mask); | ||
219 | } | 230 | } |
220 | #endif | 231 | #endif |
221 | 232 | ||
222 | extern cpumask_t cpu_coregroup_map(int cpu); | 233 | extern cpumask_t cpu_coregroup_map(int cpu); |
234 | extern const struct cpumask *cpu_coregroup_mask(int cpu); | ||
223 | 235 | ||
224 | #ifdef ENABLE_TOPO_DEFINES | 236 | #ifdef ENABLE_TOPO_DEFINES |
225 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) | 237 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) |
226 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) | 238 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) |
227 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) | 239 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) |
228 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) | 240 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) |
241 | #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) | ||
242 | #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) | ||
229 | 243 | ||
230 | /* indicates that pointers to the topology cpumask_t maps are valid */ | 244 | /* indicates that pointers to the topology cpumask_t maps are valid */ |
231 | #define arch_provides_topology_pointers yes | 245 | #define arch_provides_topology_pointers yes |
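The deprecated node_to_cpumask_ptr(v, node) macro declares a variable behind the caller's back; cpumask_of_node() is an ordinary expression usable anywhere. A small sketch of the replacement idiom (illustrative only; this is just cpumask_weight() spelled out as a loop):

static unsigned int cpus_on_node(int node)
{
	unsigned int n = 0;
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(node))	/* no mask copy */
		n++;
	return n;
}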
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index e2363253bbbf..50423c7b56b2 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -133,61 +133,61 @@ struct bau_msg_payload { | |||
133 | * see table 4.2.3.0.1 in broadcast_assist spec. | 133 | * see table 4.2.3.0.1 in broadcast_assist spec. |
134 | */ | 134 | */ |
135 | struct bau_msg_header { | 135 | struct bau_msg_header { |
136 | int dest_subnodeid:6; /* must be zero */ | 136 | unsigned int dest_subnodeid:6; /* must be zero */ |
137 | /* bits 5:0 */ | 137 | /* bits 5:0 */ |
138 | int base_dest_nodeid:15; /* nasid>>1 (pnode) of first bit in node_map */ | 138 | unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */ |
139 | /* bits 20:6 */ | 139 | /* bits 20:6 */ /* first bit in node_map */ |
140 | int command:8; /* message type */ | 140 | unsigned int command:8; /* message type */ |
141 | /* bits 28:21 */ | 141 | /* bits 28:21 */ |
142 | /* 0x38: SN3net EndPoint Message */ | 142 | /* 0x38: SN3net EndPoint Message */ |
143 | int rsvd_1:3; /* must be zero */ | 143 | unsigned int rsvd_1:3; /* must be zero */ |
144 | /* bits 31:29 */ | 144 | /* bits 31:29 */ |
145 | /* int will align on 32 bits */ | 145 | /* int will align on 32 bits */ |
146 | int rsvd_2:9; /* must be zero */ | 146 | unsigned int rsvd_2:9; /* must be zero */ |
147 | /* bits 40:32 */ | 147 | /* bits 40:32 */ |
148 | /* Suppl_A is 56-41 */ | 148 | /* Suppl_A is 56-41 */ |
149 | int payload_2a:8; /* becomes byte 16 of msg */ | 149 | unsigned int payload_2a:8;/* becomes byte 16 of msg */ |
150 | /* bits 48:41 */ /* not currently using */ | 150 | /* bits 48:41 */ /* not currently using */ |
151 | int payload_2b:8; /* becomes byte 17 of msg */ | 151 | unsigned int payload_2b:8;/* becomes byte 17 of msg */ |
152 | /* bits 56:49 */ /* not currently using */ | 152 | /* bits 56:49 */ /* not currently using */ |
153 | /* Address field (96:57) is never used as an | 153 | /* Address field (96:57) is never used as an |
154 | address (these are address bits 42:3) */ | 154 | address (these are address bits 42:3) */ |
155 | int rsvd_3:1; /* must be zero */ | 155 | unsigned int rsvd_3:1; /* must be zero */ |
156 | /* bit 57 */ | 156 | /* bit 57 */ |
157 | /* address bits 27:4 are payload */ | 157 | /* address bits 27:4 are payload */ |
158 | /* these 24 bits become bytes 12-14 of msg */ | 158 | /* these 24 bits become bytes 12-14 of msg */ |
159 | int replied_to:1; /* sent as 0 by the source to byte 12 */ | 159 | unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */ |
160 | /* bit 58 */ | 160 | /* bit 58 */ |
161 | 161 | ||
162 | int payload_1a:5; /* not currently used */ | 162 | unsigned int payload_1a:5;/* not currently used */ |
163 | /* bits 63:59 */ | 163 | /* bits 63:59 */ |
164 | int payload_1b:8; /* not currently used */ | 164 | unsigned int payload_1b:8;/* not currently used */ |
165 | /* bits 71:64 */ | 165 | /* bits 71:64 */ |
166 | int payload_1c:8; /* not currently used */ | 166 | unsigned int payload_1c:8;/* not currently used */ |
167 | /* bits 79:72 */ | 167 | /* bits 79:72 */ |
168 | int payload_1d:2; /* not currently used */ | 168 | unsigned int payload_1d:2;/* not currently used */ |
169 | /* bits 81:80 */ | 169 | /* bits 81:80 */ |
170 | 170 | ||
171 | int rsvd_4:7; /* must be zero */ | 171 | unsigned int rsvd_4:7; /* must be zero */ |
172 | /* bits 88:82 */ | 172 | /* bits 88:82 */ |
173 | int sw_ack_flag:1; /* software acknowledge flag */ | 173 | unsigned int sw_ack_flag:1;/* software acknowledge flag */ |
174 | /* bit 89 */ | 174 | /* bit 89 */ |
175 | /* INTD transactions at destination are to | 175 | /* INTD transactions at destination are to |
176 | wait for software acknowledge */ | 176 | wait for software acknowledge */ |
177 | int rsvd_5:6; /* must be zero */ | 177 | unsigned int rsvd_5:6; /* must be zero */ |
178 | /* bits 95:90 */ | 178 | /* bits 95:90 */ |
179 | int rsvd_6:5; /* must be zero */ | 179 | unsigned int rsvd_6:5; /* must be zero */ |
180 | /* bits 100:96 */ | 180 | /* bits 100:96 */ |
181 | int int_both:1; /* if 1, interrupt both sockets on the blade */ | 181 | unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */ |
182 | /* bit 101*/ | 182 | /* bit 101*/ |
183 | int fairness:3; /* usually zero */ | 183 | unsigned int fairness:3;/* usually zero */ |
184 | /* bits 104:102 */ | 184 | /* bits 104:102 */ |
185 | int multilevel:1; /* multi-level multicast format */ | 185 | unsigned int multilevel:1; /* multi-level multicast format */ |
186 | /* bit 105 */ | 186 | /* bit 105 */ |
187 | /* 0 for TLB: endpoint multi-unicast messages */ | 187 | /* 0 for TLB: endpoint multi-unicast messages */ |
188 | int chaining:1; /* next descriptor is part of this activation*/ | 188 | unsigned int chaining:1;/* next descriptor is part of this activation*/ |
189 | /* bit 106 */ | 189 | /* bit 106 */ |
190 | int rsvd_7:21; /* must be zero */ | 190 | unsigned int rsvd_7:21; /* must be zero */ |
191 | /* bits 127:107 */ | 191 | /* bits 127:107 */ |
192 | }; | 192 | }; |
193 | 193 | ||
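The int to unsigned int bitfield change above is not cosmetic: whether a plain int bitfield is signed is implementation-defined, and gcc makes it signed, so a 1-bit field can only hold 0 and -1. A standalone illustration of the pitfall (not kernel code):

struct bitfield_demo {
	int		sflag:1;	/* signed: stores 0 or -1 */
	unsigned int	uflag:1;	/* unsigned: stores 0 or 1 */
};

static int bitfield_demo_check(void)
{
	struct bitfield_demo d = { .sflag = 1, .uflag = 1 };

	if (d.sflag == 1)	/* never true with gcc: d.sflag reads back -1 */
		return -1;
	return d.uflag == 1;	/* true: the unsigned field keeps the value 1 */
}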
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h new file mode 100644 index 000000000000..593636275238 --- /dev/null +++ b/arch/x86/include/asm/virtext.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* CPU virtualization extensions handling | ||
2 | * | ||
3 | * This should carry the code for handling CPU virtualization extensions | ||
4 | * that needs to live in the kernel core. | ||
5 | * | ||
6 | * Author: Eduardo Habkost <ehabkost@redhat.com> | ||
7 | * | ||
8 | * Copyright (C) 2008, Red Hat Inc. | ||
9 | * | ||
10 | * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc. | ||
11 | * | ||
12 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
13 | * the COPYING file in the top-level directory. | ||
14 | */ | ||
15 | #ifndef _ASM_X86_VIRTEX_H | ||
16 | #define _ASM_X86_VIRTEX_H | ||
17 | |||
18 | #include <asm/processor.h> | ||
19 | #include <asm/system.h> | ||
20 | |||
21 | #include <asm/vmx.h> | ||
22 | #include <asm/svm.h> | ||
23 | |||
24 | /* | ||
25 | * VMX functions: | ||
26 | */ | ||
27 | |||
28 | static inline int cpu_has_vmx(void) | ||
29 | { | ||
30 | unsigned long ecx = cpuid_ecx(1); | ||
31 | return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ | ||
32 | } | ||
33 | |||
34 | |||
35 | /** Disable VMX on the current CPU | ||
36 | * | ||
37 | * vmxoff causes an undefined-opcode exception if vmxon was not run | ||
38 | * on the CPU previously. Only call this function if you know VMX | ||
39 | * is enabled. | ||
40 | */ | ||
41 | static inline void cpu_vmxoff(void) | ||
42 | { | ||
43 | asm volatile (ASM_VMX_VMXOFF : : : "cc"); | ||
44 | write_cr4(read_cr4() & ~X86_CR4_VMXE); | ||
45 | } | ||
46 | |||
47 | static inline int cpu_vmx_enabled(void) | ||
48 | { | ||
49 | return read_cr4() & X86_CR4_VMXE; | ||
50 | } | ||
51 | |||
52 | /** Disable VMX if it is enabled on the current CPU | ||
53 | * | ||
54 | * You shouldn't call this if cpu_has_vmx() returns 0. | ||
55 | */ | ||
56 | static inline void __cpu_emergency_vmxoff(void) | ||
57 | { | ||
58 | if (cpu_vmx_enabled()) | ||
59 | cpu_vmxoff(); | ||
60 | } | ||
61 | |||
62 | /** Disable VMX if it is supported and enabled on the current CPU | ||
63 | */ | ||
64 | static inline void cpu_emergency_vmxoff(void) | ||
65 | { | ||
66 | if (cpu_has_vmx()) | ||
67 | __cpu_emergency_vmxoff(); | ||
68 | } | ||
69 | |||
70 | |||
71 | |||
72 | |||
73 | /* | ||
74 | * SVM functions: | ||
75 | */ | ||
76 | |||
77 | /** Check if the CPU has SVM support | ||
78 | * | ||
79 | * You can use the 'msg' arg to get a message describing the problem, | ||
80 | * if the function returns zero. Simply pass NULL if you are not interested | ||
81 | * in the messages; gcc should take care of not generating code for | ||
82 | * the messages in this case. | ||
83 | */ | ||
84 | static inline int cpu_has_svm(const char **msg) | ||
85 | { | ||
86 | uint32_t eax, ebx, ecx, edx; | ||
87 | |||
88 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { | ||
89 | if (msg) | ||
90 | *msg = "not amd"; | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | cpuid(0x80000000, &eax, &ebx, &ecx, &edx); | ||
95 | if (eax < SVM_CPUID_FUNC) { | ||
96 | if (msg) | ||
97 | *msg = "can't execute cpuid_8000000a"; | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | cpuid(0x80000001, &eax, &ebx, &ecx, &edx); | ||
102 | if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) { | ||
103 | if (msg) | ||
104 | *msg = "svm not available"; | ||
105 | return 0; | ||
106 | } | ||
107 | return 1; | ||
108 | } | ||
109 | |||
110 | |||
111 | /** Disable SVM on the current CPU | ||
112 | * | ||
113 | * You should call this only if cpu_has_svm() returned true. | ||
114 | */ | ||
115 | static inline void cpu_svm_disable(void) | ||
116 | { | ||
117 | uint64_t efer; | ||
118 | |||
119 | wrmsrl(MSR_VM_HSAVE_PA, 0); | ||
120 | rdmsrl(MSR_EFER, efer); | ||
121 | wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); | ||
122 | } | ||
123 | |||
124 | /** Makes sure SVM is disabled if it is supported on the CPU | ||
125 | */ | ||
126 | static inline void cpu_emergency_svm_disable(void) | ||
127 | { | ||
128 | if (cpu_has_svm(NULL)) | ||
129 | cpu_svm_disable(); | ||
130 | } | ||
131 | |||
132 | #endif /* _ASM_X86_VIRTEX_H */ | ||
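The expected consumer of the cpu_emergency_*() helpers is the reboot/kdump path, which must leave VMX and SVM disabled before handing control to a new kernel. A sketch of the kind of per-CPU hook this header enables (the function name is hypothetical):

static void cpu_emergency_disable_virtualization(void)
{
	/* both helpers are no-ops when the feature is absent or already
	 * off, so calling them unconditionally on every CPU is safe */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();
}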
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h new file mode 100644 index 000000000000..d0238e6151d8 --- /dev/null +++ b/arch/x86/include/asm/vmx.h | |||
@@ -0,0 +1,382 @@ | |||
1 | #ifndef VMX_H | ||
2 | #define VMX_H | ||
3 | |||
4 | /* | ||
5 | * vmx.h: VMX Architecture related definitions | ||
6 | * Copyright (c) 2004, Intel Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | * A few random additions are: | ||
22 | * Copyright (C) 2006 Qumranet | ||
23 | * Avi Kivity <avi@qumranet.com> | ||
24 | * Yaniv Kamay <yaniv@qumranet.com> | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * Definitions of Primary Processor-Based VM-Execution Controls. | ||
30 | */ | ||
31 | #define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 | ||
32 | #define CPU_BASED_USE_TSC_OFFSETING 0x00000008 | ||
33 | #define CPU_BASED_HLT_EXITING 0x00000080 | ||
34 | #define CPU_BASED_INVLPG_EXITING 0x00000200 | ||
35 | #define CPU_BASED_MWAIT_EXITING 0x00000400 | ||
36 | #define CPU_BASED_RDPMC_EXITING 0x00000800 | ||
37 | #define CPU_BASED_RDTSC_EXITING 0x00001000 | ||
38 | #define CPU_BASED_CR3_LOAD_EXITING 0x00008000 | ||
39 | #define CPU_BASED_CR3_STORE_EXITING 0x00010000 | ||
40 | #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 | ||
41 | #define CPU_BASED_CR8_STORE_EXITING 0x00100000 | ||
42 | #define CPU_BASED_TPR_SHADOW 0x00200000 | ||
43 | #define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 | ||
44 | #define CPU_BASED_MOV_DR_EXITING 0x00800000 | ||
45 | #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 | ||
46 | #define CPU_BASED_USE_IO_BITMAPS 0x02000000 | ||
47 | #define CPU_BASED_USE_MSR_BITMAPS 0x10000000 | ||
48 | #define CPU_BASED_MONITOR_EXITING 0x20000000 | ||
49 | #define CPU_BASED_PAUSE_EXITING 0x40000000 | ||
50 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 | ||
51 | /* | ||
52 | * Definitions of Secondary Processor-Based VM-Execution Controls. | ||
53 | */ | ||
54 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 | ||
55 | #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 | ||
56 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 | ||
57 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | ||
58 | |||
59 | |||
60 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 | ||
61 | #define PIN_BASED_NMI_EXITING 0x00000008 | ||
62 | #define PIN_BASED_VIRTUAL_NMIS 0x00000020 | ||
63 | |||
64 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 | ||
65 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 | ||
66 | #define VM_EXIT_SAVE_IA32_PAT 0x00040000 | ||
67 | #define VM_EXIT_LOAD_IA32_PAT 0x00080000 | ||
68 | |||
69 | #define VM_ENTRY_IA32E_MODE 0x00000200 | ||
70 | #define VM_ENTRY_SMM 0x00000400 | ||
71 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 | ||
72 | #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 | ||
73 | |||
74 | /* VMCS Encodings */ | ||
75 | enum vmcs_field { | ||
76 | VIRTUAL_PROCESSOR_ID = 0x00000000, | ||
77 | GUEST_ES_SELECTOR = 0x00000800, | ||
78 | GUEST_CS_SELECTOR = 0x00000802, | ||
79 | GUEST_SS_SELECTOR = 0x00000804, | ||
80 | GUEST_DS_SELECTOR = 0x00000806, | ||
81 | GUEST_FS_SELECTOR = 0x00000808, | ||
82 | GUEST_GS_SELECTOR = 0x0000080a, | ||
83 | GUEST_LDTR_SELECTOR = 0x0000080c, | ||
84 | GUEST_TR_SELECTOR = 0x0000080e, | ||
85 | HOST_ES_SELECTOR = 0x00000c00, | ||
86 | HOST_CS_SELECTOR = 0x00000c02, | ||
87 | HOST_SS_SELECTOR = 0x00000c04, | ||
88 | HOST_DS_SELECTOR = 0x00000c06, | ||
89 | HOST_FS_SELECTOR = 0x00000c08, | ||
90 | HOST_GS_SELECTOR = 0x00000c0a, | ||
91 | HOST_TR_SELECTOR = 0x00000c0c, | ||
92 | IO_BITMAP_A = 0x00002000, | ||
93 | IO_BITMAP_A_HIGH = 0x00002001, | ||
94 | IO_BITMAP_B = 0x00002002, | ||
95 | IO_BITMAP_B_HIGH = 0x00002003, | ||
96 | MSR_BITMAP = 0x00002004, | ||
97 | MSR_BITMAP_HIGH = 0x00002005, | ||
98 | VM_EXIT_MSR_STORE_ADDR = 0x00002006, | ||
99 | VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, | ||
100 | VM_EXIT_MSR_LOAD_ADDR = 0x00002008, | ||
101 | VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, | ||
102 | VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, | ||
103 | VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, | ||
104 | TSC_OFFSET = 0x00002010, | ||
105 | TSC_OFFSET_HIGH = 0x00002011, | ||
106 | VIRTUAL_APIC_PAGE_ADDR = 0x00002012, | ||
107 | VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, | ||
108 | APIC_ACCESS_ADDR = 0x00002014, | ||
109 | APIC_ACCESS_ADDR_HIGH = 0x00002015, | ||
110 | EPT_POINTER = 0x0000201a, | ||
111 | EPT_POINTER_HIGH = 0x0000201b, | ||
112 | GUEST_PHYSICAL_ADDRESS = 0x00002400, | ||
113 | GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, | ||
114 | VMCS_LINK_POINTER = 0x00002800, | ||
115 | VMCS_LINK_POINTER_HIGH = 0x00002801, | ||
116 | GUEST_IA32_DEBUGCTL = 0x00002802, | ||
117 | GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, | ||
118 | GUEST_IA32_PAT = 0x00002804, | ||
119 | GUEST_IA32_PAT_HIGH = 0x00002805, | ||
120 | GUEST_PDPTR0 = 0x0000280a, | ||
121 | GUEST_PDPTR0_HIGH = 0x0000280b, | ||
122 | GUEST_PDPTR1 = 0x0000280c, | ||
123 | GUEST_PDPTR1_HIGH = 0x0000280d, | ||
124 | GUEST_PDPTR2 = 0x0000280e, | ||
125 | GUEST_PDPTR2_HIGH = 0x0000280f, | ||
126 | GUEST_PDPTR3 = 0x00002810, | ||
127 | GUEST_PDPTR3_HIGH = 0x00002811, | ||
128 | HOST_IA32_PAT = 0x00002c00, | ||
129 | HOST_IA32_PAT_HIGH = 0x00002c01, | ||
130 | PIN_BASED_VM_EXEC_CONTROL = 0x00004000, | ||
131 | CPU_BASED_VM_EXEC_CONTROL = 0x00004002, | ||
132 | EXCEPTION_BITMAP = 0x00004004, | ||
133 | PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, | ||
134 | PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, | ||
135 | CR3_TARGET_COUNT = 0x0000400a, | ||
136 | VM_EXIT_CONTROLS = 0x0000400c, | ||
137 | VM_EXIT_MSR_STORE_COUNT = 0x0000400e, | ||
138 | VM_EXIT_MSR_LOAD_COUNT = 0x00004010, | ||
139 | VM_ENTRY_CONTROLS = 0x00004012, | ||
140 | VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, | ||
141 | VM_ENTRY_INTR_INFO_FIELD = 0x00004016, | ||
142 | VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, | ||
143 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, | ||
144 | TPR_THRESHOLD = 0x0000401c, | ||
145 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, | ||
146 | VM_INSTRUCTION_ERROR = 0x00004400, | ||
147 | VM_EXIT_REASON = 0x00004402, | ||
148 | VM_EXIT_INTR_INFO = 0x00004404, | ||
149 | VM_EXIT_INTR_ERROR_CODE = 0x00004406, | ||
150 | IDT_VECTORING_INFO_FIELD = 0x00004408, | ||
151 | IDT_VECTORING_ERROR_CODE = 0x0000440a, | ||
152 | VM_EXIT_INSTRUCTION_LEN = 0x0000440c, | ||
153 | VMX_INSTRUCTION_INFO = 0x0000440e, | ||
154 | GUEST_ES_LIMIT = 0x00004800, | ||
155 | GUEST_CS_LIMIT = 0x00004802, | ||
156 | GUEST_SS_LIMIT = 0x00004804, | ||
157 | GUEST_DS_LIMIT = 0x00004806, | ||
158 | GUEST_FS_LIMIT = 0x00004808, | ||
159 | GUEST_GS_LIMIT = 0x0000480a, | ||
160 | GUEST_LDTR_LIMIT = 0x0000480c, | ||
161 | GUEST_TR_LIMIT = 0x0000480e, | ||
162 | GUEST_GDTR_LIMIT = 0x00004810, | ||
163 | GUEST_IDTR_LIMIT = 0x00004812, | ||
164 | GUEST_ES_AR_BYTES = 0x00004814, | ||
165 | GUEST_CS_AR_BYTES = 0x00004816, | ||
166 | GUEST_SS_AR_BYTES = 0x00004818, | ||
167 | GUEST_DS_AR_BYTES = 0x0000481a, | ||
168 | GUEST_FS_AR_BYTES = 0x0000481c, | ||
169 | GUEST_GS_AR_BYTES = 0x0000481e, | ||
170 | GUEST_LDTR_AR_BYTES = 0x00004820, | ||
171 | GUEST_TR_AR_BYTES = 0x00004822, | ||
172 | GUEST_INTERRUPTIBILITY_INFO = 0x00004824, | ||
173 | GUEST_ACTIVITY_STATE = 0x00004826, | ||
174 | GUEST_SYSENTER_CS = 0x0000482a, | ||
175 | HOST_IA32_SYSENTER_CS = 0x00004c00, | ||
176 | CR0_GUEST_HOST_MASK = 0x00006000, | ||
177 | CR4_GUEST_HOST_MASK = 0x00006002, | ||
178 | CR0_READ_SHADOW = 0x00006004, | ||
179 | CR4_READ_SHADOW = 0x00006006, | ||
180 | CR3_TARGET_VALUE0 = 0x00006008, | ||
181 | CR3_TARGET_VALUE1 = 0x0000600a, | ||
182 | CR3_TARGET_VALUE2 = 0x0000600c, | ||
183 | CR3_TARGET_VALUE3 = 0x0000600e, | ||
184 | EXIT_QUALIFICATION = 0x00006400, | ||
185 | GUEST_LINEAR_ADDRESS = 0x0000640a, | ||
186 | GUEST_CR0 = 0x00006800, | ||
187 | GUEST_CR3 = 0x00006802, | ||
188 | GUEST_CR4 = 0x00006804, | ||
189 | GUEST_ES_BASE = 0x00006806, | ||
190 | GUEST_CS_BASE = 0x00006808, | ||
191 | GUEST_SS_BASE = 0x0000680a, | ||
192 | GUEST_DS_BASE = 0x0000680c, | ||
193 | GUEST_FS_BASE = 0x0000680e, | ||
194 | GUEST_GS_BASE = 0x00006810, | ||
195 | GUEST_LDTR_BASE = 0x00006812, | ||
196 | GUEST_TR_BASE = 0x00006814, | ||
197 | GUEST_GDTR_BASE = 0x00006816, | ||
198 | GUEST_IDTR_BASE = 0x00006818, | ||
199 | GUEST_DR7 = 0x0000681a, | ||
200 | GUEST_RSP = 0x0000681c, | ||
201 | GUEST_RIP = 0x0000681e, | ||
202 | GUEST_RFLAGS = 0x00006820, | ||
203 | GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, | ||
204 | GUEST_SYSENTER_ESP = 0x00006824, | ||
205 | GUEST_SYSENTER_EIP = 0x00006826, | ||
206 | HOST_CR0 = 0x00006c00, | ||
207 | HOST_CR3 = 0x00006c02, | ||
208 | HOST_CR4 = 0x00006c04, | ||
209 | HOST_FS_BASE = 0x00006c06, | ||
210 | HOST_GS_BASE = 0x00006c08, | ||
211 | HOST_TR_BASE = 0x00006c0a, | ||
212 | HOST_GDTR_BASE = 0x00006c0c, | ||
213 | HOST_IDTR_BASE = 0x00006c0e, | ||
214 | HOST_IA32_SYSENTER_ESP = 0x00006c10, | ||
215 | HOST_IA32_SYSENTER_EIP = 0x00006c12, | ||
216 | HOST_RSP = 0x00006c14, | ||
217 | HOST_RIP = 0x00006c16, | ||
218 | }; | ||
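
The encodings above are raw VMCS component indices; software never dereferences them, it hands them to VMREAD/VMWRITE. A minimal sketch of the common "skip the exiting instruction" pattern, assuming vmcs_readl()/vmcs_writel()/vmcs_read32() wrappers in the style of arch/x86/kvm/vmx.c:

	/* Hedged sketch: advance the guest past the instruction that caused
	 * the exit. vmcs_read*()/vmcs_write*() are assumed VMREAD/VMWRITE
	 * wrappers on the current VMCS, as in arch/x86/kvm/vmx.c. */
	unsigned long rip = vmcs_readl(GUEST_RIP);

	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs_writel(GUEST_RIP, rip);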
219 | |||
220 | #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 | ||
221 | |||
222 | #define EXIT_REASON_EXCEPTION_NMI 0 | ||
223 | #define EXIT_REASON_EXTERNAL_INTERRUPT 1 | ||
224 | #define EXIT_REASON_TRIPLE_FAULT 2 | ||
225 | |||
226 | #define EXIT_REASON_PENDING_INTERRUPT 7 | ||
227 | #define EXIT_REASON_NMI_WINDOW 8 | ||
228 | #define EXIT_REASON_TASK_SWITCH 9 | ||
229 | #define EXIT_REASON_CPUID 10 | ||
230 | #define EXIT_REASON_HLT 12 | ||
231 | #define EXIT_REASON_INVLPG 14 | ||
232 | #define EXIT_REASON_RDPMC 15 | ||
233 | #define EXIT_REASON_RDTSC 16 | ||
234 | #define EXIT_REASON_VMCALL 18 | ||
235 | #define EXIT_REASON_VMCLEAR 19 | ||
236 | #define EXIT_REASON_VMLAUNCH 20 | ||
237 | #define EXIT_REASON_VMPTRLD 21 | ||
238 | #define EXIT_REASON_VMPTRST 22 | ||
239 | #define EXIT_REASON_VMREAD 23 | ||
240 | #define EXIT_REASON_VMRESUME 24 | ||
241 | #define EXIT_REASON_VMWRITE 25 | ||
242 | #define EXIT_REASON_VMOFF 26 | ||
243 | #define EXIT_REASON_VMON 27 | ||
244 | #define EXIT_REASON_CR_ACCESS 28 | ||
245 | #define EXIT_REASON_DR_ACCESS 29 | ||
246 | #define EXIT_REASON_IO_INSTRUCTION 30 | ||
247 | #define EXIT_REASON_MSR_READ 31 | ||
248 | #define EXIT_REASON_MSR_WRITE 32 | ||
249 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | ||
250 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | ||
251 | #define EXIT_REASON_APIC_ACCESS 44 | ||
252 | #define EXIT_REASON_EPT_VIOLATION 48 | ||
253 | #define EXIT_REASON_EPT_MISCONFIG 49 | ||
254 | #define EXIT_REASON_WBINVD 54 | ||
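
On VM exit, bit 31 of VM_EXIT_REASON flags a failed VM entry, so a dispatcher checks it before switching on the basic reason. A sketch; the handle_*() helpers are illustrative names, not part of this header:

	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);

	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)
		return handle_failed_vmentry(vcpu);	/* illustrative */

	switch (exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(vcpu);		/* illustrative */
	case EXIT_REASON_EPT_VIOLATION:
		return handle_ept_violation(vcpu);	/* illustrative */
	default:
		return handle_unknown_exit(vcpu, exit_reason);
	}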
255 | |||
256 | /* | ||
257 | * Interruption-information format | ||
258 | */ | ||
259 | #define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ | ||
260 | #define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ | ||
261 | #define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ | ||
262 | #define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */ | ||
263 | #define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ | ||
264 | #define INTR_INFO_RESVD_BITS_MASK 0x7ffff000 | ||
265 | |||
266 | #define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK | ||
267 | #define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK | ||
268 | #define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK | ||
269 | #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK | ||
270 | |||
271 | #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ | ||
272 | #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ | ||
273 | #define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ | ||
274 | #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ | ||
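
The same layout is shared by VM_EXIT_INTR_INFO, IDT_VECTORING_INFO_FIELD and VM_ENTRY_INTR_INFO_FIELD. A decoding sketch under that assumption (handle_nmi() is illustrative):

	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	u32 error_code = 0;

	if (intr_info & INTR_INFO_VALID_MASK) {
		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
		u32 type = intr_info & INTR_INFO_INTR_TYPE_MASK;

		/* an error code is present only when bit 11 says so */
		if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
			error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);

		if (type == INTR_TYPE_NMI_INTR && vector == 2)
			handle_nmi();			/* illustrative */
	}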
275 | |||
276 | /* GUEST_INTERRUPTIBILITY_INFO flags. */ | ||
277 | #define GUEST_INTR_STATE_STI 0x00000001 | ||
278 | #define GUEST_INTR_STATE_MOV_SS 0x00000002 | ||
279 | #define GUEST_INTR_STATE_SMI 0x00000004 | ||
280 | #define GUEST_INTR_STATE_NMI 0x00000008 | ||
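
These shadow bits gate event injection: an external interrupt must not be injected while STI or MOV-SS blocking is in effect. A one-line check, again assuming a vmcs_read32() wrapper:

	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	bool can_inject_irq = !(interruptibility &
				(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));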
281 | |||
282 | /* | ||
283 | * Exit Qualifications for MOV for Control Register Access | ||
284 | */ | ||
285 | #define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control reg. */ | ||
286 | #define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */ | ||
287 | #define CONTROL_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. */ | ||
288 | #define LMSW_SOURCE_DATA_SHIFT 16 | ||
289 | #define LMSW_SOURCE_DATA (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 31:16, lmsw source */ | ||
290 | #define REG_EAX (0 << 8) | ||
291 | #define REG_ECX (1 << 8) | ||
292 | #define REG_EDX (2 << 8) | ||
293 | #define REG_EBX (3 << 8) | ||
294 | #define REG_ESP (4 << 8) | ||
295 | #define REG_EBP (5 << 8) | ||
296 | #define REG_ESI (6 << 8) | ||
297 | #define REG_EDI (7 << 8) | ||
298 | #define REG_R8 (8 << 8) | ||
299 | #define REG_R9 (9 << 8) | ||
300 | #define REG_R10 (10 << 8) | ||
301 | #define REG_R11 (11 << 8) | ||
302 | #define REG_R12 (12 << 8) | ||
303 | #define REG_R13 (13 << 8) | ||
304 | #define REG_R14 (14 << 8) | ||
305 | #define REG_R15 (15 << 8) | ||
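
For EXIT_REASON_CR_ACCESS, EXIT_QUALIFICATION packs the control register number, the access type and the general purpose register involved; the REG_* values above are that GPR field pre-shifted into bits 11:8. A decoding sketch (mov_to_cr() is illustrative):

	unsigned long qual = vmcs_readl(EXIT_QUALIFICATION);
	int cr = qual & CONTROL_REG_ACCESS_NUM;
	int reg = (qual & CONTROL_REG_ACCESS_REG) >> 8;

	if ((qual & CONTROL_REG_ACCESS_TYPE) == 0)	/* type 0: MOV to CR */
		mov_to_cr(vcpu, cr, reg);		/* illustrative */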
306 | |||
307 | /* | ||
308 | * Exit Qualifications for MOV for Debug Register Access | ||
309 | */ | ||
310 | #define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug reg. */ | ||
311 | #define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ | ||
312 | #define TYPE_MOV_TO_DR (0 << 4) | ||
313 | #define TYPE_MOV_FROM_DR (1 << 4) | ||
314 | #define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. */ | ||
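
EXIT_REASON_DR_ACCESS uses the analogous layout, with bit 4 giving the direction. A sketch; get_gpr()/set_gpr()/read_dr()/write_dr() are illustrative helpers:

	unsigned long qual = vmcs_readl(EXIT_QUALIFICATION);
	int dr = qual & DEBUG_REG_ACCESS_NUM;
	int reg = (qual & DEBUG_REG_ACCESS_REG) >> 8;

	if ((qual & DEBUG_REG_ACCESS_TYPE) == TYPE_MOV_FROM_DR)
		set_gpr(vcpu, reg, read_dr(vcpu, dr));
	else
		write_dr(vcpu, dr, get_gpr(vcpu, reg));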
315 | |||
316 | |||
317 | /* segment AR */ | ||
318 | #define SEGMENT_AR_L_MASK (1 << 13) | ||
319 | |||
320 | #define AR_TYPE_ACCESSES_MASK 1 | ||
321 | #define AR_TYPE_READABLE_MASK (1 << 1) | ||
322 | #define AR_TYPE_WRITEABLE_MASK (1 << 2) | ||
323 | #define AR_TYPE_CODE_MASK (1 << 3) | ||
324 | #define AR_TYPE_MASK 0x0f | ||
325 | #define AR_TYPE_BUSY_64_TSS 11 | ||
326 | #define AR_TYPE_BUSY_32_TSS 11 | ||
327 | #define AR_TYPE_BUSY_16_TSS 3 | ||
328 | #define AR_TYPE_LDT 2 | ||
329 | |||
330 | #define AR_UNUSABLE_MASK (1 << 16) | ||
331 | #define AR_S_MASK (1 << 4) | ||
332 | #define AR_P_MASK (1 << 7) | ||
333 | #define AR_L_MASK (1 << 13) | ||
334 | #define AR_DB_MASK (1 << 14) | ||
335 | #define AR_G_MASK (1 << 15) | ||
336 | #define AR_DPL_SHIFT 5 | ||
337 | #define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3) | ||
338 | |||
339 | #define AR_RESERVD_MASK 0xfffe0f00 | ||
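
The AR ("access rights") bytes mirror the attribute bits of a segment descriptor plus the VMX-only unusable bit. A classification sketch for the guest code segment:

	u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

	if (!(ar & AR_UNUSABLE_MASK) && (ar & AR_P_MASK)) {
		int dpl = AR_DPL(ar);			/* 0..3 */
		bool code = ar & AR_TYPE_CODE_MASK;
		bool long_mode_cs = code && (ar & AR_L_MASK);
	}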
340 | |||
341 | #define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0) | ||
342 | #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1) | ||
343 | #define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2) | ||
344 | |||
345 | #define VMX_NR_VPIDS (1 << 16) | ||
346 | #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 | ||
347 | #define VMX_VPID_EXTENT_ALL_CONTEXT 2 | ||
348 | |||
349 | #define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0 | ||
350 | #define VMX_EPT_EXTENT_CONTEXT 1 | ||
351 | #define VMX_EPT_EXTENT_GLOBAL 2 | ||
352 | #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) | ||
353 | #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) | ||
354 | #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) | ||
355 | #define VMX_EPT_DEFAULT_GAW 3 | ||
356 | #define VMX_EPT_MAX_GAW 0x4 | ||
357 | #define VMX_EPT_MT_EPTE_SHIFT 3 | ||
358 | #define VMX_EPT_GAW_EPTP_SHIFT 3 | ||
359 | #define VMX_EPT_DEFAULT_MT 0x6ull | ||
360 | #define VMX_EPT_READABLE_MASK 0x1ull | ||
361 | #define VMX_EPT_WRITABLE_MASK 0x2ull | ||
362 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull | ||
363 | #define VMX_EPT_IGMT_BIT (1ull << 6) | ||
364 | |||
365 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul | ||
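
Put together, these constants form the value loaded into EPT_POINTER: memory type in bits 2:0, guest address width in bits 5:3, and the page-aligned host-physical root address above. A sketch modeled on KVM's construct_eptp():

	/* write-back (6) memory type, 4-level table (GAW = 3) */
	u64 eptp = VMX_EPT_DEFAULT_MT |
		   VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;

	eptp |= root_hpa & PAGE_MASK;	/* root_hpa: host-physical root */
	vmcs_write64(EPT_POINTER, eptp);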
366 | |||
367 | |||
368 | #define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" | ||
369 | #define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" | ||
370 | #define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" | ||
371 | #define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" | ||
372 | #define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" | ||
373 | #define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" | ||
374 | #define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" | ||
375 | #define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" | ||
376 | #define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" | ||
377 | #define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" | ||
378 | #define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" | ||
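
The opcodes are spelled out as raw bytes so the code assembles even with toolchains that predate the VMX mnemonics. A read accessor in the style of arch/x86/kvm/vmx.c shows how they are emitted:

	static unsigned long vmcs_readl(unsigned long field)
	{
		unsigned long value;

		/* VMREAD: field index in %rdx, result in %rax */
		asm volatile(ASM_VMX_VMREAD_RDX_RAX
			     : "=a"(value) : "d"(field) : "cc");
		return value;
	}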
379 | |||
380 | |||
381 | |||
382 | #endif | ||