Diffstat (limited to 'arch/x86/include/asm')
46 files changed, 599 insertions(+), 271 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 49331bedc158..70780689599a 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -75,23 +75,54 @@ static inline int alternatives_text_reserved(void *start, void *end)
 }
 #endif	/* CONFIG_SMP */
 
+#define OLDINSTR(oldinstr)	"661:\n\t" oldinstr "\n662:\n"
+
+#define b_replacement(number)	"663"#number
+#define e_replacement(number)	"664"#number
+
+#define alt_slen "662b-661b"
+#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
+
+#define ALTINSTR_ENTRY(feature, number)					      \
+	" .long 661b - .\n"				/* label           */ \
+	" .long " b_replacement(number)"f - .\n"	/* new instruction */ \
+	" .word " __stringify(feature) "\n"		/* feature bit     */ \
+	" .byte " alt_slen "\n"				/* source len      */ \
+	" .byte " alt_rlen(number) "\n"			/* replacement len */
+
+#define DISCARD_ENTRY(number)				/* rlen <= slen */    \
+	" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
+
+#define ALTINSTR_REPLACEMENT(newinstr, feature, number)	/* replacement */     \
+	b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
+
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
-									\
-	"661:\n\t" oldinstr "\n662:\n"					\
-	".section .altinstructions,\"a\"\n"				\
-	" .long 661b - .\n"			/* label           */	\
-	" .long 663f - .\n"			/* new instruction */	\
-	" .word " __stringify(feature) "\n"	/* feature bit     */	\
-	" .byte 662b-661b\n"			/* sourcelen       */	\
-	" .byte 664f-663f\n"			/* replacementlen  */	\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
-	" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */	\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
-	"663:\n\t" newinstr "\n664:\n"		/* replacement */	\
+	OLDINSTR(oldinstr)						\
+	".section .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(feature, 1)					\
+	".previous\n"							\
+	".section .discard,\"aw\",@progbits\n"				\
+	DISCARD_ENTRY(1)						\
+	".previous\n"							\
+	".section .altinstr_replacement, \"ax\"\n"			\
+	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
 	".previous"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+	OLDINSTR(oldinstr)						\
+	".section .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(feature1, 1)					\
+	ALTINSTR_ENTRY(feature2, 2)					\
+	".previous\n"							\
+	".section .discard,\"aw\",@progbits\n"				\
+	DISCARD_ENTRY(1)						\
+	DISCARD_ENTRY(2)						\
+	".previous\n"							\
+	".section .altinstr_replacement, \"ax\"\n"			\
+	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
+	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
+	".previous"
 
 /*
  * This must be included *after* the definition of ALTERNATIVE due to
@@ -140,6 +171,19 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
 
 /*
+ * Like alternative_call, but there are two features and respective functions.
+ * If CPU has feature2, function2 is used.
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2,   \
+			   output, input...)				      \
+	asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+		"call %P[new2]", feature2)				      \
+		: output : [old] "i" (oldfunc), [new1] "i" (newfunc1),	      \
+		[new2] "i" (newfunc2), ## input)
+
+/*
  * use this macro(s) if you need more than one output parameter
  * in alternative_io
  */
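For orientation, a consumer of the two-feature form is copy_user_generic() in uaccess_64.h (not part of this excerpt). Treat the following as a sketch rather than as part of this diff: at patch time one of three copy routines is selected, preferring ERMS over REP_GOOD:

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	alternative_call_2(copy_user_generic_unrolled,
			   copy_user_generic_string, X86_FEATURE_REP_GOOD,
			   copy_user_enhanced_fast_string, X86_FEATURE_ERMS,
			   ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				       "=d" (len)),
			   "1" (to), "2" (from), "3" (len)
			   : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}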
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 49ad773f4b9f..b3341e9cd8fd 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -26,10 +26,31 @@ struct amd_l3_cache {
 	u8	subcaches[4];
 };
 
+struct threshold_block {
+	unsigned int		block;
+	unsigned int		bank;
+	unsigned int		cpu;
+	u32			address;
+	u16			interrupt_enable;
+	bool			interrupt_capable;
+	u16			threshold_limit;
+	struct kobject		kobj;
+	struct list_head	miscj;
+};
+
+struct threshold_bank {
+	struct kobject		*kobj;
+	struct threshold_block	*blocks;
+
+	/* initialized to the number of CPUs on the node sharing this bank */
+	atomic_t		cpus;
+};
+
 struct amd_northbridge {
 	struct pci_dev		*misc;
 	struct pci_dev		*link;
 	struct amd_l3_cache	l3_cache;
+	struct threshold_bank	*bank4;
 };
 
 struct amd_northbridge_info {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index eaff4790ed96..f34261296ffb 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -306,7 +306,8 @@ struct apic {
 	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);
 
-	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
+					 const struct cpumask *mask);
 	void (*init_apic_ldr)(void);
 
 	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -331,9 +332,9 @@ struct apic {
 	unsigned long (*set_apic_id)(unsigned int id);
 	unsigned long apic_id_mask;
 
-	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-					       const struct cpumask *andmask);
+	int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+				      const struct cpumask *andmask,
+				      unsigned int *apicid);
 
 	/* ipi */
 	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
@@ -464,6 +465,8 @@ static inline u32 safe_apic_wait_icr_idle(void)
 	return apic->safe_wait_icr_idle();
 }
 
+extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));
+
 #else /* CONFIG_X86_LOCAL_APIC */
 
 static inline u32 apic_read(u32 reg) { return 0; }
@@ -473,6 +476,7 @@ static inline u64 apic_icr_read(void) { return 0; }
 static inline void apic_icr_write(u32 low, u32 high) { }
 static inline void apic_wait_icr_idle(void) { }
 static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
@@ -537,7 +541,12 @@ static inline const struct cpumask *default_target_cpus(void)
 #endif
 }
 
-DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+static inline const struct cpumask *online_target_cpus(void)
+{
+	return cpu_online_mask;
+}
+
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
 
 
 static inline unsigned int read_apic_id(void)
@@ -586,21 +595,50 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 
 #endif
 
-static inline unsigned int
-default_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
-}
-
-static inline unsigned int
-default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			       const struct cpumask *andmask)
-{
-	unsigned long mask1 = cpumask_bits(cpumask)[0];
-	unsigned long mask2 = cpumask_bits(andmask)[0];
-	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
-
-	return (unsigned int)(mask1 & mask2 & mask3);
-}
+static inline int
+flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			    const struct cpumask *andmask,
+			    unsigned int *apicid)
+{
+	unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
+				 cpumask_bits(andmask)[0] &
+				 cpumask_bits(cpu_online_mask)[0] &
+				 APIC_ALL_CPUS;
+
+	if (likely(cpu_mask)) {
+		*apicid = (unsigned int)cpu_mask;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+extern int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			       const struct cpumask *andmask,
+			       unsigned int *apicid);
+
+static inline void
+flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
+			      const struct cpumask *mask)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+}
+
+static inline void
+default_vector_allocation_domain(int cpu, struct cpumask *retmask,
+				 const struct cpumask *mask)
+{
+	cpumask_copy(retmask, cpumask_of(cpu));
+}
 
 static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
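A sketch of how a caller adapts to the error-returning cpu_mask_to_apicid_and() signature (loosely modeled on the io_apic destination-setup path; 'domain' stands in for the IRQ's vector allocation domain, and example_set_dest() is hypothetical):

static int example_set_dest(const struct cpumask *mask,
			    const struct cpumask *domain)
{
	unsigned int dest;
	int err;

	err = apic->cpu_mask_to_apicid_and(mask, domain, &dest);
	if (err)
		return err;	/* empty intersection with online CPUs */

	/* ... program 'dest' into the routing entry ... */
	return 0;
}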
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index a6983b277220..72f5009deb5a 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -264,6 +264,13 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
+ *
+ * Note: the operation is performed atomically with respect to
+ * the local CPU, but not other CPUs. Portable code should not
+ * rely on this behaviour.
+ * KVM relies on this behaviour on x86 for modifying memory that is also
+ * accessed from a hypervisor on the same CPU if running in a VM: don't change
+ * this without also updating arch/x86/kernel/kvm.c
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
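A minimal sketch of the KVM pattern that note refers to (simplified from arch/x86/kernel/kvm.c; the per-cpu name here is made up). The flag word is shared with the hypervisor but only ever accessed from this CPU, so the locally-atomic RMW suffices and no LOCK prefix is paid:

static DEFINE_PER_CPU(unsigned long, pv_eoi_word);

static bool try_pv_eoi(void)
{
	/* atomic w.r.t. this CPU only; the hypervisor reads from the same CPU */
	return __test_and_clear_bit(KVM_PV_EOI_BIT,
				    &__get_cpu_var(pv_eoi_word));
}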
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h
index eb45aa6b1f27..2ad874cb661c 100644
--- a/arch/x86/include/asm/bootparam.h
+++ b/arch/x86/include/asm/bootparam.h
@@ -66,6 +66,7 @@ struct setup_header {
 	__u64	setup_data;
 	__u64	pref_address;
 	__u32	init_size;
+	__u32	handover_offset;
 } __attribute__((packed));
 
 struct sys_desc_table {
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 340ee49961a6..6b7ee5ff6820 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -176,7 +176,7 @@
 #define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS		(7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM	(7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE	(7*32+ 8) /* AMD HW-PState */
 
 /* Virtualization flags: Linux defined, word 8 */
@@ -207,6 +207,8 @@
 #define X86_FEATURE_ERMS	(9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 #define X86_FEATURE_INVPCID	(9*32+10) /* Invalidate Processor Context ID */
 #define X86_FEATURE_RTM		(9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_RDSEED	(9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX		(9*32+19) /* The ADCX and ADOX instructions */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
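Illustrative only: testing the renamed and new bits with the existing cpu_has() helper from this header (report_features() is a made-up name):

static void report_features(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_DTHERM))	/* was X86_FEATURE_DTS */
		pr_info("digital thermal sensor present\n");
	if (cpu_has(c, X86_FEATURE_RDSEED))
		pr_info("RDSEED present\n");
}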
diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/arch/x86/include/asm/crypto/ablk_helper.h
new file mode 100644
index 000000000000..4f93df50c23e
--- /dev/null
+++ b/arch/x86/include/asm/crypto/ablk_helper.h
@@ -0,0 +1,31 @@
+/*
+ * Shared async block cipher helpers
+ */
+
+#ifndef _CRYPTO_ABLK_HELPER_H
+#define _CRYPTO_ABLK_HELPER_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <crypto/cryptd.h>
+
+struct async_helper_ctx {
+	struct cryptd_ablkcipher *cryptd_tfm;
+};
+
+extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int key_len);
+
+extern int __ablk_encrypt(struct ablkcipher_request *req);
+
+extern int ablk_encrypt(struct ablkcipher_request *req);
+
+extern int ablk_decrypt(struct ablkcipher_request *req);
+
+extern void ablk_exit(struct crypto_tfm *tfm);
+
+extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
+
+extern int ablk_init(struct crypto_tfm *tfm);
+
+#endif /* _CRYPTO_ABLK_HELPER_H */
diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/crypto/aes.h
index 80545a1cbe39..80545a1cbe39 100644
--- a/arch/x86/include/asm/aes.h
+++ b/arch/x86/include/asm/crypto/aes.h
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
new file mode 100644
index 000000000000..3e408bddc96f
--- /dev/null
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -0,0 +1,115 @@
+/*
+ * Shared glue code for 128bit block ciphers
+ */
+
+#ifndef _CRYPTO_GLUE_HELPER_H
+#define _CRYPTO_GLUE_HELPER_H
+
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <asm/i387.h>
+#include <crypto/b128ops.h>
+
+typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
+typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
+				       u128 *iv);
+
+#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
+#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
+#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
+
+struct common_glue_func_entry {
+	unsigned int num_blocks; /* number of blocks that @fn will process */
+	union {
+		common_glue_func_t ecb;
+		common_glue_cbc_func_t cbc;
+		common_glue_ctr_func_t ctr;
+	} fn_u;
+};
+
+struct common_glue_ctx {
+	unsigned int num_funcs;
+	int fpu_blocks_limit; /* -1 means fpu not needed at all */
+
+	/*
+	 * First funcs entry must have largest num_blocks and last funcs entry
+	 * must have num_blocks == 1!
+	 */
+	struct common_glue_func_entry funcs[];
+};
+
+static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
+				  struct blkcipher_desc *desc,
+				  bool fpu_enabled, unsigned int nbytes)
+{
+	if (likely(fpu_blocks_limit < 0))
+		return false;
+
+	if (fpu_enabled)
+		return true;
+
+	/*
+	 * Vector-registers are only used when chunk to be processed is large
+	 * enough, so do not enable FPU until it is necessary.
+	 */
+	if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
+		return false;
+
+	if (desc) {
+		/* prevent sleeping if FPU is in use */
+		desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	}
+
+	kernel_fpu_begin();
+	return true;
+}
+
+static inline void glue_fpu_end(bool fpu_enabled)
+{
+	if (fpu_enabled)
+		kernel_fpu_end();
+}
+
+static inline void u128_to_be128(be128 *dst, const u128 *src)
+{
+	dst->a = cpu_to_be64(src->a);
+	dst->b = cpu_to_be64(src->b);
+}
+
+static inline void be128_to_u128(u128 *dst, const be128 *src)
+{
+	dst->a = be64_to_cpu(src->a);
+	dst->b = be64_to_cpu(src->b);
+}
+
+static inline void u128_inc(u128 *i)
+{
+	i->b++;
+	if (!i->b)
+		i->a++;
+}
+
+extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+				 struct blkcipher_desc *desc,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes);
+
+extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
+				   struct blkcipher_desc *desc,
+				   struct scatterlist *dst,
+				   struct scatterlist *src,
+				   unsigned int nbytes);
+
+extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+				   struct blkcipher_desc *desc,
+				   struct scatterlist *dst,
+				   struct scatterlist *src,
+				   unsigned int nbytes);
+
+extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+				 struct blkcipher_desc *desc,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes);
+
+#endif /* _CRYPTO_GLUE_HELPER_H */
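A sketch of how a cipher hooks into this layer (modeled on the serpent/twofish glue modules elsewhere in this series; my_enc_8way() and my_enc_blk() are placeholders). Entries go from largest batch to the mandatory 1-block fallback, as the struct comment requires:

asmlinkage void my_enc_8way(void *ctx, u8 *dst, const u8 *src);	/* hypothetical */
void my_enc_blk(void *ctx, u8 *dst, const u8 *src);		/* hypothetical */

static const struct common_glue_ctx my_ecb_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = 8,		/* enable the FPU only for >= 8 blocks */

	.funcs = { {
		.num_blocks = 8,	/* largest num_blocks first */
		.fn_u = { .ecb = GLUE_FUNC_CAST(my_enc_8way) }
	}, {
		.num_blocks = 1,	/* single-block fallback last */
		.fn_u = { .ecb = GLUE_FUNC_CAST(my_enc_blk) }
	} }
};

/* the blkcipher ->encrypt() then just calls glue_ecb_crypt_128bit(&my_ecb_enc, ...) */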
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
new file mode 100644
index 000000000000..432deedd2945
--- /dev/null
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -0,0 +1,32 @@
+#ifndef ASM_X86_SERPENT_AVX_H
+#define ASM_X86_SERPENT_AVX_H
+
+#include <linux/crypto.h>
+#include <crypto/serpent.h>
+
+#define SERPENT_PARALLEL_BLOCKS 8
+
+asmlinkage void __serpent_enc_blk_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+					   const u8 *src, bool xor);
+asmlinkage void serpent_dec_blk_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+					 const u8 *src);
+
+static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+					const u8 *src)
+{
+	__serpent_enc_blk_8way_avx(ctx, dst, src, false);
+}
+
+static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
+					    const u8 *src)
+{
+	__serpent_enc_blk_8way_avx(ctx, dst, src, true);
+}
+
+static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+					const u8 *src)
+{
+	serpent_dec_blk_8way_avx(ctx, dst, src);
+}
+
+#endif
diff --git a/arch/x86/include/asm/serpent.h b/arch/x86/include/asm/crypto/serpent-sse2.h
index d3ef63fe0c81..e6e77dffbdab 100644
--- a/arch/x86/include/asm/serpent.h
+++ b/arch/x86/include/asm/crypto/serpent-sse2.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86_SERPENT_H
-#define ASM_X86_SERPENT_H
+#ifndef ASM_X86_SERPENT_SSE2_H
+#define ASM_X86_SERPENT_SSE2_H
 
 #include <linux/crypto.h>
 #include <crypto/serpent.h>
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
new file mode 100644
index 000000000000..9d2c514bd5f9
--- /dev/null
+++ b/arch/x86/include/asm/crypto/twofish.h
@@ -0,0 +1,46 @@
+#ifndef ASM_X86_TWOFISH_H
+#define ASM_X86_TWOFISH_H
+
+#include <linux/crypto.h>
+#include <crypto/twofish.h>
+#include <crypto/lrw.h>
+#include <crypto/b128ops.h>
+
+struct twofish_lrw_ctx {
+	struct lrw_table_ctx lrw_table;
+	struct twofish_ctx twofish_ctx;
+};
+
+struct twofish_xts_ctx {
+	struct twofish_ctx tweak_ctx;
+	struct twofish_ctx crypt_ctx;
+};
+
+/* regular block cipher functions from twofish_x86_64 module */
+asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
+				const u8 *src);
+asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
+				const u8 *src);
+
+/* 3-way parallel cipher functions */
+asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
+				       const u8 *src, bool xor);
+asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
+				     const u8 *src);
+
+/* helpers from twofish_x86_64-3way module */
+extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
+extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
+				u128 *iv);
+extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+				     u128 *iv);
+
+extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+			      unsigned int keylen);
+
+extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
+
+extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+			      unsigned int keylen);
+
+#endif /* ASM_X86_TWOFISH_H */
diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
index cc70c1c78ca4..75ce3f47d204 100644
--- a/arch/x86/include/asm/emergency-restart.h
+++ b/arch/x86/include/asm/emergency-restart.h
@@ -4,9 +4,7 @@
 enum reboot_type {
 	BOOT_TRIPLE = 't',
 	BOOT_KBD = 'k',
-#ifdef CONFIG_X86_32
 	BOOT_BIOS = 'b',
-#endif
 	BOOT_ACPI = 'a',
 	BOOT_EFI = 'e',
 	BOOT_CF9 = 'p',
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 0baa628e330c..40afa0005c69 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -15,15 +15,6 @@ BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
-
-.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
-	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
-.if NUM_INVALIDATE_TLB_VECTORS > \idx
-BUILD_INTERRUPT3(invalidate_interrupt\idx,
-		 (INVALIDATE_TLB_VECTOR_START)+\idx,
-		 smp_invalidate_interrupt)
-.endif
-.endr
 #endif
 
 BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
index dbe82a5c5eac..d3d74698dce9 100644
--- a/arch/x86/include/asm/floppy.h
+++ b/arch/x86/include/asm/floppy.h
@@ -99,7 +99,7 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
 		virtual_dma_residue += virtual_dma_count;
 		virtual_dma_count = 0;
 #ifdef TRACE_FLPY_INT
-		printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+		printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
 		       virtual_dma_count, virtual_dma_residue, calls, bytes,
 		       dma_wait);
 		calls = 0;
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 7a15153c675d..b518c7509933 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -49,6 +49,7 @@ extern const struct hypervisor_x86 *x86_hyper;
 extern const struct hypervisor_x86 x86_hyper_vmware;
 extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
 extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_kvm;
 
 static inline bool hypervisor_x2apic_available(void)
 {
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index dffc38ee6255..345c99cef152 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -5,7 +5,6 @@ extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
-extern int iommu_group_mf;
 
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4b4448761e88..1508e518c7e3 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -119,17 +119,6 @@
  */
 #define LOCAL_TIMER_VECTOR		0xef
 
-/* up to 32 vectors used for spreading out TLB flushes: */
-#if NR_CPUS <= 32
-# define NUM_INVALIDATE_TLB_VECTORS	(NR_CPUS)
-#else
-# define NUM_INVALIDATE_TLB_VECTORS	(32)
-#endif
-
-#define INVALIDATE_TLB_VECTOR_END	(0xee)
-#define INVALIDATE_TLB_VECTOR_START	\
-	(INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
-
 #define NR_VECTORS			256
 
 #define FPU_IRQ				13
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index e7d1c194d272..246617efd67f 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -12,6 +12,7 @@
 /* Select x86 specific features in <linux/kvm.h> */
 #define __KVM_HAVE_PIT
 #define __KVM_HAVE_IOAPIC
+#define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_DEVICE_ASSIGNMENT
 #define __KVM_HAVE_MSI
 #define __KVM_HAVE_USER_NMI
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 1ac46c22dd50..c764f43b71c5 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -192,8 +192,8 @@ struct x86_emulate_ops {
 			 struct x86_instruction_info *info,
 			 enum x86_intercept_stage stage);
 
-	bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
-			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+	void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -280,9 +280,9 @@ struct x86_emulate_ctxt {
 	u8 modrm_seg;
 	bool rip_relative;
 	unsigned long _eip;
+	struct operand memop;
 	/* Fields above regs are cleared together. */
 	unsigned long regs[NR_VCPU_REGS];
-	struct operand memop;
 	struct operand *memopp;
 	struct fetch_cache fetch;
 	struct read_cache io_read;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index db7c1f2709a2..09155d64cf7e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -48,12 +48,13 @@
 
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
+#define CR3_PCID_ENABLED_RESERVED_BITS 0xFFFFFF0000000000ULL
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
 				  0xFFFFFF0000000000ULL)
 #define CR4_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
-			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
+			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
 			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
 			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
@@ -175,6 +176,13 @@ enum {
 
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC	0
+/*
+ * The following bit is set with PV-EOI, unset on EOI.
+ * We detect PV-EOI changes by the guest by comparing
+ * this bit with PV-EOI in guest memory.
+ * See the implementation in apic_update_pv_eoi.
+ */
+#define KVM_APIC_PV_EOI_PENDING	1
 
 /*
  * We don't want allocation failures within the mmu code, so we preallocate
@@ -313,8 +321,8 @@ struct kvm_pmu {
 	u64 counter_bitmask[2];
 	u64 global_ctrl_mask;
 	u8 version;
-	struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
-	struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	u64 reprogram_pmi;
 };
@@ -484,6 +492,11 @@ struct kvm_vcpu_arch {
 		u64 length;
 		u64 status;
 	} osvw;
+
+	struct {
+		u64 msr_val;
+		struct gfn_to_hva_cache data;
+	} pv_eoi;
 };
 
 struct kvm_lpage_info {
@@ -661,6 +674,7 @@ struct kvm_x86_ops {
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
+	bool (*invpcid_supported)(void);
 	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -802,7 +816,20 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
-int kvm_pic_set_irq(void *opaque, int irq, int level);
+static inline int __kvm_irq_line_state(unsigned long *irq_state,
+				       int irq_source_id, int level)
+{
+	/* Logical OR for level trig interrupt */
+	if (level)
+		__set_bit(irq_source_id, irq_state);
+	else
+		__clear_bit(irq_source_id, irq_state);
+
+	return !!(*irq_state);
+}
+
+int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
+void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
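Sketch of the intended use of __kvm_irq_line_state() (simplified from the PIC/IOAPIC callers; irq_states[] is the per-pin accumulator of per-source bits, and example_set_line() is a made-up name):

static int example_set_line(unsigned long *irq_states, int irq,
			    int irq_source_id, int level)
{
	/* the wire level seen by the device model is the OR of all sources */
	return __kvm_irq_line_state(&irq_states[irq], irq_source_id, level);
}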
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 63ab1661d00e..2f7712e08b1e 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -22,6 +22,7 @@
 #define KVM_FEATURE_CLOCKSOURCE2        3
 #define KVM_FEATURE_ASYNC_PF		4
 #define KVM_FEATURE_STEAL_TIME		5
+#define KVM_FEATURE_PV_EOI		6
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -37,6 +38,7 @@
 #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
 #define MSR_KVM_ASYNC_PF_EN 0x4b564d02
 #define MSR_KVM_STEAL_TIME  0x4b564d03
+#define MSR_KVM_PV_EOI_EN      0x4b564d04
 
 struct kvm_steal_time {
 	__u64 steal;
@@ -89,6 +91,11 @@ struct kvm_vcpu_pv_apf_data {
 	__u32 enabled;
 };
 
+#define KVM_PV_EOI_BIT 0
+#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
+#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
+#define KVM_PV_EOI_DISABLED 0x0
+
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
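Guest-side enablement, sketched from arch/x86/kernel/kvm.c (simplified, error handling omitted): the guest publishes the physical address of a per-cpu flag word and sets the enable bit in the same MSR write.

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_pv_eoi_enable(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN,
		       __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_PV_EOI_ENABLED);
}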
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 084ef95274cd..813ed103f45e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 
 extern unsigned long long native_read_tsc(void);
 
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
 
 static __always_inline unsigned long long __native_read_tsc(void)
 {
@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	return err;
 }
 
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-	u32 gprs[8] = { 0 };
-	int err;
-
-	gprs[1] = msr;
-	gprs[7] = 0x9c5a203a;
-
-	err = native_rdmsr_safe_regs(gprs);
-
-	*p = gprs[0] | ((u64)gprs[2] << 32);
-
-	return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-	u32 gprs[8] = { 0 };
-
-	gprs[0] = (u32)val;
-	gprs[1] = msr;
-	gprs[2] = val >> 32;
-	gprs[7] = 0x9c5a203a;
-
-	return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
-	return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
-	return native_wrmsr_safe_regs(regs);
-}
-
 #define rdtscl(low)						\
 	((low) = (u32)__native_read_tsc())
 
@@ -237,6 +200,8 @@ do {							\
 	(high) = (u32)(_l >> 32);			\
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
 #define rdtscp(low, high, aux)					\
 do {								\
 	unsigned long long _val = native_read_tscp(&(aux));	\
@@ -248,8 +213,7 @@ do {							\
 
 #endif	/* !CONFIG_PARAVIRT */
 
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),		\
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),		\
 					     (u32)((val) >> 32))
 
 #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
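Illustrative only: callers of the old checking_wrmsrl() now spell it wrmsrl_safe(); the return convention is unchanged (0 on success, an error if the write faults):

static int write_msr_checked(unsigned int msr, u64 val)
{
	return wrmsrl_safe(msr, val);	/* was: checking_wrmsrl(msr, val) */
}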
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index dc580c42851c..c0fa356e90de 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -44,28 +44,14 @@ struct nmiaction {
 	const char		*name;
 };
 
-#define register_nmi_handler(t, fn, fg, n)		\
+#define register_nmi_handler(t, fn, fg, n, init...)	\
 ({							\
-	static struct nmiaction fn##_na = {		\
+	static struct nmiaction init fn##_na = {	\
 		.handler = (fn),			\
 		.name = (n),				\
 		.flags = (fg),				\
 	};						\
 	__register_nmi_handler((t), &fn##_na);		\
 })
-
-/*
- * For special handlers that register/unregister in the
- * init section only. This should be considered rare.
- */
-#define register_nmi_handler_initonly(t, fn, fg, n)	\
-({							\
-	static struct nmiaction fn##_na __initdata = {	\
-		.handler = (fn),			\
-		.name = (n),				\
-		.flags = (fg),				\
-	};						\
-	__register_nmi_handler((t), &fn##_na);		\
-})
 
 int __register_nmi_handler(unsigned int, struct nmiaction *);
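Illustrative only: the variadic 'init' parameter replaces register_nmi_handler_initonly(), so an init-time-only handler now passes __initdata explicitly (sketch modeled on the nmi_selftest conversion; names here are made up):

static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
{
	return NMI_DONE;	/* hypothetical handler body */
}

static void __init nmi_example(void)
{
	register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_example",
			     __initdata);
}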
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 6cbbabf52707..a0facf3908d7 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
 
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
-	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
-	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2)			\
 do {						\
@@ -176,9 +166,6 @@ do {						\
 	_err;					\
 })
 
-#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)
-
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-	u32 gprs[8] = { 0 };
-	int err;
-
-	gprs[1] = msr;
-	gprs[7] = 0x9c5a203a;
-
-	err = paravirt_rdmsr_regs(gprs);
-
-	*p = gprs[0] | ((u64)gprs[2] << 32);
-
-	return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-	u32 gprs[8] = { 0 };
-
-	gprs[0] = (u32)val;
-	gprs[1] = msr;
-	gprs[2] = val >> 32;
-	gprs[7] = 0x9c5a203a;
-
-	return paravirt_wrmsr_regs(gprs);
-}
 
 static inline u64 paravirt_read_tsc(void)
 {
@@ -252,6 +213,8 @@ do {						\
 	high = _l >> 32;			\
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
 	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
@@ -397,9 +360,10 @@ static inline void __flush_tlb_single(unsigned long addr)
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
 				    struct mm_struct *mm,
-				    unsigned long va)
+				    unsigned long start,
+				    unsigned long end)
 {
-	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
+	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4987ee..142236ed83af 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -153,9 +153,7 @@ struct pv_cpu_ops {
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
 	u64 (*read_msr)(unsigned int msr, int *err);
-	int (*rdmsr_regs)(u32 *regs);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-	int (*wrmsr_regs)(u32 *regs);
 
 	u64 (*read_tsc)(void);
 	u64 (*read_pmc)(int counter);
@@ -250,7 +248,8 @@ struct pv_mmu_ops {
 	void (*flush_tlb_single)(unsigned long addr);
 	void (*flush_tlb_others)(const struct cpumask *cpus,
 				 struct mm_struct *mm,
-				 unsigned long va);
+				 unsigned long start,
+				 unsigned long end);
 
 	/* Hooks for allocating and freeing a pagetable top-level */
 	int (*pgd_alloc)(struct mm_struct *mm);
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b3a531746026..73e8eeff22ee 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -7,9 +7,13 @@
 #undef DEBUG
 
 #ifdef DEBUG
-#define DBG(x...) printk(x)
+#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
 #else
-#define DBG(x...)
+#define DBG(fmt, ...)				\
+do {						\
+	if (0)					\
+		printk(fmt, ##__VA_ARGS__);	\
+} while (0)
 #endif
 
 #define PCI_PROBE_BIOS		0x0001
@@ -100,6 +104,7 @@ struct pci_raw_ops {
 extern const struct pci_raw_ops *raw_pci_ops;
 extern const struct pci_raw_ops *raw_pci_ext_ops;
 
+extern const struct pci_raw_ops pci_mmcfg;
 extern const struct pci_raw_ops pci_direct_conf1;
 extern bool port_cf9_safe;
 
@@ -135,6 +140,12 @@ struct pci_mmcfg_region {
 
 extern int __init pci_mmcfg_arch_init(void);
 extern void __init pci_mmcfg_arch_free(void);
+extern int __devinit pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg);
+extern void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg);
+extern int __devinit pci_mmconfig_insert(struct device *dev,
+					 u16 seg, u8 start,
+					 u8 end, phys_addr_t addr);
+extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end);
 extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
 
 extern struct list_head pci_mmcfg_list;
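On the DBG() rewrite at the top of this file (my reading, not part of the patch): the 'do { if (0) printk(...); } while (0)' form keeps the arguments compiled and the format string type-checked even when DEBUG is off, so debug-only variables no longer trigger unused warnings. For example:

static void example_probe(int bus, int devfn)
{
	/* compile-checked in every build; emitted only with DEBUG defined */
	DBG("PCI: probing %02x:%02x\n", bus, devfn);
}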
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index d9b8e3f7f42a..1104afaba52b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -551,6 +551,12 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
 	{ [0 ... NR_CPUS-1] = _initvalue };				\
 	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
 
+#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
+	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
+	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
+				{ [0 ... NR_CPUS-1] = _initvalue };	\
+	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
+
 #define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
 	EXPORT_PER_CPU_SYMBOL(_name)
 
@@ -559,6 +565,11 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
 	extern __typeof__(_type) *_name##_early_ptr;		\
 	extern __typeof__(_type) _name##_early_map[]
 
+#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
+	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
+	extern __typeof__(_type) *_name##_early_ptr;		\
+	extern __typeof__(_type) _name##_early_map[]
+
 #define	early_per_cpu_ptr(_name) (_name##_early_ptr)
 #define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
 #define	early_per_cpu(_name, _cpu)				\
@@ -570,12 +581,18 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
 #define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
 	DEFINE_PER_CPU(_type, _name) = _initvalue
 
+#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
+	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
+
 #define	EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
 	EXPORT_PER_CPU_SYMBOL(_name)
 
 #define	DECLARE_EARLY_PER_CPU(_type, _name)			\
 	DECLARE_PER_CPU(_type, _name)
 
+#define	DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
+	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
+
 #define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
 #define	early_per_cpu_ptr(_name) NULL
 /* no early_per_cpu_map() */
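Illustrative use of the new _READ_MOSTLY variants, mirroring the x86_bios_cpu_apicid conversion visible in the apic.h hunk above (the accessor function is made up; BAD_APICID is the existing sentinel):

/* in a header: */
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);

/* in one .c file: */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);

static u16 get_bios_apicid(int cpu)
{
	/* falls back to the __initdata early map before per-cpu init */
	return early_per_cpu(x86_bios_cpu_apicid, cpu);
}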
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 588f52ea810e..c78f14a0df00 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -5,11 +5,10 @@ | |||
5 | * Performance event hw details: | 5 | * Performance event hw details: |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define X86_PMC_MAX_GENERIC 32 | 8 | #define INTEL_PMC_MAX_GENERIC 32 |
9 | #define X86_PMC_MAX_FIXED 3 | 9 | #define INTEL_PMC_MAX_FIXED 3 |
10 | #define INTEL_PMC_IDX_FIXED 32 | ||
10 | 11 | ||
11 | #define X86_PMC_IDX_GENERIC 0 | ||
12 | #define X86_PMC_IDX_FIXED 32 | ||
13 | #define X86_PMC_IDX_MAX 64 | 12 | #define X86_PMC_IDX_MAX 64 |
14 | 13 | ||
15 | #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 | 14 | #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 |
@@ -48,8 +47,7 @@ | |||
48 | (X86_RAW_EVENT_MASK | \ | 47 | (X86_RAW_EVENT_MASK | \ |
49 | AMD64_EVENTSEL_EVENT) | 48 | AMD64_EVENTSEL_EVENT) |
50 | #define AMD64_NUM_COUNTERS 4 | 49 | #define AMD64_NUM_COUNTERS 4 |
51 | #define AMD64_NUM_COUNTERS_F15H 6 | 50 | #define AMD64_NUM_COUNTERS_CORE 6 |
52 | #define AMD64_NUM_COUNTERS_MAX AMD64_NUM_COUNTERS_F15H | ||
53 | 51 | ||
54 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | 52 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c |
55 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | 53 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) |
@@ -121,16 +119,16 @@ struct x86_pmu_capability { | |||
121 | 119 | ||
122 | /* Instr_Retired.Any: */ | 120 | /* Instr_Retired.Any: */ |
123 | #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 | 121 | #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 |
124 | #define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) | 122 | #define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0) |
125 | 123 | ||
126 | /* CPU_CLK_Unhalted.Core: */ | 124 | /* CPU_CLK_Unhalted.Core: */ |
127 | #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a | 125 | #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a |
128 | #define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) | 126 | #define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1) |
129 | 127 | ||
130 | /* CPU_CLK_Unhalted.Ref: */ | 128 | /* CPU_CLK_Unhalted.Ref: */ |
131 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b | 129 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b |
132 | #define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2) | 130 | #define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2) |
133 | #define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES) | 131 | #define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES) |
134 | 132 | ||
135 | /* | 133 | /* |
136 | * We model BTS tracing as another fixed-mode PMC. | 134 | * We model BTS tracing as another fixed-mode PMC. |
@@ -139,7 +137,7 @@ struct x86_pmu_capability { | |||
139 | * values are used by actual fixed events and higher values are used | 137 | * values are used by actual fixed events and higher values are used |
140 | * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. | 138 | * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. |
141 | */ | 139 | */ |
142 | #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) | 140 | #define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16) |
143 | 141 | ||
144 | /* | 142 | /* |
145 | * IBS cpuid feature detection | 143 | * IBS cpuid feature detection |
@@ -234,6 +232,7 @@ struct perf_guest_switch_msr { | |||
234 | 232 | ||
235 | extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); | 233 | extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); |
236 | extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); | 234 | extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); |
235 | extern void perf_check_microcode(void); | ||
237 | #else | 236 | #else |
238 | static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr) | 237 | static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr) |
239 | { | 238 | { |
@@ -247,6 +246,7 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) | |||
247 | } | 246 | } |
248 | 247 | ||
249 | static inline void perf_events_lapic_init(void) { } | 248 | static inline void perf_events_lapic_init(void) { } |
249 | static inline void perf_check_microcode(void) { } | ||
250 | #endif | 250 | #endif |
251 | 251 | ||
252 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) | 252 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) |
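
The INTEL_PMC_* rename keeps the counter layout unchanged: generic counters occupy global-control bits 0..31 and fixed-function counters start at bit INTEL_PMC_IDX_FIXED (32). A hedged sketch of deriving an enable mask from these constants, assuming <asm/perf_event.h>; the helper name is hypothetical and counter counts are bounded by INTEL_PMC_MAX_GENERIC/INTEL_PMC_MAX_FIXED:

/* Sketch: fixed counter i maps to bit (INTEL_PMC_IDX_FIXED + i) of
 * MSR_CORE_PERF_GLOBAL_CTRL, so a combined enable mask is built by
 * shifting the fixed-counter mask up past the generic counters.
 */
static u64 intel_global_ctrl_mask(int num_generic, int num_fixed)
{
        u64 mask = (1ULL << num_generic) - 1;

        mask |= ((1ULL << num_fixed) - 1) << INTEL_PMC_IDX_FIXED;
        return mask;
}
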
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 98391db840c6..f2b489cf1602 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h | |||
@@ -2,9 +2,9 @@ | |||
2 | #define _ASM_X86_PGTABLE_2LEVEL_H | 2 | #define _ASM_X86_PGTABLE_2LEVEL_H |
3 | 3 | ||
4 | #define pte_ERROR(e) \ | 4 | #define pte_ERROR(e) \ |
5 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) | 5 | pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low) |
6 | #define pgd_ERROR(e) \ | 6 | #define pgd_ERROR(e) \ |
7 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | 7 | pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e)) |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Certain architectures need to do special things when PTEs | 10 | * Certain architectures need to do special things when PTEs |
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 43876f16caf1..4cc9f2b7cdc3 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -9,13 +9,13 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pte_ERROR(e) \ | 11 | #define pte_ERROR(e) \ |
12 | printk("%s:%d: bad pte %p(%08lx%08lx).\n", \ | 12 | pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \ |
13 | __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) | 13 | __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) |
14 | #define pmd_ERROR(e) \ | 14 | #define pmd_ERROR(e) \ |
15 | printk("%s:%d: bad pmd %p(%016Lx).\n", \ | 15 | pr_err("%s:%d: bad pmd %p(%016Lx)\n", \ |
16 | __FILE__, __LINE__, &(e), pmd_val(e)) | 16 | __FILE__, __LINE__, &(e), pmd_val(e)) |
17 | #define pgd_ERROR(e) \ | 17 | #define pgd_ERROR(e) \ |
18 | printk("%s:%d: bad pgd %p(%016Lx).\n", \ | 18 | pr_err("%s:%d: bad pgd %p(%016Lx)\n", \ |
19 | __FILE__, __LINE__, &(e), pgd_val(e)) | 19 | __FILE__, __LINE__, &(e), pgd_val(e)) |
20 | 20 | ||
21 | /* Rules for using set_pte: the pte being assigned *must* be | 21 | /* Rules for using set_pte: the pte being assigned *must* be |
@@ -47,16 +47,26 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) | |||
47 | * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd | 47 | * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd |
48 | * operations. | 48 | * operations. |
49 | * | 49 | * |
50 | * Without THP if the mmap_sem is held for reading, the | 50 | * Without THP if the mmap_sem is held for reading, the pmd can only |
51 | * pmd can only transition from null to not null while pmd_read_atomic runs. | 51 | * transition from null to not null while pmd_read_atomic runs. So |
52 | * So there's no need of literally reading it atomically. | 52 | * we can always return atomic pmd values with this function. |
53 | * | 53 | * |
54 | * With THP if the mmap_sem is held for reading, the pmd can become | 54 | * With THP if the mmap_sem is held for reading, the pmd can become |
55 | * THP or null or point to a pte (and in turn become "stable") at any | 55 | * trans_huge or none or point to a pte (and in turn become "stable") |
56 | * time under pmd_read_atomic, so it's mandatory to read it atomically | 56 | * at any time under pmd_read_atomic. We could read it really |
57 | * with cmpxchg8b. | 57 | * atomically here with an atomic64_read for the THP-enabled case (and |
58 | * it would be a whole lot simpler), but to avoid using cmpxchg8b we | ||
59 | * only return an atomic pmdval if the low part of the pmdval is later | ||
60 | * found stable (i.e. pointing to a pte). And we're returning a none | ||
61 | * pmdval if the low part of the pmd is none. In some cases the high | ||
62 | * and low part of the pmdval returned may not be consistent if THP is | ||
63 | * enabled (the low part may point to previously mapped hugepage, | ||
64 | * while the high part may point to a more recently mapped hugepage), | ||
65 | * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part | ||
66 | * of the pmd to be read atomically to decide if the pmd is unstable | ||
67 | * or not, with the only exception of when the low part of the pmd is | ||
68 | * zero in which case we return a none pmd. | ||
58 | */ | 69 | */ |
59 | #ifndef CONFIG_TRANSPARENT_HUGEPAGE | ||
60 | static inline pmd_t pmd_read_atomic(pmd_t *pmdp) | 70 | static inline pmd_t pmd_read_atomic(pmd_t *pmdp) |
61 | { | 71 | { |
62 | pmdval_t ret; | 72 | pmdval_t ret; |
@@ -74,12 +84,6 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp) | |||
74 | 84 | ||
75 | return (pmd_t) { ret }; | 85 | return (pmd_t) { ret }; |
76 | } | 86 | } |
77 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
78 | static inline pmd_t pmd_read_atomic(pmd_t *pmdp) | ||
79 | { | ||
80 | return (pmd_t) { atomic64_read((atomic64_t *)pmdp) }; | ||
81 | } | ||
82 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
83 | 87 | ||
84 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | 88 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
85 | { | 89 | { |
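
The rewritten comment argues that only the low half of the pmdval needs to be read atomically. The consumer pattern it alludes to looks roughly like the sketch below, modeled on pmd_none_or_trans_huge_or_clear_bad() in asm-generic; the function name here is hypothetical:

/* Sketch: a lockless reader only trusts the low half. pmd_none()
 * and pmd_trans_huge() are decided by low-half bits (and
 * pmd_trans_huge() is constant 0 without THP), so a torn high
 * half cannot change the "unstable or none" verdict.
 */
static int pmd_is_unstable_or_none(pmd_t *pmdp)
{
        pmd_t pmdval = pmd_read_atomic(pmdp);

        barrier();      /* keep the compiler from re-reading *pmdp */
        return pmd_none(pmdval) || pmd_trans_huge(pmdval);
}
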
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 975f709e09ae..8251be02301e 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -26,16 +26,16 @@ extern pgd_t init_level4_pgt[]; | |||
26 | extern void paging_init(void); | 26 | extern void paging_init(void); |
27 | 27 | ||
28 | #define pte_ERROR(e) \ | 28 | #define pte_ERROR(e) \ |
29 | printk("%s:%d: bad pte %p(%016lx).\n", \ | 29 | pr_err("%s:%d: bad pte %p(%016lx)\n", \ |
30 | __FILE__, __LINE__, &(e), pte_val(e)) | 30 | __FILE__, __LINE__, &(e), pte_val(e)) |
31 | #define pmd_ERROR(e) \ | 31 | #define pmd_ERROR(e) \ |
32 | printk("%s:%d: bad pmd %p(%016lx).\n", \ | 32 | pr_err("%s:%d: bad pmd %p(%016lx)\n", \ |
33 | __FILE__, __LINE__, &(e), pmd_val(e)) | 33 | __FILE__, __LINE__, &(e), pmd_val(e)) |
34 | #define pud_ERROR(e) \ | 34 | #define pud_ERROR(e) \ |
35 | printk("%s:%d: bad pud %p(%016lx).\n", \ | 35 | pr_err("%s:%d: bad pud %p(%016lx)\n", \ |
36 | __FILE__, __LINE__, &(e), pud_val(e)) | 36 | __FILE__, __LINE__, &(e), pud_val(e)) |
37 | #define pgd_ERROR(e) \ | 37 | #define pgd_ERROR(e) \ |
38 | printk("%s:%d: bad pgd %p(%016lx).\n", \ | 38 | pr_err("%s:%d: bad pgd %p(%016lx)\n", \ |
39 | __FILE__, __LINE__, &(e), pgd_val(e)) | 39 | __FILE__, __LINE__, &(e), pgd_val(e)) |
40 | 40 | ||
41 | struct mm_struct; | 41 | struct mm_struct; |
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index f8ab3eaad128..aea1d1d848c7 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h | |||
@@ -44,6 +44,7 @@ | |||
44 | */ | 44 | */ |
45 | #define X86_CR3_PWT 0x00000008 /* Page Write Through */ | 45 | #define X86_CR3_PWT 0x00000008 /* Page Write Through */ |
46 | #define X86_CR3_PCD 0x00000010 /* Page Cache Disable */ | 46 | #define X86_CR3_PCD 0x00000010 /* Page Cache Disable */ |
47 | #define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */ | ||
47 | 48 | ||
48 | /* | 49 | /* |
49 | * Intel CPU features in CR4 | 50 | * Intel CPU features in CR4 |
@@ -61,6 +62,7 @@ | |||
61 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ | 62 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ |
62 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ | 63 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ |
63 | #define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */ | 64 | #define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */ |
65 | #define X86_CR4_PCIDE 0x00020000 /* enable PCID support */ | ||
64 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ | 66 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ |
65 | #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ | 67 | #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ |
66 | 68 | ||
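
With CR4.PCIDE set, the low 12 bits of CR3 carry the process-context identifier rather than flag space, which is what the new X86_CR3_PCID_MASK describes. A minimal sketch, assuming <asm/processor-flags.h> and the usual read_cr3()/read_cr4() helpers; the function name is hypothetical:

/* Sketch: report the active PCID, or 0 when PCIDs are disabled. */
static unsigned long current_pcid(void)
{
        if (!(read_cr4() & X86_CR4_PCIDE))
                return 0;
        return read_cr3() & X86_CR3_PCID_MASK;
}
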
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 39bc5777211a..d048cad9bcad 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -61,6 +61,19 @@ static inline void *current_text_addr(void) | |||
61 | # define ARCH_MIN_MMSTRUCT_ALIGN 0 | 61 | # define ARCH_MIN_MMSTRUCT_ALIGN 0 |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | enum tlb_infos { | ||
65 | ENTRIES, | ||
66 | NR_INFO | ||
67 | }; | ||
68 | |||
69 | extern u16 __read_mostly tlb_lli_4k[NR_INFO]; | ||
70 | extern u16 __read_mostly tlb_lli_2m[NR_INFO]; | ||
71 | extern u16 __read_mostly tlb_lli_4m[NR_INFO]; | ||
72 | extern u16 __read_mostly tlb_lld_4k[NR_INFO]; | ||
73 | extern u16 __read_mostly tlb_lld_2m[NR_INFO]; | ||
74 | extern u16 __read_mostly tlb_lld_4m[NR_INFO]; | ||
75 | extern s8 __read_mostly tlb_flushall_shift; | ||
76 | |||
64 | /* | 77 | /* |
65 | * CPU type and hardware bug flags. Kept separately for each CPU. | 78 | * CPU type and hardware bug flags. Kept separately for each CPU. |
66 | * Members of this structure are referenced in head.S, so think twice | 79 | * Members of this structure are referenced in head.S, so think twice |
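
The new tlb_ll{i,d}_* arrays export the last-level I/D-TLB entry counts per page size, and tlb_flushall_shift tunes when a ranged flush should degrade to a full flush. A sketch of the flavor of that heuristic; the real policy lives in arch/x86/mm/tlb.c and the helper below is hypothetical:

/* Sketch: a range is flushed page-by-page only while its page count
 * stays under the DTLB capacity scaled down by tlb_flushall_shift;
 * a negative shift disables ranged flushing altogether.
 */
static bool range_wants_full_flush(unsigned long start, unsigned long end)
{
        unsigned long pages = (end - start) >> PAGE_SHIFT;

        if (tlb_flushall_shift < 0)
                return true;
        return pages > (tlb_lld_4k[ENTRIES] >> tlb_flushall_shift);
}
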
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index fce3f4ae5bd6..fe1ec5bcd846 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h | |||
@@ -21,8 +21,9 @@ struct real_mode_header { | |||
21 | u32 wakeup_header; | 21 | u32 wakeup_header; |
22 | #endif | 22 | #endif |
23 | /* APM/BIOS reboot */ | 23 | /* APM/BIOS reboot */ |
24 | #ifdef CONFIG_X86_32 | ||
25 | u32 machine_real_restart_asm; | 24 | u32 machine_real_restart_asm; |
25 | #ifdef CONFIG_X86_64 | ||
26 | u32 machine_real_restart_seg; | ||
26 | #endif | 27 | #endif |
27 | }; | 28 | }; |
28 | 29 | ||
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 92f297069e87..a82c4f1b4d83 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h | |||
@@ -18,8 +18,8 @@ extern struct machine_ops machine_ops; | |||
18 | 18 | ||
19 | void native_machine_crash_shutdown(struct pt_regs *regs); | 19 | void native_machine_crash_shutdown(struct pt_regs *regs); |
20 | void native_machine_shutdown(void); | 20 | void native_machine_shutdown(void); |
21 | void machine_real_restart(unsigned int type); | 21 | void __noreturn machine_real_restart(unsigned int type); |
22 | /* These must match dispatch_table in reboot_32.S */ | 22 | /* These must match dispatch in arch/x86/realmode/rm/reboot.S */ |
23 | #define MRR_BIOS 0 | 23 | #define MRR_BIOS 0 |
24 | #define MRR_APM 1 | 24 | #define MRR_APM 1 |
25 | 25 | ||
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index f48394513c37..4f19a1526037 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -31,12 +31,12 @@ static inline bool cpu_has_ht_siblings(void) | |||
31 | return has_siblings; | 31 | return has_siblings; |
32 | } | 32 | } |
33 | 33 | ||
34 | DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); | 34 | DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); |
35 | DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); | 35 | DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); |
36 | /* cpus sharing the last level cache: */ | 36 | /* cpus sharing the last level cache: */ |
37 | DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); | 37 | DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); |
38 | DECLARE_PER_CPU(u16, cpu_llc_id); | 38 | DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); |
39 | DECLARE_PER_CPU(int, cpu_number); | 39 | DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); |
40 | 40 | ||
41 | static inline struct cpumask *cpu_sibling_mask(int cpu) | 41 | static inline struct cpumask *cpu_sibling_mask(int cpu) |
42 | { | 42 | { |
@@ -53,10 +53,10 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu) | |||
53 | return per_cpu(cpu_llc_shared_map, cpu); | 53 | return per_cpu(cpu_llc_shared_map, cpu); |
54 | } | 54 | } |
55 | 55 | ||
56 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); | 56 | DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid); |
57 | DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); | 57 | DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid); |
58 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) | 58 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) |
59 | DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid); | 59 | DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid); |
60 | #endif | 60 | #endif |
61 | 61 | ||
62 | /* Static state in head.S used to set up a CPU */ | 62 | /* Static state in head.S used to set up a CPU */ |
@@ -169,11 +169,6 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle); | |||
169 | void smp_store_cpu_info(int id); | 169 | void smp_store_cpu_info(int id); |
170 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | 170 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) |
171 | 171 | ||
172 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ | ||
173 | static inline int num_booting_cpus(void) | ||
174 | { | ||
175 | return cpumask_weight(cpu_callout_mask); | ||
176 | } | ||
177 | #else /* !CONFIG_SMP */ | 172 | #else /* !CONFIG_SMP */ |
178 | #define wbinvd_on_cpu(cpu) wbinvd() | 173 | #define wbinvd_on_cpu(cpu) wbinvd() |
179 | static inline int wbinvd_on_all_cpus(void) | 174 | static inline int wbinvd_on_all_cpus(void) |
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 829215fef9ee..4fef20773b8f 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h | |||
@@ -4,7 +4,14 @@ | |||
4 | #define tlb_start_vma(tlb, vma) do { } while (0) | 4 | #define tlb_start_vma(tlb, vma) do { } while (0) |
5 | #define tlb_end_vma(tlb, vma) do { } while (0) | 5 | #define tlb_end_vma(tlb, vma) do { } while (0) |
6 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) | 6 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) |
7 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | 7 | |
8 | #define tlb_flush(tlb) \ | ||
9 | { \ | ||
10 | if (tlb->fullmm == 0) \ | ||
11 | flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ | ||
12 | else \ | ||
13 | flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ | ||
14 | } | ||
8 | 15 | ||
9 | #include <asm-generic/tlb.h> | 16 | #include <asm-generic/tlb.h> |
10 | 17 | ||
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 36a1a2ab87d2..74a44333545a 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
@@ -73,14 +73,10 @@ static inline void __flush_tlb_one(unsigned long addr) | |||
73 | * - flush_tlb_page(vma, vmaddr) flushes one page | 73 | * - flush_tlb_page(vma, vmaddr) flushes one page |
74 | * - flush_tlb_range(vma, start, end) flushes a range of pages | 74 | * - flush_tlb_range(vma, start, end) flushes a range of pages |
75 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | 75 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages |
76 | * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus | 76 | * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus |
77 | * | 77 | * |
78 | * ..but the i386 has somewhat limited tlb flushing capabilities, | 78 | * ..but the i386 has somewhat limited tlb flushing capabilities, |
79 | * and page-granular flushes are available only on i486 and up. | 79 | * and page-granular flushes are available only on i486 and up. |
80 | * | ||
81 | * x86-64 can only flush individual pages or full VMs. For a range flush | ||
82 | * we always do the full VM. Might be worth trying if for a small | ||
83 | * range a few INVLPGs in a row are a win. | ||
84 | */ | 80 | */ |
85 | 81 | ||
86 | #ifndef CONFIG_SMP | 82 | #ifndef CONFIG_SMP |
@@ -109,9 +105,17 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, | |||
109 | __flush_tlb(); | 105 | __flush_tlb(); |
110 | } | 106 | } |
111 | 107 | ||
108 | static inline void flush_tlb_mm_range(struct mm_struct *mm, | ||
109 | unsigned long start, unsigned long end, unsigned long vmflag) | ||
110 | { | ||
111 | if (mm == current->active_mm) | ||
112 | __flush_tlb(); | ||
113 | } | ||
114 | |||
112 | static inline void native_flush_tlb_others(const struct cpumask *cpumask, | 115 | static inline void native_flush_tlb_others(const struct cpumask *cpumask, |
113 | struct mm_struct *mm, | 116 | struct mm_struct *mm, |
114 | unsigned long va) | 117 | unsigned long start, |
118 | unsigned long end) | ||
115 | { | 119 | { |
116 | } | 120 | } |
117 | 121 | ||
@@ -119,27 +123,35 @@ static inline void reset_lazy_tlbstate(void) | |||
119 | { | 123 | { |
120 | } | 124 | } |
121 | 125 | ||
126 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
127 | unsigned long end) | ||
128 | { | ||
129 | flush_tlb_all(); | ||
130 | } | ||
131 | |||
122 | #else /* SMP */ | 132 | #else /* SMP */ |
123 | 133 | ||
124 | #include <asm/smp.h> | 134 | #include <asm/smp.h> |
125 | 135 | ||
126 | #define local_flush_tlb() __flush_tlb() | 136 | #define local_flush_tlb() __flush_tlb() |
127 | 137 | ||
138 | #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL) | ||
139 | |||
140 | #define flush_tlb_range(vma, start, end) \ | ||
141 | flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags) | ||
142 | |||
128 | extern void flush_tlb_all(void); | 143 | extern void flush_tlb_all(void); |
129 | extern void flush_tlb_current_task(void); | 144 | extern void flush_tlb_current_task(void); |
130 | extern void flush_tlb_mm(struct mm_struct *); | ||
131 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | 145 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); |
146 | extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | ||
147 | unsigned long end, unsigned long vmflag); | ||
148 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | ||
132 | 149 | ||
133 | #define flush_tlb() flush_tlb_current_task() | 150 | #define flush_tlb() flush_tlb_current_task() |
134 | 151 | ||
135 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
136 | unsigned long start, unsigned long end) | ||
137 | { | ||
138 | flush_tlb_mm(vma->vm_mm); | ||
139 | } | ||
140 | |||
141 | void native_flush_tlb_others(const struct cpumask *cpumask, | 152 | void native_flush_tlb_others(const struct cpumask *cpumask, |
142 | struct mm_struct *mm, unsigned long va); | 153 | struct mm_struct *mm, |
154 | unsigned long start, unsigned long end); | ||
143 | 155 | ||
144 | #define TLBSTATE_OK 1 | 156 | #define TLBSTATE_OK 1 |
145 | #define TLBSTATE_LAZY 2 | 157 | #define TLBSTATE_LAZY 2 |
@@ -159,13 +171,8 @@ static inline void reset_lazy_tlbstate(void) | |||
159 | #endif /* SMP */ | 171 | #endif /* SMP */ |
160 | 172 | ||
161 | #ifndef CONFIG_PARAVIRT | 173 | #ifndef CONFIG_PARAVIRT |
162 | #define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va) | 174 | #define flush_tlb_others(mask, mm, start, end) \ |
175 | native_flush_tlb_others(mask, mm, start, end) | ||
163 | #endif | 176 | #endif |
164 | 177 | ||
165 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
166 | unsigned long end) | ||
167 | { | ||
168 | flush_tlb_all(); | ||
169 | } | ||
170 | |||
171 | #endif /* _ASM_X86_TLBFLUSH_H */ | 178 | #endif /* _ASM_X86_TLBFLUSH_H */ |
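
The net effect of this hunk is that every flush primitive now funnels into flush_tlb_mm_range() with an explicit [start, end) window, where TLB_FLUSH_ALL recovers the old whole-address-space behaviour. A usage sketch, assuming <asm/tlbflush.h> as modified above; the wrapper name is hypothetical:

/* Sketch: the two extremes of the new interface. */
static void flush_examples(struct vm_area_struct *vma)
{
        /* bounded flush: may be implemented as per-page INVLPGs */
        flush_tlb_mm_range(vma->vm_mm, vma->vm_start, vma->vm_end,
                           vma->vm_flags);

        /* full flush: equivalent to the old flush_tlb_mm() */
        flush_tlb_mm_range(vma->vm_mm, 0UL, TLB_FLUSH_ALL, 0UL);
}
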
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 8e796fbbf9c6..d8def8b3dba0 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -17,6 +17,8 @@ | |||
17 | 17 | ||
18 | /* Handles exceptions in both to and from, but doesn't do access_ok */ | 18 | /* Handles exceptions in both to and from, but doesn't do access_ok */ |
19 | __must_check unsigned long | 19 | __must_check unsigned long |
20 | copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); | ||
21 | __must_check unsigned long | ||
20 | copy_user_generic_string(void *to, const void *from, unsigned len); | 22 | copy_user_generic_string(void *to, const void *from, unsigned len); |
21 | __must_check unsigned long | 23 | __must_check unsigned long |
22 | copy_user_generic_unrolled(void *to, const void *from, unsigned len); | 24 | copy_user_generic_unrolled(void *to, const void *from, unsigned len); |
@@ -26,9 +28,16 @@ copy_user_generic(void *to, const void *from, unsigned len) | |||
26 | { | 28 | { |
27 | unsigned ret; | 29 | unsigned ret; |
28 | 30 | ||
29 | alternative_call(copy_user_generic_unrolled, | 31 | /* |
32 | * If CPU has ERMS feature, use copy_user_enhanced_fast_string. | ||
33 | * Otherwise, if CPU has rep_good feature, use copy_user_generic_string. | ||
34 | * Otherwise, use copy_user_generic_unrolled. | ||
35 | */ | ||
36 | alternative_call_2(copy_user_generic_unrolled, | ||
30 | copy_user_generic_string, | 37 | copy_user_generic_string, |
31 | X86_FEATURE_REP_GOOD, | 38 | X86_FEATURE_REP_GOOD, |
39 | copy_user_enhanced_fast_string, | ||
40 | X86_FEATURE_ERMS, | ||
32 | ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), | 41 | ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), |
33 | "=d" (len)), | 42 | "=d" (len)), |
34 | "1" (to), "2" (from), "3" (len) | 43 | "1" (to), "2" (from), "3" (len) |
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 1e9bed14f7ae..f3971bbcd1de 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h | |||
@@ -48,7 +48,7 @@ struct arch_uprobe_task { | |||
48 | #endif | 48 | #endif |
49 | }; | 49 | }; |
50 | 50 | ||
51 | extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm); | 51 | extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); |
52 | extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); | 52 | extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); |
53 | extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); | 53 | extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); |
54 | extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); | 54 | extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); |
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index 3bb9491b7659..b47c2a82ff15 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h | |||
@@ -15,7 +15,8 @@ extern void uv_nmi_init(void); | |||
15 | extern void uv_system_init(void); | 15 | extern void uv_system_init(void); |
16 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | 16 | extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, |
17 | struct mm_struct *mm, | 17 | struct mm_struct *mm, |
18 | unsigned long va, | 18 | unsigned long start, |
19 | unsigned long end, | ||
19 | unsigned int cpu); | 20 | unsigned int cpu); |
20 | 21 | ||
21 | #else /* X86_UV */ | 22 | #else /* X86_UV */ |
@@ -26,7 +27,7 @@ static inline void uv_cpu_init(void) { } | |||
26 | static inline void uv_system_init(void) { } | 27 | static inline void uv_system_init(void) { } |
27 | static inline const struct cpumask * | 28 | static inline const struct cpumask * |
28 | uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, | 29 | uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, |
29 | unsigned long va, unsigned int cpu) | 30 | unsigned long start, unsigned long end, unsigned int cpu) |
30 | { return cpumask; } | 31 | { return cpumask; } |
31 | 32 | ||
32 | #endif /* X86_UV */ | 33 | #endif /* X86_UV */ |
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 6149b476d9df..a06983cdc125 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -140,6 +140,9 @@ | |||
140 | #define IPI_RESET_LIMIT 1 | 140 | #define IPI_RESET_LIMIT 1 |
141 | /* after this # consecutive successes, bump up the throttle if it was lowered */ | 141 | /* after this # consecutive successes, bump up the throttle if it was lowered */ |
142 | #define COMPLETE_THRESHOLD 5 | 142 | #define COMPLETE_THRESHOLD 5 |
143 | /* after this # of giveups (fall back to kernel IPIs) disable the use of | ||
144 | the BAU for a period of time */ | ||
145 | #define GIVEUP_LIMIT 100 | ||
143 | 146 | ||
144 | #define UV_LB_SUBNODEID 0x10 | 147 | #define UV_LB_SUBNODEID 0x10 |
145 | 148 | ||
@@ -166,7 +169,6 @@ | |||
166 | #define FLUSH_RETRY_TIMEOUT 2 | 169 | #define FLUSH_RETRY_TIMEOUT 2 |
167 | #define FLUSH_GIVEUP 3 | 170 | #define FLUSH_GIVEUP 3 |
168 | #define FLUSH_COMPLETE 4 | 171 | #define FLUSH_COMPLETE 4 |
169 | #define FLUSH_RETRY_BUSYBUG 5 | ||
170 | 172 | ||
171 | /* | 173 | /* |
172 | * tuning the action when the numalink network is extremely delayed | 174 | * tuning the action when the numalink network is extremely delayed |
@@ -175,7 +177,7 @@ | |||
175 | microseconds */ | 177 | microseconds */ |
176 | #define CONGESTED_REPS 10 /* long delays averaged over | 178 | #define CONGESTED_REPS 10 /* long delays averaged over |
177 | this many broadcasts */ | 179 | this many broadcasts */ |
178 | #define CONGESTED_PERIOD 30 /* time for the bau to be | 180 | #define DISABLED_PERIOD 10 /* time for the bau to be |
179 | disabled, in seconds */ | 181 | disabled, in seconds */ |
180 | /* see msg_type: */ | 182 | /* see msg_type: */ |
181 | #define MSG_NOOP 0 | 183 | #define MSG_NOOP 0 |
@@ -520,6 +522,12 @@ struct ptc_stats { | |||
520 | unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */ | 522 | unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */ |
521 | unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */ | 523 | unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */ |
522 | unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */ | 524 | unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */ |
525 | unsigned long s_overipilimit; /* over the ipi reset limit */ | ||
526 | unsigned long s_giveuplimit; /* disables, over giveup limit */ ||
527 | unsigned long s_enters; /* entries to the driver */ | ||
528 | unsigned long s_ipifordisabled; /* fall back to IPI; disabled */ | ||
529 | unsigned long s_plugged; /* plugged by h/w bug */ ||
530 | unsigned long s_congested; /* giveup on long wait */ | ||
523 | /* destination statistics */ | 531 | /* destination statistics */ |
524 | unsigned long d_alltlb; /* times all tlb's on this | 532 | unsigned long d_alltlb; /* times all tlb's on this |
525 | cpu were flushed */ | 533 | cpu were flushed */ |
@@ -586,8 +594,8 @@ struct bau_control { | |||
586 | int timeout_tries; | 594 | int timeout_tries; |
587 | int ipi_attempts; | 595 | int ipi_attempts; |
588 | int conseccompletes; | 596 | int conseccompletes; |
589 | int baudisabled; | 597 | short nobau; |
590 | int set_bau_off; | 598 | short baudisabled; |
591 | short cpu; | 599 | short cpu; |
592 | short osnode; | 600 | short osnode; |
593 | short uvhub_cpu; | 601 | short uvhub_cpu; |
@@ -596,14 +604,16 @@ struct bau_control { | |||
596 | short cpus_in_socket; | 604 | short cpus_in_socket; |
597 | short cpus_in_uvhub; | 605 | short cpus_in_uvhub; |
598 | short partition_base_pnode; | 606 | short partition_base_pnode; |
599 | short using_desc; /* an index, like uvhub_cpu */ | 607 | short busy; /* all were busy (war) */ |
600 | unsigned int inuse_map; | ||
601 | unsigned short message_number; | 608 | unsigned short message_number; |
602 | unsigned short uvhub_quiesce; | 609 | unsigned short uvhub_quiesce; |
603 | short socket_acknowledge_count[DEST_Q_SIZE]; | 610 | short socket_acknowledge_count[DEST_Q_SIZE]; |
604 | cycles_t send_message; | 611 | cycles_t send_message; |
612 | cycles_t period_end; | ||
613 | cycles_t period_time; | ||
605 | spinlock_t uvhub_lock; | 614 | spinlock_t uvhub_lock; |
606 | spinlock_t queue_lock; | 615 | spinlock_t queue_lock; |
616 | spinlock_t disable_lock; | ||
607 | /* tunables */ | 617 | /* tunables */ |
608 | int max_concurr; | 618 | int max_concurr; |
609 | int max_concurr_const; | 619 | int max_concurr_const; |
@@ -614,9 +624,9 @@ struct bau_control { | |||
614 | int complete_threshold; | 624 | int complete_threshold; |
615 | int cong_response_us; | 625 | int cong_response_us; |
616 | int cong_reps; | 626 | int cong_reps; |
617 | int cong_period; | 627 | cycles_t disabled_period; |
618 | unsigned long clocks_per_100_usec; | 628 | int period_giveups; |
619 | cycles_t period_time; | 629 | int giveup_limit; |
620 | long period_requests; | 630 | long period_requests; |
621 | struct hub_and_pnode *thp; | 631 | struct hub_and_pnode *thp; |
622 | }; | 632 | }; |
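
The new fields (period_giveups, giveup_limit, disabled_period, disable_lock, and the s_* counters) implement time-boxed throttling: too many broadcast giveups within one period disable the BAU, falling back to IPIs until the period expires. A sketch of the bookkeeping with locking omitted; the real logic is in arch/x86/platform/uv/tlb_uv.c and the helper name is hypothetical:

/* Sketch: count giveups and disable the BAU for disabled_period
 * cycles once giveup_limit is exceeded. bcp->disable_lock guards
 * this in the real driver.
 */
static void note_giveup(struct bau_control *bcp)
{
        bcp->period_giveups++;
        if (bcp->period_giveups > bcp->giveup_limit) {
                bcp->baudisabled = 1;
                bcp->period_end = get_cycles() + bcp->disabled_period;
        }
}
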
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 31f180c21ce9..74fcb963595b 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -60,6 +60,7 @@ | |||
60 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | 60 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 |
61 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 | 61 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 |
62 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 | 62 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 |
63 | #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 | ||
63 | 64 | ||
64 | 65 | ||
65 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 | 66 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 |
@@ -281,6 +282,7 @@ enum vmcs_field { | |||
281 | #define EXIT_REASON_EPT_MISCONFIG 49 | 282 | #define EXIT_REASON_EPT_MISCONFIG 49 |
282 | #define EXIT_REASON_WBINVD 54 | 283 | #define EXIT_REASON_WBINVD 54 |
283 | #define EXIT_REASON_XSETBV 55 | 284 | #define EXIT_REASON_XSETBV 55 |
285 | #define EXIT_REASON_INVPCID 58 | ||
284 | 286 | ||
285 | /* | 287 | /* |
286 | * Interruption-information format | 288 | * Interruption-information format |
@@ -404,6 +406,7 @@ enum vmcs_field { | |||
404 | #define VMX_EPTP_WB_BIT (1ull << 14) | 406 | #define VMX_EPTP_WB_BIT (1ull << 14) |
405 | #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) | 407 | #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) |
406 | #define VMX_EPT_1GB_PAGE_BIT (1ull << 17) | 408 | #define VMX_EPT_1GB_PAGE_BIT (1ull << 17) |
409 | #define VMX_EPT_AD_BIT (1ull << 21) | ||
407 | #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) | 410 | #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) |
408 | #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) | 411 | #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) |
409 | #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) | 412 | #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) |
@@ -415,11 +418,14 @@ enum vmcs_field { | |||
415 | #define VMX_EPT_MAX_GAW 0x4 | 418 | #define VMX_EPT_MAX_GAW 0x4 |
416 | #define VMX_EPT_MT_EPTE_SHIFT 3 | 419 | #define VMX_EPT_MT_EPTE_SHIFT 3 |
417 | #define VMX_EPT_GAW_EPTP_SHIFT 3 | 420 | #define VMX_EPT_GAW_EPTP_SHIFT 3 |
421 | #define VMX_EPT_AD_ENABLE_BIT (1ull << 6) | ||
418 | #define VMX_EPT_DEFAULT_MT 0x6ull | 422 | #define VMX_EPT_DEFAULT_MT 0x6ull |
419 | #define VMX_EPT_READABLE_MASK 0x1ull | 423 | #define VMX_EPT_READABLE_MASK 0x1ull |
420 | #define VMX_EPT_WRITABLE_MASK 0x2ull | 424 | #define VMX_EPT_WRITABLE_MASK 0x2ull |
421 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull | 425 | #define VMX_EPT_EXECUTABLE_MASK 0x4ull |
422 | #define VMX_EPT_IPAT_BIT (1ull << 6) | 426 | #define VMX_EPT_IPAT_BIT (1ull << 6) |
427 | #define VMX_EPT_ACCESS_BIT (1ull << 8) | ||
428 | #define VMX_EPT_DIRTY_BIT (1ull << 9) | ||
423 | 429 | ||
424 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul | 430 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul |
425 | 431 | ||
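
VMX_EPT_AD_BIT advertises EPT accessed/dirty support in IA32_VMX_EPT_VPID_CAP, while VMX_EPT_AD_ENABLE_BIT switches the feature on in a given EPT pointer. A sketch of assembling an EPTP, modeled on KVM's construct_eptp(); the cap parameter and function name are assumptions, and VMX_EPT_DEFAULT_GAW is defined elsewhere in this header:

/* Sketch: build an EPT pointer, enabling A/D tracking when the
 * capability MSR says the hardware supports it.
 */
static u64 build_eptp(u64 root_hpa, u64 ept_vpid_cap)
{
        u64 eptp = VMX_EPT_DEFAULT_MT |
                   (VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT);

        if (ept_vpid_cap & VMX_EPT_AD_BIT)
                eptp |= VMX_EPT_AD_ENABLE_BIT;

        return eptp | (root_hpa & PAGE_MASK);
}
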
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h index 92e54abf89e0..f90f0a587c66 100644 --- a/arch/x86/include/asm/x2apic.h +++ b/arch/x86/include/asm/x2apic.h | |||
@@ -9,15 +9,6 @@ | |||
9 | #include <asm/ipi.h> | 9 | #include <asm/ipi.h> |
10 | #include <linux/cpumask.h> | 10 | #include <linux/cpumask.h> |
11 | 11 | ||
12 | /* | ||
13 | * Need to use more than cpu 0, because we need more vectors | ||
14 | * when MSI-X are used. | ||
15 | */ | ||
16 | static const struct cpumask *x2apic_target_cpus(void) | ||
17 | { | ||
18 | return cpu_online_mask; | ||
19 | } | ||
20 | |||
21 | static int x2apic_apic_id_valid(int apicid) | 12 | static int x2apic_apic_id_valid(int apicid) |
22 | { | 13 | { |
23 | return 1; | 14 | return 1; |
@@ -28,15 +19,6 @@ static int x2apic_apic_id_registered(void) | |||
28 | return 1; | 19 | return 1; |
29 | } | 20 | } |
30 | 21 | ||
31 | /* | ||
32 | * For now each logical cpu is in its own vector allocation domain. | ||
33 | */ | ||
34 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) | ||
35 | { | ||
36 | cpumask_clear(retmask); | ||
37 | cpumask_set_cpu(cpu, retmask); | ||
38 | } | ||
39 | |||
40 | static void | 22 | static void |
41 | __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) | 23 | __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) |
42 | { | 24 | { |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index c090af10ac7d..38155f667144 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -156,7 +156,6 @@ struct x86_cpuinit_ops { | |||
156 | /** | 156 | /** |
157 | * struct x86_platform_ops - platform specific runtime functions | 157 | * struct x86_platform_ops - platform specific runtime functions |
158 | * @calibrate_tsc: calibrate TSC | 158 | * @calibrate_tsc: calibrate TSC |
159 | * @wallclock_init: init the wallclock device | ||
160 | * @get_wallclock: get time from HW clock like RTC etc. | 159 | * @get_wallclock: get time from HW clock like RTC etc. |
161 | * @set_wallclock: set time back to HW clock | 160 | * @set_wallclock: set time back to HW clock |
162 | * @is_untracked_pat_range exclude from PAT logic | 161 | * @is_untracked_pat_range exclude from PAT logic |
@@ -164,10 +163,10 @@ struct x86_cpuinit_ops { | |||
164 | * @i8042_detect pre-detect if i8042 controller exists | 163 | * @i8042_detect pre-detect if i8042 controller exists |
165 | * @save_sched_clock_state: save state for sched_clock() on suspend | 164 | * @save_sched_clock_state: save state for sched_clock() on suspend |
166 | * @restore_sched_clock_state: restore state for sched_clock() on resume | 165 | * @restore_sched_clock_state: restore state for sched_clock() on resume |
166 | * @apic_post_init: adjust apic if needed | ||
167 | */ | 167 | */ |
168 | struct x86_platform_ops { | 168 | struct x86_platform_ops { |
169 | unsigned long (*calibrate_tsc)(void); | 169 | unsigned long (*calibrate_tsc)(void); |
170 | void (*wallclock_init)(void); | ||
171 | unsigned long (*get_wallclock)(void); | 170 | unsigned long (*get_wallclock)(void); |
172 | int (*set_wallclock)(unsigned long nowtime); | 171 | int (*set_wallclock)(unsigned long nowtime); |
173 | void (*iommu_shutdown)(void); | 172 | void (*iommu_shutdown)(void); |
@@ -177,6 +176,7 @@ struct x86_platform_ops { | |||
177 | int (*i8042_detect)(void); | 176 | int (*i8042_detect)(void); |
178 | void (*save_sched_clock_state)(void); | 177 | void (*save_sched_clock_state)(void); |
179 | void (*restore_sched_clock_state)(void); | 178 | void (*restore_sched_clock_state)(void); |
179 | void (*apic_post_init)(void); | ||
180 | }; | 180 | }; |
181 | 181 | ||
182 | struct pci_dev; | 182 | struct pci_dev; |
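
apic_post_init gives platforms a hook to adjust the apic driver after the generic code has selected one. A hypothetical sketch of wiring it up; both function names below are invented for illustration:

/* Sketch: a platform overriding the new hook during early setup. */
static void __init myplat_apic_post_init(void)
{
        /* e.g. swap in a platform-specific IPI implementation */
}

static void __init myplat_init(void)
{
        x86_platform.apic_post_init = myplat_apic_post_init;
}
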