author	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-11 19:26:03 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-11 19:26:03 -0500
commit	67c707e451e12f59e57bca6cf33b5803cb74b022 (patch)
tree	0a0166807867a8ca7543dd7f56a5e197efc98bb9
parent	463eb8ac337bad30ace10835108a56df5817cc76 (diff)
parent	0105c8d8334fc941e0297ca6708fa57854114c0e (diff)
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
 "The main changes in this cycle were:

   - code patching and cpu_has cleanups (Borislav Petkov)
   - paravirt cleanups (Juergen Gross)
   - TSC cleanup (Thomas Gleixner)
   - ptrace cleanup (Chen Gang)"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  arch/x86/kernel/ptrace.c: Remove unused arg_offs_table
  x86/mm: Align macro defines
  x86/cpu: Provide a config option to disable static_cpu_has
  x86/cpufeature: Remove unused and seldomly used cpu_has_xx macros
  x86/cpufeature: Cleanup get_cpu_cap()
  x86/cpufeature: Move some of the scattered feature bits to x86_capability
  x86/paravirt: Remove paravirt ops pmd_update[_defer] and pte_update_defer
  x86/paravirt: Remove unused pv_apic_ops structure
  x86/tsc: Remove unused tsc_pre_init() hook
  x86: Remove unused function cpu_has_ht_siblings()
  x86/paravirt: Kill some unused patching functions
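The recurring pattern across this merge is the retirement of the ad-hoc cpu_has_xx convenience macros in favor of direct feature-bit tests. A minimal before/after sketch of the conversion, taken in spirit from the chacha20 hunk below (illustrative only, not part of the diff):

    /* Before: one dedicated macro per feature */
    if (!cpu_has_ssse3)
            return -ENODEV;

    /* After: the generic test against boot_cpu_data.x86_capability[] */
    if (!boot_cpu_has(X86_FEATURE_SSSE3))
            return -ENODEV;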
 arch/x86/Kconfig                             |  11
 arch/x86/crypto/chacha20_glue.c              |   2
 arch/x86/crypto/crc32c-intel_glue.c          |   2
 arch/x86/include/asm/cmpxchg_32.h            |   2
 arch/x86/include/asm/cmpxchg_64.h            |   2
 arch/x86/include/asm/cpufeature.h            | 115
 arch/x86/include/asm/page_types.h            |   6
 arch/x86/include/asm/paravirt.h              |  26
 arch/x86/include/asm/paravirt_types.h        |  18
 arch/x86/include/asm/pgtable.h               |  15
 arch/x86/include/asm/smp.h                   |  12
 arch/x86/include/asm/x86_init.h              |   2
 arch/x86/include/asm/xor_32.h                |   2
 arch/x86/kernel/cpu/amd.c                    |   4
 arch/x86/kernel/cpu/centaur.c                |   2
 arch/x86/kernel/cpu/common.c                 |  52
 arch/x86/kernel/cpu/intel.c                  |   3
 arch/x86/kernel/cpu/intel_cacheinfo.c        |   6
 arch/x86/kernel/cpu/mtrr/generic.c           |   2
 arch/x86/kernel/cpu/mtrr/main.c              |   2
 arch/x86/kernel/cpu/perf_event_amd.c         |   4
 arch/x86/kernel/cpu/perf_event_amd_uncore.c  |  11
 arch/x86/kernel/cpu/scattered.c              |  20
 arch/x86/kernel/cpu/transmeta.c              |   4
 arch/x86/kernel/fpu/init.c                   |   4
 arch/x86/kernel/hw_breakpoint.c              |   6
 arch/x86/kernel/paravirt.c                   |  24
 arch/x86/kernel/ptrace.c                     |  15
 arch/x86/kernel/smpboot.c                    |   9
 arch/x86/kernel/tsc.c                        |   2
 arch/x86/kernel/vm86_32.c                    |   4
 arch/x86/kernel/x86_init.c                   |   1
 arch/x86/lguest/boot.c                       |   1
 arch/x86/mm/pgtable.c                        |   7
 arch/x86/mm/setup_nx.c                       |   4
 arch/x86/xen/enlighten.c                     |   7
 arch/x86/xen/mmu.c                           |   1
 drivers/char/hw_random/via-rng.c             |   5
 drivers/crypto/padlock-aes.c                 |   2
 drivers/crypto/padlock-sha.c                 |   2
 drivers/iommu/intel_irq_remapping.c          |   2
 fs/btrfs/disk-io.c                           |   2
 42 files changed, 148 insertions(+), 275 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 965fc4216f76..258965d56beb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -349,6 +349,17 @@ config X86_FEATURE_NAMES
 
 	  If in doubt, say Y.
 
+config X86_FAST_FEATURE_TESTS
+	bool "Fast CPU feature tests" if EMBEDDED
+	default y
+	---help---
+	  Some fast-paths in the kernel depend on the capabilities of the CPU.
+	  Say Y here for the kernel to patch in the appropriate code at runtime
+	  based on the capabilities of the CPU. The infrastructure for patching
+	  code at runtime takes up some additional space; space-constrained
+	  embedded systems may wish to say N here to produce smaller, slightly
+	  slower code.
+
 config X86_X2APIC
 	bool "Support x2apic"
 	depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST)
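The new option gates the alternatives-patched static_cpu_has() fast path. A hedged sketch of the fallback wiring, simplified from the cpufeature.h change later in this diff (the asm-goto branch is elided):

    #if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
    /* asm-goto based static_cpu_has(), patched at boot via alternatives */
    #else
    /* option off (or old GCC): fall back to the plain in-memory bit test */
    #define static_cpu_has(bit)             boot_cpu_has(bit)
    #define static_cpu_has_safe(bit)        boot_cpu_has(bit)
    #endif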
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 722bacea040e..8baaff5af0b5 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -125,7 +125,7 @@ static struct crypto_alg alg = {
 
 static int __init chacha20_simd_mod_init(void)
 {
-	if (!cpu_has_ssse3)
+	if (!boot_cpu_has(X86_FEATURE_SSSE3))
 		return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 81a595d75cf5..0e9871693f24 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -257,7 +257,7 @@ static int __init crc32c_intel_mod_init(void)
 	if (!x86_match_cpu(crc32c_cpu_id))
 		return -ENODEV;
 #ifdef CONFIG_X86_64
-	if (cpu_has_pclmulqdq) {
+	if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
 		alg.update = crc32c_pcl_intel_update;
 		alg.finup = crc32c_pcl_intel_finup;
 		alg.digest = crc32c_pcl_intel_digest;
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index f7e142926481..e4959d023af8 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 
 #endif
 
-#define system_has_cmpxchg_double()	cpu_has_cx8
+#define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX8)
 
 #endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 1af94697aae5..caa23a34c963 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
 	cmpxchg_local((ptr), (o), (n));					\
 })
 
-#define system_has_cmpxchg_double()	cpu_has_cx16
+#define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX16)
 
 #endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index f7ba9fbf12ee..7ad8c9464297 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -12,7 +12,7 @@
 #include <asm/disabled-features.h>
 #endif
 
-#define NCAPINTS	14	/* N 32-bit words worth of info */
+#define NCAPINTS	16	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */
 
 /*
@@ -181,22 +181,17 @@
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc, word 7
+ * CPUID levels like 0x6, 0xA etc, word 7.
+ *
+ * Reuse free bits when adding new feature flags!
  */
-#define X86_FEATURE_IDA		( 7*32+ 0) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT	( 7*32+ 1) /* Always Running APIC Timer */
+
 #define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_PLN		( 7*32+ 5) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS		( 7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTHERM	( 7*32+ 7) /* Digital Thermal Sensor */
+
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_HWP		( 7*32+10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOTIFY	( 7*32+11) /* Intel HWP_NOTIFY */
-#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+12) /* Intel HWP_ACT_WINDOW */
-#define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
-#define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */
+
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
 
 /* Virtualization flags: Linux defined, word 8 */
@@ -205,16 +200,7 @@
 #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
 #define X86_FEATURE_EPT		( 8*32+ 3) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID	( 8*32+ 4) /* Intel Virtual Processor ID */
-#define X86_FEATURE_NPT		( 8*32+ 5) /* AMD Nested Page Table support */
-#define X86_FEATURE_LBRV	( 8*32+ 6) /* AMD LBR Virtualization support */
-#define X86_FEATURE_SVML	( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
-#define X86_FEATURE_NRIPS	( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR	( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN	( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID	( 8*32+11) /* AMD flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER	( 8*32+13) /* AMD filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD	( 8*32+14) /* AMD pause filter threshold */
+
 #define X86_FEATURE_VMMCALL	( 8*32+15) /* Prefer vmmcall to vmcall */
 #define X86_FEATURE_XENPV	( 8*32+16) /* "" Xen paravirtual guest */
 
@@ -259,6 +245,30 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+ 0) /* CLZERO instruction */
 
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+#define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA		(14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT	(14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN		(14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS		(14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP		(14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY	(14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP	(14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ	(14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
+#define X86_FEATURE_NPT		(15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV	(15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML	(15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS	(15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR	(15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN	(15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID	(15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER	(15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD	(15*32+12) /* pause filter threshold */
+
 /*
  * BUG word(s)
  */
@@ -279,6 +289,26 @@
 #include <asm/asm.h>
 #include <linux/bitops.h>
 
+enum cpuid_leafs
+{
+	CPUID_1_EDX		= 0,
+	CPUID_8000_0001_EDX,
+	CPUID_8086_0001_EDX,
+	CPUID_LNX_1,
+	CPUID_1_ECX,
+	CPUID_C000_0001_EDX,
+	CPUID_8000_0001_ECX,
+	CPUID_LNX_2,
+	CPUID_LNX_3,
+	CPUID_7_0_EBX,
+	CPUID_D_1_EAX,
+	CPUID_F_0_EDX,
+	CPUID_F_1_EDX,
+	CPUID_8000_0008_EBX,
+	CPUID_6_EAX,
+	CPUID_8000_000A_EDX,
+};
+
 #ifdef CONFIG_X86_FEATURE_NAMES
 extern const char * const x86_cap_flags[NCAPINTS*32];
 extern const char * const x86_power_flags[32];
@@ -356,60 +386,31 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 } while (0)
 
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)
 #define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
 #define cpu_has_pge		boot_cpu_has(X86_FEATURE_PGE)
 #define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_sep		boot_cpu_has(X86_FEATURE_SEP)
-#define cpu_has_mtrr		boot_cpu_has(X86_FEATURE_MTRR)
-#define cpu_has_mmx		boot_cpu_has(X86_FEATURE_MMX)
 #define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ssse3		boot_cpu_has(X86_FEATURE_SSSE3)
 #define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
-#define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
-#define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
-#define cpu_has_xcrypt		boot_cpu_has(X86_FEATURE_XCRYPT)
-#define cpu_has_xcrypt_enabled	boot_cpu_has(X86_FEATURE_XCRYPT_EN)
-#define cpu_has_ace2		boot_cpu_has(X86_FEATURE_ACE2)
-#define cpu_has_ace2_enabled	boot_cpu_has(X86_FEATURE_ACE2_EN)
-#define cpu_has_phe		boot_cpu_has(X86_FEATURE_PHE)
-#define cpu_has_phe_enabled	boot_cpu_has(X86_FEATURE_PHE_EN)
-#define cpu_has_pmm		boot_cpu_has(X86_FEATURE_PMM)
-#define cpu_has_pmm_enabled	boot_cpu_has(X86_FEATURE_PMM_EN)
-#define cpu_has_ds		boot_cpu_has(X86_FEATURE_DS)
-#define cpu_has_pebs		boot_cpu_has(X86_FEATURE_PEBS)
 #define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_xmm4_1		boot_cpu_has(X86_FEATURE_XMM4_1)
-#define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_xsaves		boot_cpu_has(X86_FEATURE_XSAVES)
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
-#define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
-#define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
-#define cpu_has_perfctr_nb	boot_cpu_has(X86_FEATURE_PERFCTR_NB)
-#define cpu_has_perfctr_l2	boot_cpu_has(X86_FEATURE_PERFCTR_L2)
-#define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
-#define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
-#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
-#define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)
-#define cpu_has_bpext		boot_cpu_has(X86_FEATURE_BPEXT)
-
-#if __GNUC__ >= 4
+/*
+ * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * fast paths and boot_cpu_has() otherwise!
+ */
+
+#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 extern void warn_pre_alternatives(void);
 extern bool __static_cpu_has_safe(u16 bit);
 
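Each X86_FEATURE_* constant above packs a capability word and a bit position as word*32 + bit, so X86_FEATURE_LBRV (15*32+ 1) lands in x86_capability[15], bit 1. A sketch of the lookup these macros ultimately perform (the function name is illustrative, not from the header):

    static inline int example_cpu_has(struct cpuinfo_x86 *c, unsigned int feature)
    {
            /* word = feature / 32, bit within the word = feature % 32 */
            return (c->x86_capability[feature / 32] >> (feature % 32)) & 1;
    }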
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index cc071c6f7d4d..7bd0099384ca 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -5,9 +5,9 @@
 #include <linux/types.h>
 
 /* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT	12
-#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK	(~(PAGE_SIZE-1))
+#define PAGE_SHIFT		12
+#define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK		(~(PAGE_SIZE-1))
 
 #define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
 #define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8f28d8412a6d..f6192502149e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -291,15 +291,6 @@ static inline void slow_down_io(void)
 #endif
 }
 
-#ifdef CONFIG_SMP
-static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
-				    unsigned long start_esp)
-{
-	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
-		    phys_apicid, start_eip, start_esp);
-}
-#endif
-
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
 {
@@ -381,23 +372,6 @@ static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 {
 	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
 }
-static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
-			      pmd_t *pmdp)
-{
-	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
-}
-
-static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
-				    pte_t *ptep)
-{
-	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
-}
-
-static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
-				    pmd_t *pmdp)
-{
-	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
-}
 
 static inline pte_t __pte(pteval_t val)
 {
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 4752ff8c0704..77db5616a473 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -203,14 +203,6 @@ struct pv_irq_ops {
 #endif
 };
 
-struct pv_apic_ops {
-#ifdef CONFIG_X86_LOCAL_APIC
-	void (*startup_ipi_hook)(int phys_apicid,
-				 unsigned long start_eip,
-				 unsigned long start_esp);
-#endif
-};
-
 struct pv_mmu_ops {
 	unsigned long (*read_cr2)(void);
 	void (*write_cr2)(unsigned long);
@@ -262,12 +254,6 @@ struct pv_mmu_ops {
 			   pmd_t *pmdp, pmd_t pmdval);
 	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep);
-	void (*pte_update_defer)(struct mm_struct *mm,
-				 unsigned long addr, pte_t *ptep);
-	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
-			   pmd_t *pmdp);
-	void (*pmd_update_defer)(struct mm_struct *mm,
-				 unsigned long addr, pmd_t *pmdp);
 
 	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
 					pte_t *ptep);
@@ -342,7 +328,6 @@ struct paravirt_patch_template {
 	struct pv_time_ops pv_time_ops;
 	struct pv_cpu_ops pv_cpu_ops;
 	struct pv_irq_ops pv_irq_ops;
-	struct pv_apic_ops pv_apic_ops;
 	struct pv_mmu_ops pv_mmu_ops;
 	struct pv_lock_ops pv_lock_ops;
 };
@@ -352,7 +337,6 @@ extern struct pv_init_ops pv_init_ops;
 extern struct pv_time_ops pv_time_ops;
 extern struct pv_cpu_ops pv_cpu_ops;
 extern struct pv_irq_ops pv_irq_ops;
-extern struct pv_apic_ops pv_apic_ops;
 extern struct pv_mmu_ops pv_mmu_ops;
 extern struct pv_lock_ops pv_lock_ops;
 
@@ -390,10 +374,8 @@ extern struct pv_lock_ops pv_lock_ops;
 	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
 	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
 
-unsigned paravirt_patch_nop(void);
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ignore(unsigned len);
 unsigned paravirt_patch_call(void *insnbuf,
 			     const void *target, u16 tgt_clobbers,
 			     unsigned long addr, u16 site_clobbers,
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 6ec0c8b2e9df..d3eee663c41f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -69,9 +69,6 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 #define pmd_clear(pmd)			native_pmd_clear(pmd)
 
 #define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#define pmd_update(mm, addr, ptep)		do { } while (0)
-#define pmd_update_defer(mm, addr, ptep)	do { } while (0)
 
 #define pgd_val(x)	native_pgd_val(x)
 #define __pgd(x)	native_make_pgd(x)
@@ -731,14 +728,9 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
  * updates should either be sets, clears, or set_pte_atomic for P->P
  * transitions, which means this hook should only be called for user PTEs.
  * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush. The notification can optionally be delayed
- * until the TLB flush event by using the pte_update_defer form of the
- * interface, but care must be taken to assure that the flush happens while
- * still holding the same page table lock so that the shadow and primary pages
- * do not become out of sync on SMP.
+ * requires a subsequent TLB flush.
  */
 #define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
 #endif
 
 /*
@@ -830,9 +822,7 @@ static inline int pmd_write(pmd_t pmd)
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
 					    pmd_t *pmdp)
 {
-	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
-	pmd_update(mm, addr, pmdp);
-	return pmd;
+	return native_pmdp_get_and_clear(pmdp);
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -840,7 +830,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 				      unsigned long addr, pmd_t *pmdp)
 {
 	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
-	pmd_update(mm, addr, pmdp);
 }
 
 /*
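With the deferred variants gone, a P->P permission change reduces to a store plus the single remaining pte_update() notification; a sketch, simplified from ptep_set_access_flags() as it reads after this series (the TLB flush stays with the caller):

    int example_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                                 pte_t *ptep, pte_t entry, int dirty)
    {
            int changed = !pte_same(*ptep, entry);

            if (changed && dirty) {
                    *ptep = entry;
                    pte_update(vma->vm_mm, address, ptep);  /* was pte_update_defer() */
            }

            return changed;
    }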
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 222a6a3ca2b5..dfcf0727623b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,15 +21,6 @@
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
-static inline bool cpu_has_ht_siblings(void)
-{
-	bool has_siblings = false;
-#ifdef CONFIG_SMP
-	has_siblings = cpu_has_ht && smp_num_siblings > 1;
-#endif
-	return has_siblings;
-}
-
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
 /* cpus sharing the last level cache: */
@@ -74,9 +65,6 @@ struct smp_ops {
 extern void set_cpu_sibling_map(int cpu);
 
 #ifdef CONFIG_SMP
-#ifndef CONFIG_PARAVIRT
-#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
-#endif
 extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index cd0fc0cc78bc..1ae89a2721d6 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -82,13 +82,11 @@ struct x86_init_paging {
  * struct x86_init_timers - platform specific timer setup
  * @setup_perpcu_clockev:	set up the per cpu clock event device for the
  *				boot cpu
- * @tsc_pre_init:		platform function called before TSC init
  * @timer_init:			initialize the platform timer (default PIT/HPET)
  * @wallclock_init:		init the wallclock device
  */
 struct x86_init_timers {
 	void (*setup_percpu_clockev)(void);
-	void (*tsc_pre_init)(void);
 	void (*timer_init)(void);
 	void (*wallclock_init)(void);
 };
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 5a08bc8bff33..c54beb44c4c1 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -553,7 +553,7 @@ do { \
 	if (cpu_has_xmm) {				\
 		xor_speed(&xor_block_pIII_sse);		\
 		xor_speed(&xor_block_sse_pf64);		\
-	} else if (cpu_has_mmx) {			\
+	} else if (boot_cpu_has(X86_FEATURE_MMX)) {	\
 		xor_speed(&xor_block_pII_mmx);		\
 		xor_speed(&xor_block_p5_mmx);		\
 	} else {					\
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a8816b325162..34c3ad608dd4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -304,7 +304,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 	int cpu = smp_processor_id();
 
 	/* get information required for multi-node processors */
-	if (cpu_has_topoext) {
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		u32 eax, ebx, ecx, edx;
 
 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -922,7 +922,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 
 void set_dr_addr_mask(unsigned long mask, int dr)
 {
-	if (!cpu_has_bpext)
+	if (!boot_cpu_has(X86_FEATURE_BPEXT))
 		return;
 
 	switch (dr) {
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index d8fba5c15fbd..ae20be6e483c 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -43,7 +43,7 @@ static void init_c3(struct cpuinfo_x86 *c)
 		/* store Centaur Extended Feature Flags as
 		 * word 5 of the CPU capability bit array
 		 */
-		c->x86_capability[5] = cpuid_edx(0xC0000001);
+		c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
 	}
 #ifdef CONFIG_X86_32
 	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c2b7522cbf35..4d5279c95d5f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -599,50 +599,47 @@ void cpu_detect(struct cpuinfo_x86 *c)
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-	u32 ebx;
+	u32 eax, ebx, ecx, edx;
 
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
-		u32 capability, excap;
+		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
 
-		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-		c->x86_capability[0] = capability;
-		c->x86_capability[4] = excap;
+		c->x86_capability[CPUID_1_ECX] = ecx;
+		c->x86_capability[CPUID_1_EDX] = edx;
 	}
 
 	/* Additional Intel-defined flags: level 0x00000007 */
 	if (c->cpuid_level >= 0x00000007) {
-		u32 eax, ebx, ecx, edx;
-
 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 
-		c->x86_capability[9] = ebx;
+		c->x86_capability[CPUID_7_0_EBX] = ebx;
+
+		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 	}
 
 	/* Extended state features: level 0x0000000d */
 	if (c->cpuid_level >= 0x0000000d) {
-		u32 eax, ebx, ecx, edx;
-
 		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
 
-		c->x86_capability[10] = eax;
+		c->x86_capability[CPUID_D_1_EAX] = eax;
 	}
 
 	/* Additional Intel-defined flags: level 0x0000000F */
 	if (c->cpuid_level >= 0x0000000F) {
-		u32 eax, ebx, ecx, edx;
 
 		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
 		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
-		c->x86_capability[11] = edx;
+		c->x86_capability[CPUID_F_0_EDX] = edx;
+
 		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
 			/* will be overridden if occupancy monitoring exists */
 			c->x86_cache_max_rmid = ebx;
 
 			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
 			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
-			c->x86_capability[12] = edx;
+			c->x86_capability[CPUID_F_1_EDX] = edx;
+
 			if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
 				c->x86_cache_max_rmid = ecx;
 				c->x86_cache_occ_scale = ebx;
@@ -654,22 +651,24 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 	}
 
 	/* AMD-defined flags: level 0x80000001 */
-	xlvl = cpuid_eax(0x80000000);
-	c->extended_cpuid_level = xlvl;
+	eax = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = eax;
 
-	if ((xlvl & 0xffff0000) == 0x80000000) {
-		if (xlvl >= 0x80000001) {
-			c->x86_capability[1] = cpuid_edx(0x80000001);
-			c->x86_capability[6] = cpuid_ecx(0x80000001);
+	if ((eax & 0xffff0000) == 0x80000000) {
+		if (eax >= 0x80000001) {
+			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+
+			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
+			c->x86_capability[CPUID_8000_0001_EDX] = edx;
 		}
 	}
 
 	if (c->extended_cpuid_level >= 0x80000008) {
-		u32 eax = cpuid_eax(0x80000008);
+		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
-		c->x86_capability[13] = cpuid_ebx(0x80000008);
+		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 	}
 #ifdef CONFIG_X86_32
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
@@ -679,6 +678,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level >= 0x80000007)
 		c->x86_power = cpuid_edx(0x80000007);
 
+	if (c->extended_cpuid_level >= 0x8000000a)
+		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
+
 	init_scattered_cpuid_features(c);
 }
 
@@ -1443,7 +1445,9 @@ void cpu_init(void)
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
-	if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
+	if (cpu_feature_enabled(X86_FEATURE_VME) ||
+	    cpu_has_tsc ||
+	    boot_cpu_has(X86_FEATURE_DE))
 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	load_current_idt();
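The enum cpuid_leafs values double as the word numbers used in the X86_FEATURE_* encoding, which is what keeps get_cpu_cap() and the feature tests in sync. An illustrative sketch (the function is hypothetical):

    /* CPUID_8000_000A_EDX == 15, and X86_FEATURE_LBRV == 15*32 + 1 */
    static bool example_has_lbrv(struct cpuinfo_x86 *c)
    {
            c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
            return cpu_has(c, X86_FEATURE_LBRV);    /* reads word 15, bit 1 */
    }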
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 209ac1e7d1f0..565648bc1a0a 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -445,7 +445,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 
 	if (cpu_has_xmm2)
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-	if (cpu_has_ds) {
+
+	if (boot_cpu_has(X86_FEATURE_DS)) {
 		unsigned int l1;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index e38d338a6447..0b6c52388cf4 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
 	unsigned		edx;
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-		if (cpu_has_topoext)
+		if (boot_cpu_has(X86_FEATURE_TOPOEXT))
 			cpuid_count(0x8000001d, index, &eax.full,
 				    &ebx.full, &ecx.full, &edx);
 		else
@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
 
-	if (cpu_has_topoext) {
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		num_cache_leaves = find_num_cache_leaves(c);
 	} else if (c->extended_cpuid_level >= 0x80000006) {
 		if (cpuid_edx(0x80000006) & 0xf000)
@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
 	struct cacheinfo *this_leaf;
 	int i, sibling;
 
-	if (cpu_has_topoext) {
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		unsigned int apicid, nshared, first, last;
 
 		this_leaf = this_cpu_ci->info_list + index;
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 3b533cf37c74..c870af161008 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *frs)
 
 void mtrr_save_fixed_ranges(void *info)
 {
-	if (cpu_has_mtrr)
+	if (boot_cpu_has(X86_FEATURE_MTRR))
 		get_fixed_ranges(mtrr_state.fixed_ranges);
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index f891b4750f04..5c3d149ee91c 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
 
 	phys_addr = 32;
 
-	if (cpu_has_mtrr) {
+	if (boot_cpu_has(X86_FEATURE_MTRR)) {
 		mtrr_if = &generic_mtrr_ops;
 		size_or_mask = SIZE_OR_MASK_BITS(36);
 		size_and_mask = 0x00f00000;
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 05e76bf65781..58610539b048 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
 	if (offset)
 		return offset;
 
-	if (!cpu_has_perfctr_core)
+	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
 		offset = index;
 	else
 		offset = index << 1;
@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 
 static int __init amd_core_pmu_init(void)
 {
-	if (!cpu_has_perfctr_core)
+	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
 		return 0;
 
 	switch (boot_cpu_data.x86) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index cc6cedb8f25d..49742746a6c9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		goto fail_nodev;
 
-	if (!cpu_has_topoext)
+	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
 		goto fail_nodev;
 
-	if (cpu_has_perfctr_nb) {
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
 		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
 		if (!amd_uncore_nb) {
 			ret = -ENOMEM;
@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
 		ret = 0;
 	}
 
-	if (cpu_has_perfctr_l2) {
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
 		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
 		if (!amd_uncore_l2) {
 			ret = -ENOMEM;
@@ -583,10 +583,11 @@ fail_online:
 
 	/* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
 	amd_uncore_nb = amd_uncore_l2 = NULL;
-	if (cpu_has_perfctr_l2)
+
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
 		perf_pmu_unregister(&amd_l2_pmu);
 fail_l2:
-	if (cpu_has_perfctr_nb)
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
 		perf_pmu_unregister(&amd_nb_pmu);
 	if (amd_uncore_l2)
 		free_percpu(amd_uncore_l2);
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 608fb26c7254..8cb57df9398d 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,32 +31,12 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	const struct cpuid_bit *cb;
 
 	static const struct cpuid_bit cpuid_bits[] = {
-		{ X86_FEATURE_DTHERM,		CR_EAX, 0, 0x00000006, 0 },
-		{ X86_FEATURE_IDA,		CR_EAX, 1, 0x00000006, 0 },
-		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006, 0 },
-		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },
-		{ X86_FEATURE_PTS,		CR_EAX, 6, 0x00000006, 0 },
-		{ X86_FEATURE_HWP,		CR_EAX, 7, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_NOTIFY,	CR_EAX, 8, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_ACT_WINDOW,	CR_EAX, 9, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_EPP,		CR_EAX,10, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_PKG_REQ,	CR_EAX,11, 0x00000006, 0 },
 		{ X86_FEATURE_INTEL_PT,		CR_EBX,25, 0x00000007, 0 },
 		{ X86_FEATURE_APERFMPERF,	CR_ECX, 0, 0x00000006, 0 },
 		{ X86_FEATURE_EPB,		CR_ECX, 3, 0x00000006, 0 },
 		{ X86_FEATURE_HW_PSTATE,	CR_EDX, 7, 0x80000007, 0 },
 		{ X86_FEATURE_CPB,		CR_EDX, 9, 0x80000007, 0 },
 		{ X86_FEATURE_PROC_FEEDBACK,	CR_EDX,11, 0x80000007, 0 },
-		{ X86_FEATURE_NPT,		CR_EDX, 0, 0x8000000a, 0 },
-		{ X86_FEATURE_LBRV,		CR_EDX, 1, 0x8000000a, 0 },
-		{ X86_FEATURE_SVML,		CR_EDX, 2, 0x8000000a, 0 },
-		{ X86_FEATURE_NRIPS,		CR_EDX, 3, 0x8000000a, 0 },
-		{ X86_FEATURE_TSCRATEMSR,	CR_EDX, 4, 0x8000000a, 0 },
-		{ X86_FEATURE_VMCBCLEAN,	CR_EDX, 5, 0x8000000a, 0 },
-		{ X86_FEATURE_FLUSHBYASID,	CR_EDX, 6, 0x8000000a, 0 },
-		{ X86_FEATURE_DECODEASSISTS,	CR_EDX, 7, 0x8000000a, 0 },
-		{ X86_FEATURE_PAUSEFILTER,	CR_EDX,10, 0x8000000a, 0 },
-		{ X86_FEATURE_PFTHRESHOLD,	CR_EDX,12, 0x8000000a, 0 },
 		{ 0, 0, 0, 0, 0 }
 	};
 
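The bits that stay in the table are still harvested one at a time; a sketch of the consuming loop, assuming the shape of init_scattered_cpuid_features() in this era (simplified):

    u32 regs[4];

    for (cb = cpuid_bits; cb->feature; cb++) {
            /* skip leaves this CPU does not report */
            u32 max_level = cpuid_eax(cb->level & 0xffff0000);

            if (max_level < cb->level || max_level > (cb->level | 0xffff))
                    continue;

            cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
                        &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);

            if (regs[cb->reg] & (1 << cb->bit))
                    set_cpu_cap(c, cb->feature);
    }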
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 3fa0e5ad86b4..252da7aceca6 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -12,7 +12,7 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
 	xlvl = cpuid_eax(0x80860000);
 	if ((xlvl & 0xffff0000) == 0x80860000) {
 		if (xlvl >= 0x80860001)
-			c->x86_capability[2] = cpuid_edx(0x80860001);
+			c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
 	}
 }
18 18
@@ -82,7 +82,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
 	/* Unhide possibly hidden capability flags */
 	rdmsr(0x80860004, cap_mask, uk);
 	wrmsr(0x80860004, ~0, uk);
-	c->x86_capability[0] = cpuid_edx(0x00000001);
+	c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001);
 	wrmsr(0x80860004, cap_mask, uk);
 
 	/* All Transmeta CPUs have a constant TSC */
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 8e839e7f5e2f..0d4e092ae1bf 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -12,7 +12,7 @@
  */
 static void fpu__init_cpu_ctx_switch(void)
 {
-	if (!cpu_has_eager_fpu)
+	if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
 		stts();
 	else
 		clts();
@@ -296,7 +296,7 @@ static void __init fpu__init_system_ctx_switch(void)
 	current_thread_info()->status = 0;
 
 	/* Auto enable eagerfpu for xsaveopt */
-	if (cpu_has_xsaveopt && eagerfpu != DISABLE)
+	if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
 		eagerfpu = ENABLE;
 
 	if (xfeatures_mask & XFEATURE_MASK_EAGER) {
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 50a3fad5b89f..2bcfb5f2bc44 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -300,6 +300,10 @@ static int arch_build_bp_info(struct perf_event *bp)
 			return -EINVAL;
 		if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
 			return -EINVAL;
+
+		if (!boot_cpu_has(X86_FEATURE_BPEXT))
+			return -EOPNOTSUPP;
+
 		/*
 		 * It's impossible to use a range breakpoint to fake out
 		 * user vs kernel detection because bp_len - 1 can't
@@ -307,8 +311,6 @@ static int arch_build_bp_info(struct perf_event *bp)
 		 * breakpoints, then we'll have to check for kprobe-blacklisted
 		 * addresses anywhere in the range.
 		 */
-		if (!cpu_has_bpext)
-			return -EOPNOTSUPP;
 		info->mask = bp->attr.bp_len - 1;
 		info->len = X86_BREAKPOINT_LEN_1;
 	}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8c19b4d5e719..f08ac28b8136 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -74,16 +74,6 @@ void __init default_banner(void)
 /* Undefined instruction for dealing with missing ops pointers. */
 static const unsigned char ud2a[] = { 0x0f, 0x0b };
 
-unsigned paravirt_patch_nop(void)
-{
-	return 0;
-}
-
-unsigned paravirt_patch_ignore(unsigned len)
-{
-	return len;
-}
-
 struct branch {
 	unsigned char opcode;
 	u32 delta;
@@ -133,7 +123,6 @@ static void *get_call_destination(u8 type)
 		.pv_time_ops = pv_time_ops,
 		.pv_cpu_ops = pv_cpu_ops,
 		.pv_irq_ops = pv_irq_ops,
-		.pv_apic_ops = pv_apic_ops,
 		.pv_mmu_ops = pv_mmu_ops,
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 		.pv_lock_ops = pv_lock_ops,
@@ -152,8 +141,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		/* If there's no function, patch it with a ud2a (BUG) */
 		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
 	else if (opfunc == _paravirt_nop)
-		/* If the operation is a nop, then nop the callsite */
-		ret = paravirt_patch_nop();
+		ret = 0;
 
 	/* identity functions just return their single argument */
 	else if (opfunc == _paravirt_ident_32)
@@ -391,12 +379,6 @@ NOKPROBE_SYMBOL(native_get_debugreg);
 NOKPROBE_SYMBOL(native_set_debugreg);
 NOKPROBE_SYMBOL(native_load_idt);
 
-struct pv_apic_ops pv_apic_ops = {
-#ifdef CONFIG_X86_LOCAL_APIC
-	.startup_ipi_hook = paravirt_nop,
-#endif
-};
-
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
 /* 32-bit pagetable entries */
 #define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
@@ -432,9 +414,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_pmd = native_set_pmd,
 	.set_pmd_at = native_set_pmd_at,
 	.pte_update = paravirt_nop,
-	.pte_update_defer = paravirt_nop,
-	.pmd_update = paravirt_nop,
-	.pmd_update_defer = paravirt_nop,
 
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
@@ -480,6 +459,5 @@ struct pv_mmu_ops pv_mmu_ops = {
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL    (pv_cpu_ops);
 EXPORT_SYMBOL    (pv_mmu_ops);
-EXPORT_SYMBOL_GPL(pv_apic_ops);
 EXPORT_SYMBOL_GPL(pv_info);
 EXPORT_SYMBOL    (pv_irq_ops);
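Returning 0 for a _paravirt_nop op preserves the old paravirt_patch_nop() semantics without the helper: the site patches to zero bytes and the caller pads the remainder with NOPs. A sketch of that flow, simplified from apply_paravirt() (the variable names are assumptions):

    /* patch one paravirt site; 'used' is how many bytes the patcher emitted */
    used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                             (unsigned long)p->instr, p->len);

    /* a nop op returned 0, so the whole site becomes NOP padding */
    add_nops(insnbuf + used, p->len - used);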
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 558f50edebca..32e9d9cbb884 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -124,21 +124,6 @@ const char *regs_query_register_name(unsigned int offset)
 	return NULL;
 }
 
-static const int arg_offs_table[] = {
-#ifdef CONFIG_X86_32
-	[0] = offsetof(struct pt_regs, ax),
-	[1] = offsetof(struct pt_regs, dx),
-	[2] = offsetof(struct pt_regs, cx)
-#else /* CONFIG_X86_64 */
-	[0] = offsetof(struct pt_regs, di),
-	[1] = offsetof(struct pt_regs, si),
-	[2] = offsetof(struct pt_regs, dx),
-	[3] = offsetof(struct pt_regs, cx),
-	[4] = offsetof(struct pt_regs, r8),
-	[5] = offsetof(struct pt_regs, r9)
-#endif
-};
-
 /*
  * does not yet catch signals sent when the child dies.
  * in exit.c or in signal.c.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fbabe4fcc7fb..24d57f77b3c1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -304,7 +304,7 @@ do { \
 
 static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if (cpu_has_topoext) {
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 		if (c->phys_proc_id == o->phys_proc_id &&
@@ -630,13 +630,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	num_starts = 0;
 
 	/*
-	 * Paravirt / VMI wants a startup IPI hook here to set up the
-	 * target processor state.
-	 */
-	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 stack_start);
-
-	/*
 	 * Run STARTUP IPI loop.
 	 */
 	pr_debug("#startup loops: %d\n", num_starts);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c7c4d9c51e99..3d743da828d3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1185,8 +1185,6 @@ void __init tsc_init(void)
 	u64 lpj;
 	int cpu;
 
-	x86_init.timers.tsc_pre_init();
-
 	if (!cpu_has_tsc) {
 		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 		return;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 524619351961..483231ebbb0b 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
 	tss = &per_cpu(cpu_tss, get_cpu());
 	/* make room for real-mode segments */
 	tsk->thread.sp0 += 16;
-	if (cpu_has_sep)
+
+	if (static_cpu_has_safe(X86_FEATURE_SEP))
 		tsk->thread.sysenter_cs = 0;
+
 	load_sp0(tss, &tsk->thread);
 	put_cpu();
 
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 3839628d962e..dad5fe9633a3 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -68,7 +68,6 @@ struct x86_init_ops x86_init __initdata = {
 
 	.timers = {
 		.setup_percpu_clockev	= setup_boot_APIC_clock,
-		.tsc_pre_init		= x86_init_noop,
 		.timer_init		= hpet_time_init,
 		.wallclock_init		= x86_init_noop,
 	},
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a43b2eafc466..4ba229ac3f4f 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1473,7 +1473,6 @@ __init void lguest_init(void)
1473 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; 1473 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
1474 pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; 1474 pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
1475 pv_mmu_ops.pte_update = lguest_pte_update; 1475 pv_mmu_ops.pte_update = lguest_pte_update;
1476 pv_mmu_ops.pte_update_defer = lguest_pte_update;
1477 1476
1478#ifdef CONFIG_X86_LOCAL_APIC 1477#ifdef CONFIG_X86_LOCAL_APIC
1479 /* APIC read/write intercepts */ 1478 /* APIC read/write intercepts */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index fb0a9dd1d6e4..ee9c2e3a7199 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -414,7 +414,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
414 414
415 if (changed && dirty) { 415 if (changed && dirty) {
416 *ptep = entry; 416 *ptep = entry;
417 pte_update_defer(vma->vm_mm, address, ptep); 417 pte_update(vma->vm_mm, address, ptep);
418 } 418 }
419 419
420 return changed; 420 return changed;
@@ -431,7 +431,6 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
431 431
432 if (changed && dirty) { 432 if (changed && dirty) {
433 *pmdp = entry; 433 *pmdp = entry;
434 pmd_update_defer(vma->vm_mm, address, pmdp);
435 /* 434 /*
436 * We had a write-protection fault here and changed the pmd 435 * We had a write-protection fault here and changed the pmd
437 * to be more permissive. No need to flush the TLB for that, 436 * to be more permissive. No need to flush the TLB for that,
@@ -469,9 +468,6 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
469 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, 468 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
470 (unsigned long *)pmdp); 469 (unsigned long *)pmdp);
471 470
472 if (ret)
473 pmd_update(vma->vm_mm, addr, pmdp);
474
475 return ret; 471 return ret;
476} 472}
477#endif 473#endif
@@ -518,7 +514,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
518 set = !test_and_set_bit(_PAGE_BIT_SPLITTING, 514 set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
519 (unsigned long *)pmdp); 515 (unsigned long *)pmdp);
520 if (set) { 516 if (set) {
521 pmd_update(vma->vm_mm, address, pmdp);
522 /* need tlb flush only to serialize against gup-fast */ 517 /* need tlb flush only to serialize against gup-fast */
523 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 518 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
524 } 519 }
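
With the paravirt pmd_update() hook gone, pmdp_test_and_clear_young() reduces to its atomic core: clear the hardware accessed bit and report whether it was set. A standalone model of that step using the GCC/Clang atomic builtins (the PMD here is just an unsigned long; bit 5 matches x86's _PAGE_BIT_ACCESSED):

    #include <stdio.h>

    #define PAGE_BIT_ACCESSED 5              /* x86's _PAGE_BIT_ACCESSED */

    static unsigned long pmd = 1ul << PAGE_BIT_ACCESSED;

    static int test_and_clear_young(unsigned long *pmdp)
    {
            /* atomically clear the bit, return its previous value */
            unsigned long old = __atomic_fetch_and(pmdp,
                            ~(1ul << PAGE_BIT_ACCESSED), __ATOMIC_SEQ_CST);
            return (old >> PAGE_BIT_ACCESSED) & 1;
    }

    int main(void)
    {
            printf("young: %d\n", test_and_clear_young(&pmd));   /* 1 */
            printf("young: %d\n", test_and_clear_young(&pmd));   /* 0 */
            return 0;
    }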
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 90555bf60aa4..92e2eacb3321 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -31,7 +31,7 @@ early_param("noexec", noexec_setup);
31 31
32void x86_configure_nx(void) 32void x86_configure_nx(void)
33{ 33{
34 if (cpu_has_nx && !disable_nx) 34 if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
35 __supported_pte_mask |= _PAGE_NX; 35 __supported_pte_mask |= _PAGE_NX;
36 else 36 else
37 __supported_pte_mask &= ~_PAGE_NX; 37 __supported_pte_mask &= ~_PAGE_NX;
@@ -39,7 +39,7 @@ void x86_configure_nx(void)
39 39
40void __init x86_report_nx(void) 40void __init x86_report_nx(void)
41{ 41{
42 if (!cpu_has_nx) { 42 if (!boot_cpu_has(X86_FEATURE_NX)) {
43 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection " 43 printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
44 "missing in CPU!\n"); 44 "missing in CPU!\n");
45 } else { 45 } else {
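
x86_configure_nx() is plain flag arithmetic on the global mask of PTE bits the kernel will honor: set _PAGE_NX when the CPU supports it and it isn't administratively disabled, clear it otherwise. Restated as standalone C (bit 63 is the NX bit of an x86-64 PTE; the function arguments stand in for the kernel's globals):

    #include <stdio.h>

    #define PAGE_NX (1ull << 63)             /* NX: bit 63 of an x86-64 PTE */

    static unsigned long long supported_pte_mask = ~0ull;

    static void configure_nx(int cpu_has_nx, int disable_nx)
    {
            if (cpu_has_nx && !disable_nx)
                    supported_pte_mask |= PAGE_NX;    /* permit NX in PTEs */
            else
                    supported_pte_mask &= ~PAGE_NX;   /* strip NX from PTEs */
    }

    int main(void)
    {
            configure_nx(1, 0);
            printf("NX allowed: %d\n", !!(supported_pte_mask & PAGE_NX));
            return 0;
    }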
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd37ccabcacc..23063923e364 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1262,12 +1262,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
1262 .end_context_switch = xen_end_context_switch, 1262 .end_context_switch = xen_end_context_switch,
1263}; 1263};
1264 1264
1265static const struct pv_apic_ops xen_apic_ops __initconst = {
1266#ifdef CONFIG_X86_LOCAL_APIC
1267 .startup_ipi_hook = paravirt_nop,
1268#endif
1269};
1270
1271static void xen_reboot(int reason) 1265static void xen_reboot(int reason)
1272{ 1266{
1273 struct sched_shutdown r = { .reason = reason }; 1267 struct sched_shutdown r = { .reason = reason };
@@ -1535,7 +1529,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
1535 if (xen_initial_domain()) 1529 if (xen_initial_domain())
1536 pv_info.features |= PV_SUPPORTED_RTC; 1530 pv_info.features |= PV_SUPPORTED_RTC;
1537 pv_init_ops = xen_init_ops; 1531 pv_init_ops = xen_init_ops;
1538 pv_apic_ops = xen_apic_ops;
1539 if (!xen_pvh_domain()) { 1532 if (!xen_pvh_domain()) {
1540 pv_cpu_ops = xen_cpu_ops; 1533 pv_cpu_ops = xen_cpu_ops;
1541 1534
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index cb5e266a8bf7..c913ca4f6958 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2436,7 +2436,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2436 .flush_tlb_others = xen_flush_tlb_others, 2436 .flush_tlb_others = xen_flush_tlb_others,
2437 2437
2438 .pte_update = paravirt_nop, 2438 .pte_update = paravirt_nop,
2439 .pte_update_defer = paravirt_nop,
2440 2439
2441 .pgd_alloc = xen_pgd_alloc, 2440 .pgd_alloc = xen_pgd_alloc,
2442 .pgd_free = xen_pgd_free, 2441 .pgd_free = xen_pgd_free,
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 0c98a9d51a24..44ce80606944 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng)
140 * RNG configuration like it used to be the case in this 140 * RNG configuration like it used to be the case in this
141 * register */ 141 * register */
142 if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { 142 if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
143 if (!cpu_has_xstore_enabled) { 143 if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
144 pr_err(PFX "can't enable hardware RNG " 144 pr_err(PFX "can't enable hardware RNG "
145 "if XSTORE is not enabled\n"); 145 "if XSTORE is not enabled\n");
146 return -ENODEV; 146 return -ENODEV;
@@ -200,8 +200,9 @@ static int __init mod_init(void)
200{ 200{
201 int err; 201 int err;
202 202
203 if (!cpu_has_xstore) 203 if (!boot_cpu_has(X86_FEATURE_XSTORE))
204 return -ENODEV; 204 return -ENODEV;
205
205 pr_info("VIA RNG detected\n"); 206 pr_info("VIA RNG detected\n");
206 err = hwrng_register(&via_rng); 207 err = hwrng_register(&via_rng);
207 if (err) { 208 if (err) {
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index da2d6777bd09..97a364694bfc 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -515,7 +515,7 @@ static int __init padlock_init(void)
515 if (!x86_match_cpu(padlock_cpu_id)) 515 if (!x86_match_cpu(padlock_cpu_id))
516 return -ENODEV; 516 return -ENODEV;
517 517
518 if (!cpu_has_xcrypt_enabled) { 518 if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
519 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); 519 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
520 return -ENODEV; 520 return -ENODEV;
521 } 521 }
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 4e154c9b9206..8c5f90647b7a 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -540,7 +540,7 @@ static int __init padlock_init(void)
540 struct shash_alg *sha1; 540 struct shash_alg *sha1;
541 struct shash_alg *sha256; 541 struct shash_alg *sha256;
542 542
543 if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled) 543 if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
544 return -ENODEV; 544 return -ENODEV;
545 545
546 /* Register the newly added algorithm module if on * 546 /* Register the newly added algorithm module if on *
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 1fae1881648c..c12ba4516df2 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void)
753 * should have X86_FEATURE_CX16 support, this has been confirmed 753 * should have X86_FEATURE_CX16 support, this has been confirmed
754 * with Intel hardware guys. 754 * with Intel hardware guys.
755 */ 755 */
756 if ( cpu_has_cx16 ) 756 if (boot_cpu_has(X86_FEATURE_CX16))
757 intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP; 757 intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
758 758
759 for_each_iommu(iommu, drhd) 759 for_each_iommu(iommu, drhd)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 974be09e7556..42a378a4eefb 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
923 if (bio_flags & EXTENT_BIO_TREE_LOG) 923 if (bio_flags & EXTENT_BIO_TREE_LOG)
924 return 0; 924 return 0;
925#ifdef CONFIG_X86 925#ifdef CONFIG_X86
926 if (cpu_has_xmm4_2) 926 if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
927 return 0; 927 return 0;
928#endif 928#endif
929 return 1; 929 return 1;
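
check_async_write() decides whether btrfs should offload checksumming to worker threads: tree-log writes stay inline, and with SSE4.2 the hardware-accelerated crc32c is cheap enough to compute inline too. The logic, restated standalone (the flag value and the feature probe are stand-ins, not btrfs's definitions):

    #include <stdbool.h>
    #include <stdio.h>

    #define EXTENT_BIO_TREE_LOG 0x1          /* illustrative flag value */

    static bool cpu_has_fast_crc32c = true;  /* stand-in for the SSE4.2 test */

    static int check_async_write(unsigned long bio_flags)
    {
            if (bio_flags & EXTENT_BIO_TREE_LOG)
                    return 0;                /* log writes checksum inline */
            if (cpu_has_fast_crc32c)
                    return 0;                /* hw crc32c is cheap: inline */
            return 1;                        /* otherwise offload to workers */
    }

    int main(void)
    {
            printf("async: %d\n", check_async_write(0));   /* prints 0 */
            return 0;
    }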