author	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-16 17:58:12 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-16 17:58:12 -0500
commit	37507717de51a8332a34ee07fd88700be88df5bf (patch)
tree	d6eb5d00a798a4b1ce40c8c4c8ca74b0d22fe1df /arch/x86/kernel
parent	a68fb48380bb993306dd62a58cbd946b4348222a (diff)
parent	a66734297f78707ce39d756b656bfae861d53f62 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 perf updates from Ingo Molnar:
 "This series tightens up RDPMC permissions: currently even highly
  sandboxed x86 execution environments (such as seccomp) have permission
  to execute RDPMC, which may leak various perf events / PMU state such
  as timing information and other CPU execution details.

  This 'all is allowed' RDPMC mode is still preserved as the
  (non-default) /sys/devices/cpu/rdpmc=2 setting.  The new default is
  that RDPMC access is only allowed if a perf event is mmap-ed (which is
  needed to correctly interpret RDPMC counter values in any case).

  As a side effect of these changes CR4 handling is cleaned up in the
  x86 code and a shadow copy of the CR4 value is added.

  The extra CR4 manipulation adds ~ <50ns to the context switch cost
  between rdpmc-capable and rdpmc-non-capable mms"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks
  perf/x86: Only allow rdpmc if a perf_event is mapped
  perf: Pass the event to arch_perf_update_userpage()
  perf: Add pmu callbacks to track event mapping and unmapping
  x86: Add a comment clarifying LDT context switching
  x86: Store a per-cpu shadow copy of CR4
  x86: Clean up cr4 manipulation
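[Note: the cr4_set_bits()/cr4_clear_bits()/cr4_init_shadow() helpers used throughout the diff below come from the "x86: Store a per-cpu shadow copy of CR4" patch and live in asm/tlbflush.h, which is outside this diffstat. A simplified sketch of the pattern, for reference; details are abbreviated and should be checked against the actual patch:]

/*
 * Sketch: CR4 is tracked in a per-cpu shadow so reads never touch the
 * (slow) hardware register, and writes are skipped when no bits change.
 */
static inline void cr4_init_shadow(void)
{
	/* Seed the shadow from hardware once, early in boot. */
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}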
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/acpi/sleep.c		2
-rw-r--r--	arch/x86/kernel/cpu/common.c		17
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c	3
-rw-r--r--	arch/x86/kernel/cpu/mcheck/p5.c		3
-rw-r--r--	arch/x86/kernel/cpu/mcheck/winchip.c	3
-rw-r--r--	arch/x86/kernel/cpu/mtrr/cyrix.c	6
-rw-r--r--	arch/x86/kernel/cpu/mtrr/generic.c	6
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	76
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h	2
-rw-r--r--	arch/x86/kernel/head32.c		1
-rw-r--r--	arch/x86/kernel/head64.c		2
-rw-r--r--	arch/x86/kernel/i387.c			3
-rw-r--r--	arch/x86/kernel/process.c		5
-rw-r--r--	arch/x86/kernel/process_32.c		2
-rw-r--r--	arch/x86/kernel/process_64.c		2
-rw-r--r--	arch/x86/kernel/setup.c			2
-rw-r--r--	arch/x86/kernel/xsave.c			3
17 files changed, 98 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 31368207837c..d1daead5fcdd 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -78,7 +78,7 @@ int x86_acpi_suspend_lowlevel(void)
 
 	header->pmode_cr0 = read_cr0();
 	if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
-		header->pmode_cr4 = read_cr4();
+		header->pmode_cr4 = __read_cr4();
 		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
 	}
 	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b15bffcaba6d..b5c8ff5e9dfc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -19,6 +19,7 @@
 #include <asm/archrandom.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
+#include <asm/tlbflush.h>
 #include <asm/debugreg.h>
 #include <asm/sections.h>
 #include <asm/vsyscall.h>
@@ -278,7 +279,7 @@ __setup("nosmep", setup_disable_smep);
 static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_SMEP))
-		set_in_cr4(X86_CR4_SMEP);
+		cr4_set_bits(X86_CR4_SMEP);
 }
 
 static __init int setup_disable_smap(char *arg)
@@ -298,9 +299,9 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 
 	if (cpu_has(c, X86_FEATURE_SMAP)) {
 #ifdef CONFIG_X86_SMAP
-		set_in_cr4(X86_CR4_SMAP);
+		cr4_set_bits(X86_CR4_SMAP);
 #else
-		clear_in_cr4(X86_CR4_SMAP);
+		cr4_clear_bits(X86_CR4_SMAP);
 #endif
 	}
 }
@@ -1295,6 +1296,12 @@ void cpu_init(void)
 	wait_for_master_cpu(cpu);
 
 	/*
+	 * Initialize the CR4 shadow before doing anything that could
+	 * try to read it.
+	 */
+	cr4_init_shadow();
+
+	/*
 	 * Load microcode on this cpu if a valid microcode is available.
 	 * This is early microcode loading procedure.
 	 */
@@ -1313,7 +1320,7 @@ void cpu_init(void)
 
 	pr_debug("Initializing CPU#%d\n", cpu);
 
-	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	/*
 	 * Initialize the per-CPU GDT with the boot GDT,
@@ -1394,7 +1401,7 @@ void cpu_init(void)
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
 	if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
-		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	load_current_idt();
 	switch_to_new_gdt(cpu);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index cdfed7953963..3be9fa69f875 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -44,6 +44,7 @@
 
 #include <asm/processor.h>
 #include <asm/traps.h>
+#include <asm/tlbflush.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
@@ -1452,7 +1453,7 @@ static void __mcheck_cpu_init_generic(void)
 	bitmap_fill(all_banks, MAX_NR_BANKS);
 	machine_check_poll(MCP_UC | m_fl, &all_banks);
 
-	set_in_cr4(X86_CR4_MCE);
+	cr4_set_bits(X86_CR4_MCE);
 
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
 	if (cap & MCG_CTL_P)
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index ec2663a708e4..737b0ad4e61a 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -9,6 +9,7 @@
 
 #include <asm/processor.h>
 #include <asm/traps.h>
+#include <asm/tlbflush.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
@@ -65,7 +66,7 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
65 "Intel old style machine check architecture supported.\n"); 66 "Intel old style machine check architecture supported.\n");
66 67
67 /* Enable MCE: */ 68 /* Enable MCE: */
68 set_in_cr4(X86_CR4_MCE); 69 cr4_set_bits(X86_CR4_MCE);
69 printk(KERN_INFO 70 printk(KERN_INFO
70 "Intel old style machine check reporting enabled on CPU#%d.\n", 71 "Intel old style machine check reporting enabled on CPU#%d.\n",
71 smp_processor_id()); 72 smp_processor_id());
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index bd5d46a32210..44f138296fbe 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -8,6 +8,7 @@
 
 #include <asm/processor.h>
 #include <asm/traps.h>
+#include <asm/tlbflush.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
@@ -36,7 +37,7 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
36 lo &= ~(1<<4); /* Enable MCE */ 37 lo &= ~(1<<4); /* Enable MCE */
37 wrmsr(MSR_IDT_FCR1, lo, hi); 38 wrmsr(MSR_IDT_FCR1, lo, hi);
38 39
39 set_in_cr4(X86_CR4_MCE); 40 cr4_set_bits(X86_CR4_MCE);
40 41
41 printk(KERN_INFO 42 printk(KERN_INFO
42 "Winchip machine check reporting enabled on CPU#0.\n"); 43 "Winchip machine check reporting enabled on CPU#0.\n");
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 9e451b0876b5..f8c81ba0b465 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -138,8 +138,8 @@ static void prepare_set(void)
 
 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
 	if (cpu_has_pge) {
-		cr4 = read_cr4();
-		write_cr4(cr4 & ~X86_CR4_PGE);
+		cr4 = __read_cr4();
+		__write_cr4(cr4 & ~X86_CR4_PGE);
 	}
 
 	/*
@@ -171,7 +171,7 @@ static void post_set(void)
 
 	/* Restore value of CR4 */
 	if (cpu_has_pge)
-		write_cr4(cr4);
+		__write_cr4(cr4);
 }
 
 static void cyrix_set_arr(unsigned int reg, unsigned long base,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0e25a1bc5ab5..7d74f7b3c6ba 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -678,8 +678,8 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
 	if (cpu_has_pge) {
-		cr4 = read_cr4();
-		write_cr4(cr4 & ~X86_CR4_PGE);
+		cr4 = __read_cr4();
+		__write_cr4(cr4 & ~X86_CR4_PGE);
 	}
 
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
@@ -708,7 +708,7 @@ static void post_set(void) __releases(set_atomicity_lock)
 
 	/* Restore value of CR4 */
 	if (cpu_has_pge)
-		write_cr4(cr4);
+		__write_cr4(cr4);
 	raw_spin_unlock(&set_atomicity_lock);
 }
 
714 714
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 143e5f5dc855..b71a7f86d68a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -31,6 +31,8 @@
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/desc.h>
 #include <asm/ldt.h>
@@ -43,6 +45,8 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
 };
 
+struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;
+
 u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -1327,8 +1331,6 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_STARTING:
-		if (x86_pmu.attr_rdpmc)
-			set_in_cr4(X86_CR4_PCE);
 		if (x86_pmu.cpu_starting)
 			x86_pmu.cpu_starting(cpu);
 		break;
@@ -1804,14 +1806,44 @@ static int x86_pmu_event_init(struct perf_event *event)
1804 event->destroy(event); 1806 event->destroy(event);
1805 } 1807 }
1806 1808
1809 if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
1810 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
1811
1807 return err; 1812 return err;
1808} 1813}
1809 1814
1815static void refresh_pce(void *ignored)
1816{
1817 if (current->mm)
1818 load_mm_cr4(current->mm);
1819}
1820
1821static void x86_pmu_event_mapped(struct perf_event *event)
1822{
1823 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
1824 return;
1825
1826 if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
1827 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
1828}
1829
1830static void x86_pmu_event_unmapped(struct perf_event *event)
1831{
1832 if (!current->mm)
1833 return;
1834
1835 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
1836 return;
1837
1838 if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
1839 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
1840}
1841
1810static int x86_pmu_event_idx(struct perf_event *event) 1842static int x86_pmu_event_idx(struct perf_event *event)
1811{ 1843{
1812 int idx = event->hw.idx; 1844 int idx = event->hw.idx;
1813 1845
1814 if (!x86_pmu.attr_rdpmc) 1846 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
1815 return 0; 1847 return 0;
1816 1848
1817 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { 1849 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
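[Note: refresh_pce() above leans on load_mm_cr4(), added by this series to asm/mmu_context.h and also called on every mm switch; it is outside this diffstat. A sketch of its shape, matching the rdpmc_always_available static key and the per-mm perf_rdpmc_allowed count in this hunk, should be checked against the actual patch:]

static inline void load_mm_cr4(struct mm_struct *mm)
{
	/* CR4.PCE now follows the mm rather than being set once at CPU
	 * bringup, which is why CPU_STARTING above loses its
	 * set_in_cr4(X86_CR4_PCE) call. */
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}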
@@ -1829,16 +1861,6 @@ static ssize_t get_attr_rdpmc(struct device *cdev,
 	return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
 }
 
-static void change_rdpmc(void *info)
-{
-	bool enable = !!(unsigned long)info;
-
-	if (enable)
-		set_in_cr4(X86_CR4_PCE);
-	else
-		clear_in_cr4(X86_CR4_PCE);
-}
-
 static ssize_t set_attr_rdpmc(struct device *cdev,
 			      struct device_attribute *attr,
 			      const char *buf, size_t count)
@@ -1850,14 +1872,27 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (ret)
 		return ret;
 
+	if (val > 2)
+		return -EINVAL;
+
 	if (x86_pmu.attr_rdpmc_broken)
 		return -ENOTSUPP;
 
-	if (!!val != !!x86_pmu.attr_rdpmc) {
-		x86_pmu.attr_rdpmc = !!val;
-		on_each_cpu(change_rdpmc, (void *)val, 1);
+	if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
+		/*
+		 * Changing into or out of always available, aka
+		 * perf-event-bypassing mode.  This path is extremely slow,
+		 * but only root can trigger it, so it's okay.
+		 */
+		if (val == 2)
+			static_key_slow_inc(&rdpmc_always_available);
+		else
+			static_key_slow_dec(&rdpmc_always_available);
+		on_each_cpu(refresh_pce, NULL, 1);
 	}
 
+	x86_pmu.attr_rdpmc = val;
+
 	return count;
 }
 
@@ -1900,6 +1935,9 @@ static struct pmu pmu = {
 
 	.event_init		= x86_pmu_event_init,
 
+	.event_mapped		= x86_pmu_event_mapped,
+	.event_unmapped		= x86_pmu_event_unmapped,
+
 	.add			= x86_pmu_add,
 	.del			= x86_pmu_del,
 	.start			= x86_pmu_start,
@@ -1914,13 +1952,15 @@ static struct pmu pmu = {
 	.flush_branch_stack	= x86_pmu_flush_branch_stack,
 };
 
-void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
+void arch_perf_update_userpage(struct perf_event *event,
+			       struct perf_event_mmap_page *userpg, u64 now)
 {
 	struct cyc2ns_data *data;
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
-	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
+	userpg->cap_user_rdpmc =
+		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
 	if (!sched_clock_stable())
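[Note: cap_user_rdpmc is how user space learns whether RDPMC will work for a given mmap-ed event. A hypothetical, minimal user-space sketch (not part of this diff) of the intended flow under the new default rdpmc=1 mode: mmap the event, check the capability, then issue RDPMC with the advertised index. A robust reader would also wrap the reads in the userpg->lock seqcount loop, omitted here for brevity.]

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned long long rdpmc(unsigned int counter)
{
	unsigned int lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return lo | ((unsigned long long)hi << 32);
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* This mmap is what grants this mm CR4.PCE under the new default. */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return 1;

	/* index == 0 means no counter assigned; otherwise rdpmc(index - 1). */
	if (pc->cap_user_rdpmc && pc->index)
		printf("cycles: %llu\n", rdpmc(pc->index - 1));

	return 0;
}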
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 4e6cdb0ddc70..df525d2be1e8 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -71,6 +71,8 @@ struct event_constraint {
 #define PERF_X86_EVENT_COMMITTED	0x8 /* event passed commit_txn */
 #define PERF_X86_EVENT_PEBS_LD_HSW	0x10 /* haswell style datala, load */
 #define PERF_X86_EVENT_PEBS_NA_HSW	0x20 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_RDPMC_ALLOWED	0x40 /* grant rdpmc permission */
+
 
 struct amd_nb {
 	int nb_id;			/* NorthBridge id */
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index d6c1b9836995..2911ef3a9f1c 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -31,6 +31,7 @@ static void __init i386_default_early_setup(void)
 
 asmlinkage __visible void __init i386_start_kernel(void)
 {
+	cr4_init_shadow();
 	sanitize_boot_params(&boot_params);
 
 	/* Call the subarch specific early setup function */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index efcddfaf05f9..c4f8d4659070 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -156,6 +156,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 				(__START_KERNEL & PGDIR_MASK)));
 	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
 
+	cr4_init_shadow();
+
 	/* Kill off the identity-map trampoline */
 	reset_early_page_tables();
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 81049ffab2d6..d5651fce0b71 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -13,6 +13,7 @@
 #include <asm/sigcontext.h>
 #include <asm/processor.h>
 #include <asm/math_emu.h>
+#include <asm/tlbflush.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/i387.h>
@@ -193,7 +194,7 @@ void fpu_init(void)
 	if (cpu_has_xmm)
 		cr4_mask |= X86_CR4_OSXMMEXCPT;
 	if (cr4_mask)
-		set_in_cr4(cr4_mask);
+		cr4_set_bits(cr4_mask);
 
 	cr0 = read_cr0();
 	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e127ddaa2d5a..046e2d620bbe 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -28,6 +28,7 @@
 #include <asm/fpu-internal.h>
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
+#include <asm/tlbflush.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -141,7 +142,7 @@ void flush_thread(void)
 
 static void hard_disable_TSC(void)
 {
-	write_cr4(read_cr4() | X86_CR4_TSD);
+	cr4_set_bits(X86_CR4_TSD);
 }
 
 void disable_TSC(void)
@@ -158,7 +159,7 @@ void disable_TSC(void)
 
 static void hard_enable_TSC(void)
 {
-	write_cr4(read_cr4() & ~X86_CR4_TSD);
+	cr4_clear_bits(X86_CR4_TSD);
 }
 
 static void enable_TSC(void)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8f3ebfe710d0..603c4f99cb5a 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -101,7 +101,7 @@ void __show_regs(struct pt_regs *regs, int all)
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = read_cr3();
-	cr4 = read_cr4_safe();
+	cr4 = __read_cr4_safe();
 	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
 			cr0, cr2, cr3, cr4);
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 5a2c02913af3..67fcc43577d2 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -93,7 +93,7 @@ void __show_regs(struct pt_regs *regs, int all)
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = read_cr3();
-	cr4 = read_cr4();
+	cr4 = __read_cr4();
 
 	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
 	       fs, fsindex, gs, gsindex, shadowgs);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 27d200929864..0a2421cca01f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1179,7 +1179,7 @@ void __init setup_arch(char **cmdline_p)
 
 	if (boot_cpu_data.cpuid_level >= 0) {
 		/* A CPU has %cr4 if and only if it has CPUID */
-		mmu_cr4_features = read_cr4();
+		mmu_cr4_features = __read_cr4();
 		if (trampoline_cr4_features)
 			*trampoline_cr4_features = mmu_cr4_features;
 	}
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 0de1fae2bdf0..34f66e58a896 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -12,6 +12,7 @@
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
 #include <asm/sigframe.h>
+#include <asm/tlbflush.h>
 #include <asm/xcr.h>
 
 /*
@@ -453,7 +454,7 @@ static void prepare_fx_sw_frame(void)
  */
 static inline void xstate_enable(void)
 {
-	set_in_cr4(X86_CR4_OSXSAVE);
+	cr4_set_bits(X86_CR4_OSXSAVE);
 	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
 }
 