diff options
author | Andy Lutomirski <luto@amacapital.net> | 2014-10-24 18:58:12 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-02-04 06:10:47 -0500 |
commit | 7911d3f7af14a614617e38245fedf98a724e46a9 (patch) | |
tree | 8a8d163aee11daadeda7ecd9e835b86c2eb5890e /arch/x86/kernel | |
parent | c1317ec2b906442930318d9d6e51425c5a69e9cb (diff) |
perf/x86: Only allow rdpmc if a perf_event is mapped
We currently allow any process to use rdpmc. This significantly
weakens the protection offered by PR_TSC_DISABLED, and it could be
helpful to users attempting to exploit timing attacks.
Since we can't enable access to individual counters, use a very
coarse heuristic to limit access to rdpmc: allow access only when
a perf_event is mmapped. This protects seccomp sandboxes.
There is plenty of room to further tighten these restrictions. For
example, this allows rdpmc for any x86_pmu event, but it's only
useful for self-monitoring tasks.
As a side effect, cap_user_rdpmc will now be false for AMD uncore
events. This isn't a real regression, since .event_idx is disabled
for these events anyway for the time being. Whenever that gets
re-added, the cap_user_rdpmc code can be adjusted or refactored
accordingly.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Vince Weaver <vince@deater.net>
Cc: "hillf.zj" <hillf.zj@alibaba-inc.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/a2bdb3cf3a1d70c26980d7c6dddfbaa69f3182bf.1414190806.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 57 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.h | 2 |
2 files changed, 40 insertions, 19 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 73e84a348de1..bec5cff7dc80 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
32 | #include <asm/smp.h> | 32 | #include <asm/smp.h> |
33 | #include <asm/alternative.h> | 33 | #include <asm/alternative.h> |
34 | #include <asm/mmu_context.h> | ||
34 | #include <asm/tlbflush.h> | 35 | #include <asm/tlbflush.h> |
35 | #include <asm/timer.h> | 36 | #include <asm/timer.h> |
36 | #include <asm/desc.h> | 37 | #include <asm/desc.h> |
@@ -1328,8 +1329,6 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
1328 | break; | 1329 | break; |
1329 | 1330 | ||
1330 | case CPU_STARTING: | 1331 | case CPU_STARTING: |
1331 | if (x86_pmu.attr_rdpmc) | ||
1332 | cr4_set_bits(X86_CR4_PCE); | ||
1333 | if (x86_pmu.cpu_starting) | 1332 | if (x86_pmu.cpu_starting) |
1334 | x86_pmu.cpu_starting(cpu); | 1333 | x86_pmu.cpu_starting(cpu); |
1335 | break; | 1334 | break; |
@@ -1805,14 +1804,44 @@ static int x86_pmu_event_init(struct perf_event *event) | |||
1805 | event->destroy(event); | 1804 | event->destroy(event); |
1806 | } | 1805 | } |
1807 | 1806 | ||
1807 | if (ACCESS_ONCE(x86_pmu.attr_rdpmc)) | ||
1808 | event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; | ||
1809 | |||
1808 | return err; | 1810 | return err; |
1809 | } | 1811 | } |
1810 | 1812 | ||
1813 | static void refresh_pce(void *ignored) | ||
1814 | { | ||
1815 | if (current->mm) | ||
1816 | load_mm_cr4(current->mm); | ||
1817 | } | ||
1818 | |||
1819 | static void x86_pmu_event_mapped(struct perf_event *event) | ||
1820 | { | ||
1821 | if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) | ||
1822 | return; | ||
1823 | |||
1824 | if (atomic_inc_return(¤t->mm->context.perf_rdpmc_allowed) == 1) | ||
1825 | on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); | ||
1826 | } | ||
1827 | |||
1828 | static void x86_pmu_event_unmapped(struct perf_event *event) | ||
1829 | { | ||
1830 | if (!current->mm) | ||
1831 | return; | ||
1832 | |||
1833 | if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) | ||
1834 | return; | ||
1835 | |||
1836 | if (atomic_dec_and_test(¤t->mm->context.perf_rdpmc_allowed)) | ||
1837 | on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); | ||
1838 | } | ||
1839 | |||
1811 | static int x86_pmu_event_idx(struct perf_event *event) | 1840 | static int x86_pmu_event_idx(struct perf_event *event) |
1812 | { | 1841 | { |
1813 | int idx = event->hw.idx; | 1842 | int idx = event->hw.idx; |
1814 | 1843 | ||
1815 | if (!x86_pmu.attr_rdpmc) | 1844 | if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) |
1816 | return 0; | 1845 | return 0; |
1817 | 1846 | ||
1818 | if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { | 1847 | if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { |
@@ -1830,16 +1859,6 @@ static ssize_t get_attr_rdpmc(struct device *cdev, | |||
1830 | return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); | 1859 | return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); |
1831 | } | 1860 | } |
1832 | 1861 | ||
1833 | static void change_rdpmc(void *info) | ||
1834 | { | ||
1835 | bool enable = !!(unsigned long)info; | ||
1836 | |||
1837 | if (enable) | ||
1838 | cr4_set_bits(X86_CR4_PCE); | ||
1839 | else | ||
1840 | cr4_clear_bits(X86_CR4_PCE); | ||
1841 | } | ||
1842 | |||
1843 | static ssize_t set_attr_rdpmc(struct device *cdev, | 1862 | static ssize_t set_attr_rdpmc(struct device *cdev, |
1844 | struct device_attribute *attr, | 1863 | struct device_attribute *attr, |
1845 | const char *buf, size_t count) | 1864 | const char *buf, size_t count) |
@@ -1854,11 +1873,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev, | |||
1854 | if (x86_pmu.attr_rdpmc_broken) | 1873 | if (x86_pmu.attr_rdpmc_broken) |
1855 | return -ENOTSUPP; | 1874 | return -ENOTSUPP; |
1856 | 1875 | ||
1857 | if (!!val != !!x86_pmu.attr_rdpmc) { | 1876 | x86_pmu.attr_rdpmc = !!val; |
1858 | x86_pmu.attr_rdpmc = !!val; | ||
1859 | on_each_cpu(change_rdpmc, (void *)val, 1); | ||
1860 | } | ||
1861 | |||
1862 | return count; | 1877 | return count; |
1863 | } | 1878 | } |
1864 | 1879 | ||
@@ -1901,6 +1916,9 @@ static struct pmu pmu = { | |||
1901 | 1916 | ||
1902 | .event_init = x86_pmu_event_init, | 1917 | .event_init = x86_pmu_event_init, |
1903 | 1918 | ||
1919 | .event_mapped = x86_pmu_event_mapped, | ||
1920 | .event_unmapped = x86_pmu_event_unmapped, | ||
1921 | |||
1904 | .add = x86_pmu_add, | 1922 | .add = x86_pmu_add, |
1905 | .del = x86_pmu_del, | 1923 | .del = x86_pmu_del, |
1906 | .start = x86_pmu_start, | 1924 | .start = x86_pmu_start, |
@@ -1922,7 +1940,8 @@ void arch_perf_update_userpage(struct perf_event *event, | |||
1922 | 1940 | ||
1923 | userpg->cap_user_time = 0; | 1941 | userpg->cap_user_time = 0; |
1924 | userpg->cap_user_time_zero = 0; | 1942 | userpg->cap_user_time_zero = 0; |
1925 | userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; | 1943 | userpg->cap_user_rdpmc = |
1944 | !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); | ||
1926 | userpg->pmc_width = x86_pmu.cntval_bits; | 1945 | userpg->pmc_width = x86_pmu.cntval_bits; |
1927 | 1946 | ||
1928 | if (!sched_clock_stable()) | 1947 | if (!sched_clock_stable()) |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 4e6cdb0ddc70..df525d2be1e8 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -71,6 +71,8 @@ struct event_constraint { | |||
71 | #define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */ | 71 | #define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */ |
72 | #define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */ | 72 | #define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */ |
73 | #define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */ | 73 | #define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */ |
74 | #define PERF_X86_EVENT_RDPMC_ALLOWED 0x40 /* grant rdpmc permission */ | ||
75 | |||
74 | 76 | ||
75 | struct amd_nb { | 77 | struct amd_nb { |
76 | int nb_id; /* NorthBridge id */ | 78 | int nb_id; /* NorthBridge id */ |