diff options
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 57 |
1 files changed, 38 insertions, 19 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 73e84a348de1..bec5cff7dc80 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
32 | #include <asm/smp.h> | 32 | #include <asm/smp.h> |
33 | #include <asm/alternative.h> | 33 | #include <asm/alternative.h> |
34 | #include <asm/mmu_context.h> | ||
34 | #include <asm/tlbflush.h> | 35 | #include <asm/tlbflush.h> |
35 | #include <asm/timer.h> | 36 | #include <asm/timer.h> |
36 | #include <asm/desc.h> | 37 | #include <asm/desc.h> |
@@ -1328,8 +1329,6 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
1328 | break; | 1329 | break; |
1329 | 1330 | ||
1330 | case CPU_STARTING: | 1331 | case CPU_STARTING: |
1331 | if (x86_pmu.attr_rdpmc) | ||
1332 | cr4_set_bits(X86_CR4_PCE); | ||
1333 | if (x86_pmu.cpu_starting) | 1332 | if (x86_pmu.cpu_starting) |
1334 | x86_pmu.cpu_starting(cpu); | 1333 | x86_pmu.cpu_starting(cpu); |
1335 | break; | 1334 | break; |
@@ -1805,14 +1804,44 @@ static int x86_pmu_event_init(struct perf_event *event) | |||
1805 | event->destroy(event); | 1804 | event->destroy(event); |
1806 | } | 1805 | } |
1807 | 1806 | ||
1807 | if (ACCESS_ONCE(x86_pmu.attr_rdpmc)) | ||
1808 | event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; | ||
1809 | |||
1808 | return err; | 1810 | return err; |
1809 | } | 1811 | } |
1810 | 1812 | ||
1813 | static void refresh_pce(void *ignored) | ||
1814 | { | ||
1815 | if (current->mm) | ||
1816 | load_mm_cr4(current->mm); | ||
1817 | } | ||
1818 | |||
1819 | static void x86_pmu_event_mapped(struct perf_event *event) | ||
1820 | { | ||
1821 | if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) | ||
1822 | return; | ||
1823 | |||
1824 | if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1) | ||
1825 | on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); | ||
1826 | } | ||
1827 | |||
1828 | static void x86_pmu_event_unmapped(struct perf_event *event) | ||
1829 | { | ||
1830 | if (!current->mm) | ||
1831 | return; | ||
1832 | |||
1833 | if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) | ||
1834 | return; | ||
1835 | |||
1836 | if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed)) | ||
1837 | on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); | ||
1838 | } | ||
1839 | |||
1811 | static int x86_pmu_event_idx(struct perf_event *event) | 1840 | static int x86_pmu_event_idx(struct perf_event *event) |
1812 | { | 1841 | { |
1813 | int idx = event->hw.idx; | 1842 | int idx = event->hw.idx; |
1814 | 1843 | ||
1815 | if (!x86_pmu.attr_rdpmc) | 1844 | if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) |
1816 | return 0; | 1845 | return 0; |
1817 | 1846 | ||
1818 | if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { | 1847 | if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { |
@@ -1830,16 +1859,6 @@ static ssize_t get_attr_rdpmc(struct device *cdev, | |||
1830 | return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); | 1859 | return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); |
1831 | } | 1860 | } |
1832 | 1861 | ||
1833 | static void change_rdpmc(void *info) | ||
1834 | { | ||
1835 | bool enable = !!(unsigned long)info; | ||
1836 | |||
1837 | if (enable) | ||
1838 | cr4_set_bits(X86_CR4_PCE); | ||
1839 | else | ||
1840 | cr4_clear_bits(X86_CR4_PCE); | ||
1841 | } | ||
1842 | |||
1843 | static ssize_t set_attr_rdpmc(struct device *cdev, | 1862 | static ssize_t set_attr_rdpmc(struct device *cdev, |
1844 | struct device_attribute *attr, | 1863 | struct device_attribute *attr, |
1845 | const char *buf, size_t count) | 1864 | const char *buf, size_t count) |
@@ -1854,11 +1873,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev, | |||
1854 | if (x86_pmu.attr_rdpmc_broken) | 1873 | if (x86_pmu.attr_rdpmc_broken) |
1855 | return -ENOTSUPP; | 1874 | return -ENOTSUPP; |
1856 | 1875 | ||
1857 | if (!!val != !!x86_pmu.attr_rdpmc) { | 1876 | x86_pmu.attr_rdpmc = !!val; |
1858 | x86_pmu.attr_rdpmc = !!val; | ||
1859 | on_each_cpu(change_rdpmc, (void *)val, 1); | ||
1860 | } | ||
1861 | |||
1862 | return count; | 1877 | return count; |
1863 | } | 1878 | } |
1864 | 1879 | ||
@@ -1901,6 +1916,9 @@ static struct pmu pmu = { | |||
1901 | 1916 | ||
1902 | .event_init = x86_pmu_event_init, | 1917 | .event_init = x86_pmu_event_init, |
1903 | 1918 | ||
1919 | .event_mapped = x86_pmu_event_mapped, | ||
1920 | .event_unmapped = x86_pmu_event_unmapped, | ||
1921 | |||
1904 | .add = x86_pmu_add, | 1922 | .add = x86_pmu_add, |
1905 | .del = x86_pmu_del, | 1923 | .del = x86_pmu_del, |
1906 | .start = x86_pmu_start, | 1924 | .start = x86_pmu_start, |
@@ -1922,7 +1940,8 @@ void arch_perf_update_userpage(struct perf_event *event, | |||
1922 | 1940 | ||
1923 | userpg->cap_user_time = 0; | 1941 | userpg->cap_user_time = 0; |
1924 | userpg->cap_user_time_zero = 0; | 1942 | userpg->cap_user_time_zero = 0; |
1925 | userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; | 1943 | userpg->cap_user_rdpmc = |
1944 | !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); | ||
1926 | userpg->pmc_width = x86_pmu.cntval_bits; | 1945 | userpg->pmc_width = x86_pmu.cntval_bits; |
1927 | 1946 | ||
1928 | if (!sched_clock_stable()) | 1947 | if (!sched_clock_stable()) |