Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
 arch/x86/kernel/cpu/perf_event.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8e132931614d..79f9f848bee4 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event_list[i]) {
 
+			if (i >= cpuc->n_events - cpuc->n_added)
+				--cpuc->n_added;
+
 			if (x86_pmu.put_event_constraints)
 				x86_pmu.put_event_constraints(cpuc, event);
 
@@ -1521,6 +1524,8 @@ static int __init init_hw_perf_events(void)
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
+	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+
 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
 		quirk->func();
 
@@ -1534,7 +1539,6 @@ static int __init init_hw_perf_events(void)
 	__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
 			   0, x86_pmu.num_counters, 0, 0);
 
-	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
 	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
 	if (x86_pmu.event_attrs)
@@ -1820,9 +1824,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (ret)
 		return ret;
 
+	if (x86_pmu.attr_rdpmc_broken)
+		return -ENOTSUPP;
+
 	if (!!val != !!x86_pmu.attr_rdpmc) {
 		x86_pmu.attr_rdpmc = !!val;
-		smp_call_function(change_rdpmc, (void *)val, 1);
+		on_each_cpu(change_rdpmc, (void *)val, 1);
 	}
 
 	return count;
@@ -1883,21 +1890,27 @@ static struct pmu pmu = {
 
 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
+	struct cyc2ns_data *data;
+
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
 	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!sched_clock_stable)
+	if (!sched_clock_stable())
 		return;
 
+	data = cyc2ns_read_begin();
+
 	userpg->cap_user_time = 1;
-	userpg->time_mult = this_cpu_read(cyc2ns);
-	userpg->time_shift = CYC2NS_SCALE_FACTOR;
-	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
+	userpg->time_mult = data->cyc2ns_mul;
+	userpg->time_shift = data->cyc2ns_shift;
+	userpg->time_offset = data->cyc2ns_offset - now;
 
 	userpg->cap_user_time_zero = 1;
-	userpg->time_zero = this_cpu_read(cyc2ns_offset);
+	userpg->time_zero = data->cyc2ns_offset;
+
+	cyc2ns_read_end(data);
 }
 
 /*
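
For context, the fields that arch_perf_update_userpage() fills in above (cap_user_rdpmc, pmc_width, index, offset) are consumed from user space through the mmap()ed perf_event_mmap_page. Below is a rough sketch of such a consumer, loosely following the self-monitoring recipe documented in include/uapi/linux/perf_event.h; the helper names (rdpmc(), read_self_count()) are my own for illustration, and the time_enabled/time_running and time_mult/time_shift/time_offset scaling is omitted for brevity, so treat it as a sketch rather than the canonical implementation:

	#include <stdint.h>
	#include <linux/perf_event.h>

	static inline uint64_t rdpmc(uint32_t counter)
	{
		uint32_t low, high;

		asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
		return low | ((uint64_t)high << 32);
	}

	/*
	 * Read a self-monitored counter without entering the kernel.
	 * @pc points at the first mmap()ed page of the perf_event fd.
	 */
	static uint64_t read_self_count(volatile struct perf_event_mmap_page *pc)
	{
		uint64_t count;
		uint32_t seq, idx;

		do {
			/* seqcount-style retry loop against pc->lock */
			seq = pc->lock;
			__asm__ __volatile__("" ::: "memory");

			idx   = pc->index;
			count = pc->offset;
			if (pc->cap_user_rdpmc && idx) {
				/* sign-extend the raw PMC value to 64 bits */
				int64_t pmc = rdpmc(idx - 1);

				pmc <<= 64 - pc->pmc_width;
				pmc >>= 64 - pc->pmc_width;
				count += pmc;
			}

			__asm__ __volatile__("" ::: "memory");
		} while (pc->lock != seq);

		return count;
	}

The cyc2ns data exported just above (time_mult, time_shift, time_offset and time_zero) is meant to be read inside the same retry loop to convert a TSC value into nanoseconds; that conversion is left out of the sketch.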