 arch/x86/kernel/cpu/perf_event.c | 45 ++++++++++++++++++++++++++++-----------------
 1 file changed, 28 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index fdd97f2e996..2c89264ee79 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -530,7 +530,7 @@ static int x86_pmu_hw_config(struct perf_event *event)
 /*
  * Setup the hardware configuration for a given attr_type
  */
-static int __hw_perf_event_init(struct perf_event *event)
+static int __x86_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
@@ -1414,6 +1414,7 @@ void __init init_hw_perf_events(void)
 	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
 	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
 
+	perf_pmu_register(&pmu);
 	perf_cpu_notifier(x86_pmu_notifier);
 }
 
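Note: the hunk above changes how the x86 PMU reaches the generic perf core. Instead of being handed back from hw_perf_event_init(), the pmu structure is now published once at boot via perf_pmu_register(). A minimal sketch of that pattern follows; the example_* names are hypothetical, it assumes the single-argument perf_pmu_register() form used in this patch, and a real PMU would also fill in the enable/disable/read callbacks.

#include <linux/init.h>
#include <linux/perf_event.h>

/* Hypothetical example_* names, for illustration only. */
static int example_event_init(struct perf_event *event)
{
	return -ENOENT;			/* stub: claim no events */
}

static struct pmu example_pmu = {
	.event_init	= example_event_init,
};

static int __init example_pmu_setup(void)
{
	/* Publish the PMU to the perf core, as init_hw_perf_events() now does. */
	perf_pmu_register(&example_pmu);
	return 0;
}
early_initcall(example_pmu_setup);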
@@ -1483,18 +1484,6 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
 	return 0;
 }
 
-static struct pmu pmu = {
-	.enable		= x86_pmu_enable,
-	.disable	= x86_pmu_disable,
-	.start		= x86_pmu_start,
-	.stop		= x86_pmu_stop,
-	.read		= x86_pmu_read,
-	.unthrottle	= x86_pmu_unthrottle,
-	.start_txn	= x86_pmu_start_txn,
-	.cancel_txn	= x86_pmu_cancel_txn,
-	.commit_txn	= x86_pmu_commit_txn,
-};
-
 /*
  * validate that we can schedule this event
  */
@@ -1569,12 +1558,22 @@ out:
 	return ret;
 }
 
-struct pmu *hw_perf_event_init(struct perf_event *event)
+int x86_pmu_event_init(struct perf_event *event)
 {
 	struct pmu *tmp;
 	int err;
 
-	err = __hw_perf_event_init(event);
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	err = __x86_pmu_event_init(event);
 	if (!err) {
 		/*
 		 * we temporarily connect event to its pmu
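Note: the switch on event->attr.type above gives -ENOENT a specific meaning: "this event type is not handled by this PMU", as opposed to a hard failure. The simplified dispatcher below is illustrative only (it is not the actual perf core code) and shows how a caller can rely on that convention: keep probing candidate PMUs on -ENOENT, abort on any other error.

#include <linux/err.h>
#include <linux/perf_event.h>

/* Illustrative only: probe an array of candidate PMUs for one that claims the event. */
static struct pmu *pick_pmu(struct pmu **candidates, int n,
			    struct perf_event *event)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = candidates[i]->event_init(event);
		if (!ret)
			return candidates[i];	/* this PMU accepted the event */
		if (ret != -ENOENT)
			return ERR_PTR(ret);	/* hard error: stop searching */
		/* -ENOENT: wrong attr.type for this PMU, try the next one */
	}
	return ERR_PTR(-ENOENT);		/* no PMU claimed the event */
}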
@@ -1594,12 +1593,24 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
 	if (err) {
 		if (event->destroy)
 			event->destroy(event);
-		return ERR_PTR(err);
 	}
 
-	return &pmu;
+	return err;
 }
 
+static struct pmu pmu = {
+	.event_init	= x86_pmu_event_init,
+	.enable		= x86_pmu_enable,
+	.disable	= x86_pmu_disable,
+	.start		= x86_pmu_start,
+	.stop		= x86_pmu_stop,
+	.read		= x86_pmu_read,
+	.unthrottle	= x86_pmu_unthrottle,
+	.start_txn	= x86_pmu_start_txn,
+	.cancel_txn	= x86_pmu_cancel_txn,
+	.commit_txn	= x86_pmu_commit_txn,
+};
+
 /*
  * callchain support
  */
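Note: the pmu structure is now defined after x86_pmu_event_init(), so its .event_init initializer can refer to the function without a forward declaration, and x86_pmu_event_init() follows the return convention an event_init() implementation is expected to use: 0 to claim the event, -ENOENT for a foreign attr.type, any other negative errno for a real failure. A hypothetical skeleton spelling out the three outcomes (the example_* name and the EXAMPLE_MAX_RAW_CONFIG limit are made up):

#include <linux/perf_event.h>

#define EXAMPLE_MAX_RAW_CONFIG	0xffULL		/* made-up limit for this sketch */

static int example_event_init(struct perf_event *event)
{
	/* 1. Foreign event type: tell the core to keep probing other PMUs. */
	if (event->attr.type != PERF_TYPE_RAW)
		return -ENOENT;

	/* 2. Our type, but an unusable configuration: report a hard error. */
	if (event->attr.config > EXAMPLE_MAX_RAW_CONFIG)
		return -EINVAL;

	/* 3. Accepted: returning 0 means this PMU now owns the event. */
	return 0;
}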