author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-03-19 15:26:18 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-04-06 03:30:17 -0400
commit     b8e83514b64577b48bfb794fe85fcde40a9343ca (patch)
tree       3fd79b41c917f56fe5a5b1f779b892110e0ec909 /kernel
parent     e077df4f439681e43f0db8255b2d215b342ebdc6 (diff)
perf_counter: revamp syscall input ABI
Impact: modify ABI
The hardware/software classification in hw_event->type became a little
strained due to the addition of tracepoint tracing.
Instead, split the field up: a new type field explicitly specifies the counter
type (hardware, software, or tracepoint), while the event_id field selects
which event of that type to count.
Raw counters still work as before; only the raw config now goes into
raw_event.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.836807573@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/perf_counter.c   83
1 file changed, 50 insertions, 33 deletions
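To make the new split concrete, the fragment below sketches how a counter spec might be filled in after this change. It is illustrative only: it assumes just the names visible in this commit (struct perf_counter_hw_event, PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK, and the type / event_id / raw_type / raw_event fields); the actual field layout is defined in the matching include/linux/perf_counter.h change, which is outside this diffstat.

/* Illustrative sketch, not part of this patch. */
struct perf_counter_hw_event hw_event = {
	/* counter class is now explicit ... */
	.type		= PERF_TYPE_SOFTWARE,
	/* ... and the event within that class is selected separately */
	.event_id	= PERF_COUNT_CPU_CLOCK,
};

/*
 * A raw hardware counter would instead set .raw_type and put its raw
 * config into .raw_event; perf_swcounter_match() and perf_counter_alloc()
 * below then bypass the type/event_id dispatch entirely.
 */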
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0bbe3e45ba0d..68a56a68bc74 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1395,12 +1395,6 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 	atomic64_set(&hwc->count, -left);
 }
 
-static void perf_swcounter_save_and_restart(struct perf_counter *counter)
-{
-	perf_swcounter_update(counter);
-	perf_swcounter_set_period(counter);
-}
-
 static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data)
 {
 	struct perf_data *irqdata = counter->irqdata;
@@ -1421,7 +1415,7 @@ static void perf_swcounter_handle_group(struct perf_counter *sibling)
 
 	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
 		counter->hw_ops->read(counter);
-		perf_swcounter_store_irq(sibling, counter->hw_event.type);
+		perf_swcounter_store_irq(sibling, counter->hw_event.event_config);
 		perf_swcounter_store_irq(sibling, atomic64_read(&counter->count));
 	}
 }
@@ -1477,21 +1471,25 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct pt_regs *regs)
 {
-	perf_swcounter_save_and_restart(counter);
+	perf_swcounter_update(counter);
+	perf_swcounter_set_period(counter);
 	perf_swcounter_interrupt(counter, nmi, regs);
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
-				enum hw_event_types event,
-				struct pt_regs *regs)
+				enum perf_event_types type,
+				u32 event, struct pt_regs *regs)
 {
 	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
 		return 0;
 
-	if (counter->hw_event.raw)
+	if (counter->hw_event.raw_type)
+		return 0;
+
+	if (counter->hw_event.type != type)
 		return 0;
 
-	if (counter->hw_event.type != event)
+	if (counter->hw_event.event_id != event)
 		return 0;
 
 	if (counter->hw_event.exclude_user && user_mode(regs))
@@ -1512,8 +1510,8 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 }
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
-				     enum hw_event_types event, u64 nr,
-				     int nmi, struct pt_regs *regs)
+				     enum perf_event_types type, u32 event,
+				     u64 nr, int nmi, struct pt_regs *regs)
 {
 	struct perf_counter *counter;
 
@@ -1522,24 +1520,31 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_swcounter_match(counter, event, regs))
+		if (perf_swcounter_match(counter, type, event, regs))
 			perf_swcounter_add(counter, nr, nmi, regs);
 	}
 	rcu_read_unlock();
 }
 
-void perf_swcounter_event(enum hw_event_types event, u64 nr,
-			  int nmi, struct pt_regs *regs)
+static void __perf_swcounter_event(enum perf_event_types type, u32 event,
+				   u64 nr, int nmi, struct pt_regs *regs)
 {
 	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
 
-	perf_swcounter_ctx_event(&cpuctx->ctx, event, nr, nmi, regs);
-	if (cpuctx->task_ctx)
-		perf_swcounter_ctx_event(cpuctx->task_ctx, event, nr, nmi, regs);
+	perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
+	if (cpuctx->task_ctx) {
+		perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
+				nr, nmi, regs);
+	}
 
 	put_cpu_var(perf_cpu_context);
 }
 
+void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
+{
+	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
+}
+
 static void perf_swcounter_read(struct perf_counter *counter)
 {
 	perf_swcounter_update(counter);
@@ -1733,8 +1738,12 @@ static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
-	perf_swcounter_event(PERF_TP_EVENTS_MIN + event_id, 1, 1,
-			task_pt_regs(current));
+	struct pt_regs *regs = get_irq_regs();
+
+	if (!regs)
+		regs = task_pt_regs(current);
+
+	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
 }
 
 extern int ftrace_profile_enable(int);
@@ -1742,15 +1751,13 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-	int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN;
-
-	ftrace_profile_disable(event_id);
+	ftrace_profile_disable(counter->hw_event.event_id);
 }
 
 static const struct hw_perf_counter_ops *
 tp_perf_counter_init(struct perf_counter *counter)
 {
-	int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN;
+	int event_id = counter->hw_event.event_id;
 	int ret;
 
 	ret = ftrace_profile_enable(event_id);
@@ -1758,6 +1765,7 @@ tp_perf_counter_init(struct perf_counter *counter)
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
+	counter->hw.irq_period = counter->hw_event.irq_period;
 
 	return &perf_ops_generic;
 }
@@ -1783,7 +1791,7 @@ sw_perf_counter_init(struct perf_counter *counter)
 	 * to be kernel events, and page faults are never hypervisor
 	 * events.
	 */
-	switch (counter->hw_event.type) {
+	switch (counter->hw_event.event_id) {
 	case PERF_COUNT_CPU_CLOCK:
 		hw_ops = &perf_ops_cpu_clock;
 
@@ -1813,9 +1821,6 @@ sw_perf_counter_init(struct perf_counter *counter)
 		if (!counter->hw_event.exclude_kernel)
 			hw_ops = &perf_ops_cpu_migrations;
 		break;
-	default:
-		hw_ops = tp_perf_counter_init(counter);
-		break;
 	}
 
 	if (hw_ops)
@@ -1870,10 +1875,22 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 		counter->state = PERF_COUNTER_STATE_OFF;
 
 	hw_ops = NULL;
-	if (!hw_event->raw && hw_event->type < 0)
-		hw_ops = sw_perf_counter_init(counter);
-	else
+
+	if (hw_event->raw_type)
+		hw_ops = hw_perf_counter_init(counter);
+	else switch (hw_event->type) {
+	case PERF_TYPE_HARDWARE:
 		hw_ops = hw_perf_counter_init(counter);
+		break;
+
+	case PERF_TYPE_SOFTWARE:
+		hw_ops = sw_perf_counter_init(counter);
+		break;
+
+	case PERF_TYPE_TRACEPOINT:
+		hw_ops = tp_perf_counter_init(counter);
+		break;
+	}
 
 	if (!hw_ops) {
 		kfree(counter);
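For reference, a sketch of what call sites look like after this change. The entry points perf_swcounter_event() and perf_tpcounter_event() are the ones shown in the diff above; the surrounding example functions and the PERF_COUNT_PAGE_FAULTS event id are assumptions for illustration, not part of this patch.

/* Illustrative only, not part of this patch. */
static void example_account_page_fault(struct pt_regs *regs)
{
	/* u32 event id; PERF_TYPE_SOFTWARE is supplied by the wrapper */
	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
}

#ifdef CONFIG_EVENT_PROFILE
static void example_trace_hook(int ftrace_event_id)
{
	/* routed internally as __perf_swcounter_event(PERF_TYPE_TRACEPOINT, ...) */
	perf_tpcounter_event(ftrace_event_id);
}
#endif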