diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-03-23 13:22:06 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:30:25 -0400 |
commit | f4a2deb4860497f4332cf6a1acddab3dd628ddf0 (patch) | |
tree | 1655c7c000edce20d2c5b54cf12f99c23340371e /kernel/perf_counter.c | |
parent | af9522cf133e9be6da8525a46a9ed7e7659f0e1a (diff) |
perf_counter: remove the event config bitfields
Since the bitfields turned into a bit of a mess, remove them and rely on
good old masks.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Orig-LKML-Reference: <20090323172417.059499915@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r-- | kernel/perf_counter.c | 22 |
1 file changed, 13 insertions, 9 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f054b8c9bf96..ca14fc41ccdf 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -1379,7 +1379,7 @@ static void perf_counter_handle_group(struct perf_counter *counter) | |||
1379 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | 1379 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { |
1380 | if (sub != counter) | 1380 | if (sub != counter) |
1381 | sub->hw_ops->read(sub); | 1381 | sub->hw_ops->read(sub); |
1382 | perf_counter_store_irq(counter, sub->hw_event.event_config); | 1382 | perf_counter_store_irq(counter, sub->hw_event.config); |
1383 | perf_counter_store_irq(counter, atomic64_read(&sub->count)); | 1383 | perf_counter_store_irq(counter, atomic64_read(&sub->count)); |
1384 | } | 1384 | } |
1385 | } | 1385 | } |
@@ -1489,13 +1489,13 @@ static int perf_swcounter_match(struct perf_counter *counter, | |||
1489 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | 1489 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) |
1490 | return 0; | 1490 | return 0; |
1491 | 1491 | ||
1492 | if (counter->hw_event.raw_type) | 1492 | if (perf_event_raw(&counter->hw_event)) |
1493 | return 0; | 1493 | return 0; |
1494 | 1494 | ||
1495 | if (counter->hw_event.type != type) | 1495 | if (perf_event_type(&counter->hw_event) != type) |
1496 | return 0; | 1496 | return 0; |
1497 | 1497 | ||
1498 | if (counter->hw_event.event_id != event) | 1498 | if (perf_event_id(&counter->hw_event) != event) |
1499 | return 0; | 1499 | return 0; |
1500 | 1500 | ||
1501 | if (counter->hw_event.exclude_user && user_mode(regs)) | 1501 | if (counter->hw_event.exclude_user && user_mode(regs)) |
@@ -1757,13 +1757,13 @@ extern void ftrace_profile_disable(int); | |||
1757 | 1757 | ||
1758 | static void tp_perf_counter_destroy(struct perf_counter *counter) | 1758 | static void tp_perf_counter_destroy(struct perf_counter *counter) |
1759 | { | 1759 | { |
1760 | ftrace_profile_disable(counter->hw_event.event_id); | 1760 | ftrace_profile_disable(perf_event_id(&counter->hw_event)); |
1761 | } | 1761 | } |
1762 | 1762 | ||
1763 | static const struct hw_perf_counter_ops * | 1763 | static const struct hw_perf_counter_ops * |
1764 | tp_perf_counter_init(struct perf_counter *counter) | 1764 | tp_perf_counter_init(struct perf_counter *counter) |
1765 | { | 1765 | { |
1766 | int event_id = counter->hw_event.event_id; | 1766 | int event_id = perf_event_id(&counter->hw_event); |
1767 | int ret; | 1767 | int ret; |
1768 | 1768 | ||
1769 | ret = ftrace_profile_enable(event_id); | 1769 | ret = ftrace_profile_enable(event_id); |
@@ -1797,7 +1797,7 @@ sw_perf_counter_init(struct perf_counter *counter) | |||
1797 | * to be kernel events, and page faults are never hypervisor | 1797 | * to be kernel events, and page faults are never hypervisor |
1798 | * events. | 1798 | * events. |
1799 | */ | 1799 | */ |
1800 | switch (counter->hw_event.event_id) { | 1800 | switch (perf_event_id(&counter->hw_event)) { |
1801 | case PERF_COUNT_CPU_CLOCK: | 1801 | case PERF_COUNT_CPU_CLOCK: |
1802 | hw_ops = &perf_ops_cpu_clock; | 1802 | hw_ops = &perf_ops_cpu_clock; |
1803 | 1803 | ||
@@ -1882,9 +1882,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, | |||
1882 | 1882 | ||
1883 | hw_ops = NULL; | 1883 | hw_ops = NULL; |
1884 | 1884 | ||
1885 | if (hw_event->raw_type) | 1885 | if (perf_event_raw(hw_event)) { |
1886 | hw_ops = hw_perf_counter_init(counter); | 1886 | hw_ops = hw_perf_counter_init(counter); |
1887 | else switch (hw_event->type) { | 1887 | goto done; |
1888 | } | ||
1889 | |||
1890 | switch (perf_event_type(hw_event)) { | ||
1888 | case PERF_TYPE_HARDWARE: | 1891 | case PERF_TYPE_HARDWARE: |
1889 | hw_ops = hw_perf_counter_init(counter); | 1892 | hw_ops = hw_perf_counter_init(counter); |
1890 | break; | 1893 | break; |
@@ -1902,6 +1905,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event, | |||
1902 | kfree(counter); | 1905 | kfree(counter); |
1903 | return NULL; | 1906 | return NULL; |
1904 | } | 1907 | } |
1908 | done: | ||
1905 | counter->hw_ops = hw_ops; | 1909 | counter->hw_ops = hw_ops; |
1906 | 1910 | ||
1907 | return counter; | 1911 | return counter; |