author	Thomas Gleixner <tglx@linutronix.de>	2008-12-08 13:35:37 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-11 09:45:47 -0500
commit	dfa7c899b401d7dc5d85aca416aee64ac82812f2 (patch)
tree	496b0fee69989fd4127905a888de7135a7969e9e
parent	eab656ae04b9d3b83265e3db01c0d2c46b748ef7 (diff)
perf counters: expand use of counter->event
Impact: change syscall, cleanup

Make use of the new perf_counters event type.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	22
-rw-r--r--	include/linux/perf_counter.h	4
-rw-r--r--	kernel/perf_counter.c	10
3 files changed, 17 insertions(+), 19 deletions(-)
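Everything the x86 code used to receive as loose syscall arguments (event type, IRQ period) now travels in the single struct perf_counter_event that userspace hands to sys_perf_counter_open(). As a rough sketch of the caller's side, assuming only the three fields this patch references (hw_event_type, hw_event_period, hw_raw_ctrl) and nothing else about the struct layout:

/*
 * Hedged sketch, not part of this commit: the event descriptor as a
 * caller might fill it in. Only the three fields the hunks below
 * reference are shown; the rest of the layout is an assumption.
 */
#include <stdio.h>
#include <linux/types.h>

struct perf_counter_event {
	__s32	hw_event_type;		/* generic index, or a raw type */
	__u64	hw_event_period;	/* copied into hwc->irq_period */
	__u64	hw_raw_ctrl;		/* config bits for raw event types */
};

int main(void)
{
	/* Generic event 0 with a 100000-event sampling period. */
	struct perf_counter_event event = {
		.hw_event_type	 = 0,
		.hw_event_period = 100000,
	};

	/* This is what sys_perf_counter_open() would receive as *uevent. */
	printf("type=%d period=%llu\n", event.hw_event_type,
	       (unsigned long long)event.hw_event_period);
	return 0;
}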
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 0a7f3bea2dc6..30e7ebf78275 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -56,9 +56,10 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
 /*
  * Setup the hardware configuration for a given hw_event_type
  */
-int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
+int hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
+	u32 hw_event_type = counter->event.hw_event_type;
 
 	if (unlikely(!perf_counters_initialized))
 		return -EINVAL;
@@ -83,7 +84,7 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
 	hwc->config_base	= MSR_ARCH_PERFMON_EVENTSEL0;
 	hwc->counter_base	= MSR_ARCH_PERFMON_PERFCTR0;
 
-	hwc->irq_period		= counter->__irq_period;
+	hwc->irq_period		= counter->event.hw_event_period;
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -95,21 +96,19 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
 	hwc->next_count = -((s32) hwc->irq_period);
 
 	/*
-	 * Negative event types mean raw encoded event+umask values:
+	 * Raw event type provide the config in the event structure
 	 */
-	if (hw_event_type < 0) {
-		counter->hw_event_type = -hw_event_type;
-		counter->hw_event_type &= ~PERF_COUNT_NMI;
+	hw_event_type &= ~PERF_COUNT_NMI;
+	if (hw_event_type == PERF_COUNT_RAW) {
+		hwc->config |= counter->event.hw_raw_ctrl;
 	} else {
-		hw_event_type &= ~PERF_COUNT_NMI;
 		if (hw_event_type >= max_intel_perfmon_events)
 			return -EINVAL;
 		/*
 		 * The generic map:
 		 */
-		counter->hw_event_type = intel_perfmon_event_map[hw_event_type];
+		hwc->config |= intel_perfmon_event_map[hw_event_type];
 	}
-	hwc->config |= counter->hw_event_type;
 	counter->wakeup_pending = 0;
 
 	return 0;
@@ -373,7 +372,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
 				perf_save_and_restart(counter);
 			}
 		}
-		perf_store_irq_data(leader, counter->hw_event_type);
+		perf_store_irq_data(leader, counter->event.hw_event_type);
 		perf_store_irq_data(leader, atomic64_counter_read(counter));
 	}
 }
@@ -418,7 +417,8 @@ again:
 		perf_store_irq_data(counter, instruction_pointer(regs));
 		break;
 	case PERF_RECORD_GROUP:
-		perf_store_irq_data(counter, counter->hw_event_type);
+		perf_store_irq_data(counter,
+				    counter->event.hw_event_type);
 		perf_store_irq_data(counter,
 				    atomic64_counter_read(counter));
 		perf_handle_group(counter, &status, &ack);
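Pieced together, the new selection logic in hw_perf_counter_init() reads as one if/else on the NMI-stripped event type. Below is a standalone sketch under stated assumptions: the PERF_COUNT_* values and the map entries are illustrative stand-ins, not the kernel's definitions.

/*
 * Standalone sketch of the config-selection logic after this patch,
 * reassembled from the hunks above so it compiles outside the kernel.
 */
#include <stdint.h>
#include <stdio.h>

#define PERF_COUNT_NMI	(1 << 30)	/* stand-in flag bit */
#define PERF_COUNT_RAW	(1 << 29)	/* stand-in raw marker */

static const uint64_t intel_perfmon_event_map[] = { 0x003c, 0x00c0 };
#define max_intel_perfmon_events \
	((int)(sizeof(intel_perfmon_event_map) / sizeof(intel_perfmon_event_map[0])))

static int select_config(int32_t hw_event_type, uint64_t hw_raw_ctrl,
			 uint64_t *config)
{
	/* Strip the NMI flag first, exactly as the patch does. */
	hw_event_type &= ~PERF_COUNT_NMI;

	if (hw_event_type == PERF_COUNT_RAW) {
		/* Raw event types carry the config in the event structure. */
		*config |= hw_raw_ctrl;
	} else {
		if (hw_event_type >= max_intel_perfmon_events)
			return -1;
		/* The generic map: */
		*config |= intel_perfmon_event_map[hw_event_type];
	}
	return 0;
}

int main(void)
{
	uint64_t config = 0;

	if (!select_config(0, 0, &config))
		printf("config=0x%llx\n", (unsigned long long)config);
	return 0;
}

Note the structural change this buys: hwc->config is now composed in one place from counter->event, instead of being staged through the counter->hw_event_type field that the header hunk below removes.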
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index daedd7d87c2a..1f0017673e77 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -96,8 +96,7 @@ struct perf_counter {
 #else
 	atomic_t			count32[2];
 #endif
-	u64				__irq_period;
-
+	struct perf_counter_event	event;
 	struct hw_perf_counter		hw;
 
 	struct perf_counter_context	*ctx;
@@ -111,7 +110,6 @@ struct perf_counter {
 	int				oncpu;
 	int				cpu;
 
-	s32				hw_event_type;
 	enum perf_record_type		record_type;
 
 	/* read() / irq related data */
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 96c333a5b0fc..2557c670a3bb 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -37,7 +37,7 @@ static DEFINE_MUTEX(perf_resource_mutex);
  * Architecture provided APIs - weak aliases:
  */
 
-int __weak hw_perf_counter_init(struct perf_counter *counter, u32 hw_event_type)
+int __weak hw_perf_counter_init(struct perf_counter *counter)
 {
 	return -EINVAL;
 }
@@ -707,7 +707,7 @@ static const struct file_operations perf_fops = {
  * Allocate and initialize a counter structure
  */
 static struct perf_counter *
-perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type)
+perf_counter_alloc(struct perf_counter_event *event, int cpu, u32 record_type)
 {
 	struct perf_counter *counter = kzalloc(sizeof(*counter), GFP_KERNEL);
 
@@ -722,7 +722,7 @@ perf_counter_alloc(u32 hw_event_period, int cpu, u32 record_type)
 	counter->usrdata	= &counter->data[1];
 	counter->cpu		= cpu;
 	counter->record_type	= record_type;
-	counter->__irq_period	= hw_event_period;
+	counter->event		= *event;
 	counter->wakeup_pending = 0;
 
 	return counter;
@@ -750,11 +750,11 @@ sys_perf_counter_open(struct perf_counter_event __user *uevent, u32 record_type,
 		return PTR_ERR(ctx);
 
 	ret = -ENOMEM;
-	counter = perf_counter_alloc(event.hw_event_period, cpu, record_type);
+	counter = perf_counter_alloc(&event, cpu, record_type);
 	if (!counter)
 		goto err_put_context;
 
-	ret = hw_perf_counter_init(counter, event.hw_event_type);
+	ret = hw_perf_counter_init(counter);
 	if (ret)
 		goto err_free_put_context;
 