diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-06-10 15:02:22 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-06-10 20:39:02 -0400 |
commit | df1a132bf3d3508f863336c80a27806a2ac947e0 (patch) | |
tree | 2aa26b9c5d0528e816a80bd3b58c9b2442670d5c /kernel | |
parent | ea1900e571d40a3ce60c835c2f21e1fd8c5cb663 (diff) |
perf_counter: Introduce struct for sample data
To allow easy extension of the sample data, put it in a structure.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_counter.c | 38 |
1 file changed, 22 insertions, 16 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index ae591a1275a6..4fe85e804f43 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -2378,8 +2378,8 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) | |||
2378 | return task_pid_nr_ns(p, counter->ns); | 2378 | return task_pid_nr_ns(p, counter->ns); |
2379 | } | 2379 | } |
2380 | 2380 | ||
2381 | static void perf_counter_output(struct perf_counter *counter, | 2381 | static void perf_counter_output(struct perf_counter *counter, int nmi, |
2382 | int nmi, struct pt_regs *regs, u64 addr) | 2382 | struct perf_sample_data *data) |
2383 | { | 2383 | { |
2384 | int ret; | 2384 | int ret; |
2385 | u64 sample_type = counter->attr.sample_type; | 2385 | u64 sample_type = counter->attr.sample_type; |
@@ -2404,10 +2404,10 @@ static void perf_counter_output(struct perf_counter *counter, | |||
2404 | header.size = sizeof(header); | 2404 | header.size = sizeof(header); |
2405 | 2405 | ||
2406 | header.misc = PERF_EVENT_MISC_OVERFLOW; | 2406 | header.misc = PERF_EVENT_MISC_OVERFLOW; |
2407 | header.misc |= perf_misc_flags(regs); | 2407 | header.misc |= perf_misc_flags(data->regs); |
2408 | 2408 | ||
2409 | if (sample_type & PERF_SAMPLE_IP) { | 2409 | if (sample_type & PERF_SAMPLE_IP) { |
2410 | ip = perf_instruction_pointer(regs); | 2410 | ip = perf_instruction_pointer(data->regs); |
2411 | header.type |= PERF_SAMPLE_IP; | 2411 | header.type |= PERF_SAMPLE_IP; |
2412 | header.size += sizeof(ip); | 2412 | header.size += sizeof(ip); |
2413 | } | 2413 | } |
@@ -2460,7 +2460,7 @@ static void perf_counter_output(struct perf_counter *counter, | |||
2460 | } | 2460 | } |
2461 | 2461 | ||
2462 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 2462 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
2463 | callchain = perf_callchain(regs); | 2463 | callchain = perf_callchain(data->regs); |
2464 | 2464 | ||
2465 | if (callchain) { | 2465 | if (callchain) { |
2466 | callchain_size = (1 + callchain->nr) * sizeof(u64); | 2466 | callchain_size = (1 + callchain->nr) * sizeof(u64); |
@@ -2486,7 +2486,7 @@ static void perf_counter_output(struct perf_counter *counter, | |||
2486 | perf_output_put(&handle, time); | 2486 | perf_output_put(&handle, time); |
2487 | 2487 | ||
2488 | if (sample_type & PERF_SAMPLE_ADDR) | 2488 | if (sample_type & PERF_SAMPLE_ADDR) |
2489 | perf_output_put(&handle, addr); | 2489 | perf_output_put(&handle, data->addr); |
2490 | 2490 | ||
2491 | if (sample_type & PERF_SAMPLE_ID) | 2491 | if (sample_type & PERF_SAMPLE_ID) |
2492 | perf_output_put(&handle, counter->id); | 2492 | perf_output_put(&handle, counter->id); |
@@ -2950,8 +2950,8 @@ static void perf_log_throttle(struct perf_counter *counter, int enable) | |||
2950 | * Generic counter overflow handling. | 2950 | * Generic counter overflow handling. |
2951 | */ | 2951 | */ |
2952 | 2952 | ||
2953 | int perf_counter_overflow(struct perf_counter *counter, | 2953 | int perf_counter_overflow(struct perf_counter *counter, int nmi, |
2954 | int nmi, struct pt_regs *regs, u64 addr) | 2954 | struct perf_sample_data *data) |
2955 | { | 2955 | { |
2956 | int events = atomic_read(&counter->event_limit); | 2956 | int events = atomic_read(&counter->event_limit); |
2957 | int throttle = counter->pmu->unthrottle != NULL; | 2957 | int throttle = counter->pmu->unthrottle != NULL; |
@@ -3005,7 +3005,7 @@ int perf_counter_overflow(struct perf_counter *counter, | |||
3005 | perf_counter_disable(counter); | 3005 | perf_counter_disable(counter); |
3006 | } | 3006 | } |
3007 | 3007 | ||
3008 | perf_counter_output(counter, nmi, regs, addr); | 3008 | perf_counter_output(counter, nmi, data); |
3009 | return ret; | 3009 | return ret; |
3010 | } | 3010 | } |
3011 | 3011 | ||
@@ -3054,24 +3054,25 @@ static void perf_swcounter_set_period(struct perf_counter *counter) | |||
3054 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | 3054 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) |
3055 | { | 3055 | { |
3056 | enum hrtimer_restart ret = HRTIMER_RESTART; | 3056 | enum hrtimer_restart ret = HRTIMER_RESTART; |
3057 | struct perf_sample_data data; | ||
3057 | struct perf_counter *counter; | 3058 | struct perf_counter *counter; |
3058 | struct pt_regs *regs; | ||
3059 | u64 period; | 3059 | u64 period; |
3060 | 3060 | ||
3061 | counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); | 3061 | counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); |
3062 | counter->pmu->read(counter); | 3062 | counter->pmu->read(counter); |
3063 | 3063 | ||
3064 | regs = get_irq_regs(); | 3064 | data.addr = 0; |
3065 | data.regs = get_irq_regs(); | ||
3065 | /* | 3066 | /* |
3066 | * In case we exclude kernel IPs or are somehow not in interrupt | 3067 | * In case we exclude kernel IPs or are somehow not in interrupt |
3067 | * context, provide the next best thing, the user IP. | 3068 | * context, provide the next best thing, the user IP. |
3068 | */ | 3069 | */ |
3069 | if ((counter->attr.exclude_kernel || !regs) && | 3070 | if ((counter->attr.exclude_kernel || !data.regs) && |
3070 | !counter->attr.exclude_user) | 3071 | !counter->attr.exclude_user) |
3071 | regs = task_pt_regs(current); | 3072 | data.regs = task_pt_regs(current); |
3072 | 3073 | ||
3073 | if (regs) { | 3074 | if (data.regs) { |
3074 | if (perf_counter_overflow(counter, 0, regs, 0)) | 3075 | if (perf_counter_overflow(counter, 0, &data)) |
3075 | ret = HRTIMER_NORESTART; | 3076 | ret = HRTIMER_NORESTART; |
3076 | } | 3077 | } |
3077 | 3078 | ||
@@ -3084,9 +3085,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | |||
3084 | static void perf_swcounter_overflow(struct perf_counter *counter, | 3085 | static void perf_swcounter_overflow(struct perf_counter *counter, |
3085 | int nmi, struct pt_regs *regs, u64 addr) | 3086 | int nmi, struct pt_regs *regs, u64 addr) |
3086 | { | 3087 | { |
3088 | struct perf_sample_data data = { | ||
3089 | .regs = regs, | ||
3090 | .addr = addr, | ||
3091 | }; | ||
3092 | |||
3087 | perf_swcounter_update(counter); | 3093 | perf_swcounter_update(counter); |
3088 | perf_swcounter_set_period(counter); | 3094 | perf_swcounter_set_period(counter); |
3089 | if (perf_counter_overflow(counter, nmi, regs, addr)) | 3095 | if (perf_counter_overflow(counter, nmi, &data)) |
3090 | /* soft-disable the counter */ | 3096 | /* soft-disable the counter */ |
3091 | ; | 3097 | ; |
3092 | 3098 | ||