author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-06-19 12:11:53 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-06-20 06:30:30 -0400
commit     92bf309a9cd5fedd6c8eefbce0b9a95ada82d0a9 (patch)
tree       c52fef4c699f626c0730feccb899eca75c9e4915 /kernel/perf_counter.c
parent     9cffa8d53335d891cc0ecb3824a67118b3ee4b2f (diff)
perf_counter: Push perf_sample_data through the swcounter code
Push the perf_sample_data further outwards to the swcounter interface,
to abstract it away some more.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
 -rw-r--r--  kernel/perf_counter.c  |  55
 1 file changed, 29 insertions(+), 26 deletions(-)
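
For context, the pattern this patch applies can be shown in isolation: collect the per-sample arguments (regs, addr, period) into one struct at the outermost entry point and pass a pointer down the call chain, rather than threading each value through every signature. The sketch below is a minimal, self-contained analogy in plain C; sample_data, record_event and the other names are illustrative stand-ins, not the perf API.

/*
 * Minimal sketch of the "push the sample-data struct down" pattern.
 * All names are illustrative; this is not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

struct sample_data {                    /* stand-in for struct perf_sample_data */
        const void *regs;               /* register snapshot, may be NULL */
        uint64_t addr;                  /* data address tied to the event */
        uint64_t period;                /* filled in by the overflow path */
};

/* Innermost helper sees one pointer instead of a long argument list. */
static void handle_overflow(struct sample_data *data, uint64_t last_period)
{
        data->period = last_period;     /* set the late field where it is known */
        printf("overflow: addr=0x%llx period=%llu\n",
               (unsigned long long)data->addr,
               (unsigned long long)data->period);
}

/* Mid layer forwards the pointer; no repacking of individual fields. */
static void add_event(struct sample_data *data, uint64_t nr)
{
        if (nr && data->regs)           /* mirrors the !neg && data->regs test */
                handle_overflow(data, 4096);
}

/* Public entry point keeps its old signature and builds the struct once. */
static void record_event(uint64_t nr, const void *regs, uint64_t addr)
{
        struct sample_data data = {
                .regs = regs,
                .addr = addr,
        };

        add_event(&data, nr);
}

int main(void)
{
        int fake_regs;

        record_event(1, &fake_regs, 0xdeadbeef);
        return 0;
}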
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index adb6ae506d5b..1a933a221ea4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3171,20 +3171,15 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 }
 
 static void perf_swcounter_overflow(struct perf_counter *counter,
-                                    int nmi, struct pt_regs *regs, u64 addr)
+                                    int nmi, struct perf_sample_data *data)
 {
-        struct perf_sample_data data = {
-                .regs = regs,
-                .addr = addr,
-                .period = counter->hw.last_period,
-        };
+        data->period = counter->hw.last_period;
 
         perf_swcounter_update(counter);
         perf_swcounter_set_period(counter);
-        if (perf_counter_overflow(counter, nmi, &data))
+        if (perf_counter_overflow(counter, nmi, data))
                 /* soft-disable the counter */
                 ;
-
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
@@ -3249,18 +3244,18 @@ static int perf_swcounter_match(struct perf_counter *counter,
 }
 
 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-                               int nmi, struct pt_regs *regs, u64 addr)
+                               int nmi, struct perf_sample_data *data)
 {
         int neg = atomic64_add_negative(nr, &counter->hw.count);
 
-        if (counter->hw.sample_period && !neg && regs)
-                perf_swcounter_overflow(counter, nmi, regs, addr);
+        if (counter->hw.sample_period && !neg && data->regs)
+                perf_swcounter_overflow(counter, nmi, data);
 }
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
-                                     enum perf_type_id type, u32 event,
-                                     u64 nr, int nmi, struct pt_regs *regs,
-                                     u64 addr)
+                                     enum perf_type_id type,
+                                     u32 event, u64 nr, int nmi,
+                                     struct perf_sample_data *data)
 {
         struct perf_counter *counter;
 
@@ -3269,8 +3264,8 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 
         rcu_read_lock();
         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-                if (perf_swcounter_match(counter, type, event, regs))
-                        perf_swcounter_add(counter, nr, nmi, regs, addr);
+                if (perf_swcounter_match(counter, type, event, data->regs))
+                        perf_swcounter_add(counter, nr, nmi, data);
         }
         rcu_read_unlock();
 }
@@ -3289,9 +3284,9 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
         return &cpuctx->recursion[0];
 }
 
-static void __perf_swcounter_event(enum perf_type_id type, u32 event,
-                                   u64 nr, int nmi, struct pt_regs *regs,
-                                   u64 addr)
+static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
+                                    u64 nr, int nmi,
+                                    struct perf_sample_data *data)
 {
         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
         int *recursion = perf_swcounter_recursion_context(cpuctx);
@@ -3304,7 +3299,7 @@ static void __perf_swcounter_event(enum perf_type_id type, u32 event,
         barrier();
 
         perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
-                                 nr, nmi, regs, addr);
+                                 nr, nmi, data);
         rcu_read_lock();
         /*
          * doesn't really matter which of the child contexts the
@@ -3312,7 +3307,7 @@ static void __perf_swcounter_event(enum perf_type_id type, u32 event,
          */
         ctx = rcu_dereference(current->perf_counter_ctxp);
         if (ctx)
-                perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
+                perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
         rcu_read_unlock();
 
         barrier();
@@ -3325,7 +3320,12 @@ out:
 void
 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-        __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
+        struct perf_sample_data data = {
+                .regs = regs,
+                .addr = addr,
+        };
+
+        do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
 }
 
 static void perf_swcounter_read(struct perf_counter *counter)
@@ -3469,12 +3469,15 @@ static const struct pmu perf_ops_task_clock = {
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
-        struct pt_regs *regs = get_irq_regs();
+        struct perf_sample_data data = {
+                .regs = get_irq_regs(),
+                .addr = 0,
+        };
 
-        if (!regs)
-                regs = task_pt_regs(current);
+        if (!data.regs)
+                data.regs = task_pt_regs(current);
 
-        __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
+        do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 