author     Ingo Molnar <mingo@kernel.org>  2012-05-14 08:43:40 -0400
committer  Ingo Molnar <mingo@kernel.org>  2012-05-14 08:43:40 -0400
commit     9cba26e66d09bf394ae5a739627a1dc8b7cae6f4 (patch)
tree       f03743d576a0c7826b9921ad47e70370ebe80a22 /kernel/events
parent     ec83db0f78cd44c3b586ec1c3a348d1a8a389797 (diff)
parent     73eff9f56e15598c8399c0b86899fd889b97f085 (diff)
Merge branch 'perf/uprobes' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/uprobes
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c  25
1 file changed, 15 insertions, 10 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a6a9ec4cd8f5..e82c7a1face9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-void __perf_event_task_sched_out(struct task_struct *task,
-                                 struct task_struct *next)
+static void __perf_event_task_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
         int ctxn;
 
@@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *prev,
-                                struct task_struct *task)
+static void __perf_event_task_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
         struct perf_event_context *ctx;
         int ctxn;
@@ -2305,6 +2305,12 @@ void __perf_event_task_sched_in(struct task_struct *prev,
                 perf_branch_stack_sched_in(prev, task);
 }
 
+void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
+{
+        __perf_event_task_sched_out(prev, next);
+        __perf_event_task_sched_in(prev, next);
+}
+
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
         u64 frequency = event->attr.sample_freq;
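The new __perf_event_task_sched() folds the sched-out and sched-in hooks into a single entry point, so the scheduler core needs only one call per context switch. A minimal sketch of how the caller side might consume it, assuming the usual jump-label-guarded inline wrapper in include/linux/perf_event.h (the wrapper name and the static_key guard are assumptions, not shown in this diff):

extern struct static_key perf_sched_events;
extern void __perf_event_task_sched(struct task_struct *prev,
                                    struct task_struct *next);

/* Sketch: called from the scheduler core around every context switch;
 * the static_key keeps the fast path free of any perf cost when no
 * sched-sensitive events exist. */
static inline void perf_event_task_sched(struct task_struct *prev,
                                         struct task_struct *next)
{
        if (static_key_false(&perf_sched_events))
                __perf_event_task_sched(prev, next);
}

With the combined hook as the only external entry point, making both __perf_event_task_sched_out() and __perf_event_task_sched_in() static follows naturally: neither half needs external linkage anymore.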
@@ -4957,7 +4963,7 @@ void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
         if (rctx < 0)
                 return;
 
-        perf_sample_data_init(&data, addr);
+        perf_sample_data_init(&data, addr, 0);
 
         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
 
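This and the following call sites gain a third argument because perf_sample_data_init() now takes the sampling period up front instead of having callers assign data.period afterwards (see the hrtimer hunk below, where the separate assignment disappears). A sketch of what the widened initializer presumably looks like, inferred from the call sites in this diff rather than shown by it:

/* Sketch of the three-argument initializer, inferred from its callers;
 * the real body lives in include/linux/perf_event.h. */
static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* Remaining fields are filled in later, e.g. by perf_prepare_sample(). */
        data->addr   = addr;
        data->raw    = NULL;
        data->period = period;
}

Callers with no meaningful period, like the software-event, tracepoint, and breakpoint paths here, simply pass 0.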
@@ -5215,7 +5221,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
                 .data = record,
         };
 
-        perf_sample_data_init(&data, addr);
+        perf_sample_data_init(&data, addr, 0);
         data.raw = &raw;
 
         hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
@@ -5318,7 +5324,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
         struct perf_sample_data sample;
         struct pt_regs *regs = data;
 
-        perf_sample_data_init(&sample, bp->attr.bp_addr);
+        perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
 
         if (!bp->hw.state && !perf_exclude_event(bp, regs))
                 perf_swevent_event(bp, 1, &sample, regs);
@@ -5344,13 +5350,12 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 
         event->pmu->read(event);
 
-        perf_sample_data_init(&data, 0);
-        data.period = event->hw.last_period;
+        perf_sample_data_init(&data, 0, event->hw.last_period);
         regs = get_irq_regs();
 
         if (regs && !perf_exclude_event(event, regs)) {
                 if (!(event->attr.exclude_idle && is_idle_task(current)))
-                        if (perf_event_overflow(event, &data, regs))
+                        if (__perf_event_overflow(event, 1, &data, regs))
                                 ret = HRTIMER_NORESTART;
         }
 
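Besides merging the period into the initializer call, the hrtimer path now invokes __perf_event_overflow() directly with an explicit overflow count of 1. In trees of this vintage, perf_event_overflow() is a thin wrapper along these lines (a sketch of the assumed relationship, not part of this diff):

/* Sketch: perf_event_overflow() as the conventional wrapper that
 * forwards a fixed overflow count of 1 to the core handler. */
int perf_event_overflow(struct perf_event *event,
                        struct perf_sample_data *data,
                        struct pt_regs *regs)
{
        return __perf_event_overflow(event, 1, data, regs);
}

So the substituted call is behaviourally equivalent for the hrtimer-driven software events; it just bypasses the wrapper and calls the core handler directly.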