path: root/kernel/events
author    Ingo Molnar <mingo@elte.hu>  2012-01-27 06:07:57 -0500
committer Ingo Molnar <mingo@elte.hu>  2012-01-27 06:08:09 -0500
commit    44a683971119bafb5bc30778f92ee773680ebb6f (patch)
tree      58648459f29d45c447bd2352e81844d4d9aa3a15 /kernel/events
parent    801493c2e249a7314e9e8e54ad60d613d0a86f14 (diff)
parent    08aa0d1f376e9b966568316bd2019b3c1274d885 (diff)
Merge branch 'perf/fast' into perf/core
Merge reason: Let's ready it for v3.4.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c           53
-rw-r--r--  kernel/events/hw_breakpoint.c   7
2 files changed, 47 insertions(+), 13 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 32b48c88971..de859fb4038 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3208,10 +3208,6 @@ int perf_event_task_disable(void)
         return 0;
 }
 
-#ifndef PERF_EVENT_INDEX_OFFSET
-# define PERF_EVENT_INDEX_OFFSET 0
-#endif
-
 static int perf_event_index(struct perf_event *event)
 {
         if (event->hw.state & PERF_HES_STOPPED)
@@ -3220,21 +3216,26 @@ static int perf_event_index(struct perf_event *event)
         if (event->state != PERF_EVENT_STATE_ACTIVE)
                 return 0;
 
-        return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
+        return event->pmu->event_idx(event);
 }
 
 static void calc_timer_values(struct perf_event *event,
+                              u64 *now,
                               u64 *enabled,
                               u64 *running)
 {
-        u64 now, ctx_time;
+        u64 ctx_time;
 
-        now = perf_clock();
-        ctx_time = event->shadow_ctx_time + now;
+        *now = perf_clock();
+        ctx_time = event->shadow_ctx_time + *now;
         *enabled = ctx_time - event->tstamp_enabled;
         *running = ctx_time - event->tstamp_running;
 }
 
+void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+{
+}
+
 /*
  * Callers need to ensure there can be no nesting of this function, otherwise
  * the seqlock logic goes bad. We can not serialize this because the arch
@@ -3244,7 +3245,7 @@ void perf_event_update_userpage(struct perf_event *event)
 {
         struct perf_event_mmap_page *userpg;
         struct ring_buffer *rb;
-        u64 enabled, running;
+        u64 enabled, running, now;
 
         rcu_read_lock();
         /*
@@ -3256,7 +3257,7 @@ void perf_event_update_userpage(struct perf_event *event)
          * because of locking issue as we can be called in
          * NMI context
          */
-        calc_timer_values(event, &enabled, &running);
+        calc_timer_values(event, &now, &enabled, &running);
         rb = rcu_dereference(event->rb);
         if (!rb)
                 goto unlock;
@@ -3272,7 +3273,7 @@ void perf_event_update_userpage(struct perf_event *event)
         barrier();
         userpg->index = perf_event_index(event);
         userpg->offset = perf_event_count(event);
-        if (event->state == PERF_EVENT_STATE_ACTIVE)
+        if (userpg->index)
                 userpg->offset -= local64_read(&event->hw.prev_count);
 
         userpg->time_enabled = enabled +
@@ -3281,6 +3282,8 @@ void perf_event_update_userpage(struct perf_event *event)
         userpg->time_running = running +
                 atomic64_read(&event->child_total_time_running);
 
+        perf_update_user_clock(userpg, now);
+
         barrier();
         ++userpg->lock;
         preempt_enable();
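
The ++userpg->lock increments on either side of the update, paired with barrier(), form a write-side seqcount over the control page. As a reading aid, here is a minimal userspace read-side sketch (illustrative only, not part of this diff; pmc_read() is a hypothetical stand-in for an arch-specific user-mode counter read such as x86 RDPMC, following the new convention that index 0 means the counter is not readable from userspace):

#include <stdint.h>
#include <linux/perf_event.h>

extern uint64_t pmc_read(unsigned int idx);     /* hypothetical, e.g. RDPMC */

uint64_t mmap_read_self(volatile struct perf_event_mmap_page *pc)
{
        uint32_t seq;
        uint64_t count;

        do {
                seq = pc->lock;                 /* writer bumps this twice */
                __sync_synchronize();
                count = pc->offset;             /* kernel-maintained base */
                if (pc->index)                  /* 0 => no user-space read */
                        count += pmc_read(pc->index - 1);
                __sync_synchronize();
        } while (pc->lock != seq);              /* raced the writer: retry */

        return count;
}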
@@ -3538,6 +3541,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
         event->mmap_user = get_current_user();
         vma->vm_mm->pinned_vm += event->mmap_locked;
 
+        perf_event_update_userpage(event);
+
 unlock:
         if (!ret)
                 atomic_inc(&event->mmap_count);
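
Calling perf_event_update_userpage() from perf_mmap() means the control page is already populated when userspace first maps it. A hedged sketch of the userspace side (standard perf_event_open()/mmap() usage, assumed here rather than shown by this diff):

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

struct perf_event_mmap_page *map_control_page(struct perf_event_attr *attr)
{
        /* monitor the calling task on any CPU */
        int fd = syscall(__NR_perf_event_open, attr, 0, -1, -1, 0);
        if (fd < 0)
                return NULL;

        /* 1 control page + 2^n data pages; 1 + 1 is the smallest valid size */
        void *p = mmap(NULL, 2 * sysconf(_SC_PAGESIZE),
                       PROT_READ, MAP_SHARED, fd, 0);
        return p == MAP_FAILED ? NULL : (struct perf_event_mmap_page *)p;
}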
@@ -3769,7 +3774,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 static void perf_output_read(struct perf_output_handle *handle,
                              struct perf_event *event)
 {
-        u64 enabled = 0, running = 0;
+        u64 enabled = 0, running = 0, now;
         u64 read_format = event->attr.read_format;
 
         /*
@@ -3782,7 +3787,7 @@ static void perf_output_read(struct perf_output_handle *handle,
          * NMI context
          */
         if (read_format & PERF_FORMAT_TOTAL_TIMES)
-                calc_timer_values(event, &enabled, &running);
+                calc_timer_values(event, &now, &enabled, &running);
 
         if (event->attr.read_format & PERF_FORMAT_GROUP)
                 perf_output_read_group(handle, event, enabled, running);
@@ -4994,6 +4999,11 @@ static int perf_swevent_init(struct perf_event *event)
         return 0;
 }
 
+static int perf_swevent_event_idx(struct perf_event *event)
+{
+        return 0;
+}
+
 static struct pmu perf_swevent = {
         .task_ctx_nr    = perf_sw_context,
 
@@ -5003,6 +5013,8 @@ static struct pmu perf_swevent = {
         .start          = perf_swevent_start,
         .stop           = perf_swevent_stop,
         .read           = perf_swevent_read,
+
+        .event_idx      = perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -5089,6 +5101,8 @@ static struct pmu perf_tracepoint = {
         .start          = perf_swevent_start,
         .stop           = perf_swevent_stop,
         .read           = perf_swevent_read,
+
+        .event_idx      = perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -5308,6 +5322,8 @@ static struct pmu perf_cpu_clock = {
         .start          = cpu_clock_event_start,
         .stop           = cpu_clock_event_stop,
         .read           = cpu_clock_event_read,
+
+        .event_idx      = perf_swevent_event_idx,
 };
 
 /*
@@ -5380,6 +5396,8 @@ static struct pmu perf_task_clock = {
         .start          = task_clock_event_start,
         .stop           = task_clock_event_stop,
         .read           = task_clock_event_read,
+
+        .event_idx      = perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -5407,6 +5425,11 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
         perf_pmu_enable(pmu);
 }
 
+static int perf_event_idx_default(struct perf_event *event)
+{
+        return event->hw.idx + 1;
+}
+
 /*
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
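
perf_event_idx_default() fixes the convention that the removed PERF_EVENT_INDEX_OFFSET macro left per-arch: event_idx() reports the event's hardware counter as a 1-based index for the userpage (userspace subtracts one before reading it), and 0 opts out, which is why the software, tracepoint, clock and breakpoint pmus in this diff all wire in return-0 stubs. A hypothetical driver-side sketch (the mydrv_* names are made up):

/* A PMU whose counters userspace can read directly may simply rely on
 * the new default (hw.idx + 1); one that cannot opts out with 0. */
static int mydrv_event_idx(struct perf_event *event)
{
        return 0;       /* counters not accessible from userspace */
}

static struct pmu mydrv_pmu = {
        /* .add/.del/.start/.stop/.read elided */
        .event_idx      = mydrv_event_idx,      /* omit to get hw.idx + 1 */
};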
@@ -5493,6 +5516,7 @@ static int pmu_dev_alloc(struct pmu *pmu)
         if (!pmu->dev)
                 goto out;
 
+        pmu->dev->groups = pmu->attr_groups;
         device_initialize(pmu->dev);
         ret = dev_set_name(pmu->dev, "%s", pmu->name);
         if (ret)
@@ -5596,6 +5620,9 @@ got_cpu_context:
                 pmu->pmu_disable = perf_pmu_nop_void;
         }
 
+        if (!pmu->event_idx)
+                pmu->event_idx = perf_event_idx_default;
+
         list_add_rcu(&pmu->entry, &pmus);
         ret = 0;
 unlock:
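
The one-line pmu->dev->groups = pmu->attr_groups change lets a pmu export sysfs attributes under /sys/bus/event_source/devices/<name>/ simply by filling in attr_groups before perf_pmu_register(). A hypothetical sketch using the stock device-attribute helpers (mydrv_* names are illustrative):

static ssize_t mydrv_version_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "1\n");
}
static DEVICE_ATTR(version, 0444, mydrv_version_show, NULL);

static struct attribute *mydrv_attrs[] = {
        &dev_attr_version.attr,
        NULL,
};

static const struct attribute_group mydrv_attr_group = {
        .attrs  = mydrv_attrs,
};

static const struct attribute_group *mydrv_attr_groups[] = {
        &mydrv_attr_group,
        NULL,
};

/* set mydrv_pmu.attr_groups = mydrv_attr_groups before registration */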
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b7971d6f38b..b0309f76d77 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -613,6 +613,11 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags)
         bp->hw.state = PERF_HES_STOPPED;
 }
 
+static int hw_breakpoint_event_idx(struct perf_event *bp)
+{
+        return 0;
+}
+
 static struct pmu perf_breakpoint = {
         .task_ctx_nr    = perf_sw_context, /* could eventually get its own */
 
@@ -622,6 +627,8 @@ static struct pmu perf_breakpoint = {
         .start          = hw_breakpoint_start,
         .stop           = hw_breakpoint_stop,
         .read           = hw_breakpoint_pmu_read,
+
+        .event_idx      = hw_breakpoint_event_idx,
 };
 
 int __init init_hw_breakpoint(void)