author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-12 21:20:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-12 21:20:11 -0400
commit     ade0899b298ba2c43bfd6abd8cbc2545944cde0c (patch)
tree       a448dfb440b3b958b6306bb43620cd5d76f504bf /kernel
parent     871a0596cb2f51b57dc583d1a7c4be0186582fe7 (diff)
parent     95cf59ea72331d0093010543b8951bb43f262cac (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"This tree includes some late late perf items that missed the first
round:
tools:
- Bash auto-completion improvements: we can now auto-complete the
tools' long options, tracepoint event names, etc., from Namhyung Kim.
- Look up thread using tid instead of pid in 'perf sched'.
- Move global variables into a perf_kvm struct, from David Ahern.
- Hists refactorings, preparatory for improved 'diff' command, from
Jiri Olsa.
- Hists refactorings, preparatory for event group viewing work, from
Namhyung Kim.
- Remove double negation on optional feature macro definitions, from
Namhyung Kim.
- Remove several cases of needless global variables in most
builtins.
- misc fixes
kernel:
- sysfs support for IBS on AMD CPUs, from Robert Richter.
- Support for an upcoming Intel CPU, the Xeon-Phi / Knights Corner
HPC blade PMU, from Vince Weaver.
- misc fixes"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
perf: Fix perf_cgroup_switch for sw-events
perf: Clarify perf_cpu_context::active_pmu usage by renaming it to ::unique_pmu
perf/AMD/IBS: Add sysfs support
perf hists: Add more helpers for hist entry stat
perf hists: Move he->stat.nr_events initialization to a template
perf hists: Introduce struct he_stat
perf diff: Removing the total_period argument from output code
perf tool: Add hpp interface to enable/disable hpp column
perf tools: Removing hists pair argument from output path
perf hists: Separate overhead and baseline columns
perf diff: Refactor diff displacement possition info
perf hists: Add struct hists pointer to struct hist_entry
perf tools: Complete tracepoint event names
perf/x86: Add support for Intel Xeon-Phi Knights Corner PMU
perf evlist: Remove some unused methods
perf evlist: Introduce add_newtp method
perf kvm: Move global variables into a perf_kvm struct
perf tools: Convert to BACKTRACE_SUPPORT
perf tools: Long option completion support for each subcommands
perf tools: Complete long option names of perf command
...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  21
1 file changed, 12 insertions, 9 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index cda3ebd49e86..dbccf83c134d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -372,6 +372,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->unique_pmu != pmu)
+			continue; /* ensure we process each cpuctx once */
 
 		/*
 		 * perf_cgroup_events says at least one
@@ -395,9 +397,10 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 
 		if (mode & PERF_CGROUP_SWIN) {
 			WARN_ON_ONCE(cpuctx->cgrp);
-			/* set cgrp before ctxsw in to
-			 * allow event_filter_match() to not
-			 * have to pass task around
+			/*
+			 * set cgrp before ctxsw in to allow
+			 * event_filter_match() to not have to pass
+			 * task around
 			 */
 			cpuctx->cgrp = perf_cgroup_from_task(task);
 			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
@@ -4412,7 +4415,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
 
@@ -4558,7 +4561,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
@@ -4754,7 +4757,7 @@ got_name:
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
 				    vma->vm_flags & VM_EXEC);
@@ -5855,8 +5858,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 
-		if (cpuctx->active_pmu == old_pmu)
-			cpuctx->active_pmu = pmu;
+		if (cpuctx->unique_pmu == old_pmu)
+			cpuctx->unique_pmu = pmu;
 	}
 }
 
@@ -5991,7 +5994,7 @@ skip_type:
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
 		INIT_LIST_HEAD(&cpuctx->rotation_list);
-		cpuctx->active_pmu = pmu;
+		cpuctx->unique_pmu = pmu;
 	}
 
 got_cpu_context:
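The functional change is in the first hunk: perf_cgroup_switch() now skips any pmu whose per-CPU context is owned by another pmu, so a cpuctx shared between several software PMUs is switched exactly once; the remaining hunks simply rename active_pmu to unique_pmu to make that ownership role explicit. Below is a minimal, self-contained user-space sketch of the same deduplication pattern. It is not kernel code: apart from the unique_pmu field it mirrors, the struct layouts, function names, and the walk_contexts() helper are simplified stand-ins for illustration.

#include <stdio.h>

struct pmu;

struct cpu_context {
	struct pmu *unique_pmu;  /* the single pmu designated to "own" this context */
	int visited;
};

struct pmu {
	const char *name;
	struct cpu_context *ctx; /* may be shared with other pmus */
};

/* Walk all pmus but touch each shared context only once. */
static void walk_contexts(struct pmu **pmus, int n)
{
	for (int i = 0; i < n; i++) {
		struct cpu_context *ctx = pmus[i]->ctx;

		if (ctx->unique_pmu != pmus[i])
			continue; /* ensure we process each context once */

		ctx->visited++;
		printf("processing context owned by %s\n", pmus[i]->name);
	}
}

int main(void)
{
	struct cpu_context shared = { 0 };
	struct pmu a = { "pmu_a", &shared };
	struct pmu b = { "pmu_b", &shared }; /* shares a's context */
	struct pmu *pmus[] = { &a, &b };

	shared.unique_pmu = &a; /* a is the designated owner */
	walk_contexts(pmus, 2);
	printf("shared context processed %d time(s)\n", shared.visited);
	return 0;
}

With two pmus sharing one context, the walk prints a single "processing" line and reports the context as processed once, which is the guarantee the added continue gives perf_cgroup_switch().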