author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-13 18:35:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-13 18:35:47 -0400
commit     560ae37178b12e3bd37626f7b1e0b29c503ea558 (patch)
tree       24105534940152466aa331fb76a9438c85e634b5 /kernel
parent     4fa109b13042868de84bedc70ea9b8337b502cf9 (diff)
parent     baf64b85445546a38b44052d71782dfe7531e350 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Thomas Gleixner:
- fix for do_div() abuse on x86
- locking fix in perf core
- a pile of (build) fixes and cleanups in perf tools
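
The do_div() item above refers to the helper from <asm/div64.h>, whose contract is easy to get wrong: the macro divides its 64-bit lvalue argument in place (leaving the quotient there) and returns only the 32-bit remainder. As a minimal sketch of the intended usage -- the helper name here is hypothetical and this is not the code touched by the x86 fix in this pull:

#include <linux/types.h>
#include <asm/div64.h>

/*
 * Hypothetical helper, only to illustrate the do_div() contract:
 * 'nsecs' is modified in place to hold the quotient, and the macro's
 * return value is the remainder. Treating the return value as the
 * quotient is the classic misuse.
 */
static void split_msecs(u64 nsecs, u64 *whole_msecs, u32 *remainder_ns)
{
        *remainder_ns = do_div(nsecs, 1000000);  /* remainder, in nanoseconds */
        *whole_msecs  = nsecs;                   /* quotient left in 'nsecs' */
}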
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
perf/x86: Fix incorrect use of do_div() in NMI warning
perf: Fix perf_lock_task_context() vs RCU
perf: Remove WARN_ON_ONCE() check in __perf_event_enable() for valid scenario
perf: Clone child context from parent context pmu
perf script: Fix broken include in Context.xs
perf tools: Fix -ldw/-lelf link test when static linking
perf tools: Revert regression in configuration of Python support
perf tools: Fix perf version generation
perf stat: Fix per-socket output bug for uncore events
perf symbols: Fix vdso list searching
perf evsel: Fix missing increment in sample parsing
perf tools: Update symbol_conf.nr_events when processing attribute events
perf tools: Fix new_term() missing free on error path
perf tools: Fix parse_events_terms() segfault on error path
perf evsel: Fix count parameter to read call in event_format__new
perf tools: fix a typo of a Power7 event name
perf tools: Fix -x/--exclude-other option for report command
perf evlist: Enhance perf_evlist__start_workload()
perf record: Remove -f/--force option
perf record: Remove -A/--append option
...
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/events/core.c   28
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1833bc5a84a7..eba8fb5834ae 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -947,8 +947,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
 {
         struct perf_event_context *ctx;
 
-        rcu_read_lock();
 retry:
+        /*
+         * One of the few rules of preemptible RCU is that one cannot do
+         * rcu_read_unlock() while holding a scheduler (or nested) lock when
+         * part of the read side critical section was preemptible -- see
+         * rcu_read_unlock_special().
+         *
+         * Since ctx->lock nests under rq->lock we must ensure the entire read
+         * side critical section is non-preemptible.
+         */
+        preempt_disable();
+        rcu_read_lock();
         ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
         if (ctx) {
                 /*
@@ -964,6 +974,8 @@ retry:
                 raw_spin_lock_irqsave(&ctx->lock, *flags);
                 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
                         raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+                        rcu_read_unlock();
+                        preempt_enable();
                         goto retry;
                 }
 
@@ -973,6 +985,7 @@ retry:
                 }
         }
         rcu_read_unlock();
+        preempt_enable();
         return ctx;
 }
 
@@ -1950,7 +1963,16 @@ static int __perf_event_enable(void *info)
         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
         int err;
 
-        if (WARN_ON_ONCE(!ctx->is_active))
+        /*
+         * There's a time window between 'ctx->is_active' check
+         * in perf_event_enable function and this place having:
+         * - IRQs on
+         * - ctx->lock unlocked
+         *
+         * where the task could be killed and 'ctx' deactivated
+         * by perf_event_exit_task.
+         */
+        if (!ctx->is_active)
                 return -EINVAL;
 
         raw_spin_lock(&ctx->lock);
@@ -7465,7 +7487,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
          * child.
          */
 
-        child_ctx = alloc_perf_context(event->pmu, child);
+        child_ctx = alloc_perf_context(parent_ctx->pmu, child);
         if (!child_ctx)
                 return -ENOMEM;
 
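
For reference, the locking discipline introduced by the first hunk can be reduced to the following sketch, with illustrative names rather than the kernel code itself: because the per-context lock nests under rq->lock, the whole RCU read side around it must be non-preemptible, and every early-exit path has to unwind the locks in the reverse order they were taken before retrying.

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct obj {
        raw_spinlock_t lock;
};

/*
 * Illustrative lookup following the pattern of perf_lock_task_context():
 * on success the object is returned with obj->lock held and IRQs off
 * (the caller unlocks); preemption stays disabled only for the RCU
 * read-side critical section.
 */
static struct obj *lock_obj(struct obj __rcu **slot, unsigned long *flags)
{
        struct obj *o;

retry:
        /* Keep the whole read side non-preemptible, see the hunk's comment. */
        preempt_disable();
        rcu_read_lock();
        o = rcu_dereference(*slot);
        if (o) {
                raw_spin_lock_irqsave(&o->lock, *flags);
                if (o != rcu_dereference(*slot)) {
                        /* Raced with an update: unwind in reverse order, retry. */
                        raw_spin_unlock_irqrestore(&o->lock, *flags);
                        rcu_read_unlock();
                        preempt_enable();
                        goto retry;
                }
        }
        rcu_read_unlock();
        preempt_enable();
        return o;
}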