 kernel/events/core.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ef5e7cc686e3..eba8fb5834ae 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -947,8 +947,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
 {
 	struct perf_event_context *ctx;
 
-	rcu_read_lock();
 retry:
+	/*
+	 * One of the few rules of preemptible RCU is that one cannot do
+	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
+	 * part of the read side critical section was preemptible -- see
+	 * rcu_read_unlock_special().
+	 *
+	 * Since ctx->lock nests under rq->lock we must ensure the entire read
+	 * side critical section is non-preemptible.
+	 */
+	preempt_disable();
+	rcu_read_lock();
 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 	if (ctx) {
 		/*
@@ -964,6 +974,8 @@ retry:
 		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+			rcu_read_unlock();
+			preempt_enable();
 			goto retry;
 		}
 
@@ -973,6 +985,7 @@ retry:
 		}
 	}
 	rcu_read_unlock();
+	preempt_enable();
 	return ctx;
 }
 
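For context, the sketch below distills the before/after locking pattern this patch changes. It is an assumed simplification, not the perf code verbatim: the helper names read_side_broken()/read_side_fixed() are hypothetical, and ctx_lock stands in for ctx->lock, which nests under rq->lock. The deadlock scenario assumes CONFIG_PREEMPT_RCU, where an RCU read-side critical section may itself be preempted.

	/*
	 * Illustrative sketch only (assumed simplification of the pattern
	 * in the patch above).  Assumes CONFIG_PREEMPT_RCU.
	 */
	#include <linux/preempt.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	/* Stands in for ctx->lock, which nests under rq->lock. */
	static DEFINE_RAW_SPINLOCK(ctx_lock);

	/* Before the patch -- broken: */
	static void read_side_broken(unsigned long *flags)
	{
		rcu_read_lock();	/* read side is preemptible	*/
		/*
		 * If the task is preempted here, RCU queues it and marks
		 * the eventual rcu_read_unlock() for special processing.
		 */
		raw_spin_lock_irqsave(&ctx_lock, *flags);
		rcu_read_unlock();	/* while holding a lock nested under
					 * rq->lock: may enter
					 * rcu_read_unlock_special(), which
					 * can take scheduler locks and
					 * deadlock			*/
		raw_spin_unlock_irqrestore(&ctx_lock, *flags);
	}

	/* After the patch -- fixed: */
	static void read_side_fixed(unsigned long *flags)
	{
		preempt_disable();	/* whole read side non-preemptible */
		rcu_read_lock();
		raw_spin_lock_irqsave(&ctx_lock, *flags);
		rcu_read_unlock();	/* never preempted inside the
					 * critical section, so the
					 * special-unlock path is never
					 * taken			*/
		raw_spin_unlock_irqrestore(&ctx_lock, *flags);
		preempt_enable();
	}

This is why the patch wraps the entire read side in preempt_disable()/preempt_enable() and, on the retry path, drops RCU and re-enables preemption before looping: every rcu_read_unlock() must be reached without the critical section ever having been preemptible.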