Diffstat (limited to 'kernel')
-rw-r--r--   kernel/events/core.c | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index cb8744a1b120..e76e4959908c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -761,8 +761,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
 {
 	struct perf_event_context *ctx;
 
-	rcu_read_lock();
 retry:
+	/*
+	 * One of the few rules of preemptible RCU is that one cannot do
+	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
+	 * part of the read side critical section was preemptible -- see
+	 * rcu_read_unlock_special().
+	 *
+	 * Since ctx->lock nests under rq->lock we must ensure the entire read
+	 * side critical section is non-preemptible.
+	 */
+	preempt_disable();
+	rcu_read_lock();
 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 	if (ctx) {
 		/*
@@ -778,6 +788,8 @@ retry:
 		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+			rcu_read_unlock();
+			preempt_enable();
 			goto retry;
 		}
 
@@ -787,6 +799,7 @@ retry:
 	}
 	rcu_read_unlock();
+	preempt_enable();
 	return ctx;
 }
 
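For illustration only, a condensed sketch of the ordering the patched perf_lock_task_context() follows (simplified; the retry path and refcount handling are omitted): preemption is disabled before the RCU read-side critical section begins, so the closing rcu_read_unlock() never needs rcu_read_unlock_special() even though ctx->lock, which nests under rq->lock, may still be held at that point.

	preempt_disable();			/* whole read side is non-preemptible */
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx)
		raw_spin_lock_irqsave(&ctx->lock, *flags);	/* nests under rq->lock */
	rcu_read_unlock();			/* safe: the section was never preempted */
	preempt_enable();
	return ctx;				/* may be returned with ctx->lock held */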