author	Stephane Eranian <eranian@google.com>	2015-11-12 05:00:04 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-11-23 03:21:03 -0500
commit	614e4c4ebc75517295bccd29b20ddbc5b52af6fc (patch)
tree	67931aaaac4c9e81176140293348f3582ef2a742
parent	ddaaf4e291dd63db0667991e4a335fcf3a7df13e (diff)
perf/core: Robustify the perf_cgroup_from_task() RCU checks
This patch reinforces the lockdep checks performed by perf_cgroup_from_task()
by passing the perf_event_context whenever possible. It is okay not to hold
the RCU read lock when we know we are holding the ctx->lock. This patch makes
sure this property holds.

In some functions, such as perf_cgroup_sched_in(), we do not pass the context
because we are sure we are holding the RCU read lock.

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: edumazet@google.com
Link: http://lkml.kernel.org/r/1447322404-10920-3-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
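For illustration only, and not part of the patch itself: a minimal C sketch of
the two calling conventions the new second argument supports. The two helper
functions below are hypothetical; perf_cgroup_from_task(), rcu_read_lock()/
rcu_read_unlock() and raw_spin_lock_irq()/raw_spin_unlock_irq() are existing
kernel APIs, and ctx->lock is assumed to be the perf_event_context raw spinlock.

/*
 * Illustrative sketch only, not part of the patch. The two helpers are
 * hypothetical and merely demonstrate the calling conventions enabled by
 * the new 'ctx' argument.
 */
#include <linux/perf_event.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Caller holds ctx->lock: passing ctx lets lockdep_is_held(&ctx->lock)
 * satisfy task_css_check() without an RCU read lock.
 */
static void use_cgrp_with_ctx_lock(struct task_struct *task,
				   struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;

	raw_spin_lock_irq(&ctx->lock);
	cgrp = perf_cgroup_from_task(task, ctx);
	/* ... use cgrp while ctx->lock is still held ... */
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Caller is under rcu_read_lock(): pass NULL, the lockdep condition
 * degenerates to 'true' and the RCU read-side critical section is what
 * keeps the css pointer stable.
 */
static void use_cgrp_under_rcu(struct task_struct *task)
{
	struct perf_cgroup *cgrp;

	rcu_read_lock();
	cgrp = perf_cgroup_from_task(task, NULL);
	/* ... use cgrp before rcu_read_unlock() ... */
	rcu_read_unlock();
}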
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c   2
-rw-r--r--  include/linux/perf_event.h                    6
-rw-r--r--  kernel/events/core.c                         20
3 files changed, 18 insertions, 10 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8ed391..a316ca96f1b6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);
 
 	return event->cgrp;
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33bcdc9..f9828a48f16a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -697,9 +697,11 @@ struct perf_cgroup {
  * if there is no cgroup event for the current CPU context.
  */
 static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
 {
-	return container_of(task_css(task, perf_event_cgrp_id),
+	return container_of(task_css_check(task, perf_event_cgrp_id,
+					   ctx ? lockdep_is_held(&ctx->lock)
+					       : true),
 			    struct perf_cgroup, css);
 }
 #endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 60e71ca42c22..1ac857aff7b0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
 	if (!is_cgroup_event(event))
 		return;
 
-	cgrp = perf_cgroup_from_task(current);
+	cgrp = perf_cgroup_from_task(current, event->ctx);
 	/*
 	 * Do not update time when cgroup is not active
 	 */
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	if (!task || !ctx->nr_cgroups)
 		return;
 
-	cgrp = perf_cgroup_from_task(task);
+	cgrp = perf_cgroup_from_task(task, ctx);
 	info = this_cpu_ptr(cgrp->info);
 	info->timestamp = ctx->timestamp;
 }
@@ -521,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 			 * set cgrp before ctxsw in to allow
 			 * event_filter_match() to not have to pass
 			 * task around
+			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
+			 * because cgroup events are only per-cpu
 			 */
-			cpuctx->cgrp = perf_cgroup_from_task(task);
+			cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
 			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
 		}
 		perf_pmu_enable(cpuctx->ctx.pmu);
@@ -542,15 +544,17 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
+	 * we do not need to pass the ctx here because we know
+	 * we are holding the rcu lock
 	 */
-	cgrp1 = perf_cgroup_from_task(task);
+	cgrp1 = perf_cgroup_from_task(task, NULL);
 
 	/*
 	 * next is NULL when called from perf_event_enable_on_exec()
 	 * that will systematically cause a cgroup_switch()
 	 */
 	if (next)
-		cgrp2 = perf_cgroup_from_task(next);
+		cgrp2 = perf_cgroup_from_task(next, NULL);
 
 	/*
 	 * only schedule out current cgroup events if we know
@@ -572,11 +576,13 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
+	 * we do not need to pass the ctx here because we know
+	 * we are holding the rcu lock
 	 */
-	cgrp1 = perf_cgroup_from_task(task);
+	cgrp1 = perf_cgroup_from_task(task, NULL);
 
 	/* prev can never be NULL */
-	cgrp2 = perf_cgroup_from_task(prev);
+	cgrp2 = perf_cgroup_from_task(prev, NULL);
 
 	/*
 	 * only need to schedule in cgroup events if we are changing