diff options
author | Stephane Eranian <eranian@google.com> | 2015-11-12 05:00:03 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-11-23 03:21:03 -0500 |
commit | ddaaf4e291dd63db0667991e4a335fcf3a7df13e (patch) | |
tree | 844099db01306e6badb1cd9bc867e19263547812 | |
parent | 1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff) |
perf/core: Fix RCU problem with cgroup context switching code
The RCU checker detected an RCU violation in the cgroup switching routines
perf_cgroup_sched_in() and perf_cgroup_sched_out(). We were dereferencing
the cgroup from the task without holding the RCU lock.
Fix this by holding the RCU read lock. We move the locking out of
perf_cgroup_switch() and into its callers to avoid double locking.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: edumazet@google.com
Link: http://lkml.kernel.org/r/1447322404-10920-2-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | kernel/events/core.c | 11 |
1 files changed, 8 insertions, 3 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index 36babfd20648..60e71ca42c22 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode) | |||
489 | * we reschedule only in the presence of cgroup | 489 | * we reschedule only in the presence of cgroup |
490 | * constrained events. | 490 | * constrained events. |
491 | */ | 491 | */ |
492 | rcu_read_lock(); | ||
493 | 492 | ||
494 | list_for_each_entry_rcu(pmu, &pmus, entry) { | 493 | list_for_each_entry_rcu(pmu, &pmus, entry) { |
495 | cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); | 494 | cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
@@ -531,8 +530,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode) | |||
531 | } | 530 | } |
532 | } | 531 | } |
533 | 532 | ||
534 | rcu_read_unlock(); | ||
535 | |||
536 | local_irq_restore(flags); | 533 | local_irq_restore(flags); |
537 | } | 534 | } |
538 | 535 | ||
@@ -542,6 +539,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task, | |||
542 | struct perf_cgroup *cgrp1; | 539 | struct perf_cgroup *cgrp1; |
543 | struct perf_cgroup *cgrp2 = NULL; | 540 | struct perf_cgroup *cgrp2 = NULL; |
544 | 541 | ||
542 | rcu_read_lock(); | ||
545 | /* | 543 | /* |
546 | * we come here when we know perf_cgroup_events > 0 | 544 | * we come here when we know perf_cgroup_events > 0 |
547 | */ | 545 | */ |
@@ -561,6 +559,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task, | |||
561 | */ | 559 | */ |
562 | if (cgrp1 != cgrp2) | 560 | if (cgrp1 != cgrp2) |
563 | perf_cgroup_switch(task, PERF_CGROUP_SWOUT); | 561 | perf_cgroup_switch(task, PERF_CGROUP_SWOUT); |
562 | |||
563 | rcu_read_unlock(); | ||
564 | } | 564 | } |
565 | 565 | ||
566 | static inline void perf_cgroup_sched_in(struct task_struct *prev, | 566 | static inline void perf_cgroup_sched_in(struct task_struct *prev, |
@@ -569,6 +569,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev, | |||
569 | struct perf_cgroup *cgrp1; | 569 | struct perf_cgroup *cgrp1; |
570 | struct perf_cgroup *cgrp2 = NULL; | 570 | struct perf_cgroup *cgrp2 = NULL; |
571 | 571 | ||
572 | rcu_read_lock(); | ||
572 | /* | 573 | /* |
573 | * we come here when we know perf_cgroup_events > 0 | 574 | * we come here when we know perf_cgroup_events > 0 |
574 | */ | 575 | */ |
@@ -584,6 +585,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev, | |||
584 | */ | 585 | */ |
585 | if (cgrp1 != cgrp2) | 586 | if (cgrp1 != cgrp2) |
586 | perf_cgroup_switch(task, PERF_CGROUP_SWIN); | 587 | perf_cgroup_switch(task, PERF_CGROUP_SWIN); |
588 | |||
589 | rcu_read_unlock(); | ||
587 | } | 590 | } |
588 | 591 | ||
589 | static inline int perf_cgroup_connect(int fd, struct perf_event *event, | 592 | static inline int perf_cgroup_connect(int fd, struct perf_event *event, |
@@ -9452,7 +9455,9 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css) | |||
9452 | static int __perf_cgroup_move(void *info) | 9455 | static int __perf_cgroup_move(void *info) |
9453 | { | 9456 | { |
9454 | struct task_struct *task = info; | 9457 | struct task_struct *task = info; |
9458 | rcu_read_lock(); | ||
9455 | perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); | 9459 | perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); |
9460 | rcu_read_unlock(); | ||
9456 | return 0; | 9461 | return 0; |
9457 | } | 9462 | } |
9458 | 9463 | ||