 kernel/sched.c      |    9 +++++++++
 kernel/sched_fair.c |    2 ++
 lib/idr.c           |    4 ++--
 3 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f8b8996228dd..a2d215d132f6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2494,7 +2494,16 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
+	/*
+	 * The child is not yet in the pid-hash so no cgroup attach races,
+	 * and the cgroup is pinned to this child because cgroup_fork()
+	 * runs before sched_fork().
+	 *
+	 * Silence PROVE_RCU.
+	 */
+	rcu_read_lock();
 	set_task_cpu(p, cpu);
+	rcu_read_unlock();
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
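The hunk above is the entire fork-path fix: under CONFIG_PROVE_RCU, rcu_dereference() warns unless lockdep can see an enclosing RCU read-side critical section, even when the access is safe for other reasons. A minimal sketch of the pattern, assuming set_task_cpu() reaches task_group() and thus rcu_dereference() internally (the function name below is illustrative, not from the patch):

	/*
	 * Sketch only, not part of the patch: the shape of a PROVE_RCU fix.
	 * The child is not yet visible (not in the pid-hash), so its cgroup
	 * cannot change under us; the read-side critical section exists
	 * purely so the rcu_dereference() calls reached from set_task_cpu()
	 * pass lockdep's rcu_read_lock_held() check.
	 */
	static void fork_set_cpu_sketch(struct task_struct *p, int cpu)
	{
		rcu_read_lock();	/* silences PROVE_RCU; essentially free */
		set_task_cpu(p, cpu);	/* ends up in task_group() -> rcu_dereference() */
		rcu_read_unlock();
	}

The lock here is documentation as much as protection: it encodes the "this pointer cannot go away" invariant in a form lockdep can verify.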
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index eed35eded602..a878b5332daa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within
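For context, the reason wake_affine() trips PROVE_RCU is that task_group() dereferences the task's cgroup subsystem state, which is RCU-protected. An approximate definition from kernels of this vintage (shown only to make the splat plausible; details may differ from this exact tree):

	/*
	 * Approximate sketch, not part of this patch:
	 * task_subsys_state() boils down to an rcu_dereference(), which is
	 * what PROVE_RCU checks against rcu_read_lock_held().
	 */
	static inline struct task_group *task_group(struct task_struct *p)
	{
		return container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
				    struct task_group, css);
	}

So the patch brackets the whole task_group()-using region of wake_affine() with rcu_read_lock()/rcu_read_unlock() rather than each call individually.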
diff --git a/lib/idr.c b/lib/idr.c
@@ -602,7 +602,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
 	/* find first ent */
 	n = idp->layers * IDR_BITS;
 	max = 1 << n;
-	p = rcu_dereference(idp->top);
+	p = rcu_dereference_raw(idp->top);
 	if (!p)
 		return NULL;
 
@@ -610,7 +610,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
 	while (n > 0 && p) {
 		n -= IDR_BITS;
 		*paa++ = p;
-		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 	}
 
 	if (p) {
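lib/idr.c goes the other way: instead of taking the RCU read lock, it switches to rcu_dereference_raw(). idr_get_next() may legitimately be called either under rcu_read_lock() or with the structure pinned by the caller's own idr locking, so the rcu_read_lock_held() assertion that plain rcu_dereference() performs under PROVE_RCU would be a false positive in the locked case. A sketch of the distinction, assuming that calling convention (the helper name is hypothetical):

	/*
	 * Sketch, not from the tree:
	 * rcu_dereference(p)     - ordered load plus, under CONFIG_PROVE_RCU,
	 *                          a lockdep check that rcu_read_lock() is held.
	 * rcu_dereference_raw(p) - the same ordered load with no lockdep check;
	 *                          the caller guarantees protection by other
	 *                          means (RCU *or* the idr user's own lock).
	 */
	static struct idr_layer *idr_top_sketch(struct idr *idp)
	{
		/* legal whether the caller holds rcu_read_lock() or the idr lock */
		return rcu_dereference_raw(idp->top);
	}

The trade-off is that _raw shifts responsibility for proving safety from lockdep back to the programmer, which is why it is reserved for call sites with more than one valid protection scheme.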