author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-05 11:23:48 -0400
committer Ingo Molnar <mingo@elte.hu>    2011-04-14 02:52:37 -0400
commit    3fe1698b7fe05aeb063564e71e40d09f28d8e80c (patch)
tree      d068bd6a075cd85c64a040cf9fc8e661f8be00a8 /kernel
parent    74f8e4b2335de45485b8d5b31a504747f13c8070 (diff)
sched: Deal with non-atomic min_vruntime reads on 32bits
In order to avoid reading partially updated min_vruntime values on 32-bit, implement a seqcount-like solution.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.111378493@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
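For illustration, here is a minimal userspace sketch of such a seqcount-like protocol, not the kernel code itself. It assumes C11 <stdatomic.h>, models the kernel's smp_wmb()/smp_rmb() with release/acquire fences, and uses hypothetical helper names publish() and read_consistent().

/*
 * Minimal sketch, assuming C11 atomics; publish() and read_consistent()
 * are hypothetical names for illustration, not kernel functions.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t min_vruntime;       /* the value itself */
static _Atomic uint64_t min_vruntime_copy;  /* shadow copy, published last */

/* Writer, cf. update_min_vruntime(): store the value, then the copy. */
static void publish(uint64_t v)
{
        atomic_store_explicit(&min_vruntime, v, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* kernel: smp_wmb() */
        atomic_store_explicit(&min_vruntime_copy, v, memory_order_relaxed);
}

/* Reader, cf. task_waking_fair(): retry until copy and value agree. */
static uint64_t read_consistent(void)
{
        uint64_t v, copy;

        do {
                copy = atomic_load_explicit(&min_vruntime_copy,
                                            memory_order_relaxed);
                atomic_thread_fence(memory_order_acquire); /* kernel: smp_rmb() */
                v = atomic_load_explicit(&min_vruntime, memory_order_relaxed);
        } while (v != copy);

        return v;
}

int main(void)
{
        publish(12345);
        printf("min_vruntime = %llu\n", (unsigned long long)read_consistent());
        return 0;
}

The writer makes the copy visible only after the value itself is fully stored, and a reader accepts a value only when it matches the copy read beforehand, so a load that raced with a 64-bit update is simply retried.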
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/sched.c         3
-rw-r--r--    kernel/sched_fair.c   19
2 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 46f42cac4eb1..7a5eb2620785 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -312,6 +312,9 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
+#ifndef CONFIG_64BIT
+	u64 min_vruntime_copy;
+#endif
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ad4c414f456d..054cebb81f7b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -358,6 +358,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 	}
 
 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+#ifndef CONFIG_64BIT
+	smp_wmb();
+	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 /*
@@ -1376,10 +1380,21 @@ static void task_waking_fair(struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 min_vruntime;
 
-	lockdep_assert_held(&task_rq(p)->lock);
+#ifndef CONFIG_64BIT
+	u64 min_vruntime_copy;
 
-	se->vruntime -= cfs_rq->min_vruntime;
+	do {
+		min_vruntime_copy = cfs_rq->min_vruntime_copy;
+		smp_rmb();
+		min_vruntime = cfs_rq->min_vruntime;
+	} while (min_vruntime != min_vruntime_copy);
+#else
+	min_vruntime = cfs_rq->min_vruntime;
+#endif
+
+	se->vruntime -= min_vruntime;
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
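Design note: the barriers pair up across the two files. smp_wmb() in update_min_vruntime() orders the min_vruntime store before the min_vruntime_copy store, while smp_rmb() in task_waking_fair() orders the copy load before the value load, so a reader that races with a 64-bit update observes copy != value and retries. On 64-bit kernels a u64 load is atomic, so the extra field and both barriers are compiled out.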