Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e9304cdc26fe..a848f526b941 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq)
 	 * if we should look at the mask. It would be a shame
 	 * if we looked at the mask, but the mask was not
 	 * updated yet.
+	 *
+	 * Matched by the barrier in pull_rt_task().
 	 */
-	wmb();
+	smp_wmb();
 	atomic_inc(&rq->rd->rto_count);
 }
 
@@ -1626,6 +1628,12 @@ static int pull_rt_task(struct rq *this_rq)
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
+	/*
+	 * Match the barrier from rt_set_overloaded; this guarantees that if we
+	 * see overloaded we must also see the rto_mask bit.
+	 */
+	smp_rmb();
+
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;