Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- | kernel/sched/sched.h | 76
1 file changed, 76 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0870db23d79c..dc0f435a2779 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1380,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	for (;;) {
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		/*
+		 * move_queued_task()		task_rq_lock()
+		 *
+		 * ACQUIRE (rq->lock)
+		 * [S] ->on_rq = MIGRATING	[L] rq = task_rq()
+		 * WMB (__set_task_cpu())	ACQUIRE (rq->lock);
+		 * [S] ->cpu = new_cpu		[L] task_rq()
+		 *				[L] ->on_rq
+		 * RELEASE (rq->lock)
+		 *
+		 * If we observe the old cpu in task_rq_lock, the acquire of
+		 * the old rq->lock will fully serialize against the stores.
+		 *
+		 * If we observe the new cpu in task_rq_lock, the acquire will
+		 * pair with the WMB to ensure we must then also see migrating.
+		 */
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+	__releases(rq->lock)
+{
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
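For context, a minimal sketch of how callers are expected to pair these helpers. Both example functions below are hypothetical and not part of this patch: task_rq_lock()/task_rq_unlock() take and release p->pi_lock together with the task's rq->lock, while __task_rq_lock() serves paths that already hold p->pi_lock, which its lockdep_assert_held() enforces.

/* Hypothetical callers, for illustration only -- not from the patch. */

static void example_with_both_locks(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	/* Takes p->pi_lock, then the lock of the rq @p resides on. */
	rq = task_rq_lock(p, &flags);

	/* @p can neither migrate nor complete a migration here. */
	if (task_on_rq_queued(p))
		update_rq_clock(rq);

	task_rq_unlock(rq, p, &flags);
}

static void example_pi_lock_already_held(struct task_struct *p)
{
	struct rq *rq;

	raw_spin_lock_irq(&p->pi_lock);

	/* Satisfies the lockdep_assert_held(&p->pi_lock) check. */
	rq = __task_rq_lock(p);
	/* ... operate on @rq knowing @p cannot switch runqueues ... */
	__task_rq_unlock(rq);

	raw_spin_unlock_irq(&p->pi_lock);
}

Note the design choice in both lock routines: on a lost race the locks are dropped and the caller spins with cpu_relax() until task_on_rq_migrating() clears, so the retry does not contend on the new runqueue's lock while a migration is still in flight.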