Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	98
1 file changed, 96 insertions, 2 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c970e7..dc0f435a2779 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -558,8 +558,6 @@ struct rq {
 #ifdef CONFIG_NO_HZ_FULL
 	unsigned long last_sched_tick;
 #endif
-	int skip_clock_update;
-
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -588,6 +586,7 @@ struct rq {
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
 
+	unsigned int clock_skip_update;
 	u64 clock;
 	u64 clock_task;
 
@@ -687,16 +686,35 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)	(cpu_rq(cpu)->curr)
 #define raw_rq()	raw_cpu_ptr(&runqueues)
 
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+	return ACCESS_ONCE(rq->clock);
+}
+
 static inline u64 rq_clock(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock;
 }
 
 static inline u64 rq_clock_task(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock_task;
 }
 
+#define RQCF_REQ_SKIP	0x01
+#define RQCF_ACT_SKIP	0x02
+
+static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+{
+	lockdep_assert_held(&rq->lock);
+	if (skip)
+		rq->clock_skip_update |= RQCF_REQ_SKIP;
+	else
+		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
 	NUMA_DIRECT,
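
The helper added above must be called with rq->lock held (the lockdep assertion enforces that), and it only sets or clears RQCF_REQ_SKIP; nothing in this header consumes RQCF_ACT_SKIP. A minimal sketch of a caller, assuming a hypothetical skip_next_clock_update_example() that is not part of this patch:

	/* Hypothetical caller: rq->lock must already be held. */
	static void skip_next_clock_update_example(struct rq *rq)
	{
		update_rq_clock(rq);            /* bring rq->clock up to date              */
		rq_clock_skip_update(rq, true); /* request that the next update be skipped */
	}
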
@@ -1362,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	for (;;) {
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		/*
+		 *	move_queued_task()		task_rq_lock()
+		 *
+		 *	ACQUIRE (rq->lock)
+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
+		 *	[S] ->cpu = new_cpu		[L] task_rq()
+		 *					[L] ->on_rq
+		 *	RELEASE (rq->lock)
+		 *
+		 * If we observe the old cpu in task_rq_lock, the acquire of
+		 * the old rq->lock will fully serialize against the stores.
+		 *
+		 * If we observe the new cpu in task_rq_lock, the acquire will
+		 * pair with the WMB to ensure we must then also see migrating.
+		 */
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+	__releases(rq->lock)
+{
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
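
With __task_rq_lock()/task_rq_lock() and their unlock counterparts now available as inlines in this header, other scheduler files can pin a task's runqueue and read its clock under the serialization the new lockdep assertions check for. A minimal usage sketch, assuming a hypothetical read_task_rq_clock_example() that is not part of this patch:

	/* Hypothetical user: pin @p's rq, sample its clock, drop both locks. */
	static u64 read_task_rq_clock_example(struct task_struct *p)
	{
		unsigned long flags;
		struct rq *rq;
		u64 now;

		rq = task_rq_lock(p, &flags);   /* takes p->pi_lock, then rq->lock    */
		now = rq_clock(rq);             /* lockdep_assert_held() is satisfied */
		task_rq_unlock(rq, p, &flags);  /* releases both locks                */

		return now;
	}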
