Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h | 38 +++++++++++++++++++++++++++++++++-----
1 file changed, 33 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dc0f435a2779..e0e129993958 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -6,6 +6,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/stop_machine.h>
+#include <linux/irq_work.h>
 #include <linux/tick.h>
 #include <linux/slab.h>
 
@@ -362,8 +363,14 @@ struct cfs_rq {
 	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
 	 * This allows for the description of both thread and group usage (in
 	 * the FAIR_GROUP_SCHED case).
+	 * runnable_load_avg is the sum of the load_avg_contrib of the
+	 * sched_entities on the rq.
+	 * blocked_load_avg is similar to runnable_load_avg except that its
+	 * the blocked sched_entities on the rq.
+	 * utilization_load_avg is the sum of the average running time of the
+	 * sched_entities on the rq.
 	 */
-	unsigned long runnable_load_avg, blocked_load_avg;
+	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
 	atomic64_t decay_counter;
 	u64 last_decay;
 	atomic_long_t removed_load;
@@ -418,6 +425,11 @@ static inline int rt_bandwidth_enabled(void)
 	return sysctl_sched_rt_runtime >= 0;
 }
 
+/* RT IPI pull logic requires IRQ_WORK */
+#ifdef CONFIG_IRQ_WORK
+# define HAVE_RT_PUSH_IPI
+#endif
+
 /* Real-Time classes' related field in a runqueue: */
 struct rt_rq {
 	struct rt_prio_array active;
@@ -435,7 +447,13 @@ struct rt_rq {
 	unsigned long rt_nr_total;
 	int overloaded;
 	struct plist_head pushable_tasks;
+#ifdef HAVE_RT_PUSH_IPI
+	int push_flags;
+	int push_cpu;
+	struct irq_work push_work;
+	raw_spinlock_t push_lock;
 #endif
+#endif /* CONFIG_SMP */
 	int rt_queued;
 
 	int rt_throttled;
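
These fields back an IPI-based pull path: rather than each CPU that drops below RT priority taking remote rq locks to pull tasks, it can ask an overloaded CPU to push them over via irq_work. A rough sketch of the dispatch, assuming the tell_cpu_to_push() helper and RT_PUSH_IPI scheduling feature added alongside this in kernel/sched/rt.c:

#ifdef HAVE_RT_PUSH_IPI
static void pull_rt_task_ipi_sketch(struct rq *this_rq)
{
	if (sched_feat(RT_PUSH_IPI)) {
		/*
		 * Queue this_rq's push_work on one overloaded CPU; that CPU
		 * pushes tasks to us and chains to the next overloaded CPU
		 * via push_cpu, with push_flags/push_lock serializing
		 * restarts of the chain.
		 */
		tell_cpu_to_push(this_rq);
		return;
	}
	/* Fallback: walk the overloaded CPUs under their rq locks. */
	pull_rt_task(this_rq);
}
#endif

Note the second #endif added above closes CONFIG_SMP; the new fields exist only when both SMP and IRQ_WORK are available.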
@@ -597,6 +615,7 @@ struct rq {
 	struct sched_domain *sd;
 
 	unsigned long cpu_capacity;
+	unsigned long cpu_capacity_orig;
 
 	unsigned char idle_balance;
 	/* For active balancing */
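
cpu_capacity_orig caches the CPU's full capacity so later code can compare what is currently left for CFS (cpu_capacity, which has RT/IRQ time removed) against the original. A sketch of how the two are typically set, assuming update_cpu_capacity() in kernel/sched/fair.c and its arch_scale_cpu_capacity()/scale_rt_capacity() helpers:

static void update_cpu_capacity_sketch(struct sched_domain *sd, int cpu)
{
	unsigned long capacity = SCHED_CAPACITY_SCALE;

	capacity *= arch_scale_cpu_capacity(sd, cpu);
	capacity >>= SCHED_CAPACITY_SHIFT;

	cpu_rq(cpu)->cpu_capacity_orig = capacity;	/* full capacity */

	capacity *= scale_rt_capacity(cpu);	/* scale out RT/IRQ time */
	capacity >>= SCHED_CAPACITY_SHIFT;

	cpu_rq(cpu)->cpu_capacity = capacity;	/* what CFS can use */
}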
@@ -807,7 +826,7 @@ struct sched_group_capacity {
 	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
 	 * for a single CPU.
 	 */
-	unsigned int capacity, capacity_orig;
+	unsigned int capacity;
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */
 	/*
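
With the per-rq cpu_capacity_orig added above, the per-group copy becomes redundant: code that needs the original capacity can read it straight from the runqueue, along the lines of this small accessor (a sketch of the fair.c helper this series introduces):

static unsigned long capacity_orig_of(int cpu)
{
	return cpu_rq(cpu)->cpu_capacity_orig;
}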
@@ -1368,9 +1387,18 @@ static inline int hrtick_enabled(struct rq *rq)
 
 #ifdef CONFIG_SMP
 extern void sched_avg_update(struct rq *rq);
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+#endif
+
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
-	rq->rt_avg += rt_delta;
+	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
 	sched_avg_update(rq);
 }
 #else
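
With the weak default above, rt_avg now accumulates rt_delta * SCHED_CAPACITY_SCALE (1024), so RT time run at full capacity is unchanged in relative terms, while an architecture that tracks its current frequency can pre-define arch_scale_freq_capacity() to weight down RT time consumed at lower frequencies; e.g. at half the maximum frequency a delta would contribute 512/1024 of its wall-clock value. A hypothetical override, assuming a per-CPU ratio the architecture keeps updated from cpufreq notifications:

/* Hypothetical arch header, seen before the #ifndef above: */
DECLARE_PER_CPU(unsigned long, arch_freq_capacity);
	/* (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq */

#define arch_scale_freq_capacity arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(arch_freq_capacity, cpu);
}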
@@ -1643,8 +1671,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void print_dl_stats(struct seq_file *m, int cpu);
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
-extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
-extern void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq);
+extern void init_rt_rq(struct rt_rq *rt_rq);
+extern void init_dl_rq(struct dl_rq *dl_rq);
 
 extern void cfs_bandwidth_usage_inc(void);
 extern void cfs_bandwidth_usage_dec(void);
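
The rq argument was unused in both initializers, so it is dropped; the definitions in rt.c/deadline.c and the call sites in core.c change to match, e.g. in sched_init() (sketch):

	init_rt_rq(&rq->rt);
	init_dl_rq(&rq->dl);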
