Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--   kernel/sched/sched.h   | 63
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 802b1f3405f2..b3cb895d14a2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -335,8 +335,6 @@ struct cfs_bandwidth {
 	u64			quota;
 	u64			runtime;
 	s64			hierarchical_quota;
-	u64			runtime_expires;
-	int			expires_seq;
 
 	u8			idle;
 	u8			period_active;
@@ -393,6 +391,16 @@ struct task_group {
 #endif
 
 	struct cfs_bandwidth	cfs_bandwidth;
+
+#ifdef CONFIG_UCLAMP_TASK_GROUP
+	/* The two decimal precision [%] value requested from user-space */
+	unsigned int		uclamp_pct[UCLAMP_CNT];
+	/* Clamp values requested for a task group */
+	struct uclamp_se	uclamp_req[UCLAMP_CNT];
+	/* Effective clamp values used for a task group */
+	struct uclamp_se	uclamp[UCLAMP_CNT];
+#endif
+
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
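
As an editorial aside: the three arrays added above hold, per clamp index, the raw percentage written from user-space, the requested clamp in capacity units, and the effective clamp after parent restrictions are applied. The stand-alone model below sketches the intended aggregation (a child's effective value is its request, capped by the parent's effective value); the helper names, the rounding and the scaffolding are illustrative assumptions, not code from this patch.

/* Stand-alone model of per-task-group clamp aggregation; not kernel code. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
enum { UCLAMP_MIN, UCLAMP_MAX, UCLAMP_CNT };

struct tg_model {
	unsigned int uclamp_pct[UCLAMP_CNT];	/* requested, two-decimal percent (5025 == 50.25%) */
	unsigned int uclamp_req[UCLAMP_CNT];	/* requested, capacity units */
	unsigned int uclamp_eff[UCLAMP_CNT];	/* effective, after parent restriction */
};

/* Convert a two-decimal percentage into capacity units (assumption: round down). */
static unsigned int capacity_from_pct(unsigned int pct)
{
	return (unsigned int)(((unsigned long long)pct * SCHED_CAPACITY_SCALE) / 10000);
}

/* A child's effective clamp never exceeds its parent's effective clamp. */
static void update_effective(struct tg_model *child, const struct tg_model *parent)
{
	for (int id = 0; id < UCLAMP_CNT; id++) {
		unsigned int req = child->uclamp_req[id];
		unsigned int cap = parent->uclamp_eff[id];

		child->uclamp_eff[id] = req < cap ? req : cap;
	}
}

int main(void)
{
	/* Parent leaves the boost unrestricted but limits the max clamp to 50%. */
	struct tg_model parent = {
		.uclamp_eff = { [UCLAMP_MIN] = SCHED_CAPACITY_SCALE,
				[UCLAMP_MAX] = SCHED_CAPACITY_SCALE / 2 },
	};
	struct tg_model child = {
		.uclamp_pct = { [UCLAMP_MIN] = 2500, [UCLAMP_MAX] = 8000 },
	};

	for (int id = 0; id < UCLAMP_CNT; id++)
		child.uclamp_req[id] = capacity_from_pct(child.uclamp_pct[id]);

	update_effective(&child, &parent);
	printf("min=%u max=%u\n", child.uclamp_eff[UCLAMP_MIN], child.uclamp_eff[UCLAMP_MAX]);
	return 0;
}

Built with a plain C compiler this prints min=256 max=512: the child's minimum request passes through while the parent's tighter maximum wins.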
@@ -483,7 +491,8 @@ struct cfs_rq {
 	struct load_weight	load;
 	unsigned long		runnable_weight;
 	unsigned int		nr_running;
-	unsigned int		h_nr_running;
+	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
 
 	u64			exec_clock;
 	u64			min_vruntime;
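
For orientation, the new counter mirrors h_nr_running but only counts the SCHED_IDLE tasks below this cfs_rq, which makes it cheap to ask whether everything runnable under a runqueue is SCHED_IDLE. A minimal stand-alone model of the hierarchical bookkeeping follows; the structure and helper are assumptions for illustration, not the patch's enqueue path.

/* Stand-alone model of the hierarchical SCHED_IDLE accounting; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

struct cfs_rq_model {
	unsigned int h_nr_running;	/* all SCHED_{NORMAL,BATCH,IDLE} tasks below */
	unsigned int idle_h_nr_running;	/* only the SCHED_IDLE tasks below */
	struct cfs_rq_model *parent;
};

/* Enqueueing one task bumps the counters in every cfs_rq up to the root. */
static void enqueue_model(struct cfs_rq_model *cfs_rq, bool task_is_idle)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->h_nr_running++;
		if (task_is_idle)
			cfs_rq->idle_h_nr_running++;
	}
}

int main(void)
{
	struct cfs_rq_model root = { 0 }, group = { .parent = &root };

	enqueue_model(&group, false);	/* a SCHED_NORMAL task */
	enqueue_model(&group, true);	/* a SCHED_IDLE task */

	printf("root: h=%u idle_h=%u\n", root.h_nr_running, root.idle_h_nr_running);
	return 0;
}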
@@ -556,8 +565,6 @@ struct cfs_rq {
 
 #ifdef CONFIG_CFS_BANDWIDTH
 	int			runtime_enabled;
-	int			expires_seq;
-	u64			runtime_expires;
 	s64			runtime_remaining;
 
 	u64			throttled_clock;
@@ -777,9 +784,6 @@ struct root_domain {
 	struct perf_domain __rcu *pd;
 };
 
-extern struct root_domain def_root_domain;
-extern struct mutex sched_domains_mutex;
-
 extern void init_defrootdomain(void);
 extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
@@ -1261,16 +1265,18 @@ enum numa_topology_type {
 extern enum numa_topology_type sched_numa_topology_type;
 extern int sched_max_numa_distance;
 extern bool find_numa_distance(int distance);
-#endif
-
-#ifdef CONFIG_NUMA
 extern void sched_init_numa(void);
 extern void sched_domains_numa_masks_set(unsigned int cpu);
 extern void sched_domains_numa_masks_clear(unsigned int cpu);
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
 #else
 static inline void sched_init_numa(void) { }
 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+	return nr_cpu_ids;
+}
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
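
The new helper returns a CPU from @cpus that is NUMA-closest to @cpu, and the !CONFIG_NUMA stub returns nr_cpu_ids to mean "no CPU found". Below is a stand-alone model of that idea using a plain distance-table search; the topology, the mask handling and the search strategy are illustrative assumptions, not the kernel's implementation.

/* Stand-alone model of a "closest CPU by NUMA distance" lookup; not the kernel's implementation. */
#include <limits.h>
#include <stdio.h>

#define NR_CPUS		8
#define NR_NODES	2
static const int nr_cpu_ids = NR_CPUS;

/* Assumed topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static const int cpu_to_node_map[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
/* Assumed NUMA distance table (diagonal == local). */
static const int node_distance[NR_NODES][NR_NODES] = { { 10, 20 }, { 20, 10 } };

/* Return the allowed CPU with the smallest distance to @cpu, or nr_cpu_ids if none. */
static int model_numa_find_closest(unsigned long mask, int cpu)
{
	int best = nr_cpu_ids, best_dist = INT_MAX;

	for (int i = 0; i < NR_CPUS; i++) {
		if (!(mask & (1UL << i)))
			continue;

		int dist = node_distance[cpu_to_node_map[cpu]][cpu_to_node_map[i]];

		if (dist < best_dist) {
			best_dist = dist;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	unsigned long allowed = (1UL << 5) | (1UL << 6);	/* only CPUs on node 1 */

	printf("closest to CPU0: %d\n", model_numa_find_closest(allowed, 0));	/* 5 (remote, first match) */
	printf("closest to CPU7: %d\n", model_numa_find_closest(allowed, 7));	/* 5 (same node) */
	printf("none allowed:    %d\n", model_numa_find_closest(0, 0));		/* nr_cpu_ids == 8 */
	return 0;
}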
@@ -1449,10 +1455,14 @@ static inline void unregister_sched_domain_sysctl(void)
 }
 #endif
 
+extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
+
 #else
 
 static inline void sched_ttwu_pending(void) { }
 
+static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
@@ -1700,17 +1710,21 @@ struct sched_class {
 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
 
 	/*
-	 * It is the responsibility of the pick_next_task() method that will
-	 * return the next task to call put_prev_task() on the @prev task or
-	 * something equivalent.
+	 * Both @prev and @rf are optional and may be NULL, in which case the
+	 * caller must already have invoked put_prev_task(rq, prev, rf).
+	 *
+	 * Otherwise it is the responsibility of the pick_next_task() to call
+	 * put_prev_task() on the @prev task or something equivalent, IFF it
+	 * returns a next task.
 	 *
-	 * May return RETRY_TASK when it finds a higher prio class has runnable
-	 * tasks.
+	 * In that case (@rf != NULL) it may return RETRY_TASK when it finds a
+	 * higher prio class has runnable tasks.
 	 */
 	struct task_struct * (*pick_next_task)(struct rq *rq,
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
-	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
+	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
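
The reworked comment above describes two calling conventions for ->pick_next_task(). The stand-alone model below merely acts out those two paths with simplified stand-in types; nothing in it is the scheduler's real code, and RETRY_TASK handling is only sketched for the @rf != NULL case, as the comment requires.

/* Stand-alone model of the two pick_next_task() calling conventions; simplified stand-ins only. */
#include <stdio.h>
#include <stddef.h>

struct task_struct { const char *comm; };
struct rq { struct task_struct *curr; };
struct rq_flags { int cookie; };

#define RETRY_TASK ((struct task_struct *)-1UL)	/* stand-in for the kernel's marker */

static void put_prev_task_model(struct rq *rq, struct task_struct *prev)
{
	(void)rq;
	printf("put_prev_task(%s)\n", prev->comm);
}

/*
 * Model of a class's ->pick_next_task(): with non-NULL @prev/@rf it must put
 * @prev itself iff it returns a task (and may return RETRY_TASK); with NULL
 * arguments the caller has already put @prev.
 */
static struct task_struct *pick_next_task_model(struct rq *rq,
						struct task_struct *prev,
						struct rq_flags *rf)
{
	static struct task_struct next = { .comm = "next" };

	if (prev && rf)
		put_prev_task_model(rq, prev);

	return &next;
}

int main(void)
{
	struct task_struct prev = { .comm = "prev" };
	struct rq rq = { .curr = &prev };
	struct rq_flags rf = { 0 };
	struct task_struct *p;

	/* Convention 1: pass @prev and @rf, the class puts @prev and may ask for a retry. */
	p = pick_next_task_model(&rq, &prev, &rf);
	if (p == RETRY_TASK)
		puts("restart the pick from a higher priority class");

	/* Convention 2: the caller puts @prev first, then calls with NULLs. */
	put_prev_task_model(&rq, &prev);
	p = pick_next_task_model(&rq, NULL, NULL);

	printf("picked %s\n", p->comm);
	return 0;
}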
@@ -1725,7 +1739,6 @@ struct sched_class {
 	void (*rq_offline)(struct rq *rq);
 #endif
 
-	void (*set_curr_task)(struct rq *rq);
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork)(struct task_struct *p);
 	void (*task_dead)(struct task_struct *p);
@@ -1755,12 +1768,14 @@
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	prev->sched_class->put_prev_task(rq, prev);
+	WARN_ON_ONCE(rq->curr != prev);
+	prev->sched_class->put_prev_task(rq, prev, NULL);
 }
 
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-	curr->sched_class->set_curr_task(rq);
+	WARN_ON_ONCE(rq->curr != next);
+	next->sched_class->set_next_task(rq, next);
 }
 
 #ifdef CONFIG_SMP
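
As an illustration of how the renamed pair is meant to be used, the stand-alone model below switches the running task's class by bracketing the change with put_prev_task() and set_next_task(), and stands in for the new WARN_ON_ONCE() checks with assert(). The types and class methods are simplified stand-ins, not scheduler code.

/* Stand-alone model of the put_prev_task()/set_next_task() pairing; not kernel code. */
#include <assert.h>
#include <stdio.h>

struct rq;
struct task_struct;

struct sched_class_model {
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p);
};

struct task_struct { const struct sched_class_model *sched_class; };
struct rq { struct task_struct *curr; };

static void fair_put_prev(struct rq *rq, struct task_struct *p) { puts("fair: put prev"); }
static void fair_set_next(struct rq *rq, struct task_struct *p) { puts("fair: set next"); }
static const struct sched_class_model fair_class = { fair_put_prev, fair_set_next };

static void rt_put_prev(struct rq *rq, struct task_struct *p) { puts("rt: put prev"); }
static void rt_set_next(struct rq *rq, struct task_struct *p) { puts("rt: set next"); }
static const struct sched_class_model rt_class = { rt_put_prev, rt_set_next };

/* Both helpers only make sense for the task currently running on @rq. */
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	assert(rq->curr == prev);	/* models WARN_ON_ONCE() */
	prev->sched_class->put_prev_task(rq, prev);
}

static void set_next_task(struct rq *rq, struct task_struct *next)
{
	assert(rq->curr == next);
	next->sched_class->set_next_task(rq, next);
}

int main(void)
{
	struct task_struct p = { .sched_class = &fair_class };
	struct rq rq = { .curr = &p };

	put_prev_task(&rq, &p);		/* account the outgoing state ... */
	p.sched_class = &rt_class;	/* ... switch class (think sched_setscheduler) ... */
	set_next_task(&rq, &p);		/* ... and re-establish it as the running task */
	return 0;
}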
@@ -1943,7 +1958,7 @@ unsigned long arch_scale_freq_capacity(int cpu)
 #endif
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
 
@@ -1995,7 +2010,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
@@ -2266,7 +2281,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef CONFIG_UCLAMP_TASK
-unsigned int uclamp_eff_value(struct task_struct *p, unsigned int clamp_id);
+enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
 static __always_inline
 unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
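
For reference, uclamp_util_with() boils down to clamping a utilization value between the effective minimum and maximum that apply to the runqueue (and, when given, a task). The stand-alone sketch below shows only that final clamp; the lookup of the effective values is omitted and the numbers are made up.

/* Stand-alone sketch of the final clamp performed by uclamp_util_with(); illustrative only. */
#include <stdio.h>

static unsigned int clamp_util(unsigned int util, unsigned int min, unsigned int max)
{
	if (util < min)
		return min;
	if (util > max)
		return max;
	return util;
}

int main(void)
{
	/* Assume effective clamps of [128, 512] on a 1024 capacity scale. */
	printf("%u\n", clamp_util(50, 128, 512));	/* boosted to 128 */
	printf("%u\n", clamp_util(300, 128, 512));	/* unchanged */
	printf("%u\n", clamp_util(900, 128, 512));	/* capped at 512 */
	return 0;
}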