Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h | 63 +++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 802b1f3405f2..b3cb895d14a2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -335,8 +335,6 @@ struct cfs_bandwidth {
 	u64			quota;
 	u64			runtime;
 	s64			hierarchical_quota;
-	u64			runtime_expires;
-	int			expires_seq;
 
 	u8			idle;
 	u8			period_active;
@@ -393,6 +391,16 @@ struct task_group {
 #endif
 
 	struct cfs_bandwidth	cfs_bandwidth;
+
+#ifdef CONFIG_UCLAMP_TASK_GROUP
+	/* The two decimal precision [%] value requested from user-space */
+	unsigned int		uclamp_pct[UCLAMP_CNT];
+	/* Clamp values requested for a task group */
+	struct uclamp_se	uclamp_req[UCLAMP_CNT];
+	/* Effective clamp values used for a task group */
+	struct uclamp_se	uclamp[UCLAMP_CNT];
+#endif
+
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
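
The new uclamp_pct[] array caches the raw percentage written from user-space with two decimal places, while uclamp_req[] and uclamp[] hold the requested and effective clamp values on the capacity scale. A minimal standalone sketch of that percentage-to-clamp mapping, assuming the two-decimal value is encoded as an integer in [0, 10000] (the helper name and the encoding are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024	/* fixed-point "100%" utilization */

/*
 * Illustrative only: map a percentage stored with two decimal places
 * (e.g. "50.00%" encoded as 5000) onto the [0, SCHED_CAPACITY_SCALE] range.
 */
static unsigned int pct_to_clamp(unsigned int pct_x100)
{
	assert(pct_x100 <= 10000);
	/* round to nearest rather than truncating */
	return (pct_x100 * SCHED_CAPACITY_SCALE + 5000) / 10000;
}

int main(void)
{
	printf("50.00%%  -> %u\n", pct_to_clamp(5000));	/* 512 */
	printf("100.00%% -> %u\n", pct_to_clamp(10000));	/* 1024 */
	return 0;
}
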
@@ -483,7 +491,8 @@ struct cfs_rq {
 	struct load_weight	load;
 	unsigned long		runnable_weight;
 	unsigned int		nr_running;
-	unsigned int		h_nr_running;
+	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
 
 	u64			exec_clock;
 	u64			min_vruntime;
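
With a hierarchical count of SCHED_IDLE entities kept next to h_nr_running, a runqueue whose runnable load consists only of SCHED_IDLE tasks can be recognized by comparing the two counters. A hedged sketch of such a check (the helper name is made up for illustration, not a kernel symbol):

/*
 * Illustrative sketch: a cfs_rq carries only SCHED_IDLE work when it has
 * runnable entities and every one of them is counted in idle_h_nr_running.
 */
static inline bool cfs_rq_is_idle_only(struct cfs_rq *cfs_rq)
{
	return cfs_rq->h_nr_running &&
	       cfs_rq->h_nr_running == cfs_rq->idle_h_nr_running;
}
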
@@ -556,8 +565,6 @@ struct cfs_rq {
 
 #ifdef CONFIG_CFS_BANDWIDTH
 	int			runtime_enabled;
-	int			expires_seq;
-	u64			runtime_expires;
 	s64			runtime_remaining;
 
 	u64			throttled_clock;
@@ -777,9 +784,6 @@ struct root_domain {
 	struct perf_domain __rcu *pd;
 };
 
-extern struct root_domain def_root_domain;
-extern struct mutex sched_domains_mutex;
-
 extern void init_defrootdomain(void);
 extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
@@ -1261,16 +1265,18 @@ enum numa_topology_type {
 extern enum numa_topology_type sched_numa_topology_type;
 extern int sched_max_numa_distance;
 extern bool find_numa_distance(int distance);
-#endif
-
-#ifdef CONFIG_NUMA
 extern void sched_init_numa(void);
 extern void sched_domains_numa_masks_set(unsigned int cpu);
 extern void sched_domains_numa_masks_clear(unsigned int cpu);
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
 #else
 static inline void sched_init_numa(void) { }
 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+	return nr_cpu_ids;
+}
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
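
The !CONFIG_NUMA stub returns nr_cpu_ids, so callers can use a single "no suitable CPU" check whether or not NUMA support is built in. A hedged usage sketch (the caller and its fallback policy are illustrative, not the kernel's):

/*
 * Illustrative caller: prefer a CPU that is NUMA-close to @preferred and
 * fall back to any CPU in the mask when none is found (cpu >= nr_cpu_ids).
 */
static int example_pick_close_cpu(const struct cpumask *cpus, int preferred)
{
	int cpu = sched_numa_find_closest(cpus, preferred);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpus);

	return cpu;
}
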
@@ -1449,10 +1455,14 @@ static inline void unregister_sched_domain_sysctl(void)
 }
 #endif
 
+extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
+
 #else
 
 static inline void sched_ttwu_pending(void) { }
 
+static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
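
newidle_balance() is now declared here so the newly-idle balance pass can be driven from a task-pick path rather than only from within fair.c; the !CONFIG_SMP stub simply reports that nothing was pulled. A hedged sketch of a caller interpreting the return value, assuming the convention that a negative result means "a higher-priority class became runnable, retry" and a positive one means "tasks were pulled" (example_* names are placeholders):

/*
 * Illustrative sketch (not the kernel's code): act on newidle_balance()'s
 * return value when the local runqueue ran out of tasks.
 */
static struct task_struct *example_pick_or_balance(struct rq *rq, struct rq_flags *rf)
{
	struct task_struct *p;
	int pulled;

again:
	p = example_try_pick(rq);	/* placeholder: pick from this rq */
	if (p)
		return p;

	pulled = newidle_balance(rq, rf);
	if (pulled < 0)			/* higher-prio class woke up */
		return RETRY_TASK;
	if (pulled > 0)			/* pulled some work, try again */
		goto again;

	return NULL;			/* genuinely idle */
}
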
@@ -1700,17 +1710,21 @@ struct sched_class {
 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
 
 	/*
-	 * It is the responsibility of the pick_next_task() method that will
-	 * return the next task to call put_prev_task() on the @prev task or
-	 * something equivalent.
+	 * Both @prev and @rf are optional and may be NULL, in which case the
+	 * caller must already have invoked put_prev_task(rq, prev, rf).
+	 *
+	 * Otherwise it is the responsibility of the pick_next_task() to call
+	 * put_prev_task() on the @prev task or something equivalent, IFF it
+	 * returns a next task.
 	 *
-	 * May return RETRY_TASK when it finds a higher prio class has runnable
-	 * tasks.
+	 * In that case (@rf != NULL) it may return RETRY_TASK when it finds a
+	 * higher prio class has runnable tasks.
 	 */
 	struct task_struct * (*pick_next_task)(struct rq *rq,
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
-	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
+	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
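
The reworded comment tightens the contract: with @prev and @rf both NULL the caller has already put the previous task, otherwise the method itself must call put_prev_task(), but only if it actually returns a task, and RETRY_TASK is only a legal return when @rf is non-NULL. A hedged sketch of a class method following that contract (all example_* names are placeholders, not kernel symbols):

/* Illustrative sketch of the pick_next_task() contract described above. */
static struct task_struct *
example_pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct task_struct *next;

	/* Only the full pick path (@rf set) may ask the caller to retry. */
	if (rf && example_higher_class_runnable(rq))
		return RETRY_TASK;

	next = example_pick(rq);
	if (!next)
		return NULL;	/* no task returned: @prev is deliberately left un-put */

	/* We do return a task, so putting @prev is our responsibility. */
	if (prev)
		put_prev_task(rq, prev);

	example_set_next(rq, next);
	return next;
}
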
@@ -1725,7 +1739,6 @@ struct sched_class {
 	void (*rq_offline)(struct rq *rq);
 #endif
 
-	void (*set_curr_task)(struct rq *rq);
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork)(struct task_struct *p);
 	void (*task_dead)(struct task_struct *p);
@@ -1755,12 +1768,14 @@ struct sched_class {
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	prev->sched_class->put_prev_task(rq, prev);
+	WARN_ON_ONCE(rq->curr != prev);
+	prev->sched_class->put_prev_task(rq, prev, NULL);
 }
 
-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-	curr->sched_class->set_curr_task(rq);
+	WARN_ON_ONCE(rq->curr != next);
+	next->sched_class->set_next_task(rq, next);
 }
 
 #ifdef CONFIG_SMP
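
Both wrappers now warn if they are applied to anything other than rq->curr, which documents the intended pairing: "put" the running task before touching its scheduling state, then "set" it again so its class can refresh any cached state. A hedged sketch of that pattern (the function below is illustrative, not the kernel's):

/*
 * Illustrative sketch of the put/set pairing around a change to the
 * currently running task's scheduling state.
 */
static void example_change_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	put_prev_task(rq, curr);	/* warns unless curr == rq->curr */

	/* ... modify class/priority related state of @curr here ... */

	set_next_task(rq, curr);	/* ditto */
}
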
@@ -1943,7 +1958,7 @@ unsigned long arch_scale_freq_capacity(int cpu)
 #endif
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
 
@@ -1995,7 +2010,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
@@ -2266,7 +2281,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef CONFIG_UCLAMP_TASK
-unsigned int uclamp_eff_value(struct task_struct *p, unsigned int clamp_id);
+enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
 static __always_inline
 unsigned int uclamp_util_with(struct rq *rq, unsigned int util,