Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h | 76 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 73 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 52453a2d0a79..ad4f4fbd002e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -74,6 +74,13 @@ extern void update_cpu_load_active(struct rq *this_rq);
 #define NICE_0_SHIFT		SCHED_LOAD_SHIFT
 
 /*
+ * Single value that decides SCHED_DEADLINE internal math precision.
+ * 10 -> just above 1us
+ *  9 -> just above 0.5us
+ */
+#define DL_SCALE	(10)
+
+/*
  * These are the 'tuning knobs' of the scheduler:
  */
 
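The shift by DL_SCALE = 10 quantizes nanosecond values to 1024ns steps, which is the "just above 1us" in the comment. The knob exists because the deadline code cross-multiplies runtimes and periods, and the product of two raw nanosecond quantities can exceed 64 bits; pre-shifting both factors trades ~1us of precision for headroom. A standalone sketch of that pattern, loosely modeled on the overflow check in kernel/sched/deadline.c (names and exact form here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

#define DL_SCALE	(10)

/* Wrap-safe "earlier than", mirroring dl_time_before() below. */
static bool before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

/*
 * Does the entity need more than its reserved fraction, i.e. is
 *   runtime / (deadline - t)  >  dl_runtime / dl_period ?
 * Cross-multiplying raw nanosecond values can exceed 64 bits, so
 * each factor first drops DL_SCALE low bits (~1us of precision).
 */
static bool dl_overflow_sketch(uint64_t runtime, uint64_t dl_runtime,
			       uint64_t deadline, uint64_t dl_period,
			       uint64_t t)
{
	uint64_t left  = (dl_period >> DL_SCALE) * (runtime >> DL_SCALE);
	uint64_t right = ((deadline - t) >> DL_SCALE) *
			 (dl_runtime >> DL_SCALE);

	return before(right, left);
}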
@@ -107,7 +114,7 @@ static inline int task_has_dl_policy(struct task_struct *p)
 	return dl_policy(p->policy);
 }
 
-static inline int dl_time_before(u64 a, u64 b)
+static inline bool dl_time_before(u64 a, u64 b)
 {
 	return (s64)(a - b) < 0;
 }
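The (s64) cast is the standard wrap-safe ordering trick: u64 clock values eventually wrap, and a plain a < b would misorder values straddling the wrap point, while the signed view of the unsigned difference keeps working as long as the two values are within 2^63 ns of each other. A minimal userspace demonstration (assuming a two's-complement s64, as the kernel does):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same trick as dl_time_before(): the signed view of the unsigned
 * difference gives the right ordering even across a wrap. */
static bool time_before64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	assert(time_before64(10, 20));		/* plain case */
	assert(!time_before64(20, 10));
	/* b wrapped past zero, yet still compares as later than a */
	assert(time_before64(UINT64_MAX - 5, 5));
	return 0;
}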
@@ -115,8 +122,8 @@ static inline int dl_time_before(u64 a, u64 b)
 /*
  * Tells if entity @a should preempt entity @b.
  */
-static inline
-int dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
+static inline bool
+dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
 {
 	return dl_time_before(a->deadline, b->deadline);
 }
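This is plain EDF: of two runnable deadline entities, the one with the earlier absolute deadline runs. To make the semantics concrete, a hypothetical helper (not a kernel function) that just names the rule dl_entity_preempt() encodes:

static struct sched_dl_entity *
edf_pick(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_preempt(a, b) ? a : b;
}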
@@ -136,6 +143,50 @@ struct rt_bandwidth {
 	u64		rt_runtime;
 	struct hrtimer	rt_period_timer;
 };
+/*
+ * To keep the bandwidth of -deadline tasks and groups under control
+ * we need some place where we can:
+ *  - store the maximum -deadline bandwidth of the system (the group);
+ *  - cache the fraction of that bandwidth that is currently allocated.
+ *
+ * This is all done in the data structure below. It is similar to the
+ * one used for RT-throttling (rt_bandwidth), with the main difference
+ * that, since here we are only interested in admission control, we
+ * do not decrease any runtime while the group "executes", nor do we
+ * need a timer to replenish it.
+ *
+ * With respect to SMP, the bandwidth is given on a per-CPU basis,
+ * meaning that:
+ *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
+ *  - dl_total_bw array contains, in the i-th element, the currently
+ *    allocated bandwidth on the i-th CPU.
+ * Moreover, groups consume bandwidth on each CPU, while tasks only
+ * consume bandwidth on the CPU they're running on.
+ * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
+ * that will be shown the next time the proc or cgroup controls are
+ * read. It can, in turn, be changed by writing to its own
+ * control.
+ */
+struct dl_bandwidth {
+	raw_spinlock_t	dl_runtime_lock;
+	u64		dl_runtime;
+	u64		dl_period;
+};
+
+static inline int dl_bandwidth_enabled(void)
+{
+	return sysctl_sched_dl_runtime >= 0;
+}
+
+extern struct dl_bw *dl_bw_of(int i);
+
+struct dl_bw {
+	raw_spinlock_t	lock;
+	u64		bw, total_bw;
+};
+
+static inline u64 global_dl_period(void);
+static inline u64 global_dl_runtime(void);
 
 extern struct mutex sched_domains_mutex;
 
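struct dl_bw is the whole admission-control state: bw is the cap (a fixed-point utilization fraction, see to_ratio() near the end of this patch) and total_bw the currently reserved sum. A hedged sketch of what an admission test over it can look like; dl_admit_sketch() is a hypothetical helper, the real checks live in the companion kernel/sched/core.c change and also cover parameter updates and multi-CPU root domains:

static int dl_admit_sketch(struct dl_bw *dl_b, u64 period, u64 runtime)
{
	u64 new_bw = to_ratio(period, runtime);
	int ret = -EBUSY;

	raw_spin_lock(&dl_b->lock);
	if (dl_b->total_bw + new_bw <= dl_b->bw) {
		dl_b->total_bw += new_bw;	/* reserve the fraction */
		ret = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return ret;
}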
@@ -423,6 +474,8 @@ struct dl_rq {
 	 */
 	struct rb_root pushable_dl_tasks_root;
 	struct rb_node *pushable_dl_tasks_leftmost;
+#else
+	struct dl_bw dl_bw;
 #endif
 };
 
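The #else branch gives UP kernels a per-runqueue dl_bw, since there is no root domain (added to in the next hunk) to hang it off. Presumably the companion core.c change resolves dl_bw_of() along these lines (a sketch, not verified against this exact tree):

#ifdef CONFIG_SMP
struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->rd->dl_bw;	/* shared across the root domain */
}
#else
struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;	/* the per-rq field added above */
}
#endif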
@@ -449,6 +502,7 @@ struct root_domain {
 	 */
 	cpumask_var_t dlo_mask;
 	atomic_t dlo_count;
+	struct dl_bw dl_bw;
 
 	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
@@ -897,7 +951,18 @@ static inline u64 global_rt_runtime(void)
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }
 
+static inline u64 global_dl_period(void)
+{
+	return (u64)sysctl_sched_dl_period * NSEC_PER_USEC;
+}
+
+static inline u64 global_dl_runtime(void)
+{
+	if (sysctl_sched_dl_runtime < 0)
+		return RUNTIME_INF;
 
+	return (u64)sysctl_sched_dl_runtime * NSEC_PER_USEC;
+}
 
 static inline int task_current(struct rq *rq, struct task_struct *p)
 {
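Both sysctls are in microseconds, hence the NSEC_PER_USEC scaling. Assuming the defaults mirror the RT throttling knobs (runtime 950000us per 1000000us period, an assumption about the companion core.c, not visible in this hunk), the global cap works out to 950000 * 1000 = 950,000,000ns of -deadline service per 1,000,000,000ns period, i.e. 95% of each CPU. Writing a negative value to the runtime knob makes global_dl_runtime() return RUNTIME_INF and dl_bandwidth_enabled() return false, disabling admission control entirely.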
@@ -1145,6 +1210,7 @@ extern void update_max_interval(void);
 extern void init_sched_dl_class(void);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
+extern void init_sched_dl_class(void);
 
 extern void resched_task(struct task_struct *p);
 extern void resched_cpu(int cpu);
@@ -1152,8 +1218,12 @@ extern void resched_cpu(int cpu);
 extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
 
+extern struct dl_bandwidth def_dl_bandwidth;
+extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
 
+unsigned long to_ratio(u64 period, u64 runtime);
+
 extern void update_idle_cpu_load(struct rq *this_rq);
 
 extern void init_task_runnable_average(struct task_struct *p);
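to_ratio() is only declared here; it converts a (period, runtime) pair into the fixed-point utilization fraction that dl_bw.bw and dl_bw.total_bw are expressed in. A sketch under the assumption that the scale is 2^20, so 1 << 20 stands for 100% of a CPU; the shift width is an assumption about the companion core.c, not visible in this hunk:

unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;
	if (period == 0)	/* avoid div-by-zero; 0 is safe for callers */
		return 0;

	return div64_u64(runtime << 20, period);
}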