path: root/kernel/sched/sched.h
author		Dario Faggioli <raistlin@linux.it>	2013-11-28 05:14:43 -0500
committer	Ingo Molnar <mingo@kernel.org>		2014-01-13 07:41:06 -0500
commit		aab03e05e8f7e26f51dee792beddcb5cca9215a5 (patch)
tree		bae7f6033c849e7ca77a98783c732caea412ae75	/kernel/sched/sched.h
parent		d50dde5a10f305253cbc3855307f608f8a3c5f73 (diff)
sched/deadline: Add SCHED_DEADLINE structures & implementation
Introduces the data structures, constants and symbols needed for the SCHED_DEADLINE implementation. The core data structures of SCHED_DEADLINE are defined, along with their initializers. Hooks for checking whether a task belongs to the new policy are also added where they are needed.

Adds a scheduling class, in sched/dl.c, and a new policy called SCHED_DEADLINE. It is an implementation of the Earliest Deadline First (EDF) scheduling algorithm, augmented with a mechanism (called Constant Bandwidth Server, CBS) that makes it possible to isolate the behaviour of tasks from each other.

The typical -deadline task is made up of a computation phase (instance) which is activated in a periodic or sporadic fashion. The expected (maximum) duration of such a computation is called the task's runtime; the time interval within which each instance must be completed is called the task's relative deadline. The task's absolute deadline is dynamically calculated as the time instant at which the task (better, an instance) activates plus the relative deadline.

The EDF algorithm selects the task with the smallest absolute deadline as the one to be executed first, while the CBS ensures that each task runs for at most its runtime every (relative) deadline length time interval, avoiding any interference between different tasks (bandwidth isolation). Thanks to this feature, tasks that do not strictly comply with the computational model sketched above can also effectively use the new policy.

To summarize, this patch:
 - introduces the data structures, constants and symbols needed;
 - implements the core logic of the scheduling algorithm in the new
   scheduling class file;
 - provides all the glue code between the new scheduling class and the
   core scheduler and refines the interactions between sched/dl and the
   other existing scheduling classes.

Signed-off-by: Dario Faggioli <raistlin@linux.it>
Signed-off-by: Michael Trimarchi <michael@amarulasolutions.com>
Signed-off-by: Fabio Checconi <fchecconi@gmail.com>
Signed-off-by: Juri Lelli <juri.lelli@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1383831828-15501-4-git-send-email-juri.lelli@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
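As a quick illustration of the task model described above (a minimal userspace sketch, not part of the patch; all names below are made up for the example): each instance's absolute deadline is its activation time plus the task's relative deadline, EDF runs the instance with the earliest absolute deadline, and CBS caps that instance at its runtime per deadline interval.

#include <stdio.h>

struct dl_instance {
	const char *name;
	unsigned long long activation;	 /* instance activation time (us) */
	unsigned long long rel_deadline; /* relative deadline (us) */
	unsigned long long runtime;	 /* expected max runtime / CBS budget (us) */
};

/* absolute deadline = activation time + relative deadline */
static unsigned long long abs_deadline(const struct dl_instance *t)
{
	return t->activation + t->rel_deadline;
}

/* EDF: pick the ready instance with the earliest absolute deadline */
static const struct dl_instance *edf_pick(const struct dl_instance *t, int n)
{
	const struct dl_instance *best = &t[0];

	for (int i = 1; i < n; i++)
		if (abs_deadline(&t[i]) < abs_deadline(best))
			best = &t[i];
	return best;
}

int main(void)
{
	struct dl_instance tasks[] = {
		{ "A", 0,     30000, 10000 },	/* due at t = 30000 us */
		{ "B", 5000,  20000,  5000 },	/* due at t = 25000 us */
	};
	const struct dl_instance *next = edf_pick(tasks, 2);

	printf("EDF picks %s (absolute deadline %llu us)\n",
	       next->name, abs_deadline(next));
	printf("CBS lets %s run for at most %llu us before throttling it\n",
	       next->name, next->runtime);
	return 0;
}

With the values above, B is picked first because its absolute deadline (25000 us) is earlier than A's (30000 us), even though A activated first.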
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	26
1 file changed, 26 insertions, 0 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index df023db7721c..83eb5390f753 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/stop_machine.h>
@@ -91,11 +92,21 @@ static inline int rt_policy(int policy)
 	return policy == SCHED_FIFO || policy == SCHED_RR;
 }
 
+static inline int dl_policy(int policy)
+{
+	return policy == SCHED_DEADLINE;
+}
+
 static inline int task_has_rt_policy(struct task_struct *p)
 {
 	return rt_policy(p->policy);
 }
 
+static inline int task_has_dl_policy(struct task_struct *p)
+{
+	return dl_policy(p->policy);
+}
+
 /*
  * This is the priority-queue data structure of the RT scheduling class:
  */
@@ -367,6 +378,15 @@ struct rt_rq {
 #endif
 };
 
+/* Deadline class' related fields in a runqueue */
+struct dl_rq {
+	/* runqueue is an rbtree, ordered by deadline */
+	struct rb_root rb_root;
+	struct rb_node *rb_leftmost;
+
+	unsigned long dl_nr_running;
+};
+
 #ifdef CONFIG_SMP
 
 /*
@@ -435,6 +455,7 @@ struct rq {
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
+	struct dl_rq dl;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
@@ -991,6 +1012,7 @@ static const u32 prio_to_wmult[40] = {
 #else
 #define ENQUEUE_WAKING		0
 #endif
+#define ENQUEUE_REPLENISH	8
 
 #define DEQUEUE_SLEEP		1
 
@@ -1046,6 +1068,7 @@ struct sched_class {
 	for (class = sched_class_highest; class; class = class->next)
 
 extern const struct sched_class stop_sched_class;
+extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
 extern const struct sched_class fair_sched_class;
 extern const struct sched_class idle_sched_class;
@@ -1081,6 +1104,8 @@ extern void resched_cpu(int cpu);
 extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
 
+extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
+
 extern void update_idle_cpu_load(struct rq *this_rq);
 
 extern void init_task_runnable_average(struct task_struct *p);
@@ -1357,6 +1382,7 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
+extern void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq);
 
 extern void cfs_bandwidth_usage_inc(void);
 extern void cfs_bandwidth_usage_dec(void);
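For context, a task would request the new policy from userspace via the sched_setattr() syscall added by the parent commit (d50dde5a10f3). The following is only a hedged sketch of such a caller, not code from this patch: struct sched_attr is declared locally because libc does not wrap the syscall, and the fallback syscall number assumes x86-64.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sched.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

#ifndef __NR_sched_setattr
# ifdef __x86_64__
#  define __NR_sched_setattr	314	/* assumption: x86-64 syscall number */
# endif
#endif

/* Local copy of the ABI struct from the companion syscall patch. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE parameters, in ns */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10 ms of budget...  */
		.sched_deadline	= 30 * 1000 * 1000,	/* ...due within 30 ms */
		.sched_period	= 30 * 1000 * 1000,	/* ...every 30 ms      */
	};

	/* pid 0 == current task, flags == 0 */
	if (syscall(__NR_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	for (;;) {
		/* one instance of work, expected to fit within the runtime */
		/* do_work();  -- hypothetical workload */
		sched_yield();	/* instance done; give the CPU back */
	}
}

Runtime, deadline and period are expressed in nanoseconds, matching the sched_attr ABI of the parent commit.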