aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorJohn M. Calandrino <jmc@jupiter-cs.cs.unc.edu>2007-04-23 20:08:48 -0400
committerJohn M. Calandrino <jmc@jupiter-cs.cs.unc.edu>2007-04-23 20:08:48 -0400
commit7c071e2f42c301ebf73c9b8e63ad7ecd1d1a1d8e (patch)
tree6ec6c0c6a5a638cb704afe0a5d6bc524f19ed00f /include
parentf6f293bee0fd39c2ffe3cd945da961f1470ad52c (diff)
parent3695e6b8dd1eeb288419674df951a35dd69ade52 (diff)
Merge with git+ssh://cvs.cs.unc.edu/cvs/proj/litmus/repo/litmus
Bjoern's changes + mine. Still need to fill in some stubs and ensure that nothing broke.
Diffstat (limited to 'include')
-rw-r--r--include/asm-i386/unistd.h3
-rw-r--r--include/linux/edf_common.h30
-rw-r--r--include/linux/litmus.h31
-rw-r--r--include/linux/pfair_common.h40
-rw-r--r--include/linux/pfair_math.h77
-rw-r--r--include/linux/rt_param.h70
-rw-r--r--include/linux/sched_plugin.h30
7 files changed, 237 insertions, 44 deletions
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 98920efd2c..d6ed041f88 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -336,7 +336,8 @@
336#define __NR_reset_stat 326 336#define __NR_reset_stat 326
337#define __NR_sleep_next_period 327 337#define __NR_sleep_next_period 327
338#define __NR_scheduler_setup 328 338#define __NR_scheduler_setup 328
339/* 329-330 reserved for Bjoern... */ 339#define __NR_enter_np 329
340#define __NR_exit_np 330
340#define __NR_pi_sema_init 331 341#define __NR_pi_sema_init 331
341#define __NR_pi_down 332 342#define __NR_pi_down 332
342#define __NR_pi_up 333 343#define __NR_pi_up 333
diff --git a/include/linux/edf_common.h b/include/linux/edf_common.h
index 9c00bb1937..3b6a1de457 100644
--- a/include/linux/edf_common.h
+++ b/include/linux/edf_common.h
@@ -25,18 +25,44 @@ typedef struct _edf_domain {
25 (list_entry((edf)->ready_queue.next, struct task_struct, rt_list)) 25 (list_entry((edf)->ready_queue.next, struct task_struct, rt_list))
26 26
27void edf_domain_init(edf_domain_t *edf, edf_check_resched_needed_t f); 27void edf_domain_init(edf_domain_t *edf, edf_check_resched_needed_t f);
28void add_ready(edf_domain_t* edf, struct task_struct *new); 28
29int edf_higher_prio(struct task_struct* first,
30 struct task_struct* second);
31
32void __add_ready(edf_domain_t* edf, struct task_struct *new);
33void __add_release(edf_domain_t* edf, struct task_struct *task);
34
29struct task_struct* __take_ready(edf_domain_t* edf); 35struct task_struct* __take_ready(edf_domain_t* edf);
30void add_release(edf_domain_t* edf, struct task_struct *task); 36
31void try_release_pending(edf_domain_t* edf); 37void try_release_pending(edf_domain_t* edf);
32void __prepare_new_release(struct task_struct *t, jiffie_t start); 38void __prepare_new_release(struct task_struct *t, jiffie_t start);
33#define prepare_new_release(t) __prepare_new_release(t, jiffies) 39#define prepare_new_release(t) __prepare_new_release(t, jiffies)
34void prepare_for_next_period(struct task_struct *t); 40void prepare_for_next_period(struct task_struct *t);
35void prepare_new_releases(edf_domain_t *edf, jiffie_t start); 41void prepare_new_releases(edf_domain_t *edf, jiffie_t start);
42void __prepare_new_releases(edf_domain_t *edf, jiffie_t start);
36int preemption_needed(edf_domain_t* edf, struct task_struct *t); 43int preemption_needed(edf_domain_t* edf, struct task_struct *t);
37long edf_sleep_next_period(void); 44long edf_sleep_next_period(void);
38 45
39#define job_completed(t) (!is_be(t) && \ 46#define job_completed(t) (!is_be(t) && \
40 (t)->rt_param.times.exec_time == (t)->rt_param.basic_params.exec_cost) 47 (t)->rt_param.times.exec_time == (t)->rt_param.basic_params.exec_cost)
41 48
49static inline void add_ready(edf_domain_t* edf, struct task_struct *new)
50{
51 unsigned long flags;
52 /* first we need the write lock for edf_ready_queue */
53 write_lock_irqsave(&edf->ready_lock, flags);
54 __add_ready(edf, new);
55 write_unlock_irqrestore(&edf->ready_lock, flags);
56}
57
58static inline void add_release(edf_domain_t* edf, struct task_struct *task)
59{
60 unsigned long flags;
61	/* we need the spinlock protecting the release queue */
62 spin_lock_irqsave(&edf->release_lock, flags);
63 __add_release(edf, task);
64 spin_unlock_irqrestore(&edf->release_lock, flags);
65}
66
67
42#endif 68#endif
diff --git a/include/linux/litmus.h b/include/linux/litmus.h
index 94c2735422..c325164e93 100644
--- a/include/linux/litmus.h
+++ b/include/linux/litmus.h
@@ -10,22 +10,23 @@
10#include <linux/sched_trace.h> 10#include <linux/sched_trace.h>
11 11
12typedef enum { 12typedef enum {
13 SCHED_BEG = 0, 13 SCHED_BEG = 0,
14 SCHED_LINUX = 0, 14 SCHED_LINUX = 0,
15 SCHED_PFAIR = 1, 15 SCHED_PFAIR = 1,
16 SCHED_PFAIR_STAGGER = 2, 16 SCHED_PFAIR_STAGGER = 2,
17 SCHED_PART_EDF = 3, 17 SCHED_PART_EDF = 3,
18 SCHED_PART_EEVDF = 4, 18 SCHED_PART_EEVDF = 4,
19 SCHED_GLOBAL_EDF = 5, 19 SCHED_GLOBAL_EDF = 5,
20 SCHED_PFAIR_DESYNC = 6, 20 SCHED_PFAIR_DESYNC = 6,
21 SCHED_GLOBAL_EDF_NP = 7, 21 SCHED_GLOBAL_EDF_NP = 7,
22 SCHED_CUSTOM = 8, 22 SCHED_CUSTOM = 8,
23 SCHED_EDF_HSB = 9, 23 SCHED_EDF_HSB = 9,
24 SCHED_GSN_EDF = 10,
24 25
25 /* Add your scheduling policy here */ 26 /* Add your scheduling policy here */
26 27
27 SCHED_END = 9, 28 SCHED_END = 10,
28 SCHED_DEFAULT = 0, 29 SCHED_DEFAULT = 0,
29 SCHED_INVALID = -1, 30 SCHED_INVALID = -1,
30} spolicy; 31} spolicy;
31 32
@@ -62,6 +63,7 @@ typedef enum {
62#define PLUGIN_PFAIR_DESYNC "desync" 63#define PLUGIN_PFAIR_DESYNC "desync"
63#define PLUGIN_GLOBAL_EDF_NP "global_edf_np" 64#define PLUGIN_GLOBAL_EDF_NP "global_edf_np"
64#define PLUGIN_EDF_HSB "edf_hsb" 65#define PLUGIN_EDF_HSB "edf_hsb"
66#define PLUGIN_GSN_EDF "gsn_edf"
65 67
66/* Additional clone flags 68/* Additional clone flags
67 Indicates that the thread is to be used in 69 Indicates that the thread is to be used in
@@ -95,6 +97,7 @@ extern atomic_t rt_mode;
95extern spinlock_t litmus_task_set_lock; 97extern spinlock_t litmus_task_set_lock;
96 98
97 99
98#define TRACE(fmt, args...) sched_trace_log_message(fmt, ## args); 100#define TRACE(fmt, args...) \
101 sched_trace_log_message("%d: " fmt, raw_smp_processor_id(), ## args);
99 102
100#endif 103#endif
diff --git a/include/linux/pfair_common.h b/include/linux/pfair_common.h
new file mode 100644
index 0000000000..67e18c601a
--- /dev/null
+++ b/include/linux/pfair_common.h
@@ -0,0 +1,40 @@
1/* PFAIR common data structures and utility functions shared by all PFAIR
2 * based scheduler plugins
3 */
4
5#ifndef __UNC_PFAIR_COMMON_H__
6#define __UNC_PFAIR_COMMON_H__
7
8#include <linux/queuelock.h>
9#include <linux/cpumask.h>
10
11typedef struct _pfair_domain {
12 /* Global lock to protect the data structures */
13 queuelock_t pfair_lock;
14 /* runnable rt tasks are in here */
15 struct list_head ready_queue;
16
17 /* real-time tasks waiting for release are in here */
18 struct list_head release_queue;
19
20 /* CPU's in the domain */
21 cpumask_t domain_cpus;
22
23} pfair_domain_t;
24
25#define next_ready(pfair) \
26 (list_entry((pfair)->ready_queue.next, struct task_struct, rt_list))
27void pfair_domain_init(pfair_domain_t *pfair);
28void pfair_add_ready(pfair_domain_t* pfair, struct task_struct *new);
29struct task_struct* __pfair_take_ready(pfair_domain_t* pfair);
30void pfair_add_release(pfair_domain_t* pfair, struct task_struct *task);
31void pfair_try_release_pending(pfair_domain_t* pfair);
32void __pfair_prepare_new_release(struct task_struct *t, jiffie_t start);
33
34void pfair_prepare_next_job(struct task_struct *t);
35void pfair_prepare_next_subtask(struct task_struct *t);
36
37void pfair_prepare_new_releases(pfair_domain_t *pfair, jiffie_t start);
38
39#endif
40
diff --git a/include/linux/pfair_math.h b/include/linux/pfair_math.h
new file mode 100644
index 0000000000..dab1778f0b
--- /dev/null
+++ b/include/linux/pfair_math.h
@@ -0,0 +1,77 @@
1/* PFAIR Mathematical functions */
2#ifndef __UNC_PFAIR_MATH_H__
3#define __UNC_PFAIR_MATH_H__
4
5#include <linux/rt_param.h>
6#include <asm/div64.h>
7#include <linux/litmus.h>
8#include <linux/sched.h>
9
10/*
11* This file defines mathematical functions "ceiling", "floor",
12* and PFAIR specific functions for computing the release and
13* the deadline of a subtask, as well as tie breakers:
14* b-bit and group deadline.
15*/
16static inline quantum_t FLOOR(quantum_t a, unsigned long b)
17{
18 BUG_ON( b == 0);
19 do_div(a, b);
20 return a;
21}
22static inline quantum_t CEIL(quantum_t a, unsigned long b)
23{
24 quantum_t t = FLOOR(a, b);
25 return (quantum_t)((t * b == a) ? t : (t + 1));
26}
27
28
29/*
30* invariant - i-1=get_passed_quanta(t)
31*
32* release time of i-th subtask of j-th job is
33* r_{ij}+\lfloor (i-1)/wt(T) \rfloor
34* This operation should be robust to wrap-around
35* so we can compare the result with jiffies safely
36*/
37static inline quantum_t release_time(struct task_struct * t)
38{
39 quantum_t e = get_exec_cost(t);
40 quantum_t p = get_rt_period(t);
41 return FLOOR((get_passed_quanta(t)) * p, e);
42}
43/*
44* deadline time of i-th subtask of j-th job is
45* r_{ij}+\lceil i/wt(T) \rceil
46* This operation should be robust to wrap-around
47* so we can compare the result with jiffies safely
48*/
49static inline quantum_t pfair_deadline(struct task_struct * t)
50{
51 quantum_t e = get_exec_cost(t);
52 quantum_t p = get_rt_period(t);
53 return CEIL((get_passed_quanta(t) + 1) * p, e);
54}
55/* In PFAIR b-bit is defined as
56* \lceil i/wt(T) \rceil-\lfloor i/wt(T) \rfloor
57*/
58static inline int b_bit(struct task_struct *t)
59{
60 quantum_t e = get_exec_cost(t);
61 quantum_t p = get_rt_period(t);
62 return CEIL((get_passed_quanta(t) + 1) * p, e)-
63 FLOOR((get_passed_quanta(t) + 1) * p, e);
64}
65/*
66* Group deadline
67*/
68static inline quantum_t group_deadline(struct task_struct * t)
69{
70 quantum_t p = get_rt_period(t);
71 quantum_t e = get_exec_cost(t);
72 quantum_t stage1 = CEIL((get_passed_quanta(t) + 1) * p, e);
73 quantum_t stage2 = CEIL(stage1 * (p - e), p);
74 return CEIL(stage2 * p, p - e);
75}
76
77#endif /* __UNC_PFAIR_MATH_H__ */
diff --git a/include/linux/rt_param.h b/include/linux/rt_param.h
index b20f5b44a6..f2d6de639e 100644
--- a/include/linux/rt_param.h
+++ b/include/linux/rt_param.h
@@ -23,6 +23,30 @@ typedef struct rt_param {
23 task_class_t class; 23 task_class_t class;
24} rt_param_t; 24} rt_param_t;
25 25
26typedef struct {
27 /* when will this task be release the next time? */
28 jiffie_t release;
29 /* time instant the last job was released */
30 jiffie_t last_release;
31 /* what is the current deadline? */
32 jiffie_t deadline;
33 /* b-bit tie breaker for PFAIR, it is ignored in EDF */
34 int b_bit;
35 /* group deadline tie breaker, it is ignored in EDF */
36 jiffie_t group_deadline;
37 /* how long has this task executed so far?
38 * In case of capacity sharing a job completion cannot be
39 * detected by checking time_slice == 0 as the job may have
40 * executed while using another capacity. Use this counter
41 * to keep track of the time spent on a CPU by a job.
42 *
43 * In other words: The number of consumed quanta since the
44 * last job release.
45 */
46 unsigned int exec_time;
47} in_times_t;
48
49
26/* RT task parameters for scheduling extensions 50/* RT task parameters for scheduling extensions
27 * These parameters are inherited during clone and therefore must 51 * These parameters are inherited during clone and therefore must
28 * be explicitly set up before the task set is launched. 52 * be explicitly set up before the task set is launched.
@@ -46,25 +70,8 @@ typedef struct task_rt_param {
46 * sem is being held. 70 * sem is being held.
47 */ 71 */
48 struct task_struct **inh_task; 72 struct task_struct **inh_task;
49 struct { 73
50 /* when will this task be release the next time? */ 74 unsigned int is_non_preemptable;
51 jiffie_t release;
52 /* time instant the last job was released */
53 jiffie_t last_release;
54 /* what is the current deadline? */
55 jiffie_t deadline;
56
57 /* how long has this task executed so far?
58 * In case of capacity sharing a job completion cannot be
59 * detected by checking time_slice == 0 as the job may have
60 * executed while using another capacity. Use this counter
61 * to keep track of the time spent on a CPU by a job.
62 *
63 * In other words: The number of consumed quanta since the
64 * last job release.
65 */
66 unsigned int exec_time;
67 } times;
68 75
69 /* put information for feedback control stuff and 76 /* put information for feedback control stuff and
70 * information about the performance of the task here 77 * information about the performance of the task here
@@ -73,6 +80,9 @@ typedef struct task_rt_param {
73 /* How many non-tardy jobs since the last tardy job? */ 80 /* How many non-tardy jobs since the last tardy job? */
74 unsigned int nontardy_jobs_ctr; 81 unsigned int nontardy_jobs_ctr;
75 } stats; 82 } stats;
83
84 in_times_t times;
85 in_times_t backup;
76 86
77 /* for zone-based locking: flag indicating whether task 87 /* for zone-based locking: flag indicating whether task
78 * is waiting for the end of the blocking zone 88 * is waiting for the end of the blocking zone
@@ -86,12 +96,19 @@ typedef struct task_rt_param {
86 * data structures. 96 * data structures.
87 */ 97 */
88 int litmus_controlled; 98 int litmus_controlled;
99
100 /* This field can be used by plugins to store where the task
101 * is currently scheduled. It is the responsibility of the
102 * plugin to avoid race conditions.
103 */
104 int scheduled_on;
89} task_rt_param_t; 105} task_rt_param_t;
90 106
91/* Possible RT flags */ 107/* Possible RT flags */
92#define RT_F_RUNNING 0x00000000 108#define RT_F_RUNNING 0x00000000
93#define RT_F_SLEEP 0x00000001 109#define RT_F_SLEEP 0x00000001
94#define RT_F_EXP_QUANTA 0x00000002 110#define RT_F_EXP_QUANTA 0x00000002
111#define RT_F_NON_PREEMTABLE 0x00000004
95 112
96 113
97/* Realtime utility macros */ 114/* Realtime utility macros */
@@ -114,6 +131,7 @@ typedef struct task_rt_param {
114 ((t)->rt_param.basic_params.class == RT_CLASS_SOFT) 131 ((t)->rt_param.basic_params.class == RT_CLASS_SOFT)
115#define is_be(t) \ 132#define is_be(t) \
116 ((t)->rt_param.basic_params.class == RT_CLASS_BEST_EFFORT) 133 ((t)->rt_param.basic_params.class == RT_CLASS_BEST_EFFORT)
134#define is_np(t) ((t)->rt_param.is_non_preemptable)
117 135
118#define clear_rt_params(t) \ 136#define clear_rt_params(t) \
119memset(&(t)->rt_param,0, sizeof(struct task_rt_param)) 137memset(&(t)->rt_param,0, sizeof(struct task_rt_param))
@@ -121,6 +139,9 @@ memset(&(t)->rt_param,0, sizeof(struct task_rt_param))
121#define get_last_release_time(t) ((t)->rt_param.times.last_release) 139#define get_last_release_time(t) ((t)->rt_param.times.last_release)
122#define set_last_release_time(t,r) ((t)->rt_param.times.last_release=(r)) 140#define set_last_release_time(t,r) ((t)->rt_param.times.last_release=(r))
123 141
142#define get_release(t) ((t)->rt_param.times.release)
143#define set_release(t,r) ((t)->rt_param.times.release=(r))
144
124#define is_running(t) ((t)->state == TASK_RUNNING) 145#define is_running(t) ((t)->state == TASK_RUNNING)
125#define is_released(t) (time_before_eq((t)->rt_param.times.release, jiffies)) 146#define is_released(t) (time_before_eq((t)->rt_param.times.release, jiffies))
126#define is_tardy(t) (time_before_eq((t)->rt_param.times.deadline, jiffies)) 147#define is_tardy(t) (time_before_eq((t)->rt_param.times.deadline, jiffies))
@@ -137,6 +158,11 @@ memset(&(t)->rt_param,0, sizeof(struct task_rt_param))
137 (a)->rt_param.times.release,\ 158 (a)->rt_param.times.release,\
138 (b)->rt_param.times.release)) 159 (b)->rt_param.times.release))
139 160
161#define backup_times(t) do { (t)->rt_param.backup=(t)->rt_param.times; \
162 } while(0);
163#define restore_times(t) do { (t)->rt_param.times=(t)->rt_param.backup; \
164 } while(0);
165
140/* struct for semaphore with priority inheritance */ 166/* struct for semaphore with priority inheritance */
141struct pi_semaphore { 167struct pi_semaphore {
142 atomic_t count; 168 atomic_t count;
diff --git a/include/linux/sched_plugin.h b/include/linux/sched_plugin.h
index 59e9e2f353..ad6cee3b30 100644
--- a/include/linux/sched_plugin.h
+++ b/include/linux/sched_plugin.h
@@ -38,9 +38,8 @@ typedef reschedule_check_t (*scheduler_tick_t) (void);
38typedef int (*schedule_t) (struct task_struct * prev, 38typedef int (*schedule_t) (struct task_struct * prev,
39 struct task_struct ** next, 39 struct task_struct ** next,
40 runqueue_t * rq); 40 runqueue_t * rq);
41/* clean up after the task switch has occurred 41/* Clean up after the task switch has occurred.
42 * it is guaranteed that the function is only called if prev is a real-time 42 * This function is called after every (even non-rt) task switch.
43 * task
44 */ 43 */
45typedef void (*finish_switch_t)(struct task_struct *prev); 44typedef void (*finish_switch_t)(struct task_struct *prev);
46 45
@@ -57,6 +56,19 @@ typedef void (*task_blocks_t) (struct task_struct *task);
57/* called when a real-time task exits. Free any allocated resources */ 56/* called when a real-time task exits. Free any allocated resources */
58typedef long (*tear_down_t) (struct task_struct *); 57typedef long (*tear_down_t) (struct task_struct *);
59 58
59/* called when a real-time task wants to enter a non-preemptable section */
60typedef long (*enter_np_t) (struct task_struct *);
61/* called when a real-time task wants to leave a non-preemptable section */
62typedef long (*exit_np_t) (struct task_struct *);
63
64
65typedef long (*inherit_priority_t) (struct task_struct *offspring,
66 struct task_struct *deceased);
67
68typedef long (*return_priority_t) (struct task_struct *offspring,
69 struct task_struct *deceased);
70
71
60 72
61/********************* sys call backends ********************/ 73/********************* sys call backends ********************/
62/* This function causes the caller to sleep until the next release */ 74/* This function causes the caller to sleep until the next release */
@@ -82,14 +94,22 @@ struct sched_plugin {
82 finish_switch_t finish_switch; 94 finish_switch_t finish_switch;
83 95
84 /* syscall backend */ 96 /* syscall backend */
85 sleep_next_period_t sleep_next_period; 97 sleep_next_period_t sleep_next_period;
86 scheduler_setup_t scheduler_setup; 98 scheduler_setup_t scheduler_setup;
87 99
88 /* task state changes */ 100 /* task state changes */
89 prepare_task_t prepare_task; 101 prepare_task_t prepare_task;
90 wake_up_task_t wake_up_task; 102 wake_up_task_t wake_up_task;
91 task_blocks_t task_blocks; 103 task_blocks_t task_blocks;
92 tear_down_t tear_down; 104 tear_down_t tear_down;
105
106 /* non-preemptable sections */
107 enter_np_t enter_np;
108 exit_np_t exit_np;
109
110 /* priority inheritance */
111 inherit_priority_t inherit_priority;
112 return_priority_t return_priority;
93} __attribute__ ((__aligned__(SMP_CACHE_BYTES))); 113} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
94 114
95typedef struct sched_plugin sched_plugin_t; 115typedef struct sched_plugin sched_plugin_t;