author    | Jonathan Herman <hermanjl@cs.unc.edu> | 2012-10-03 11:01:30 -0400
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2012-10-03 11:01:30 -0400
commit    | 7e7bb31d1215300b6e60661a7ead02236ea1adda (patch)
tree      | 403522bbea6a3ed758ca435f952cabd30c4ae771
parent    | 6e515de45f1ad108a8ae08d0be1b6f7bf2a707b6 (diff)
Moved user-space state of tasks into new rt_param.user_job.
Both sched_color and sched_mc assumed separate kernel and userspace views of
job state: the kernel view is used for scheduling, while the userspace view is
used for statistics (tardiness, etc.). This commit merges the two plugins'
approaches into common code.
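The core of the change is that struct rt_param now carries two copies of the per-job state: one the scheduler manipulates and one that tracks what userspace believes. A rough sketch of the resulting layout, reconstructed from the rt_param.h and litmus.h hunks below (field list abridged; unrelated members omitted):

```c
/* Sketch only: reconstructed from the rt_param.h and litmus.h hunks below,
 * not a verbatim copy of the headers.  lt_t is LITMUS^RT's time type. */
typedef unsigned long long lt_t;

struct rt_job {
	lt_t release;            /* absolute release time       */
	lt_t deadline;           /* absolute deadline           */
	lt_t exec_time;          /* budget consumed in this job */
	unsigned int job_no;     /* job sequence number         */
	/* ... (lateness etc. omitted) */
};

struct rt_param {
	/* struct rt_task task_params;  user controlled parameters (unchanged) */
	struct rt_job job_params;   /* kernelspace view of the job */
	struct rt_job user_job;     /* userspace view of the job   */
	/* ... */
};

/* Accessors (tsk_rt() already existed; the get_user_*() ones are new): */
#define tsk_rt(t)            (&(t)->rt_param)
#define get_user_job(t)      (tsk_rt(t)->user_job.job_no)
#define get_user_release(t)  (tsk_rt(t)->user_job.release)
#define get_user_deadline(t) (tsk_rt(t)->user_job.deadline)
```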
-rw-r--r-- | include/litmus/budget.h | 3
-rw-r--r-- | include/litmus/ce_domain.h | 1
-rw-r--r-- | include/litmus/debug_trace.h | 2
-rw-r--r-- | include/litmus/domain.h | 6
-rw-r--r-- | include/litmus/jobs.h | 1
-rw-r--r-- | include/litmus/litmus.h | 7
-rw-r--r-- | include/litmus/rt_domain.h | 1
-rw-r--r-- | include/litmus/rt_param.h | 6
-rw-r--r-- | include/litmus/sched_mc.h | 20
-rw-r--r-- | include/litmus/sched_trace.h | 5
-rw-r--r-- | include/trace/events/litmus.h | 16
-rw-r--r-- | litmus/budget.c | 24
-rw-r--r-- | litmus/ce_domain.c | 4
-rw-r--r-- | litmus/dgl.c | 2
-rw-r--r-- | litmus/domain.c | 2
-rw-r--r-- | litmus/jobs.c | 59
-rw-r--r-- | litmus/rt_domain.c | 4
-rw-r--r-- | litmus/sched_color.c | 3
-rw-r--r-- | litmus/sched_mc.c | 147
-rw-r--r-- | litmus/sched_mc_ce.c | 38
-rw-r--r-- | litmus/sched_task_trace.c | 6
21 files changed, 164 insertions, 193 deletions
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index d1c73f5cf73e..79fec2257ad2 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -26,8 +26,7 @@ void cancel_enforcement_timer(struct enforcement_timer* et); | |||
26 | * itself. This happens when budget enforcement has caused a task to be | 26 | * itself. This happens when budget enforcement has caused a task to be |
27 | * booted off until the next period. | 27 | * booted off until the next period. |
28 | */ | 28 | */ |
29 | #define behind_server(t)\ | 29 | |
30 | (lt_before((t)->rt_param.job_params.real_release, get_release(t))) | ||
31 | 30 | ||
32 | /** | 31 | /** |
33 | * server_release() - Prepare the task server parameters for the next period. | 32 | * server_release() - Prepare the task server parameters for the next period. |
diff --git a/include/litmus/ce_domain.h b/include/litmus/ce_domain.h
index 5d5fdf7d1efc..a088547e95d3 100644
--- a/include/litmus/ce_domain.h
+++ b/include/litmus/ce_domain.h
@@ -19,7 +19,6 @@ void ce_domain_init(domain_t*, | |||
19 | requeue_t, | 19 | requeue_t, |
20 | peek_ready_t, | 20 | peek_ready_t, |
21 | take_ready_t, | 21 | take_ready_t, |
22 | preempt_needed_t, | ||
23 | task_prio_t, | 22 | task_prio_t, |
24 | struct ce_dom_data*, | 23 | struct ce_dom_data*, |
25 | const int, | 24 | const int, |
diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h
index 48d086d5a44c..5ca88dec31a5 100644
--- a/include/litmus/debug_trace.h
+++ b/include/litmus/debug_trace.h
@@ -29,7 +29,7 @@ extern atomic_t __log_seq_no; | |||
29 | 29 | ||
30 | #define TRACE_TASK(t, fmt, args...) \ | 30 | #define TRACE_TASK(t, fmt, args...) \ |
31 | TRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ | 31 | TRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ |
32 | (t)->rt_param.job_params.job_no, ##args) | 32 | get_user_job(t), ##args) |
33 | 33 | ||
34 | #define TRACE_CUR(fmt, args...) \ | 34 | #define TRACE_CUR(fmt, args...) \ |
35 | TRACE_TASK(current, fmt, ## args) | 35 | TRACE_TASK(current, fmt, ## args) |
diff --git a/include/litmus/domain.h b/include/litmus/domain.h
index d16ed1872a52..eed7c485004e 100644
--- a/include/litmus/domain.h
+++ b/include/litmus/domain.h
@@ -13,7 +13,6 @@ typedef void (*requeue_t)(struct domain*, struct task_struct*); | |||
13 | typedef void (*remove_t)(struct domain*, struct task_struct*); | 13 | typedef void (*remove_t)(struct domain*, struct task_struct*); |
14 | typedef struct task_struct* (*peek_ready_t)(struct domain*); | 14 | typedef struct task_struct* (*peek_ready_t)(struct domain*); |
15 | typedef struct task_struct* (*take_ready_t)(struct domain*); | 15 | typedef struct task_struct* (*take_ready_t)(struct domain*); |
16 | typedef int (*preempt_needed_t)(struct domain*, struct task_struct*); | ||
17 | typedef int (*task_prio_t)(struct task_struct*, struct task_struct*); | 16 | typedef int (*task_prio_t)(struct task_struct*, struct task_struct*); |
18 | 17 | ||
19 | typedef struct domain { | 18 | typedef struct domain { |
@@ -30,10 +29,6 @@ typedef struct domain { | |||
30 | peek_ready_t peek_ready; | 29 | peek_ready_t peek_ready; |
31 | /* remove and return next ready task */ | 30 | /* remove and return next ready task */ |
32 | take_ready_t take_ready; | 31 | take_ready_t take_ready; |
33 | /* return true if the domain has a task which should preempt the | ||
34 | * task given | ||
35 | */ | ||
36 | preempt_needed_t preempt_needed; | ||
37 | /* for tasks within this domain, returns true if the first has | 32 | /* for tasks within this domain, returns true if the first has |
38 | * has a higher priority than the second | 33 | * has a higher priority than the second |
39 | */ | 34 | */ |
@@ -45,6 +40,5 @@ void domain_init(domain_t *dom, | |||
45 | requeue_t requeue, | 40 | requeue_t requeue, |
46 | peek_ready_t peek_ready, | 41 | peek_ready_t peek_ready, |
47 | take_ready_t take_ready, | 42 | take_ready_t take_ready, |
48 | preempt_needed_t preempt_needed, | ||
49 | task_prio_t priority); | 43 | task_prio_t priority); |
50 | #endif | 44 | #endif |
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
index 9bd361ef3943..b63d3e695b69 100644
--- a/include/litmus/jobs.h
+++ b/include/litmus/jobs.h
@@ -4,6 +4,7 @@ | |||
4 | void prepare_for_next_period(struct task_struct *t); | 4 | void prepare_for_next_period(struct task_struct *t); |
5 | void release_at(struct task_struct *t, lt_t start); | 5 | void release_at(struct task_struct *t, lt_t start); |
6 | long complete_job(void); | 6 | long complete_job(void); |
7 | void setup_user_release(struct task_struct *t, lt_t release); | ||
7 | 8 | ||
8 | #endif | 9 | #endif |
9 | 10 | ||
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index c3b91fe8115c..2cd68ba3b752 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -44,7 +44,9 @@ void litmus_exit_task(struct task_struct *tsk); | |||
44 | 44 | ||
45 | #define tsk_rt(t) (&(t)->rt_param) | 45 | #define tsk_rt(t) (&(t)->rt_param) |
46 | 46 | ||
47 | #define get_server_job(t) (tsk_rt(t)->job_params.fake_job_no) | 47 | #define get_user_job(t) (tsk_rt(t)->user_job.job_no) |
48 | #define get_user_release(t) (tsk_rt(t)->user_job.release) | ||
49 | #define get_user_deadline(t) (tsk_rt(t)->user_job.deadline) | ||
48 | 50 | ||
49 | /* Realtime utility macros */ | 51 | /* Realtime utility macros */ |
50 | #define get_rt_flags(t) (tsk_rt(t)->flags) | 52 | #define get_rt_flags(t) (tsk_rt(t)->flags) |
@@ -73,6 +75,9 @@ void litmus_exit_task(struct task_struct *tsk); | |||
73 | #define is_server(t) (tsk_rt(t)->is_server) | 75 | #define is_server(t) (tsk_rt(t)->is_server) |
74 | #define get_task_server(task) (tsk_rt(task)->server) | 76 | #define get_task_server(task) (tsk_rt(task)->server) |
75 | 77 | ||
78 | #define userspace_behind(t)\ | ||
79 | (lt_before(get_user_release(t), get_release(t))) | ||
80 | |||
76 | #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted) | 81 | #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted) |
77 | #define get_boost_start(t) (tsk_rt(t)->boost_start_time) | 82 | #define get_boost_start(t) (tsk_rt(t)->boost_start_time) |
78 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) | 83 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) |
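Since the commit message gives tardiness tracking as the reason the userspace view exists, a minimal, hypothetical helper (not part of the patch) shows how the accessors added above would be used for such a statistic:

```c
/* Hypothetical helper, for illustration only: tardiness of the job as
 * userspace sees it, measured against the userspace deadline rather than
 * the kernel/server deadline.  lt_before(a, b) is LITMUS^RT's (a) < (b)
 * comparison on lt_t values, the same primitive used by userspace_behind()
 * above. */
static inline lt_t user_job_tardiness(struct task_struct *t, lt_t now)
{
	return lt_before(get_user_deadline(t), now)
		? now - get_user_deadline(t)
		: 0;
}
```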
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index 03826228dbd9..cba96e25f39d 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -97,7 +97,6 @@ void pd_domain_init(domain_t *dom, | |||
97 | bheap_prio_t order, | 97 | bheap_prio_t order, |
98 | check_resched_needed_t check, | 98 | check_resched_needed_t check, |
99 | release_jobs_t release, | 99 | release_jobs_t release, |
100 | preempt_needed_t preempt_needed, | ||
101 | task_prio_t priority); | 100 | task_prio_t priority); |
102 | 101 | ||
103 | void __add_ready(rt_domain_t* rt, struct task_struct *new); | 102 | void __add_ready(rt_domain_t* rt, struct task_struct *new); |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index a8c82eed5562..d63821d9467a 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -199,8 +199,10 @@ struct rt_param { | |||
199 | /* user controlled parameters */ | 199 | /* user controlled parameters */ |
200 | struct rt_task task_params; | 200 | struct rt_task task_params; |
201 | 201 | ||
202 | /* timing parameters */ | 202 | /* kernelspace view of the job */ |
203 | struct rt_job job_params; | 203 | struct rt_job job_params; |
204 | /* userspace view of the job */ | ||
205 | struct rt_job user_job; | ||
204 | 206 | ||
205 | /* task representing the current "inherited" task | 207 | /* task representing the current "inherited" task |
206 | * priority, assigned by inherit_priority and | 208 | * priority, assigned by inherit_priority and |
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 1d491ce6a31a..f0526e0d5589 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -16,11 +16,6 @@ struct mc_task { | |||
16 | int lvl_a_eligible; | 16 | int lvl_a_eligible; |
17 | }; | 17 | }; |
18 | 18 | ||
19 | struct mc_job { | ||
20 | int is_ghost:1; | ||
21 | lt_t ghost_budget; | ||
22 | }; | ||
23 | |||
24 | #ifdef __KERNEL__ | 19 | #ifdef __KERNEL__ |
25 | /* | 20 | /* |
26 | * These are used only in the kernel. Userspace programs like RTSpin won't see | 21 | * These are used only in the kernel. Userspace programs like RTSpin won't see |
@@ -28,19 +23,19 @@ struct mc_job { | |||
28 | */ | 23 | */ |
29 | struct mc_data { | 24 | struct mc_data { |
30 | struct mc_task mc_task; | 25 | struct mc_task mc_task; |
31 | struct mc_job mc_job; | ||
32 | }; | 26 | }; |
33 | 27 | ||
34 | #define tsk_mc_data(t) (tsk_rt(t)->mc_data) | 28 | #define tsk_mc_data(t) (tsk_rt(t)->mc_data) |
35 | #define tsk_mc_crit(t) (tsk_mc_data(t) ? tsk_mc_data(t)->mc_task.crit : CRIT_LEVEL_C) | 29 | #define tsk_mc_crit(t) (tsk_mc_data(t) ? tsk_mc_data(t)->mc_task.crit : CRIT_LEVEL_C) |
36 | #define is_ghost(t) (tsk_mc_data(t)->mc_job.is_ghost) | 30 | #define is_ghost(t) (get_rt_job(t) < get_user_job(t)) |
37 | 31 | ||
38 | #define TS "(%s/%d:%d:%s)" | 32 | #define TS "(%s/%d:%d:%d:%s)" |
39 | #define TA(t) (t) ? tsk_mc_data(t) ? is_ghost(t) ? "ghost" : t->comm \ | 33 | #define TA(t) (t) ? tsk_mc_data(t) ? is_ghost(t) ? "ghost" : t->comm \ |
40 | : t->comm : "NULL", \ | 34 | : t->comm : "NULL", \ |
41 | (t) ? t->pid : 1, \ | 35 | (t) ? t->pid : 1, \ |
42 | (t) ? t->rt_param.job_params.job_no : 1, \ | 36 | (t) ? get_rt_job(t) : 1, \ |
43 | (t && get_task_domain(t)) ? get_task_domain(t)->name : "" | 37 | (t) ? get_user_job(t) : 1, \ |
38 | (t && get_task_domain(t)) ? get_task_domain(t)->name : "" | ||
44 | #define STRACE(fmt, args...) \ | 39 | #define STRACE(fmt, args...) \ |
45 | sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \ | 40 | sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \ |
46 | TRACE_ARGS, ## args) | 41 | TRACE_ARGS, ## args) |
@@ -128,6 +123,7 @@ lt_t mc_ce_timer_callback_common(domain_t*); | |||
128 | void mc_ce_release_at_common(struct task_struct*, lt_t); | 123 | void mc_ce_release_at_common(struct task_struct*, lt_t); |
129 | long mc_ce_activate_plugin_common(void); | 124 | long mc_ce_activate_plugin_common(void); |
130 | long mc_ce_deactivate_plugin_common(void); | 125 | long mc_ce_deactivate_plugin_common(void); |
126 | void mc_ce_job_completion(struct domain *dom, struct task_struct *ts); | ||
131 | 127 | ||
132 | #endif /* __KERNEL__ */ | 128 | #endif /* __KERNEL__ */ |
133 | 129 | ||
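With struct mc_job removed above, a task's ghost status and its remaining ghost budget are no longer stored explicitly; both fall out of the two job views. A sketch of the derived state, assuming (as elsewhere in the tree) that get_rt_job() reads the kernel job number and budget_remaining() returns the unconsumed budget of job_params:

```c
/* Illustrative only; is_ghost() above already encodes the first helper. */
static inline int mc_is_ghost(struct task_struct *t)
{
	/* Userspace has already completed this job (its job number ran
	 * ahead), but the kernel's server job is still current. */
	return get_rt_job(t) < get_user_job(t);
}

static inline lt_t mc_ghost_budget_left(struct task_struct *t)
{
	/* What mc_job.ghost_budget used to store explicitly: the budget the
	 * server may still consume before its next release. */
	return mc_is_ghost(t) ? budget_remaining(t) : 0;
}
```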
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 0e050ac3748c..6fb923bc5241 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -337,9 +337,10 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id, | |||
337 | trace_litmus_server_switch_away(sid, job, tid, tjob); \ | 337 | trace_litmus_server_switch_away(sid, job, tid, tjob); \ |
338 | } while (0) | 338 | } while (0) |
339 | 339 | ||
340 | #define sched_trace_server_release(sid, job, rel, dead) \ | 340 | #define sched_trace_server_release(sid, job, job_params) \ |
341 | do { \ | 341 | do { \ |
342 | trace_litmus_server_release(sid, job, rel, dead); \ | 342 | trace_litmus_server_release(sid, job, (job_params).release, \ |
343 | (job_params).deadline); \ | ||
343 | } while (0) | 344 | } while (0) |
344 | 345 | ||
345 | #define sched_trace_server_completion(sid, job) \ | 346 | #define sched_trace_server_completion(sid, job) \ |
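Callers of sched_trace_server_release() now pass the whole kernel-view rt_job instead of separate release and deadline arguments; the macro extracts the two fields itself. Usage as it appears at the updated call sites:

```c
/* From the call sites in litmus/jobs.c and litmus/sched_color.c:
 * the third argument is the kernel-view struct, not a release time. */
sched_trace_server_release(-t->pid, get_rt_job(t), tsk_rt(t)->job_params);
```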
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
index 474aa129c233..58ffdcd4c546 100644
--- a/include/trace/events/litmus.h
+++ b/include/trace/events/litmus.h
@@ -29,7 +29,7 @@ TRACE_EVENT(litmus_task_param, | |||
29 | 29 | ||
30 | TP_fast_assign( | 30 | TP_fast_assign( |
31 | __entry->pid = t ? t->pid : 0; | 31 | __entry->pid = t ? t->pid : 0; |
32 | __entry->job = t ? t->rt_param.job_params.job_no : 0; | 32 | __entry->job = t ? get_user_job(t) : 0, |
33 | __entry->wcet = get_exec_cost(t); | 33 | __entry->wcet = get_exec_cost(t); |
34 | __entry->period = get_rt_period(t); | 34 | __entry->period = get_rt_period(t); |
35 | __entry->phase = get_rt_phase(t); | 35 | __entry->phase = get_rt_phase(t); |
@@ -59,9 +59,9 @@ TRACE_EVENT(litmus_task_release, | |||
59 | 59 | ||
60 | TP_fast_assign( | 60 | TP_fast_assign( |
61 | __entry->pid = t ? t->pid : 0; | 61 | __entry->pid = t ? t->pid : 0; |
62 | __entry->job = t ? t->rt_param.job_params.job_no : 0; | 62 | __entry->job = t ? get_user_job(t) : 0; |
63 | __entry->release = get_release(t); | 63 | __entry->release = get_user_release(t); |
64 | __entry->deadline = get_deadline(t); | 64 | __entry->deadline = get_user_deadline(t); |
65 | ), | 65 | ), |
66 | 66 | ||
67 | TP_printk("release(job(%u, %u)): %Lu\ndeadline(job(%u, %u)): %Lu\n", | 67 | TP_printk("release(job(%u, %u)): %Lu\ndeadline(job(%u, %u)): %Lu\n", |
@@ -86,7 +86,7 @@ TRACE_EVENT(litmus_switch_to, | |||
86 | 86 | ||
87 | TP_fast_assign( | 87 | TP_fast_assign( |
88 | __entry->pid = is_realtime(t) ? t->pid : 0; | 88 | __entry->pid = is_realtime(t) ? t->pid : 0; |
89 | __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0; | 89 | __entry->job = is_realtime(t) ? get_user_job(t) : 0; |
90 | __entry->exec_time = get_exec_time(t); | 90 | __entry->exec_time = get_exec_time(t); |
91 | ), | 91 | ), |
92 | 92 | ||
@@ -111,7 +111,7 @@ TRACE_EVENT(litmus_switch_away, | |||
111 | 111 | ||
112 | TP_fast_assign( | 112 | TP_fast_assign( |
113 | __entry->pid = is_realtime(t) ? t->pid : 0; | 113 | __entry->pid = is_realtime(t) ? t->pid : 0; |
114 | __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0; | 114 | __entry->job = is_realtime(t) ? get_user_job(t) : 0; |
115 | __entry->exec_time = get_exec_time(t); | 115 | __entry->exec_time = get_exec_time(t); |
116 | ), | 116 | ), |
117 | 117 | ||
@@ -136,7 +136,7 @@ TRACE_EVENT(litmus_task_completion, | |||
136 | 136 | ||
137 | TP_fast_assign( | 137 | TP_fast_assign( |
138 | __entry->pid = t ? t->pid : 0; | 138 | __entry->pid = t ? t->pid : 0; |
139 | __entry->job = t ? t->rt_param.job_params.job_no : 0; | 139 | __entry->job = t ? get_user_job(t) : 0; |
140 | __entry->forced = forced; | 140 | __entry->forced = forced; |
141 | ), | 141 | ), |
142 | 142 | ||
@@ -249,7 +249,7 @@ TRACE_EVENT(litmus_task_resume, | |||
249 | 249 | ||
250 | TP_fast_assign( | 250 | TP_fast_assign( |
251 | __entry->pid = t ? t->pid : 0; | 251 | __entry->pid = t ? t->pid : 0; |
252 | __entry->job = t ? t->rt_param.job_params.job_no : 0; | 252 | __entry->job = t ? get_user_job(t) : 0; |
253 | __entry->lid = lid; | 253 | __entry->lid = lid; |
254 | ), | 254 | ), |
255 | 255 | ||
diff --git a/litmus/budget.c b/litmus/budget.c
index f7505b0f86e5..b1c0a4b84c02 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -109,28 +109,4 @@ static int __init init_budget_enforcement(void) | |||
109 | return 0; | 109 | return 0; |
110 | } | 110 | } |
111 | 111 | ||
112 | void task_release(struct task_struct *t) | ||
113 | { | ||
114 | /* Also wrong */ | ||
115 | t->rt_param.job_params.real_release = t->rt_param.job_params.real_deadline; | ||
116 | t->rt_param.job_params.real_deadline += get_rt_period(t); | ||
117 | t->rt_param.job_params.job_no++; | ||
118 | sched_trace_task_release(t); | ||
119 | } | ||
120 | |||
121 | void server_release(struct task_struct *t) | ||
122 | { | ||
123 | t->rt_param.job_params.exec_time = 0; | ||
124 | t->rt_param.job_params.release = t->rt_param.job_params.deadline; | ||
125 | t->rt_param.job_params.deadline += get_rt_period(t); | ||
126 | t->rt_param.job_params.fake_job_no++; | ||
127 | |||
128 | /* don't confuse linux */ | ||
129 | t->rt.time_slice = 1; | ||
130 | |||
131 | sched_trace_server_release(-t->pid, get_server_job(t), | ||
132 | t->rt_param.job_params.release, | ||
133 | t->rt_param.job_params.deadline); | ||
134 | } | ||
135 | |||
136 | module_init(init_budget_enforcement); | 112 | module_init(init_budget_enforcement); |
diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c
index b2c5d4e935a5..c750b95581b9 100644
--- a/litmus/ce_domain.c
+++ b/litmus/ce_domain.c
@@ -80,14 +80,12 @@ void ce_domain_init(domain_t *dom, | |||
80 | requeue_t requeue, | 80 | requeue_t requeue, |
81 | peek_ready_t peek_ready, | 81 | peek_ready_t peek_ready, |
82 | take_ready_t take_ready, | 82 | take_ready_t take_ready, |
83 | preempt_needed_t preempt_needed, | ||
84 | task_prio_t task_prio, | 83 | task_prio_t task_prio, |
85 | struct ce_dom_data *dom_data, | 84 | struct ce_dom_data *dom_data, |
86 | const int cpu, | 85 | const int cpu, |
87 | ce_timer_callback_t ce_timer_callback) | 86 | ce_timer_callback_t ce_timer_callback) |
88 | { | 87 | { |
89 | domain_init(dom, lock, requeue, peek_ready, take_ready, preempt_needed, | 88 | domain_init(dom, lock, requeue, peek_ready, take_ready, task_prio); |
90 | task_prio); | ||
91 | dom->data = dom_data; | 89 | dom->data = dom_data; |
92 | dom->remove = ce_remove; | 90 | dom->remove = ce_remove; |
93 | dom_data->cpu = cpu; | 91 | dom_data->cpu = cpu; |
diff --git a/litmus/dgl.c b/litmus/dgl.c
index dd2a42cc9ca6..e2286b3e9239 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -173,7 +173,6 @@ static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource, | |||
173 | 173 | ||
174 | mask_idx(rid, &word, &bit); | 174 | mask_idx(rid, &word, &bit); |
175 | 175 | ||
176 | |||
177 | TRACE("0x%p, %lu, 0x%p\n", greq->waiting, greq->waiting[word], | 176 | TRACE("0x%p, %lu, 0x%p\n", greq->waiting, greq->waiting[word], |
178 | &greq->waiting[word]); | 177 | &greq->waiting[word]); |
179 | 178 | ||
@@ -290,7 +289,6 @@ void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq) | |||
290 | if (try_acquire(dgl, resource, next)) { | 289 | if (try_acquire(dgl, resource, next)) { |
291 | list_del_init(&next->list); | 290 | list_del_init(&next->list); |
292 | print_waiting(dgl, resource); | 291 | print_waiting(dgl, resource); |
293 | |||
294 | } | 292 | } |
295 | } | 293 | } |
296 | } | 294 | } |
diff --git a/litmus/domain.c b/litmus/domain.c
index 4dc3649a0389..54c060d4c8af 100644
--- a/litmus/domain.c
+++ b/litmus/domain.c
@@ -8,7 +8,6 @@ void domain_init(domain_t *dom, | |||
8 | requeue_t requeue, | 8 | requeue_t requeue, |
9 | peek_ready_t peek_ready, | 9 | peek_ready_t peek_ready, |
10 | take_ready_t take_ready, | 10 | take_ready_t take_ready, |
11 | preempt_needed_t preempt_needed, | ||
12 | task_prio_t priority) | 11 | task_prio_t priority) |
13 | { | 12 | { |
14 | INIT_LIST_HEAD(&dom->list); | 13 | INIT_LIST_HEAD(&dom->list); |
@@ -16,6 +15,5 @@ void domain_init(domain_t *dom, | |||
16 | dom->requeue = requeue; | 15 | dom->requeue = requeue; |
17 | dom->peek_ready = peek_ready; | 16 | dom->peek_ready = peek_ready; |
18 | dom->take_ready = take_ready; | 17 | dom->take_ready = take_ready; |
19 | dom->preempt_needed = preempt_needed; | ||
20 | dom->higher_prio = priority; | 18 | dom->higher_prio = priority; |
21 | } | 19 | } |
diff --git a/litmus/jobs.c b/litmus/jobs.c
index bd3175baefae..983b084cf32b 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -1,4 +1,5 @@ | |||
1 | /* litmus/jobs.c - common job control code | 1 | /* litmus/jobs.c - common job control code |
2 | * TODO: modified heavily for sched_mc | ||
2 | */ | 3 | */ |
3 | 4 | ||
4 | #include <linux/sched.h> | 5 | #include <linux/sched.h> |
@@ -6,25 +7,23 @@ | |||
6 | #include <litmus/litmus.h> | 7 | #include <litmus/litmus.h> |
7 | #include <litmus/jobs.h> | 8 | #include <litmus/jobs.h> |
8 | #include <litmus/trace.h> | 9 | #include <litmus/trace.h> |
10 | #include <litmus/sched_trace.h> | ||
11 | #include <litmus/domain.h> | ||
12 | #include <litmus/sched_mc.h> | ||
9 | 13 | ||
10 | static inline void setup_release(struct task_struct *t, lt_t release) | 14 | static inline void setup_release(struct task_struct *t, struct rt_job *job, |
15 | lt_t release) | ||
11 | { | 16 | { |
12 | /* prepare next release */ | 17 | /* prepare next release */ |
13 | tsk_rt(t)->job_params.release = release; | 18 | job->release = release; |
14 | tsk_rt(t)->job_params.deadline = release + get_rt_relative_deadline(t); | 19 | job->deadline = release + get_rt_relative_deadline(t); |
15 | tsk_rt(t)->job_params.exec_time = 0; | 20 | job->exec_time = 0; |
16 | 21 | ||
17 | /* update job sequence number */ | 22 | /* update job sequence number */ |
18 | tsk_rt(t)->job_params.job_no++; | 23 | ++job->job_no; |
19 | |||
20 | /* don't confuse Linux */ | ||
21 | t->rt.time_slice = 1; | ||
22 | |||
23 | TRACE_TASK(t, "Releasing at %llu, deadline: %llu, period: %llu, now: %llu\n", | ||
24 | release, get_deadline(t), get_rt_period(t), litmus_clock()); | ||
25 | } | 24 | } |
26 | 25 | ||
27 | void prepare_for_next_period(struct task_struct *t) | 26 | static inline void setup_kernel_release(struct task_struct *t, lt_t release) |
28 | { | 27 | { |
29 | BUG_ON(!t); | 28 | BUG_ON(!t); |
30 | 29 | ||
@@ -32,21 +31,49 @@ void prepare_for_next_period(struct task_struct *t) | |||
32 | * release and deadline. Lateness may be negative. | 31 | * release and deadline. Lateness may be negative. |
33 | */ | 32 | */ |
34 | t->rt_param.job_params.lateness = | 33 | t->rt_param.job_params.lateness = |
35 | (long long)litmus_clock() - | 34 | (long long)litmus_clock() - |
36 | (long long)t->rt_param.job_params.deadline; | 35 | (long long)t->rt_param.job_params.deadline; |
37 | 36 | ||
38 | setup_release(t, get_release(t) + get_rt_period(t)); | 37 | t->rt.time_slice = 1; |
38 | |||
39 | setup_release(t, &tsk_rt(t)->job_params, release); | ||
40 | |||
41 | TRACE_MC_TASK(t, "kernel rel=%llu, dead=%llu\n", get_release(t), get_deadline(t)); | ||
42 | |||
43 | sched_trace_server_release(-t->pid, get_rt_job(t), | ||
44 | tsk_rt(t)->job_params); | ||
45 | } | ||
46 | |||
47 | void setup_user_release(struct task_struct *t, lt_t release) | ||
48 | { | ||
49 | setup_release(t, &tsk_rt(t)->user_job, release); | ||
50 | TRACE_MC_TASK(t, "user rel=%llu, dead=%llu\n", get_user_release(t), get_user_deadline(t)); | ||
51 | if (CRIT_LEVEL_A != tsk_mc_crit(t)) | ||
52 | sched_trace_task_release(t); | ||
53 | |||
54 | } | ||
55 | |||
56 | void prepare_for_next_period(struct task_struct *t) | ||
57 | { | ||
58 | setup_kernel_release(t, get_release(t) + get_rt_period(t)); | ||
39 | } | 59 | } |
40 | 60 | ||
41 | void release_at(struct task_struct *t, lt_t start) | 61 | void release_at(struct task_struct *t, lt_t start) |
42 | { | 62 | { |
43 | BUG_ON(!t); | 63 | BUG_ON(!t); |
44 | setup_release(t, start); | 64 | |
65 | TRACE_MC_TASK(t, "Releasing at %llu\n", start); | ||
66 | |||
67 | setup_kernel_release(t, start); | ||
68 | setup_user_release(t, start); | ||
69 | |||
70 | BUG_ON(!is_released(t, start)); | ||
71 | |||
45 | set_rt_flags(t, RT_F_RUNNING); | 72 | set_rt_flags(t, RT_F_RUNNING); |
46 | } | 73 | } |
47 | 74 | ||
48 | /* | 75 | /* |
49 | * Deactivate current task until the beginning of the next period. | 76 | * User-space job has completed execution |
50 | */ | 77 | */ |
51 | long complete_job(void) | 78 | long complete_job(void) |
52 | { | 79 | { |
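The release helpers above are what let the two views drift apart and re-converge. A condensed, illustrative composition of how a completion handler now drives them, modelled on job_completion() in litmus/sched_mc.c further down (simplified; the Level-A special case and tracing are dropped):

```c
/* Simplified sketch, not a drop-in replacement for any plugin code. */
static void example_job_completion(struct task_struct *t, int forced)
{
	if (!forced)
		/* Userspace finished voluntarily: only the user view moves
		 * on, leaving the task a "ghost" until the server job, too,
		 * is over. */
		setup_user_release(t, get_user_deadline(t));

	if (budget_exhausted(t))
		/* The server's budget for this period is exhausted: advance
		 * the kernel view by one period as well. */
		prepare_for_next_period(t);
}
```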
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index c63bd0303916..98a30b88baab 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -469,13 +469,11 @@ void pd_domain_init(domain_t *dom, | |||
469 | bheap_prio_t order, | 469 | bheap_prio_t order, |
470 | check_resched_needed_t check, | 470 | check_resched_needed_t check, |
471 | release_jobs_t release, | 471 | release_jobs_t release, |
472 | preempt_needed_t preempt_needed, | ||
473 | task_prio_t priority) | 472 | task_prio_t priority) |
474 | { | 473 | { |
475 | rt_domain_init(domain, order, check, release); | 474 | rt_domain_init(domain, order, check, release); |
476 | domain_init(dom, &domain->ready_lock, | 475 | domain_init(dom, &domain->ready_lock, |
477 | pd_requeue, pd_peek_ready, pd_take_ready, | 476 | pd_requeue, pd_peek_ready, pd_take_ready, priority); |
478 | preempt_needed, priority); | ||
479 | dom->remove = pd_remove; | 477 | dom->remove = pd_remove; |
480 | dom->data = domain; | 478 | dom->data = domain; |
481 | } | 479 | } |
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
index 66ce40fd1b57..28a7cfa401b4 100644
--- a/litmus/sched_color.c
+++ b/litmus/sched_color.c
@@ -320,8 +320,7 @@ static void job_completion(struct rt_server *server) | |||
320 | prepare_for_next_period(t); | 320 | prepare_for_next_period(t); |
321 | 321 | ||
322 | if (is_server(t)) | 322 | if (is_server(t)) |
323 | sched_trace_server_release(t->pid, get_rt_job(t), | 323 | sched_trace_server_release(t->pid, get_rt_job(t), tsk_rt(t)->job_params); |
324 | get_release(t), get_deadline(t)); | ||
325 | else | 324 | else |
326 | sched_trace_task_release(t); | 325 | sched_trace_task_release(t); |
327 | 326 | ||
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index f24c51653fe2..8c2238c6bf43 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -1,4 +1,4 @@ | |||
1 | /** | 1 | /* |
2 | * litmus/sched_mc.c | 2 | * litmus/sched_mc.c |
3 | * | 3 | * |
4 | * Implementation of the Mixed Criticality scheduling algorithm. | 4 | * Implementation of the Mixed Criticality scheduling algorithm. |
@@ -270,13 +270,13 @@ static void update_ghost_time(struct task_struct *p) | |||
270 | delta = 0; | 270 | delta = 0; |
271 | TRACE_MC_TASK(p, "WARNING: negative time delta\n"); | 271 | TRACE_MC_TASK(p, "WARNING: negative time delta\n"); |
272 | } | 272 | } |
273 | if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) { | 273 | if (budget_remaining(p) <= delta) { |
274 | TRACE_MC_TASK(p, "Ghost job could have ended\n"); | 274 | TRACE_MC_TASK(p, "Ghost job could have ended\n"); |
275 | tsk_mc_data(p)->mc_job.ghost_budget = 0; | 275 | tsk_rt(p)->job_params.exec_time = get_exec_cost(p); |
276 | p->se.exec_start = clock; | 276 | p->se.exec_start = clock; |
277 | } else { | 277 | } else { |
278 | TRACE_MC_TASK(p, "Ghost job updated, but didn't finish\n"); | 278 | TRACE_MC_TASK(p, "Ghost job updated, but didn't finish\n"); |
279 | tsk_mc_data(p)->mc_job.ghost_budget -= delta; | 279 | tsk_rt(p)->job_params.exec_time += delta; |
280 | p->se.exec_start = clock; | 280 | p->se.exec_start = clock; |
281 | } | 281 | } |
282 | } | 282 | } |
@@ -302,13 +302,13 @@ static void link_task_to_crit(struct crit_entry *ce, | |||
302 | ce->linked->rt_param.linked_on = NO_CPU; | 302 | ce->linked->rt_param.linked_on = NO_CPU; |
303 | if (is_ghost(ce->linked)) { | 303 | if (is_ghost(ce->linked)) { |
304 | cancel_ghost(ce); | 304 | cancel_ghost(ce); |
305 | if (tsk_mc_data(ce->linked)->mc_job.ghost_budget > 0) { | 305 | if (!budget_exhausted(ce->linked)) { |
306 | /* Job isn't finished, so do accounting */ | 306 | /* Job isn't finished, so do accounting */ |
307 | update_ghost_time(ce->linked); | 307 | update_ghost_time(ce->linked); |
308 | } | 308 | } |
309 | } | 309 | } |
310 | sched_trace_server_switch_away(sid(ce), 0, ce->linked->pid, | 310 | sched_trace_server_switch_away(sid(ce), 0, -ce->linked->pid, |
311 | tsk_rt(ce->linked)->job_params.job_no); | 311 | get_rt_job(ce->linked)); |
312 | } | 312 | } |
313 | 313 | ||
314 | /* Actually link task */ | 314 | /* Actually link task */ |
@@ -321,15 +321,11 @@ static void link_task_to_crit(struct crit_entry *ce, | |||
321 | * tasks. Otherwise reset the budget timer. | 321 | * tasks. Otherwise reset the budget timer. |
322 | */ | 322 | */ |
323 | task->se.exec_start = litmus_clock(); | 323 | task->se.exec_start = litmus_clock(); |
324 | when_to_fire = task->se.exec_start + | 324 | when_to_fire = task->se.exec_start + budget_remaining(task); |
325 | tsk_mc_data(task)->mc_job.ghost_budget; | ||
326 | arm_ghost(ce, when_to_fire); | 325 | arm_ghost(ce, when_to_fire); |
327 | |||
328 | sched_trace_server_switch_to(sid(ce), 0, 0, 0); | ||
329 | } else { | ||
330 | sched_trace_server_switch_to(sid(ce), 0, task->pid, | ||
331 | get_rt_job(ce->linked)); | ||
332 | } | 326 | } |
327 | sched_trace_server_switch_to(sid(ce), 0, -task->pid, | ||
328 | get_rt_job(ce->linked)); | ||
333 | } | 329 | } |
334 | } | 330 | } |
335 | 331 | ||
@@ -434,18 +430,19 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task) | |||
434 | 430 | ||
435 | if (entry->linked) { | 431 | if (entry->linked) { |
436 | sched_trace_server_switch_away(-entry->linked->pid, | 432 | sched_trace_server_switch_away(-entry->linked->pid, |
437 | get_server_job(entry->linked), | 433 | get_rt_job(entry->linked), |
438 | entry->linked->pid, | 434 | entry->linked->pid, |
439 | get_rt_job(entry->linked)); | 435 | get_user_job(entry->linked)); |
440 | } | 436 | } |
441 | 437 | ||
442 | if (task){ | 438 | if (task) { |
443 | set_rt_flags(task, RT_F_RUNNING); | 439 | set_rt_flags(task, RT_F_RUNNING); |
444 | sched_trace_server_switch_to(-task->pid, | 440 | sched_trace_server_switch_to(-task->pid, |
445 | get_server_job(task), | 441 | get_rt_job(task), |
446 | task->pid, | 442 | task->pid, |
447 | get_rt_job(task)); | 443 | get_user_job(task)); |
448 | } | 444 | } |
445 | |||
449 | entry->linked = task; | 446 | entry->linked = task; |
450 | 447 | ||
451 | /* Higher criticality crit entries are now usable */ | 448 | /* Higher criticality crit entries are now usable */ |
@@ -480,7 +477,11 @@ static void preempt(struct domain *dom, struct crit_entry *ce) | |||
480 | } | 477 | } |
481 | update_crit_position(ce); | 478 | update_crit_position(ce); |
482 | 479 | ||
483 | /* Preempt actual execution if this is a running task */ | 480 | /* Preempt actual execution if this is a running task. |
481 | * We know that our task is higher priority than what is currently | ||
482 | * running on this CPU as otherwise the crit_entry would have | ||
483 | * been disabled and a preemption could not have occurred | ||
484 | */ | ||
484 | if (!is_ghost(task)) { | 485 | if (!is_ghost(task)) { |
485 | link_task_to_cpu(entry, task); | 486 | link_task_to_cpu(entry, task); |
486 | preempt_if_preemptable(entry->scheduled, entry->cpu); | 487 | preempt_if_preemptable(entry->scheduled, entry->cpu); |
@@ -564,7 +565,7 @@ static void check_for_preempt(struct domain *dom) | |||
564 | if (!can_use(ce)) | 565 | if (!can_use(ce)) |
565 | /* CPU disabled while locking! */ | 566 | /* CPU disabled while locking! */ |
566 | fix_crit_position(ce); | 567 | fix_crit_position(ce); |
567 | else if (dom->preempt_needed(dom, ce->linked)) | 568 | else if (mc_preempt_needed(dom, ce->linked)) |
568 | /* Success! Check for more preemptions */ | 569 | /* Success! Check for more preemptions */ |
569 | preempt(dom, ce); | 570 | preempt(dom, ce); |
570 | else { | 571 | else { |
@@ -582,7 +583,7 @@ static void check_for_preempt(struct domain *dom) | |||
582 | dom->peek_ready(dom); | 583 | dom->peek_ready(dom); |
583 | 584 | ||
584 | raw_spin_lock(&entry->lock); | 585 | raw_spin_lock(&entry->lock); |
585 | if (can_use(ce) && dom->preempt_needed(dom, ce->linked)) { | 586 | if (can_use(ce) && mc_preempt_needed(dom, ce->linked)) { |
586 | preempt(dom, ce); | 587 | preempt(dom, ce); |
587 | update_crit_levels(entry); | 588 | update_crit_levels(entry); |
588 | } else { | 589 | } else { |
@@ -609,6 +610,7 @@ static void remove_from_all(struct task_struct* task) | |||
609 | 610 | ||
610 | /* Remove the task from any CPU state */ | 611 | /* Remove the task from any CPU state */ |
611 | if (task->rt_param.linked_on != NO_CPU) { | 612 | if (task->rt_param.linked_on != NO_CPU) { |
613 | TRACE_MC_TASK(task, "Linked to something\n"); | ||
612 | entry = &per_cpu(cpus, task->rt_param.linked_on); | 614 | entry = &per_cpu(cpus, task->rt_param.linked_on); |
613 | raw_spin_lock(&entry->lock); | 615 | raw_spin_lock(&entry->lock); |
614 | 616 | ||
@@ -618,7 +620,7 @@ static void remove_from_all(struct task_struct* task) | |||
618 | BUG_ON(ce->linked != task); | 620 | BUG_ON(ce->linked != task); |
619 | link_task_to_crit(ce, NULL); | 621 | link_task_to_crit(ce, NULL); |
620 | update_crit_position(ce); | 622 | update_crit_position(ce); |
621 | if (!is_ghost(task) && entry->linked == task) { | 623 | if (entry->linked == task) { |
622 | update = 1; | 624 | update = 1; |
623 | link_task_to_cpu(entry, NULL); | 625 | link_task_to_cpu(entry, NULL); |
624 | } | 626 | } |
@@ -646,41 +648,25 @@ static void remove_from_all(struct task_struct* task) | |||
646 | */ | 648 | */ |
647 | static void job_completion(struct task_struct *task, int forced) | 649 | static void job_completion(struct task_struct *task, int forced) |
648 | { | 650 | { |
649 | int behind; | ||
650 | TRACE_MC_TASK(task, "Completed\n"); | 651 | TRACE_MC_TASK(task, "Completed\n"); |
651 | 652 | ||
652 | /* Logically stop the task execution */ | 653 | /* Logically stop the task execution */ |
653 | set_rt_flags(task, RT_F_SLEEP); | 654 | set_rt_flags(task, RT_F_SLEEP); |
654 | remove_from_all(task); | 655 | remove_from_all(task); |
655 | 656 | ||
656 | /* Level-A tasks cannot ever get behind */ | 657 | if (!forced) { |
657 | behind = tsk_mc_crit(task) != CRIT_LEVEL_A && behind_server(task); | 658 | /* Userspace releases */ |
658 | 659 | sched_trace_task_completion(current, 0); | |
659 | if (!forced && !is_ghost(task)) { | 660 | setup_user_release(current, get_user_deadline(current)); |
660 | /* Task voluntarily ceased execution. Move on to next period */ | ||
661 | task_release(task); | ||
662 | sched_trace_task_completion(task, forced); | ||
663 | |||
664 | /* Convert to ghost job */ | ||
665 | tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task); | ||
666 | tsk_mc_data(task)->mc_job.is_ghost = 1; | ||
667 | } | 661 | } |
668 | 662 | ||
669 | /* If the task has no ghost budget, convert back from ghost. | ||
670 | * If the task is behind, undo ghost conversion so that it | ||
671 | * can catch up. | ||
672 | */ | ||
673 | if (behind || tsk_mc_data(task)->mc_job.ghost_budget == 0) { | ||
674 | TRACE_MC_TASK(task, "Not a ghost task\n"); | ||
675 | tsk_mc_data(task)->mc_job.is_ghost = 0; | ||
676 | tsk_mc_data(task)->mc_job.ghost_budget = 0; | ||
677 | } | ||
678 | 663 | ||
679 | /* If server has run out of budget, wait until next release */ | 664 | /* If server has run out of budget, wait until next release |
680 | if (budget_exhausted(task)) { | 665 | * TODO: Level A does this independently and should not. |
681 | sched_trace_server_completion(-task->pid, | 666 | */ |
682 | get_server_job(task)); | 667 | if (budget_exhausted(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) { |
683 | server_release(task); | 668 | sched_trace_server_completion(-task->pid, get_rt_job(task)); |
669 | prepare_for_next_period(task); | ||
684 | } | 670 | } |
685 | 671 | ||
686 | /* Requeue non-blocking tasks */ | 672 | /* Requeue non-blocking tasks */ |
@@ -706,7 +692,7 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) | |||
706 | 692 | ||
707 | local_irq_save(flags); | 693 | local_irq_save(flags); |
708 | TRACE("Ghost exhausted\n"); | 694 | TRACE("Ghost exhausted\n"); |
709 | TRACE_CRIT_ENTRY(ce, "Firing here\n"); | 695 | TRACE_CRIT_ENTRY(ce, "Firing here at %llu\n", litmus_clock()); |
710 | 696 | ||
711 | /* Due to race conditions, we cannot just set the linked | 697 | /* Due to race conditions, we cannot just set the linked |
712 | * task's budget to 0 as it may no longer be the task | 698 | * task's budget to 0 as it may no longer be the task |
@@ -716,14 +702,14 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) | |||
716 | raw_spin_lock(&crit_cpu(ce)->lock); | 702 | raw_spin_lock(&crit_cpu(ce)->lock); |
717 | if (ce->linked && is_ghost(ce->linked)) { | 703 | if (ce->linked && is_ghost(ce->linked)) { |
718 | update_ghost_time(ce->linked); | 704 | update_ghost_time(ce->linked); |
719 | if (tsk_mc_data(ce->linked)->mc_job.ghost_budget == 0) { | 705 | if (budget_exhausted(ce->linked)) { |
720 | tmp = ce->linked; | 706 | tmp = ce->linked; |
721 | } | 707 | } |
722 | } | 708 | } |
723 | raw_spin_unlock(&crit_cpu(ce)->lock); | 709 | raw_spin_unlock(&crit_cpu(ce)->lock); |
724 | 710 | ||
725 | if (tmp) | 711 | if (tmp) |
726 | job_completion(tmp, 0); | 712 | job_completion(tmp, 1); |
727 | 713 | ||
728 | local_irq_restore(flags); | 714 | local_irq_restore(flags); |
729 | #ifndef CONFIG_MERGE_TIMERS | 715 | #ifndef CONFIG_MERGE_TIMERS |
@@ -748,12 +734,11 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data) | |||
748 | 734 | ||
749 | raw_spin_lock(&crit_cpu(ce)->lock); | 735 | raw_spin_lock(&crit_cpu(ce)->lock); |
750 | if (ce->linked && | 736 | if (ce->linked && |
751 | ce->linked == ce_data->should_schedule && | 737 | ce->linked == ce_data->should_schedule) |
752 | is_ghost(ce->linked)) | ||
753 | { | 738 | { |
754 | old_link = ce->linked; | 739 | old_link = ce->linked; |
755 | tsk_mc_data(ce->linked)->mc_job.ghost_budget = 0; | ||
756 | link_task_to_crit(ce, NULL); | 740 | link_task_to_crit(ce, NULL); |
741 | mc_ce_job_completion(dom, old_link); | ||
757 | } | 742 | } |
758 | raw_spin_unlock(&crit_cpu(ce)->lock); | 743 | raw_spin_unlock(&crit_cpu(ce)->lock); |
759 | 744 | ||
@@ -764,7 +749,7 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data) | |||
764 | if (NULL != old_link) { | 749 | if (NULL != old_link) { |
765 | STRACE("old_link " TS " so will call job completion\n", TA(old_link)); | 750 | STRACE("old_link " TS " so will call job completion\n", TA(old_link)); |
766 | raw_spin_unlock(dom->lock); | 751 | raw_spin_unlock(dom->lock); |
767 | job_completion(old_link, 0); | 752 | job_completion(old_link, 1); |
768 | } else { | 753 | } else { |
769 | STRACE("old_link was null, so will call check for preempt\n"); | 754 | STRACE("old_link was null, so will call check for preempt\n"); |
770 | raw_spin_unlock(dom->lock); | 755 | raw_spin_unlock(dom->lock); |
@@ -837,12 +822,13 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running) | |||
837 | unsigned long flags; | 822 | unsigned long flags; |
838 | struct cpu_entry* entry; | 823 | struct cpu_entry* entry; |
839 | enum crit_level level = tsk_mc_crit(t); | 824 | enum crit_level level = tsk_mc_crit(t); |
840 | char name[TASK_COMM_LEN]; | ||
841 | strcpy(name, "rtspin"); | ||
842 | 825 | ||
843 | local_irq_save(flags); | 826 | local_irq_save(flags); |
844 | TRACE("New mixed criticality task %d\n", t->pid); | 827 | TRACE("New mixed criticality task %d\n", t->pid); |
845 | 828 | ||
829 | if (level == CRIT_LEVEL_A) | ||
830 | get_rt_relative_deadline(t) = get_exec_cost(t); | ||
831 | |||
846 | /* Assign domain */ | 832 | /* Assign domain */ |
847 | if (level < CRIT_LEVEL_C) | 833 | if (level < CRIT_LEVEL_C) |
848 | entry = &per_cpu(cpus, get_partition(t)); | 834 | entry = &per_cpu(cpus, get_partition(t)); |
@@ -850,19 +836,15 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running) | |||
850 | entry = &per_cpu(cpus, task_cpu(t)); | 836 | entry = &per_cpu(cpus, task_cpu(t)); |
851 | t->rt_param._domain = entry->crit_entries[level].domain; | 837 | t->rt_param._domain = entry->crit_entries[level].domain; |
852 | 838 | ||
853 | if (budget_enforced(t)) { | 839 | /* Userspace and kernelspace view of task state may differ. |
854 | /* Userspace and kernelspace view of task state may differ. | 840 | * Model kernel state as an additional container |
855 | * Model kernel state as an additional container | 841 | */ |
856 | */ | 842 | sched_trace_container_param(t->pid, t->comm); |
857 | sched_trace_container_param(t->pid, name); | 843 | sched_trace_server_param(-t->pid, t->pid, |
858 | sched_trace_server_param(-t->pid, t->pid, | 844 | get_exec_cost(t), get_rt_period(t)); |
859 | get_exec_cost(t), get_rt_period(t)); | ||
860 | } | ||
861 | 845 | ||
862 | /* Setup job params */ | 846 | /* Setup job params */ |
863 | release_at(t, litmus_clock()); | 847 | release_at(t, litmus_clock()); |
864 | tsk_mc_data(t)->mc_job.ghost_budget = 0; | ||
865 | tsk_mc_data(t)->mc_job.is_ghost = 0; | ||
866 | if (running) { | 848 | if (running) { |
867 | BUG_ON(entry->scheduled); | 849 | BUG_ON(entry->scheduled); |
868 | entry->scheduled = t; | 850 | entry->scheduled = t; |
@@ -872,7 +854,6 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running) | |||
872 | } | 854 | } |
873 | t->rt_param.linked_on = NO_CPU; | 855 | t->rt_param.linked_on = NO_CPU; |
874 | 856 | ||
875 | |||
876 | job_arrival(t); | 857 | job_arrival(t); |
877 | 858 | ||
878 | local_irq_restore(flags); | 859 | local_irq_restore(flags); |
@@ -1057,12 +1038,14 @@ static struct task_struct* mc_schedule(struct task_struct* prev) | |||
1057 | raw_spin_lock(&entry->lock); | 1038 | raw_spin_lock(&entry->lock); |
1058 | 1039 | ||
1059 | if (!entry->linked && !ce->linked && dtask && can_use(ce)) { | 1040 | if (!entry->linked && !ce->linked && dtask && can_use(ce)) { |
1041 | /* Pop dtask */ | ||
1060 | dom->take_ready(dom); | 1042 | dom->take_ready(dom); |
1043 | |||
1061 | link_task_to_crit(ce, dtask); | 1044 | link_task_to_crit(ce, dtask); |
1062 | update_crit_position(ce); | 1045 | update_crit_position(ce); |
1063 | ready_task = (is_ghost(dtask)) ? NULL : dtask; | ||
1064 | 1046 | ||
1065 | /* Task found! */ | 1047 | /* Actual running task found */ |
1048 | ready_task = (is_ghost(dtask)) ? NULL : dtask; | ||
1066 | if (ready_task) { | 1049 | if (ready_task) { |
1067 | link_task_to_cpu(entry, ready_task); | 1050 | link_task_to_cpu(entry, ready_task); |
1068 | raw_spin_unlock(dom->lock); | 1051 | raw_spin_unlock(dom->lock); |
@@ -1084,6 +1067,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev) | |||
1084 | raw_spin_unlock(&entry->lock); | 1067 | raw_spin_unlock(&entry->lock); |
1085 | local_irq_restore(flags); | 1068 | local_irq_restore(flags); |
1086 | if (next) { | 1069 | if (next) { |
1070 | BUG_ON(!get_rt_job(next)); | ||
1087 | TRACE_MC_TASK(next, "Picked this task\n"); | 1071 | TRACE_MC_TASK(next, "Picked this task\n"); |
1088 | } else if (exists && !next) | 1072 | } else if (exists && !next) |
1089 | TRACE_ENTRY(entry, "Becomes idle at %llu\n", litmus_clock()); | 1073 | TRACE_ENTRY(entry, "Becomes idle at %llu\n", litmus_clock()); |
@@ -1105,11 +1089,7 @@ void mc_finish_switch(struct task_struct *prev) | |||
1105 | */ | 1089 | */ |
1106 | void mc_release_at(struct task_struct *ts, lt_t start) | 1090 | void mc_release_at(struct task_struct *ts, lt_t start) |
1107 | { | 1091 | { |
1108 | /* hack so that we can have CE timers start at the right time */ | 1092 | release_at(ts, start); |
1109 | if (CRIT_LEVEL_A == tsk_mc_crit(ts)) | ||
1110 | mc_ce_release_at_common(ts, start); | ||
1111 | else | ||
1112 | release_at(ts, start); | ||
1113 | } | 1093 | } |
1114 | 1094 | ||
1115 | long mc_deactivate_plugin(void) | 1095 | long mc_deactivate_plugin(void) |
@@ -1191,7 +1171,7 @@ static void mc_release_ts(lt_t time) | |||
1191 | strcpy(name, "LVL-A"); | 1171 | strcpy(name, "LVL-A"); |
1192 | for_each_online_cpu(cpu) { | 1172 | for_each_online_cpu(cpu) { |
1193 | entry = &per_cpu(cpus, cpu); | 1173 | entry = &per_cpu(cpus, cpu); |
1194 | trace_litmus_container_param(++cont_id, (const char*)&name); | 1174 | sched_trace_container_param(++cont_id, (const char*)&name); |
1195 | ce = &entry->crit_entries[level]; | 1175 | ce = &entry->crit_entries[level]; |
1196 | sched_trace_server_param(sid(ce), cont_id, 0, 0); | 1176 | sched_trace_server_param(sid(ce), cont_id, 0, 0); |
1197 | } | 1177 | } |
@@ -1200,22 +1180,21 @@ static void mc_release_ts(lt_t time) | |||
1200 | strcpy(name, "LVL-B"); | 1180 | strcpy(name, "LVL-B"); |
1201 | for_each_online_cpu(cpu) { | 1181 | for_each_online_cpu(cpu) { |
1202 | entry = &per_cpu(cpus, cpu); | 1182 | entry = &per_cpu(cpus, cpu); |
1203 | trace_litmus_container_param(++cont_id, (const char*)&name); | 1183 | sched_trace_container_param(++cont_id, (const char*)&name); |
1204 | ce = &entry->crit_entries[level]; | 1184 | ce = &entry->crit_entries[level]; |
1205 | sched_trace_server_param(sid(ce), cont_id, 0, 0); | 1185 | sched_trace_server_param(sid(ce), cont_id, 0, 0); |
1206 | } | 1186 | } |
1207 | 1187 | ||
1208 | level = CRIT_LEVEL_C; | 1188 | level = CRIT_LEVEL_C; |
1209 | strcpy(name, "LVL-C"); | 1189 | strcpy(name, "LVL-C"); |
1210 | trace_litmus_container_param(++cont_id, (const char*)&name); | 1190 | sched_trace_container_param(++cont_id, (const char*)&name); |
1211 | for_each_online_cpu(cpu) { | 1191 | for_each_online_cpu(cpu) { |
1212 | entry = &per_cpu(cpus, cpu); | 1192 | entry = &per_cpu(cpus, cpu); |
1213 | ce = &entry->crit_entries[level]; | 1193 | ce = &entry->crit_entries[level]; |
1214 | sched_trace_server_param(sid(ce), cont_id, 0, 0); | 1194 | sched_trace_server_param(sid(ce), cont_id, 0, 0); |
1215 | } | 1195 | } |
1216 | 1196 | ||
1217 | 1197 | mc_ce_release_at_common(NULL, time); | |
1218 | |||
1219 | } | 1198 | } |
1220 | 1199 | ||
1221 | static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = { | 1200 | static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = { |
@@ -1287,8 +1266,7 @@ static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt, | |||
1287 | enum crit_level prio, int is_partitioned, int cpu) | 1266 | enum crit_level prio, int is_partitioned, int cpu) |
1288 | { | 1267 | { |
1289 | pd_domain_init(dom, rt, edf_ready_order, NULL, | 1268 | pd_domain_init(dom, rt, edf_ready_order, NULL, |
1290 | mc_release_jobs, mc_preempt_needed, | 1269 | mc_release_jobs, edf_higher_prio); |
1291 | edf_higher_prio); | ||
1292 | rt->level = prio; | 1270 | rt->level = prio; |
1293 | #if defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS) | 1271 | #if defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS) |
1294 | /* All timers are on one CPU and release-master is using the event | 1272 | /* All timers are on one CPU and release-master is using the event |
@@ -1345,8 +1323,7 @@ static int __init init_mc(void) | |||
1345 | raw_spin_lock_init(a_dom_lock); | 1323 | raw_spin_lock_init(a_dom_lock); |
1346 | ce_domain_init(&dom_data->domain, | 1324 | ce_domain_init(&dom_data->domain, |
1347 | a_dom_lock, ce_requeue, ce_peek_and_take_ready, | 1325 | a_dom_lock, ce_requeue, ce_peek_and_take_ready, |
1348 | ce_peek_and_take_ready, mc_preempt_needed, | 1326 | ce_peek_and_take_ready, ce_higher_prio, ce_data, cpu, |
1349 | ce_higher_prio, ce_data, cpu, | ||
1350 | ce_timer_function); | 1327 | ce_timer_function); |
1351 | init_local_domain(entry, dom_data, CRIT_LEVEL_A); | 1328 | init_local_domain(entry, dom_data, CRIT_LEVEL_A); |
1352 | dom_data->domain.name = "LVL-A"; | 1329 | dom_data->domain.name = "LVL-A"; |
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index 702b46da93d5..e4d66bad2138 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -130,7 +130,7 @@ static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time) | |||
130 | * | 130 | * |
131 | * Do not call prepare_for_next_period on Level-A tasks! | 131 | * Do not call prepare_for_next_period on Level-A tasks! |
132 | */ | 132 | */ |
133 | static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts) | 133 | void mc_ce_job_completion(struct domain *dom, struct task_struct *ts) |
134 | { | 134 | { |
135 | const int cpu = task_cpu(ts); | 135 | const int cpu = task_cpu(ts); |
136 | const int idx = tsk_mc_data(ts)->mc_task.lvl_a_id; | 136 | const int idx = tsk_mc_data(ts)->mc_task.lvl_a_id; |
@@ -141,6 +141,7 @@ static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts) | |||
141 | 141 | ||
142 | /* sched_trace_task_completion(ts, 0); */ | 142 | /* sched_trace_task_completion(ts, 0); */ |
143 | /* post-increment is important here */ | 143 | /* post-increment is important here */ |
144 | sched_trace_server_completion(-ts->pid, get_rt_job(ts)); | ||
144 | just_finished = (tsk_rt(ts)->job_params.job_no)++; | 145 | just_finished = (tsk_rt(ts)->job_params.job_no)++; |
145 | 146 | ||
146 | /* Job completes in expected window: everything is normal. | 147 | /* Job completes in expected window: everything is normal. |
@@ -157,7 +158,6 @@ static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts) | |||
157 | printk(KERN_CRIT "job %u completed in expected job %u which " | 158 | printk(KERN_CRIT "job %u completed in expected job %u which " |
158 | "seems too early\n", just_finished, | 159 | "seems too early\n", just_finished, |
159 | pid_entry->expected_job); | 160 | pid_entry->expected_job); |
160 | BUG(); | ||
161 | } | 161 | } |
162 | } | 162 | } |
163 | 163 | ||
@@ -451,7 +451,7 @@ lt_t mc_ce_timer_callback_common(struct domain *dom) | |||
451 | struct ce_pid_table *pid_table; | 451 | struct ce_pid_table *pid_table; |
452 | struct ce_pid_entry *pid_entry; | 452 | struct ce_pid_entry *pid_entry; |
453 | struct ce_dom_data *ce_data; | 453 | struct ce_dom_data *ce_data; |
454 | int idx, budget_overrun; | 454 | int idx, budget_overrun, expected; |
455 | 455 | ||
456 | ce_data = dom->data; | 456 | ce_data = dom->data; |
457 | pid_table = get_pid_table(ce_data->cpu); | 457 | pid_table = get_pid_table(ce_data->cpu); |
@@ -481,26 +481,31 @@ lt_t mc_ce_timer_callback_common(struct domain *dom) | |||
481 | * If jobs are not overrunning their budgets, then this | 481 | * If jobs are not overrunning their budgets, then this |
482 | * should not happen. | 482 | * should not happen. |
483 | */ | 483 | */ |
484 | pid_entry->expected_job++; | 484 | expected = ++pid_entry->expected_job; |
485 | budget_overrun = pid_entry->expected_job != | 485 | TRACE_MC_TASK(should_schedule, "Expected now: %d\n", expected); |
486 | budget_overrun = expected != | ||
486 | tsk_rt(should_schedule)->job_params.job_no; | 487 | tsk_rt(should_schedule)->job_params.job_no; |
487 | if (budget_overrun) | 488 | if (budget_overrun) { |
488 | TRACE_MC_TASK(should_schedule, | 489 | TRACE_MC_TASK(should_schedule, |
489 | "timer expected job number: %u " | 490 | "timer expected job number: %u " |
490 | "but current job: %u\n", | 491 | "but current job: %u\n", |
491 | pid_entry->expected_job, | 492 | expected, |
492 | tsk_rt(should_schedule)->job_params.job_no); | 493 | tsk_rt(should_schedule)->job_params.job_no); |
494 | } | ||
493 | } | 495 | } |
494 | 496 | ||
495 | if (ce_data->should_schedule) { | 497 | if (ce_data->should_schedule) { |
496 | tsk_rt(should_schedule)->job_params.deadline = | 498 | get_deadline(should_schedule) = |
497 | cycle_start_abs + pid_entry->acc_time; | 499 | cycle_start_abs + pid_entry->acc_time; |
498 | tsk_rt(should_schedule)->job_params.release = | 500 | get_release(should_schedule) = tsk_rt(should_schedule)->job_params.deadline - |
499 | tsk_rt(should_schedule)->job_params.deadline - | ||
500 | pid_entry->budget; | 501 | pid_entry->budget; |
501 | tsk_rt(should_schedule)->job_params.exec_time = 0; | 502 | tsk_rt(should_schedule)->job_params.exec_time = 0; |
502 | /* sched_trace_task_release(should_schedule); */ | 503 | |
504 | TRACE_MC_TASK(should_schedule, "Released!\n"); | ||
503 | set_rt_flags(ce_data->should_schedule, RT_F_RUNNING); | 505 | set_rt_flags(ce_data->should_schedule, RT_F_RUNNING); |
506 | sched_trace_task_release(should_schedule); | ||
507 | sched_trace_server_release(-should_schedule->pid, get_rt_job(should_schedule), | ||
508 | tsk_rt(should_schedule)->job_params); | ||
504 | } | 509 | } |
505 | return next_timer_abs; | 510 | return next_timer_abs; |
506 | } | 511 | } |
@@ -603,7 +608,7 @@ static void arm_all_timers(void) | |||
603 | if (0 == pid_table->num_pid_entries) | 608 | if (0 == pid_table->num_pid_entries) |
604 | continue; | 609 | continue; |
605 | for (idx = 0; idx < pid_table->num_pid_entries; idx++) { | 610 | for (idx = 0; idx < pid_table->num_pid_entries; idx++) { |
606 | pid_table->entries[idx].expected_job = 0; | 611 | pid_table->entries[idx].expected_job = 1; |
607 | } | 612 | } |
608 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER | 613 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER |
609 | cpu_for_timer = interrupt_cpu; | 614 | cpu_for_timer = interrupt_cpu; |
@@ -629,7 +634,7 @@ static void arm_all_timers(void) | |||
629 | */ | 634 | */ |
630 | void mc_ce_release_at_common(struct task_struct *ts, lt_t start) | 635 | void mc_ce_release_at_common(struct task_struct *ts, lt_t start) |
631 | { | 636 | { |
632 | TRACE_TASK(ts, "release at\n"); | 637 | TRACE("release CE at %llu\n", start); |
633 | if (atomic_inc_and_test(&start_time_set)) { | 638 | if (atomic_inc_and_test(&start_time_set)) { |
634 | /* in this case, we won the race */ | 639 | /* in this case, we won the race */ |
635 | cancel_all_timers(); | 640 | cancel_all_timers(); |
@@ -664,8 +669,7 @@ long mc_ce_activate_plugin_common(void) | |||
664 | 669 | ||
665 | atomic_set(&start_time_set, -1); | 670 | atomic_set(&start_time_set, -1); |
666 | atomic64_set(&start_time, litmus_clock()); | 671 | atomic64_set(&start_time, litmus_clock()); |
667 | /* may not want to arm timers on activation, just after release */ | 672 | |
668 | arm_all_timers(); | ||
669 | ret = 0; | 673 | ret = 0; |
670 | out: | 674 | out: |
671 | return ret; | 675 | return ret; |
@@ -707,7 +711,7 @@ static void clear_pid_entries(void) | |||
707 | } | 711 | } |
708 | pid_table->entries[entry].budget = 0; | 712 | pid_table->entries[entry].budget = 0; |
709 | pid_table->entries[entry].acc_time = 0; | 713 | pid_table->entries[entry].acc_time = 0; |
710 | pid_table->entries[entry].expected_job = 0; | 714 | pid_table->entries[entry].expected_job = 1; |
711 | } | 715 | } |
712 | } | 716 | } |
713 | } | 717 | } |
@@ -752,7 +756,7 @@ static int __init init_sched_mc_ce(void) | |||
752 | raw_spin_lock_init(ce_lock); | 756 | raw_spin_lock_init(ce_lock); |
753 | dom_data = &per_cpu(_mc_ce_doms, cpu); | 757 | dom_data = &per_cpu(_mc_ce_doms, cpu); |
754 | dom = &dom_data->domain; | 758 | dom = &dom_data->domain; |
755 | ce_domain_init(dom, ce_lock, NULL, NULL, NULL, NULL, NULL, | 759 | ce_domain_init(dom, ce_lock, NULL, NULL, NULL, NULL, |
756 | &per_cpu(_mc_ce_dom_data, cpu), cpu, | 760 | &per_cpu(_mc_ce_dom_data, cpu), cpu, |
757 | mc_ce_timer_callback); | 761 | mc_ce_timer_callback); |
758 | } | 762 | } |
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 67b01c1dd51b..a5e7f799c1a9 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -98,7 +98,7 @@ static inline struct st_event_record* get_record(u8 type, struct task_struct* t) | |||
98 | rec->hdr.type = type; | 98 | rec->hdr.type = type; |
99 | rec->hdr.cpu = smp_processor_id(); | 99 | rec->hdr.cpu = smp_processor_id(); |
100 | rec->hdr.pid = t ? t->pid : 0; | 100 | rec->hdr.pid = t ? t->pid : 0; |
101 | rec->hdr.job = t ? t->rt_param.job_params.job_no : 0; | 101 | rec->hdr.job = t ? get_user_job(t) : 0; |
102 | } else { | 102 | } else { |
103 | put_cpu_var(st_event_buffer); | 103 | put_cpu_var(st_event_buffer); |
104 | } | 104 | } |
@@ -146,8 +146,8 @@ feather_callback void do_sched_trace_task_release(unsigned long id, unsigned lon | |||
146 | struct task_struct *t = (struct task_struct*) _task; | 146 | struct task_struct *t = (struct task_struct*) _task; |
147 | struct st_event_record* rec = get_record(ST_RELEASE, t); | 147 | struct st_event_record* rec = get_record(ST_RELEASE, t); |
148 | if (rec) { | 148 | if (rec) { |
149 | rec->data.release.release = tsk_rt(t)->job_params.real_release; | 149 | rec->data.release.release = get_user_release(t); |
150 | rec->data.release.deadline = tsk_rt(t)->job_params.real_deadline; | 150 | rec->data.release.deadline = get_user_deadline(t); |
151 | put_record(rec); | 151 | put_record(rec); |
152 | } | 152 | } |
153 | } | 153 | } |