author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-04-10 22:44:32 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-04-10 22:44:32 -0400
commit    b91f30c8f0e03ea4bf7ec861469819c5435eb2d9 (patch)
tree      e1c18d263a01e20b9c3f4651a82ff95ce1332d6a
parent    d5b900860ddcfa271fbde9ea6cd9666704696a36 (diff)
sched_color: Refactored for locking
-rw-r--r--  include/litmus/litmus.h     |   2
-rw-r--r--  include/litmus/rt_param.h   |   5
-rw-r--r--  include/litmus/rt_server.h  |  39
-rw-r--r--  litmus/Makefile             |   1
-rw-r--r--  litmus/rt_server.c          |  33
-rw-r--r--  litmus/sched_color.c        | 623
6 files changed, 359 insertions, 344 deletions
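
The commit message is terse, so a short orientation: the patch moves the plugin's server bookkeeping behind a generic struct rt_server (new files include/litmus/rt_server.h and litmus/rt_server.c) and funnels locking through two helpers added to sched_color.c, entry_lock(entry) (the per-CPU EDF domain's ready_lock) and task_lock(entry, task) (the shared fifo_lock for best-effort tasks, the entry lock otherwise). The sketch below is only an illustrative userspace model of that lock-selection pattern; the pthread types and names are stand-ins, not LITMUS^RT code.

	/* Illustrative model of the task_lock()/entry_lock() selection added by
	 * this patch (stand-in types; compile with -pthread). */
	#include <pthread.h>
	#include <stdio.h>

	struct cpu_entry {
		pthread_mutex_t ready_lock;	/* stands in for edf_domain.ready_lock */
	};

	static pthread_mutex_t fifo_lock = PTHREAD_MUTEX_INITIALIZER;

	struct task {
		int is_be;	/* best-effort (FIFO) task vs. per-CPU EDF task */
	};

	/* Best-effort tasks share the global FIFO lock; EDF tasks use their CPU's lock. */
	static pthread_mutex_t *task_lock(struct cpu_entry *entry, struct task *t)
	{
		return t->is_be ? &fifo_lock : &entry->ready_lock;
	}

	int main(void)
	{
		struct cpu_entry cpu0 = { .ready_lock = PTHREAD_MUTEX_INITIALIZER };
		struct task be = { .is_be = 1 }, edf = { .is_be = 0 };

		pthread_mutex_lock(task_lock(&cpu0, &be));	/* takes fifo_lock */
		pthread_mutex_unlock(task_lock(&cpu0, &be));

		pthread_mutex_lock(task_lock(&cpu0, &edf));	/* takes cpu0.ready_lock */
		pthread_mutex_unlock(task_lock(&cpu0, &edf));

		printf("lock chosen per task class, as in task_lock()/entry_lock()\n");
		return 0;
	}
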
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index c587aa4b890c..8398a10171e0 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -59,7 +59,7 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_release(t) (tsk_rt(t)->job_params.release)
 #define get_class(t) (tsk_rt(t)->task_params.cls)
 #define get_task_domain(t) (tsk_rt(t)->_domain)
-#define is_server(t) (tsk_rt(t)->server)
+#define is_server(t) (tsk_rt(t)->is_server)
 
 #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
 #define get_boost_start(t) (tsk_rt(t)->boost_start_time)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 31e8b8fed2a1..4d3a14992e0c 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -76,6 +76,7 @@ struct _rt_domain;
 struct bheap_node;
 struct release_heap;
 struct domain;
+struct rt_server;
 
 struct rt_job {
 	/* Time instant the the job was or will be released. */
@@ -119,7 +120,7 @@ struct rt_param {
 	/* is the task present? (true if it can be scheduled) */
 	unsigned int present:1;
 
-	unsigned int server:1;
+	unsigned int is_server:1;
 
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Is the task being priority-boosted by a locking protocol? */
@@ -136,6 +137,8 @@ struct rt_param {
 	struct rt_event *event;
 #endif
 
+	struct rt_server *server;
+
 	/* user controlled parameters */
 	struct rt_task task_params;
 
diff --git a/include/litmus/rt_server.h b/include/litmus/rt_server.h
new file mode 100644
index 000000000000..17517790a104
--- /dev/null
+++ b/include/litmus/rt_server.h
@@ -0,0 +1,39 @@
+#ifndef __RT_SERVER_H
+#define __RT_SERVER_H
+
+#include <linux/sched.h>
+#include <litmus/litmus.h>
+#include <litmus/rt_domain.h>
+
+struct rt_server;
+
+typedef int (*need_preempt_t)(rt_domain_t *rt, struct task_struct *t);
+typedef void (*server_update_t)(struct rt_server *srv);
+typedef void (*server_requeue_t)(struct rt_server *srv, struct task_struct *t);
+typedef struct task_struct* (*server_take_t)(struct rt_server *srv);
+
+struct rt_server {
+	int sid;
+	struct task_struct* linked;
+	rt_domain_t* domain;
+	int running;
+
+	/* Does this server have a higher-priority task */
+	need_preempt_t need_preempt;
+	/* System state has changed, so should server */
+	server_update_t update;
+	/* Requeue task in domain */
+	server_requeue_t requeue;
+	/* Take next task from domain */
+	server_take_t take;
+};
+
+void init_rt_server(struct rt_server *server,
+		    int sid, rt_domain_t *domain,
+		    need_preempt_t need_preempt,
+		    server_requeue_t requeue,
+		    server_update_t update,
+		    server_take_t take);
+
+
+#endif
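
For orientation, the interface above is consumed later in this patch by init_color(): each CPU gets an EDF-backed rt_server wired to edf_preemption_needed/edf_requeue (take and update fall back to the defaults in litmus/rt_server.c), and a per-CPU FIFO server is wired to fifo_preemption_needed, fifo_requeue, fifo_update and fifo_take against the shared fifo_domain. The fragment below is a simplified, self-contained userspace model of that callback wiring, with stand-in types instead of rt_domain_t and struct task_struct; the real calls appear in the sched_color.c diff further down.

	/* Userspace model of the rt_server callback wiring (stand-in types only). */
	#include <stdio.h>
	#include <stddef.h>

	struct srv;
	typedef int  (*need_preempt_fn)(struct srv *s);
	typedef void (*requeue_fn)(struct srv *s, int task);
	typedef void (*update_fn)(struct srv *s);
	typedef int  (*take_fn)(struct srv *s);

	struct srv {
		int sid;
		need_preempt_fn need_preempt;
		requeue_fn requeue;
		update_fn update;
		take_fn take;
	};

	/* Defaults mirror litmus/rt_server.c: take pops from the domain, update is a no-op. */
	static int  default_take(struct srv *s)   { (void)s; return 0; }
	static void default_update(struct srv *s) { (void)s; }

	static void init_srv(struct srv *s, int sid, need_preempt_fn np,
			     requeue_fn rq, update_fn up, take_fn tk)
	{
		s->sid = sid;
		s->need_preempt = np;		/* mandatory, as in init_rt_server() */
		s->requeue = rq;		/* mandatory, as in init_rt_server() */
		s->update = up ? up : default_update;
		s->take = tk ? tk : default_take;
	}

	static int  edf_needs_preempt(struct srv *s) { (void)s; return 1; }
	static void edf_requeue(struct srv *s, int t) { printf("server %d requeues %d\n", s->sid, t); }

	int main(void)
	{
		struct srv cpu_server;

		/* Mirrors the CPU-server wiring in init_color(): no update/take overrides. */
		init_srv(&cpu_server, 1, edf_needs_preempt, edf_requeue, NULL, NULL);

		if (cpu_server.need_preempt(&cpu_server))
			cpu_server.requeue(&cpu_server, cpu_server.take(&cpu_server));
		cpu_server.update(&cpu_server);
		return 0;
	}
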
diff --git a/litmus/Makefile b/litmus/Makefile
index 30345ad07aa7..9e58a9c78691 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -21,6 +21,7 @@ obj-y = sched_plugin.o litmus.o \
 	   sched_gsn_edf.o \
 	   color_proc.o \
 	   sched_color.o \
+	   rt_server.o \
 	   fifo_common.o
 
 
diff --git a/litmus/rt_server.c b/litmus/rt_server.c
new file mode 100644
index 000000000000..35f7d4dea079
--- /dev/null
+++ b/litmus/rt_server.c
@@ -0,0 +1,33 @@
+#include <litmus/rt_server.h>
+
+
+static struct task_struct* default_server_take(struct rt_server *srv)
+{
+	return __take_ready(srv->domain);
+}
+
+static void default_server_update(struct rt_server *srv)
+{
+}
+
+void init_rt_server(struct rt_server *server,
+		    int sid, rt_domain_t *domain,
+		    need_preempt_t need_preempt,
+		    server_requeue_t requeue,
+		    server_update_t update,
+		    server_take_t take)
+{
+	if (!need_preempt || !requeue)
+		BUG_ON(1);
+
+	server->need_preempt = need_preempt;
+	server->requeue = requeue;
+
+	server->update = (update) ? update : default_server_update;
+	server->take = (take) ? take : default_server_take;
+
+	server->sid = sid;
+	server->linked = NULL;
+	server->domain = domain;
+	server->running = 0;
+}
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
index 697d57194d3a..02325db677ab 100644
--- a/litmus/sched_color.c
+++ b/litmus/sched_color.c
@@ -14,54 +14,48 @@
 #include <litmus/color.h>
 #include <litmus/fifo_common.h>
 #include <litmus/budget.h>
-
+#include <litmus/rt_server.h>
 
 struct fifo_server {
-	struct task_struct* task;
-	struct task_struct* linked;
-
-	struct enforcement_timer timer;
-	lt_t start_time; /* Used for exec time */
-	int running; /* For tracing niceties */
+	struct rt_server server;
+	struct task_struct* task;
+	struct enforcement_timer timer;
+	lt_t start_time;
 };
 
 struct cpu_entry {
 	int cpu;
+	struct rt_server server;
 	rt_domain_t edf_domain;
-
-	struct task_struct* linked; /* Top-level EDF scheduler */
 	struct task_struct* scheduled; /* Actually running, EDF or FIFO */
-
-	raw_spinlock_t* lock;
-
-	struct fifo_server server;
+	struct fifo_server fifo_server;
 };
 
 DEFINE_PER_CPU(struct cpu_entry, color_cpus);
 static rt_domain_t fifo_domain;
 static raw_spinlock_t fifo_lock;
 
-#define local_edf (&__get_cpu_var(color_cpus).edf_domain)
 #define local_entry (&__get_cpu_var(color_cpus))
-#define remote_edf(cpu) (&per_cpu(color_cpus, cpu).edf_domain)
+#define remote_edf(cpu) (&per_cpu(psnedf_domains, cpu).domain)
 #define remote_entry(cpu) (&per_cpu(color_cpus, cpu))
 #define task_edf(task) remote_edf(get_partition(task))
 #define task_entry(task) remote_entry(get_partition(task))
-#define server_running(entry) (entry->linked && entry->linked == entry->server.task)
-#define task_server(task) (&task_entry(task)->server)
+#define task_server(task) (&task_entry(task)->fifo_server.server)
+#define run_server(task) (tsk_rt(task)->server)
+#define entry_lock(entry) (&entry->edf_domain.ready_lock)
+#define task_domain(entry, task) (is_be(task)? &fifo_domain :&entry->edf_domain)
+#define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry))
 
 /*
- * Requeue task in appropriate domain
+ * Requeue onto domains release or ready queue based on task state.
  */
-static void requeue(struct task_struct* t)
+static void requeue(rt_domain_t *dom, struct task_struct* t)
 {
-	rt_domain_t *dom;
+	if (is_server(t) && !tsk_rt(t)->present)
+		return;
 
 	BUG_ON(is_queued(t));
-	BUG_ON(is_server(t) && !tsk_rt(t)->present);
-	TRACE_TASK(t, "Requeueing\n");
-
-	dom = is_be(t) ? &fifo_domain : task_edf(t);
+	TRACE_TASK(t, "FIFO requeueing\n");
 
 	set_rt_flags(t, RT_F_RUNNING);
 	if (is_released(t, litmus_clock()))
@@ -71,115 +65,61 @@ static void requeue(struct task_struct* t)
 }
 
 /*
- * Logically begin server execution.
+ * Stop logically running the currently linked task.
  */
-static void stop_serving(struct fifo_server *server, struct task_struct *t)
+static void unlink(struct rt_server *server)
 {
-	int task_job, task_pid;
-	struct task_struct *stask = server->task;
+	struct rt_server *tserv;
 
-	BUG_ON(!server->running);
-
-	task_pid = (t) ? t->pid : 0;
-	task_job = (t) ? get_rt_job(t) : 0;
+	BUG_ON(!server->linked);
 
-	if (t)
-		tsk_rt(t)->linked_on = NO_CPU;
+	if (is_server(server->linked)) {
+		tserv = task_server(server->linked);
+		tserv->running = 0;
+	}
 
-	TRACE_TASK(stask, "No longer serving (%d:%d)\n", task_pid, task_job);
+	sched_trace_server_switch_away(server->sid, 0,
+				       server->linked->pid,
+				       get_rt_job(server->linked));
+	TRACE_TASK(server->linked, "No longer run by server %d\n", server->sid);
 
-	sched_trace_server_switch_away(stask->pid, get_rt_job(stask),
-				       task_pid, task_job);
-	server->running = 0;
+	run_server(server->linked) = NULL;
+	server->linked = NULL;
 }
 
-/*
- * Logically stop server execution
- */
-static void start_serving(struct fifo_server *server, struct task_struct *t)
-{
-	int next_pid, next_job;
-	struct cpu_entry *entry;
-
-	next_pid = (t) ? t->pid : 0;
-	next_job = (t) ? get_rt_job(t) : 0;
-	entry = container_of(server, struct cpu_entry, server);
-	sched_trace_server_switch_to(server->task->pid,
-				     get_rt_job(server->task),
-				     next_pid, next_job);
-
-	if (t) {
-		tsk_rt(t)->linked_on = entry->cpu;
-		TRACE_TASK(t, "Run by server %d\n", server->task->pid);
-	} else
-		TRACE("(NULL) Run by server %d\n", server->task->pid);
-
-	server->running = 1;
-}
+static struct task_struct* schedule_server(struct rt_server *server);
 
 /*
- * Remove from "linked" fields and, if necessary, drop switch_away rcords.
+ * Logically run @task.
  */
-static void unlink(struct task_struct *t)
+static void link(struct rt_server *server, struct task_struct *task)
 {
-	struct cpu_entry *entry = task_entry(t);
-
-	TRACE_TASK(t, "Unlinking\n");
-
-	if (is_be(t) && tsk_rt(t)->linked_on != NO_CPU) {
-		entry = remote_entry(tsk_rt(t)->linked_on);
-		BUG_ON(entry->scheduled != t);
+	struct rt_server *tserv;
 
-		stop_serving(&entry->server, entry->server.linked);
+	BUG_ON(server->linked);
+	BUG_ON(!server->running);
 
-		entry->server.linked = NULL;
-		entry->scheduled = NULL;
+	if (is_server(task)) {
+		tserv = task_server(task);
+		tserv->running = 1;
+		schedule_server(tserv);
 	}
 
-	if (entry->linked == t) {
-		entry->linked = NULL;
-		TRACE_TASK(t, "No longer running on %d\n", entry->cpu);
-		sched_trace_server_switch_away(entry->cpu + 1, 0,
-					       t->pid, get_rt_job(t));
-	}
-	if (entry->scheduled == t)
-		entry->scheduled = NULL;
+	server->linked = task;
+	run_server(task) = server;
 
-	if (is_server(t) && entry->server.running) {
-		stop_serving(&entry->server, entry->server.linked);
-	}
+	sched_trace_server_switch_to(server->sid, 0,
+				     task->pid, get_rt_job(task));
+	TRACE_TASK(task, "Run by server %d\n", server->sid);
 }
 
 /*
- * Unlink task. If task is a server running another task, unlink and requeue
- * the sub-task.
+ * Complete job for task linked to @server.
  */
-static void preempt(struct task_struct *t)
+static void job_completion(struct rt_server *server)
 {
-	struct fifo_server *server;
-
-	TRACE_TASK(t, "Preempting\n");
-
-	if (is_server(t)) {
-		server = task_server(t);
-		if (server->linked) {
-			BUG_ON(tsk_rt(server->linked)->linked_on == NO_CPU);
-			TRACE_TASK(server->linked, "Preempted with server\n");
-
-			raw_spin_lock(&fifo_lock);
-
-			requeue(server->linked);
-			unlink(server->linked);
-
-			raw_spin_unlock(&fifo_lock);
-		}
-	}
-
-	unlink(t);
-}
+	struct task_struct *t = server->linked;
 
-static void job_completion(struct task_struct *t)
-{
 	TRACE_TASK(t, "Job completed\n");
 	if (is_server(t))
 		sched_trace_server_completion(t->pid, get_rt_job(t));
@@ -187,7 +127,7 @@ static void job_completion(struct task_struct *t)
 		sched_trace_task_completion(t, 0);
 
 
-	preempt(t);
+	unlink(server);
 	set_rt_flags(t, RT_F_SLEEP);
 	prepare_for_next_period(t);
 
@@ -198,67 +138,121 @@ static void job_completion(struct task_struct *t)
 		sched_trace_task_release(t);
 
 	if (is_running(t))
-		requeue(t);
+		server->requeue(server, t);
 }
 
 /*
- * Update state of task and entries to reflect blocks or completions.
+ * Update server state to reflect task state.
  */
-static void update_task(struct task_struct *t)
+static void update_task(struct rt_server *server)
 {
 	int oot, sleep, block, np;
+	struct task_struct *t = server->linked;
 
 	block = !is_running(t);
 	oot = budget_enforced(t) && budget_exhausted(t);
 	np = is_np(t);
 	sleep = get_rt_flags(t) == RT_F_SLEEP;
 
+
 	TRACE_TASK(t, "Updating task, block: %d, oot: %d, np: %d, sleep: %d\n",
 		   block, oot, np, sleep);
 
 	if (block)
-		preempt(t);
+		unlink(server);
 	else if ((oot || sleep) && !np)
-		job_completion(t);
+		job_completion(server);
+
 }
 
 /*
- * Update state of server and constituent tasks to reflect any blocking,
- * time elapsed, or job completions encountered.
+ * Link next task for @server.
  */
-static void update_server(struct fifo_server *server)
+static struct task_struct* schedule_server(struct rt_server *server)
 {
-	struct cpu_entry *entry;
-	struct task_struct *task;
-	unsigned long long delta;
+	struct task_struct *next;
+	struct rt_server *lserver;
 
-	entry = task_entry(server->task);
-	task = server->task;
+	TRACE("Scheduling server %d\n", server->sid);
 
-	BUG_ON(!server->start_time);
-	BUG_ON(!server_running(entry));
+	if (server->linked) {
+		if (is_server(server->linked)) {
+			lserver = task_server(server->linked);
+			lserver->update(lserver);
+		}
 
-	cancel_enforcement_timer(&server->timer);
+		update_task(server);
+	}
 
-	delta = litmus_clock() - server->start_time;
-	tsk_rt(task)->job_params.exec_time += delta;
-	server->start_time = 0;
+	next = server->linked;
+	if (server->need_preempt(server->domain, next)) {
+		if (next) {
+			TRACE_TASK(next, "Preempted\n");
+			unlink(server);
+			server->requeue(server, next);
+		}
+		next = __take_ready(server->domain);
+		link(server, next);
+	}
 
-	if (!tsk_rt(server->task)->present) {
-		/* Deactivate uses present to trigger server exits */
-		TRACE_TASK(server->task, "RIP server - %llu\n", litmus_clock());
-		preempt(server->task);
-		BUG_ON(is_queued(server->task));
-		BUG_ON(1);
-	} else {
+	return next;
+}
 
-		TRACE_TASK(task, "Updating server, delta: %llu, exec: %llu\n",
-			   delta, get_exec_time(task));
+/*
+ * Dumb requeue for CPU servers.
+ */
+static void edf_requeue(struct rt_server *server, struct task_struct *t)
+{
+	requeue(server->domain, t);
+}
 
-	if (server->linked)
-		update_task(server->linked);
-	update_task(server->task);
-	}
+/*
+ * Locking requeue for FIFO servers.
+ */
+static void fifo_requeue(struct rt_server *server, struct task_struct *t)
+{
+	raw_spin_lock(&fifo_lock);
+	requeue(server->domain, t);
+	raw_spin_unlock(&fifo_lock);
+}
+
+
+/*
+ * Locking take for FIFO servers.
+ */
+static struct task_struct* fifo_take(struct rt_server *server)
+{
+	struct task_struct *ret;
+
+	raw_spin_lock(&fifo_lock);
+	ret = __take_ready(server->domain);
+	raw_spin_unlock(&fifo_lock);
+
+	return ret;
+}
+
+/*
+ * Update server state, including picking next running task and incrementing
+ * server execution time.
+ */
+static void fifo_update(struct rt_server *server)
+{
+	lt_t delta;
+	struct fifo_server *fserver;
+
+	fserver = container_of(server, struct fifo_server, server);
+	TRACE_TASK(fserver->task, "Updating FIFO server\n");
+
+	BUG_ON(!server->running);
+	BUG_ON(!fserver->start_time);
+
+	delta = litmus_clock() - fserver->start_time;
+	tsk_rt(fserver->task)->job_params.exec_time += delta;
+	fserver->start_time = 0;
+
+	cancel_enforcement_timer(&fserver->timer);
+
+	schedule_server(&fserver->server);
 }
 
 /*
@@ -273,16 +267,16 @@ static void color_edf_release(rt_domain_t *edf, struct bheap *tasks)
273 "Released set of EDF tasks\n"); 267 "Released set of EDF tasks\n");
274 268
275 entry = container_of(edf, struct cpu_entry, edf_domain); 269 entry = container_of(edf, struct cpu_entry, edf_domain);
276 raw_spin_lock_irqsave(entry->lock, flags); 270 raw_spin_lock_irqsave(entry_lock(entry), flags);
277 271
278 __merge_ready(edf, tasks); 272 __merge_ready(edf, tasks);
279 273
280 if (edf_preemption_needed(edf, entry->linked) && 274 if (edf_preemption_needed(edf, entry->server.linked) &&
281 (!entry->linked || !is_kernel_np(entry->linked))) { 275 (!entry->server.linked || !is_kernel_np(entry->server.linked))) {
282 litmus_reschedule(entry->cpu); 276 litmus_reschedule(entry->cpu);
283 } 277 }
284 278
285 raw_spin_unlock_irqrestore(entry->lock, flags); 279 raw_spin_unlock_irqrestore(entry_lock(entry), flags);
286} 280}
287 281
288/* 282/*
@@ -292,20 +286,26 @@ static void check_for_fifo_preempt(void)
 {
 	int ret = 0, cpu;
 	struct cpu_entry *entry;
+	struct rt_server *cpu_server, *fifo_server;
+
+	TRACE("Checking for FIFO preempt\n");
 
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
+		cpu_server = &entry->server;
+		fifo_server = &entry->fifo_server.server;
 
-		raw_spin_lock(entry->lock);
+		raw_spin_lock(entry_lock(entry));
 		raw_spin_lock(&fifo_lock);
 
-		if (server_running(entry) && !entry->server.linked) {
+		if (cpu_server->linked && is_server(cpu_server->linked) &&
+		    !fifo_server->linked) {
 			litmus_reschedule(cpu);
 			ret = 1;
 		}
 
 		raw_spin_unlock(&fifo_lock);
-		raw_spin_unlock(entry->lock);
+		raw_spin_unlock(entry_lock(entry));
 
 		if (ret)
 			break;
@@ -318,7 +318,6 @@ static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks)
 
 	TRACE_TASK(bheap2task(bheap_peek(dom->order, tasks)),
 		   "Released set of FIFO tasks\n");
-
 	local_irq_save(flags);
 
 	raw_spin_lock(&fifo_lock);
@@ -330,127 +329,46 @@ static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks)
 	local_irq_restore(flags);
 }
 
-/*
- * Run top-level EDF scheduler.
- */
-static struct task_struct* schedule_edf(struct cpu_entry *entry,
-					struct task_struct *prev)
-{
-	struct task_struct *next, *updated_link;
-
-	/* Update previously-running task */
-	if (prev) {
-		if (is_server(prev))
-			update_server(task_server(prev));
-		else
-			update_task(prev);
-	}
-
-	/* Select next task to run */
-	next = entry->linked;
-	updated_link = next;
-	if (edf_preemption_needed(&entry->edf_domain, entry->linked)) {
-		if (entry->linked) {
-			requeue(entry->linked);
-			preempt(entry->linked);
-		}
-		next = __take_ready(&entry->edf_domain);
-		BUG_ON(next == updated_link);
-	}
+#define cpu_empty(entry, run) \
+	(!(run) || (is_server(run) && !(entry)->fifo_server.server.linked))
 
-	if (next != prev || /* New task to run or */
-	    next != updated_link /* Same task, new job */) {
-		if (next) {
-			sched_trace_server_switch_to(entry->cpu + 1, 0,
-						     next->pid,
-						     get_rt_job(next));
-			TRACE_TASK(next, "Running on %d\n", entry->cpu);
-		} else {
-			TRACE("CPU %d going idle\n", entry->cpu);
-		}
-	}
-
-	entry->linked = next;
-
-	return next;
-}
-
-/*
- * Run bottom-level fifo scheduler on entry running a server.
- */
-static struct task_struct* schedule_fifo(struct cpu_entry *entry,
-					 struct task_struct *prev_linked)
-{
-	struct task_struct *task, *prev_fifo, *next_fifo = NULL;
-	struct fifo_server *server = &entry->server;
-
-	BUG_ON(server->linked && prev_linked != server->task);
-	BUG_ON(!server_running(entry));
-
-	task = server->task;
-
-	/* Update previously-running task */
-	if (prev_linked == task && server->linked) {
-		update_task(server->linked);
-	}
-
-	/* Select next task to run */
-	next_fifo = server->linked;
-	prev_fifo = next_fifo;
-	if (fifo_preemption_needed(&fifo_domain, server->linked)) {
-		if (server->linked) {
-			raw_spin_lock(&fifo_lock);
-			requeue(server->linked);
-			preempt(server->linked);
-			raw_spin_unlock(&fifo_lock);
-		}
-		next_fifo = __take_ready(&fifo_domain);
-	}
-
-	/* Stop serving the NULL task */
-	if (server->running && !prev_fifo && prev_fifo != next_fifo) {
-		stop_serving(server, prev_fifo);
-	}
-
-	/* If the server was not running or wasn't running next_fifo */
-	if (!server->running || next_fifo != prev_fifo) {
-		start_serving(server, next_fifo);
-	}
-
-	server->linked = next_fifo;
-	server->start_time = litmus_clock();
-	arm_enforcement_timer(&server->timer, server->task);
-
-	return next_fifo;
-}
-
-/*
- * Schedule hierarchically.
- */
 static struct task_struct* color_schedule(struct task_struct *prev)
 {
 	unsigned long flags;
 	struct cpu_entry *entry = local_entry;
-	struct task_struct *next, *prev_linked = entry->linked;
+	struct task_struct *next, *plink = entry->server.linked;
 
 	TRACE("Reschedule on %d at %llu\n", entry->cpu, litmus_clock());
-
-	raw_spin_lock_irqsave(entry->lock, flags);
-
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
 	BUG_ON(entry->scheduled && !is_realtime(prev));
 
-	/* Top level */
-	next = schedule_edf(entry, prev_linked);
+	raw_spin_lock_irqsave(entry_lock(entry), flags);
 
-	/* Bottom level */
-	if (next && is_server(next))
-		next = schedule_fifo(entry, prev_linked);
+	if (entry->scheduled && cpu_empty(entry, plink) && is_running(prev)) {
+		TRACE_TASK(prev, "Snuck in on new!\n");
+		requeue(task_domain(entry, prev), prev);
+	}
+
+	/* Pick next top-level task */
+	next = schedule_server(&entry->server);
+
+	/* Arm timer if needed */
+	if (next && is_server(next)) {
+		next = task_server(next)->linked;
+		arm_enforcement_timer(&entry->fifo_server.timer,
+				      entry->fifo_server.task);
+		entry->fifo_server.start_time = litmus_clock();
+	}
+
+	if (prev)
+		tsk_rt(prev)->scheduled_on = NO_CPU;
+	if (next)
+		tsk_rt(next)->scheduled_on = entry->cpu;
 
 	entry->scheduled = next;
 	sched_state_task_picked();
 
-	raw_spin_unlock_irqrestore(entry->lock, flags);
+	raw_spin_unlock_irqrestore(entry_lock(entry), flags);
 
 	return entry->scheduled;
 }
@@ -459,43 +377,31 @@ static void color_task_new(struct task_struct *t, int on_rq, int running)
 {
 	struct cpu_entry* entry;
 	unsigned long flags;
+	raw_spinlock_t *lock;
 
-	TRACE_TASK(t, "New colored task, cpu = %d, wcet = %llu, period = %llu\n",
-		   t->rt_param.task_params.cpu,
-		   get_exec_cost(t), get_rt_period(t));
-
+	TRACE_TASK(t, "New colored task\n");
 	local_irq_save(flags);
 
-	release_at(t, litmus_clock());
-
-	if (is_be(t)) {
-		entry = local_entry;
-		raw_spin_lock(entry->lock);
-		raw_spin_lock(&fifo_lock);
+	entry = (is_be(t)) ? local_entry : task_entry(t);
+	lock = task_lock(entry, t);
 
-		if (running) {
-			BUG_ON(entry->scheduled);
-			entry->scheduled = t;
-		} else {
-			requeue(t);
-		}
+	release_at(t, litmus_clock());
 
-		raw_spin_unlock(&fifo_lock);
-		raw_spin_unlock(entry->lock);
+	raw_spin_lock(lock);
+	if (running) {
+		/* Already running on a CPU, update CPU state to match */
+		BUG_ON(entry->scheduled);
+		entry->scheduled = t;
+		tsk_rt(t)->scheduled_on = entry->cpu;
 	} else {
-		entry = task_entry(t);
-		raw_spin_lock(entry->lock);
-
-		if (running) {
-			BUG_ON(entry->scheduled);
-			entry->scheduled = t;
-		} else {
-			requeue(t);
-			preempt_if_preemptable(entry->scheduled, entry->cpu);
-		}
-
-		raw_spin_unlock(entry->lock);
+		requeue(task_domain(entry, t), t);
 	}
+	raw_spin_unlock(lock);
+
+	if (is_be(t))
+		check_for_fifo_preempt();
+	else
+		litmus_reschedule(entry->cpu);
 
 	local_irq_restore(flags);
 }
@@ -504,15 +410,14 @@ static void color_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
 	struct cpu_entry* entry = task_entry(task);
-	raw_spinlock_t *lock;
+	raw_spinlock_t *lock = task_lock(entry, task);
 	lt_t now = litmus_clock();
 
 	TRACE_TASK(task, "Wake up at %llu\n", now);
 
 	local_irq_save(flags);
 
-	lock = is_be(task) ? &fifo_lock : entry->lock;
-
+	/* Abuse sporadic model */
 	if (is_tardy(task, now)) {
 		release_at(task, now);
 		sched_trace_task_release(task);
@@ -520,12 +425,17 @@ static void color_task_wake_up(struct task_struct *task)
 
 	if (entry->scheduled != task) {
 		raw_spin_lock(lock);
-		requeue(task);
+		requeue(task_domain(entry, task), task);
 		raw_spin_unlock(lock);
+	} else {
+		TRACE_TASK(task, "Is already scheduled on %d!\n",
+			   entry->scheduled);
 	}
 
 	if (is_be(task))
 		check_for_fifo_preempt();
+	else
+		litmus_reschedule(entry->cpu);
 
 	local_irq_restore(flags);
 }
@@ -540,23 +450,38 @@ static void color_task_block(struct task_struct *t)
 static void color_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
-	struct cpu_entry* entry = task_entry(t);
-	rt_domain_t* dom;
+	struct cpu_entry *entry = task_entry(t);
+	raw_spinlock_t *lock = task_lock(entry, t);
+
+	TRACE_TASK(t, "RIP, now reschedule\n");
 
-	raw_spin_lock_irqsave(entry->lock, flags);
+	local_irq_save(flags);
+
+	/* Remove from scheduler consideration */
 	if (is_queued(t)) {
-		if (is_be(t))
-			dom = &fifo_domain;
-		else
-			dom = task_edf(t);
-		remove(dom, t);
+		raw_spin_lock(lock);
+		remove(task_domain(entry, t), t);
+		raw_spin_unlock(lock);
 	}
-	unlink(t);
 
-	TRACE_TASK(t, "RIP, now reschedule\n");
+	/* Stop parent server */
+	if (run_server(t))
+		unlink(run_server(t));
+
+	/* Unschedule running CPU */
+	if (tsk_rt(t)->scheduled_on != NO_CPU) {
+		entry = remote_entry(tsk_rt(t)->scheduled_on);
 
-	preempt_if_preemptable(entry->scheduled, entry->cpu);
-	raw_spin_unlock_irqrestore(entry->lock, flags);
+		raw_spin_lock(entry_lock(entry));
+
+		tsk_rt(t)->scheduled_on = NO_CPU;
+		entry->scheduled = NULL;
+		litmus_reschedule(entry->cpu);
+
+		raw_spin_unlock(entry_lock(entry));
+	}
+
+	local_irq_restore(flags);
 }
 
 /*
@@ -599,40 +524,45 @@ static long color_activate_plugin(void)
 
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
-		entry->scheduled = NULL;
+		server_task = entry->fifo_server.task;
 
-		server_task = entry->server.task;
+		raw_spin_lock(entry_lock(entry));
 
 		ret = color_server_params(cpu, &tp.exec_cost,
 					  &tp.period);
-
-		if (ret)
-			continue;
+		if (ret) {
+			printk(KERN_WARNING "Uninitialized server for CPU %d\n",
+			       entry->cpu);
+			goto loop_end;
+		}
 
 		/* Fill rt parameters */
 		tp.phase = 0;
 		tp.cpu = cpu;
 		tp.cls = RT_CLASS_SOFT;
 		tp.budget_policy = PRECISE_ENFORCEMENT;
-		server_task->rt_param.task_params = tp;
+		tsk_rt(server_task)->task_params = tp;
+		tsk_rt(server_task)->present = 1;
 
+		/* Make runnable */
 		release_at(server_task, now);
+		requeue(&entry->edf_domain, server_task);
 
-		entry->server.start_time = 0;
-		entry->server.running = 1;
+		entry->fifo_server.start_time = 0;
+		entry->scheduled = NULL;
 
-		TRACE_TASK(server_task,
-			   "Created server with wcet: %llu, period: %llu\n",
-			   tp.exec_cost, tp.period);
+		TRACE_TASK(server_task, "Created server with wcet: %llu, "
+			   "period: %llu\n", tp.exec_cost, tp.period);
 
-		requeue(server_task);
+	loop_end:
+		raw_spin_unlock(entry_lock(entry));
 	}
- out:
+
 	return ret;
 }
 
 /*
- * Mark servers for system exit.
+ * Mark servers as unused, making future calls to requeue fail.
  */
 static long color_deactivate_plugin(void)
 {
@@ -641,8 +571,8 @@ static long color_deactivate_plugin(void)
 
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
-		if (entry->server.task) {
-			tsk_rt(entry->server.task)->present = 0;
+		if (entry->fifo_server.task) {
+			tsk_rt(entry->fifo_server.task)->present = 0;
 		}
 	}
 	return 0;
@@ -656,7 +586,7 @@ static void color_release_ts(lt_t time)
 	int cpu, fifo_cid;
 	char fifo_name[TASK_COMM_LEN], cpu_name[TASK_COMM_LEN];
 	struct cpu_entry *entry;
-	struct task_struct *server;
+	struct task_struct *stask;
 
 	strcpy(cpu_name, "CPU");
 	strcpy(fifo_name, "BE");
@@ -667,18 +597,11 @@ static void color_release_ts(lt_t time)
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
 		trace_litmus_container_param(cpu, cpu_name);
-		trace_litmus_server_param(cpu + 1, cpu, 0, 0);
-
-		server = entry->server.task;
-		BUG_ON(!server);
-
-		TRACE_TASK(server, "Server (%llu, %llu)\n",
-			   get_exec_cost(server),
-			   get_rt_period(server));
-
-		trace_litmus_server_param(server->pid, fifo_cid,
-					  get_exec_cost(server),
-					  get_rt_period(server));
+		trace_litmus_server_param(entry->server.sid, cpu, 0, 0);
+		stask = entry->fifo_server.task;
+		trace_litmus_server_param(stask->pid, fifo_cid,
+					  get_exec_cost(stask),
+					  get_rt_period(stask));
 	}
 }
 
@@ -704,6 +627,8 @@ static int __init init_color(void)
 	int cpu;
 	struct cpu_entry *entry;
 	struct task_struct *server_task;
+	struct fifo_server *fifo_server;
+	struct rt_server *cpu_server;
 
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
@@ -711,25 +636,39 @@ static int __init init_color(void)
 
 		entry->cpu = cpu;
 		entry->scheduled = NULL;
-		entry->lock = &entry->edf_domain.ready_lock;
 
+		/* Create FIFO server */
+		fifo_server = &entry->fifo_server;
+		init_rt_server(&fifo_server->server,
+			       cpu + num_online_cpus() + 1,
+			       &fifo_domain,
+			       fifo_preemption_needed,
+			       fifo_requeue, fifo_update, fifo_take);
+
+
+		/* Create task struct for FIFO server */
 		server_task = kmalloc(sizeof(struct task_struct), GFP_ATOMIC);
 		memset(server_task, 0, sizeof(*server_task));
-		entry->server.task = server_task;
-
-		/* Fill linux parameters */
 		server_task->policy = SCHED_LITMUS;
 		strcpy(server_task->comm, "server");
-		server_task->pid = cpu + num_online_cpus() + 1;
+		server_task->pid = fifo_server->server.sid;
+		fifo_server->task = server_task;
 
-		/* Initialize rt_param data */
+		/* Create rt_params for FIFO server */
 		tsk_rt(server_task)->heap_node = bheap_node_alloc(GFP_ATOMIC);
 		tsk_rt(server_task)->rel_heap = release_heap_alloc(GFP_ATOMIC);
 		bheap_node_init(&tsk_rt(server_task)->heap_node, server_task);
-		tsk_rt(server_task)->server = 1;
-		tsk_rt(server_task)->present = 1;
+		tsk_rt(server_task)->is_server = 1;
+
+
+		/* Create CPU server */
+		cpu_server = &entry->server;
+		init_rt_server(cpu_server, cpu + 1,
+			       &entry->edf_domain, edf_preemption_needed,
+			       edf_requeue, NULL, NULL);
+		cpu_server->running = 1;
 
-		init_enforcement_timer(&entry->server.timer);
+		init_enforcement_timer(&fifo_server->timer);
 	}
 
 	fifo_domain_init(&fifo_domain, NULL, color_fifo_release);