author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-04-10 12:29:56 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-04-10 12:29:56 -0400
commit    d5b900860ddcfa271fbde9ea6cd9666704696a36 (patch)
tree      9aef898059330b0dbcb8a15ec935004adb84d6e3
parent    61e0b8a6147c083d042e6ddf5b698bbdb012a2e3 (diff)

sched_color: Scheduler working without locks
-rw-r--r--  include/litmus/sched_trace.h  |  12
-rw-r--r--  include/trace/events/litmus.h |  17
-rw-r--r--  litmus/Makefile               |   2
-rw-r--r--  litmus/budget.c               |   4
-rw-r--r--  litmus/color_proc.c           |   7
-rw-r--r--  litmus/fifo_common.c          |   1
-rw-r--r--  litmus/sched_color.c          | 701
7 files changed, 460 insertions(+), 284 deletions(-)
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index e193267a321f..0773aebcae16 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -187,8 +187,8 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 
 #define trace_litmus_container_param(cid, name)
 #define trace_litmus_server_param(sid, cid, wcet, time)
-#define trace_litmus_server_switch_to(sid, job, tid)
-#define trace_litmus_server_switch_away(sid, job, tid)
+#define trace_litmus_server_switch_to(sid, job, tid, tjob)
+#define trace_litmus_server_switch_away(sid, job, tid, tjob)
 #define trace_litmus_server_release(sid, job, release, deadline)
 #define trace_litmus_server_completion(sid, job)
 
@@ -294,14 +294,14 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 		trace_litmus_server_param(sid, cid, wcet, period); \
 	} while(0)
 
-#define sched_trace_server_switch_to(sid, job, tid) \
+#define sched_trace_server_switch_to(sid, job, tid, tjob) \
 	do { \
-		trace_litmus_server_switch_to(sid, job, tid); \
+		trace_litmus_server_switch_to(sid, job, tid, tjob); \
 	} while(0)
 
-#define sched_trace_server_switch_away(sid, job, tid) \
+#define sched_trace_server_switch_away(sid, job, tid, tjob) \
 	do { \
-		trace_litmus_server_switch_away(sid, job, tid); \
+		trace_litmus_server_switch_away(sid, job, tid, tjob); \
 	} while (0)
 
 #define sched_trace_server_release(sid, job, rel, dead) \
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
index 7a34a3214ae6..136a80db54a4 100644
--- a/include/trace/events/litmus.h
+++ b/include/trace/events/litmus.h
@@ -331,43 +331,48 @@ TRACE_EVENT(litmus_server_param,
 
 TRACE_EVENT(litmus_server_switch_to,
 
-	TP_PROTO(int sid, unsigned int job, int tid),
+	TP_PROTO(int sid, unsigned int job, int tid, unsigned int tjob),
 
-	TP_ARGS(sid, job, tid),
+	TP_ARGS(sid, job, tid, tjob),
 
 	TP_STRUCT__entry(
 		__field( int, sid)
 		__field( unsigned int, job)
 		__field( int, tid)
+		__field( unsigned int, tjob)
 	),
 
 	TP_fast_assign(
 		__entry->sid = sid;
 		__entry->tid = tid;
 		__entry->job = job;
+		__entry->tjob = tjob;
 	),
 
-	TP_printk("switch_to(server(%d, %u)): %d\n", __entry->sid, __entry->job, __entry->tid)
+	TP_printk("switch_to(server(%d, %u)): (%d, %d)\n", __entry->sid, __entry->job, __entry->tid, __entry->tjob)
 );
 
 TRACE_EVENT(litmus_server_switch_away,
 
-	TP_PROTO(int sid, unsigned int job, int tid),
+	TP_PROTO(int sid, unsigned int job, int tid, unsigned int tjob),
 
-	TP_ARGS(sid, job, tid),
+	TP_ARGS(sid, job, tid, tjob),
 
 	TP_STRUCT__entry(
 		__field( int, sid)
 		__field( unsigned int, job)
		__field( int, tid)
+		__field( unsigned int, tjob)
 	),
 
 	TP_fast_assign(
 		__entry->sid = sid;
 		__entry->tid = tid;
+		__entry->job = job;
+		__entry->tjob = tjob;
 	),
 
-	TP_printk("switch_away(server(%d, %u)): %d\n", __entry->sid, __entry->job, __entry->tid)
+	TP_printk("switch_away(server(%d, %u)): (%d, %d)\n", __entry->sid, __entry->job, __entry->tid, __entry->tjob)
 );
 
 TRACE_EVENT(litmus_server_release,
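
[The effect of the two tracepoint changes above is that each server switch record now carries the job number of the task being served, not just its pid. A minimal user-space sketch of the new record layout and print format follows; the struct, field names, and values are plain-C stand-ins chosen for illustration, not the kernel's generated trace code.]

#include <stdio.h>

/* Stand-in for the trace entry; fields mirror the TP_STRUCT__entry above. */
struct server_switch_record {
	int sid;           /* server id */
	unsigned int job;  /* server job number */
	int tid;           /* pid of the task being served (0 = none) */
	unsigned int tjob; /* job number of that task -- the new field */
};

static void emit_switch_to(const struct server_switch_record *r)
{
	/* Same format string the new TP_printk uses. */
	printf("switch_to(server(%d, %u)): (%d, %d)\n",
	       r->sid, r->job, r->tid, r->tjob);
}

int main(void)
{
	struct server_switch_record r = {
		.sid = 5, .job = 3, .tid = 1042, .tjob = 7
	};
	emit_switch_to(&r);
	return 0;
}
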
diff --git a/litmus/Makefile b/litmus/Makefile
index 1d68279af82d..30345ad07aa7 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -26,7 +26,7 @@ obj-y = sched_plugin.o litmus.o \
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
-obj-$(CONFIG_PLUGIN_MC) += sched_mc.o sched_mc_ce.o ce_domain.o
+# obj-$(CONFIG_PLUGIN_MC) += sched_mc.o sched_mc_ce.o ce_domain.o
 obj-$(CONFIG_MERGE_TIMERS) += event_group.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
diff --git a/litmus/budget.c b/litmus/budget.c
index 7fe91815341c..0a7bd665f814 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -53,7 +53,6 @@ void arm_enforcement_timer(struct enforcement_timer* et,
 			   struct task_struct* t)
 {
 	lt_t when_to_fire;
-	TRACE_TASK(t, "arming enforcement timer.\n");
 
 	/* Calling this when there is no budget left for the task
 	 * makes no sense, unless the task is non-preemptive. */
@@ -64,6 +63,9 @@ void arm_enforcement_timer(struct enforcement_timer* et,
 
 	if (likely(!is_np(t))) {
 		when_to_fire = litmus_clock() + budget_remaining(t);
+		TRACE_TASK(t, "arming enforcement timer for %llu.\n",
+			   when_to_fire);
+
 		__hrtimer_start_range_ns(&et->timer,
 					 ns_to_ktime(when_to_fire),
 					 0 /* delta */,
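
[The budget.c change moves the trace message to after when_to_fire is computed, so the actual firing time can be logged. The arithmetic is simply "now plus remaining budget"; a self-contained sketch follows, where the clock value and budget figures are hypothetical stand-ins for litmus_clock() and budget_remaining().]

#include <stdio.h>

typedef unsigned long long lt_t; /* nanoseconds, as in litmus */

/* Hypothetical values standing in for the kernel's task state. */
static lt_t now_ns    = 1000000000ULL; /* pretend current time: 1 s   */
static lt_t exec_cost = 10000000ULL;   /* 10 ms budget per job        */
static lt_t exec_time = 7500000ULL;    /* 7.5 ms already consumed     */

static lt_t litmus_clock(void) { return now_ns; }
static lt_t budget_remaining(void) { return exec_cost - exec_time; }

int main(void)
{
	/* Mirrors arm_enforcement_timer(): fire when the budget runs out. */
	lt_t when_to_fire = litmus_clock() + budget_remaining();
	printf("arming enforcement timer for %llu.\n", when_to_fire);
	return 0;
}
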
diff --git a/litmus/color_proc.c b/litmus/color_proc.c
index 88c8c772dce7..25915e714821 100644
--- a/litmus/color_proc.c
+++ b/litmus/color_proc.c
@@ -2,6 +2,9 @@
 #include <linux/sysctl.h>
 #include <linux/slab.h>
 
+#include <litmus/sched_trace.h>
+
+
 #define SPERIOD_LEN 7
 #define SPERIOD_FILE "period"
 #define SWCET_LEN 5
@@ -41,7 +44,9 @@ int color_server_params(int cpu, unsigned long *wcet, unsigned long *period)
 	}
 
 	*wcet = server_wcet[cpu];
-	*server_period = server_period[cpu];
+	*period = server_period[cpu];
+
+	TRACE("For %d: %lu, %lu\n", cpu, server_wcet[cpu], server_period[cpu]);
 
 	return 0;
 }
diff --git a/litmus/fifo_common.c b/litmus/fifo_common.c
index d4b3cb8cc721..84ae98e42ae4 100644
--- a/litmus/fifo_common.c
+++ b/litmus/fifo_common.c
@@ -21,6 +21,7 @@ int fifo_higher_prio(struct task_struct* first,
 	if (first && first == second) {
 		TRACE_TASK(first,
 			   "WARNING: pointless fifo priority comparison.\n");
+		BUG_ON(1);
 		return 0;
 	}
 
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
index 8f21b2f36f66..697d57194d3a 100644
--- a/litmus/sched_color.c
+++ b/litmus/sched_color.c
@@ -18,18 +18,22 @@
 
 struct fifo_server {
 	struct task_struct* task;
+	struct task_struct* linked;
+
 	struct enforcement_timer timer;
-	lt_t start_time;
+	lt_t start_time; /* Used for exec time */
+	int running; /* For tracing niceties */
 };
 
 struct cpu_entry {
 	int cpu;
 	rt_domain_t edf_domain;
 
-	struct task_struct* linked;
-	struct task_struct* scheduled;
+	struct task_struct* linked; /* Top-level EDF scheduler */
+	struct task_struct* scheduled; /* Actually running, EDF or FIFO */
 
 	raw_spinlock_t* lock;
+
 	struct fifo_server server;
 };
 
@@ -44,13 +48,21 @@ static raw_spinlock_t fifo_lock;
 #define task_edf(task) remote_edf(get_partition(task))
 #define task_entry(task) remote_entry(get_partition(task))
 #define server_running(entry) (entry->linked && entry->linked == entry->server.task)
+#define task_server(task) (&task_entry(task)->server)
 
-
+/*
+ * Requeue task in appropriate domain
+ */
 static void requeue(struct task_struct* t)
 {
-	rt_domain_t *dom = is_be(t) ? &fifo_domain : task_edf(t);
+	rt_domain_t *dom;
+
+	BUG_ON(is_queued(t));
+	BUG_ON(is_server(t) && !tsk_rt(t)->present);
 	TRACE_TASK(t, "Requeueing\n");
 
+	dom = is_be(t) ? &fifo_domain : task_edf(t);
+
 	set_rt_flags(t, RT_F_RUNNING);
 	if (is_released(t, litmus_clock()))
 		__add_ready(dom, t);
@@ -59,362 +71,468 @@ static void requeue(struct task_struct* t)
 }
 
 /*
- * Preempts any preemptable task. If that preemptable task is a
- * server, this will also check that the running task is preemptable.
+ * Logically begin server execution.
  */
-static void color_edf_release(rt_domain_t *edf, struct bheap *tasks)
+static void stop_serving(struct fifo_server *server, struct task_struct *t)
 {
-	unsigned long flags;
-	struct cpu_entry *entry;
-	int lpre, spre;
+	int task_job, task_pid;
+	struct task_struct *stask = server->task;
 
-	entry = container_of(edf, struct cpu_entry, edf_domain);
+	BUG_ON(!server->running);
 
-	raw_spin_lock_irqsave(entry->lock, flags);
+	task_pid = (t) ? t->pid : 0;
+	task_job = (t) ? get_rt_job(t) : 0;
 
-	__merge_ready(edf, tasks);
+	if (t)
+		tsk_rt(t)->linked_on = NO_CPU;
 
-	if (edf_preemption_needed(edf, entry->linked)) {
-		lpre = entry->linked ? !is_kernel_np(entry->linked) : 0;
-		spre = entry->scheduled ? !is_kernel_np(entry->scheduled) : 0;
-		if (lpre && spre) {
-			litmus_reschedule(entry->cpu);
-		}
-	}
+	TRACE_TASK(stask, "No longer serving (%d:%d)\n", task_pid, task_job);
 
-	raw_spin_unlock_irqrestore(entry->lock, flags);
+	sched_trace_server_switch_away(stask->pid, get_rt_job(stask),
+				       task_pid, task_job);
+	server->running = 0;
 }
 
 /*
- * Trigger FIFO release on first server which is running nothing.
- * Note: switching to preempting based on fifo_preemption_needed is not
- * an easy thing! The cpus must be checked in reverse-fifo order, ala
- * gsn-edf, and the tasks to run must be chosen _here_.
+ * Logically stop server execution
  */
-static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks)
+static void start_serving(struct fifo_server *server, struct task_struct *t)
 {
-	unsigned long flags;
-	int cpu, ret = 0;
+	int next_pid, next_job;
 	struct cpu_entry *entry;
 
-	local_irq_save(flags);
+	next_pid = (t) ? t->pid : 0;
+	next_job = (t) ? get_rt_job(t) : 0;
+	entry = container_of(server, struct cpu_entry, server);
+	sched_trace_server_switch_to(server->task->pid,
+				     get_rt_job(server->task),
+				     next_pid, next_job);
 
-	raw_spin_lock(&fifo_lock);
-	__merge_ready(dom, tasks);
-	raw_spin_unlock(&fifo_lock);
+	if (t) {
+		tsk_rt(t)->linked_on = entry->cpu;
+		TRACE_TASK(t, "Run by server %d\n", server->task->pid);
+	} else
+		TRACE("(NULL) Run by server %d\n", server->task->pid);
 
-	for_each_online_cpu(cpu) {
-		entry = remote_entry(cpu);
+	server->running = 1;
+}
 
-		raw_spin_lock(entry->lock);
-		raw_spin_lock(&fifo_lock);
+/*
+ * Remove from "linked" fields and, if necessary, drop switch_away records.
+ */
+static void unlink(struct task_struct *t)
+{
+	struct cpu_entry *entry = task_entry(t);
 
-		if (server_running(entry) && !entry->scheduled) {
-			litmus_reschedule(cpu);
-			ret = 1;
-		}
+	TRACE_TASK(t, "Unlinking\n");
 
-		raw_spin_unlock(&fifo_lock);
-		raw_spin_unlock(entry->lock);
+	if (is_be(t) && tsk_rt(t)->linked_on != NO_CPU) {
+		entry = remote_entry(tsk_rt(t)->linked_on);
+		BUG_ON(entry->scheduled != t);
 
-		if (ret)
-			goto out;
+		stop_serving(&entry->server, entry->server.linked);
+
+		entry->server.linked = NULL;
+		entry->scheduled = NULL;
+	}
+
+	if (entry->linked == t) {
+		entry->linked = NULL;
+		TRACE_TASK(t, "No longer running on %d\n", entry->cpu);
+		sched_trace_server_switch_away(entry->cpu + 1, 0,
+					       t->pid, get_rt_job(t));
+	}
+	if (entry->scheduled == t)
+		entry->scheduled = NULL;
+
+	if (is_server(t) && entry->server.running) {
+		stop_serving(&entry->server, entry->server.linked);
 	}
-out:
-	local_irq_restore(flags);
 }
 
 /*
- * Select second-level task to run and arm timer for server.
+ * Unlink task. If task is a server running another task, unlink and requeue
+ * the sub-task.
  */
-static void schedule_server(struct task_struct *server,
-			    struct task_struct *prev)
+static void preempt(struct task_struct *t)
 {
-	struct cpu_entry *entry = task_entry(server);
+	struct fifo_server *server;
 
-	BUG_ON(!is_server(server));
-	BUG_ON(entry->server.start_time);
+	TRACE_TASK(t, "Preempting\n");
 
-	raw_spin_lock(&fifo_lock);
-	if (prev != server ||
-	    fifo_preemption_needed(&fifo_domain, entry->scheduled)) {
-		BUG_ON(!is_be(entry->scheduled));
-
-		if (is_be(entry->scheduled)) {
-			requeue(entry->scheduled);
-
-			TRACE_TASK(entry->scheduled, "Server %d stopped\n",
-				   server->pid);
-			sched_trace_server_switch_away(server->pid,
-						       get_rt_job(server),
-						       entry->scheduled->pid);
-		}
+	if (is_server(t)) {
+		server = task_server(t);
+		if (server->linked) {
+			BUG_ON(tsk_rt(server->linked)->linked_on == NO_CPU);
+			TRACE_TASK(server->linked, "Preempted with server\n");
 
-		entry->scheduled = __take_ready(&fifo_domain);
+			raw_spin_lock(&fifo_lock);
 
-		if (entry->scheduled) {
-			TRACE_TASK(entry->scheduled, "Running on server %d\n",
-				   server->pid);
-			sched_trace_server_switch_to(server->pid,
-						     get_rt_job(server),
-						     entry->scheduled->pid);
+			requeue(server->linked);
+			unlink(server->linked);
+
+			raw_spin_unlock(&fifo_lock);
 		}
 	}
-	raw_spin_unlock(&fifo_lock);
 
-	/* Trigger a reschedule when the server would be exhausted */
-	entry->server.start_time = litmus_clock();
-	arm_enforcement_timer(&entry->server.timer, server);
+	unlink(t);
+}
+
+static void job_completion(struct task_struct *t)
+{
+	TRACE_TASK(t, "Job completed\n");
+	if (is_server(t))
+		sched_trace_server_completion(t->pid, get_rt_job(t));
+	else
+		sched_trace_task_completion(t, 0);
+
+
+	preempt(t);
+	set_rt_flags(t, RT_F_SLEEP);
+	prepare_for_next_period(t);
+
+	if (is_server(t))
+		sched_trace_server_release(t->pid, get_rt_job(t),
+					   get_release(t), get_rt_period(t));
+	else
+		sched_trace_task_release(t);
+
+	if (is_running(t))
+		requeue(t);
 }
 
 /*
- * Update server exec time since a server was started.
+ * Update state of task and entries to reflect blocks or completions.
  */
-static void update_server(struct task_struct *server)
+static void update_task(struct task_struct *t)
 {
-	struct cpu_entry *entry = task_entry(server);
+	int oot, sleep, block, np;
+
+	block = !is_running(t);
+	oot = budget_enforced(t) && budget_exhausted(t);
+	np = is_np(t);
+	sleep = get_rt_flags(t) == RT_F_SLEEP;
+
+	TRACE_TASK(t, "Updating task, block: %d, oot: %d, np: %d, sleep: %d\n",
+		   block, oot, np, sleep);
+
+	if (block)
+		preempt(t);
+	else if ((oot || sleep) && !np)
+		job_completion(t);
+}
+
+/*
+ * Update state of server and constituent tasks to reflect any blocking,
+ * time elapsed, or job completions encountered.
+ */
+static void update_server(struct fifo_server *server)
+{
+	struct cpu_entry *entry;
+	struct task_struct *task;
 	unsigned long long delta;
 
-	BUG_ON(!is_server(server));
-	BUG_ON(!entry->server.start_time);
+	entry = task_entry(server->task);
+	task = server->task;
+
+	BUG_ON(!server->start_time);
 	BUG_ON(!server_running(entry));
 
-	delta = litmus_clock() - entry->server.start_time;
-	entry->server.start_time = 0;
+	cancel_enforcement_timer(&server->timer);
+
+	delta = litmus_clock() - server->start_time;
+	tsk_rt(task)->job_params.exec_time += delta;
+	server->start_time = 0;
+
+	if (!tsk_rt(server->task)->present) {
+		/* Deactivate uses present to trigger server exits */
+		TRACE_TASK(server->task, "RIP server - %llu\n", litmus_clock());
+		preempt(server->task);
+		BUG_ON(is_queued(server->task));
+		BUG_ON(1);
+	} else {
+
+		TRACE_TASK(task, "Updating server, delta: %llu, exec: %llu\n",
+			   delta, get_exec_time(task));
+
+		if (server->linked)
+			update_task(server->linked);
+		update_task(server->task);
+	}
+}
+
+/*
+ * Triggers preemption on edf-scheduled "linked" field only.
+ */
+static void color_edf_release(rt_domain_t *edf, struct bheap *tasks)
+{
+	unsigned long flags;
+	struct cpu_entry *entry;
+
+	TRACE_TASK(bheap2task(bheap_peek(edf->order, tasks)),
+		   "Released set of EDF tasks\n");
+
+	entry = container_of(edf, struct cpu_entry, edf_domain);
+	raw_spin_lock_irqsave(entry->lock, flags);
 
-	tsk_rt(server)->job_params.exec_time += delta;
+	__merge_ready(edf, tasks);
 
-	cancel_enforcement_timer(&entry->server.timer);
+	if (edf_preemption_needed(edf, entry->linked) &&
+	    (!entry->linked || !is_kernel_np(entry->linked))) {
+		litmus_reschedule(entry->cpu);
+	}
 
-	TRACE_TASK(server, "Delta: %llu, new exec: %llu\n", delta,
-		   get_exec_time(server));
+	raw_spin_unlock_irqrestore(entry->lock, flags);
 }
 
 /*
- * Requeue server and, if necessary, a task it is running.
+ * Triggers preemption on first FIFO server which is running NULL.
  */
-static void preempt_server(struct task_struct *server)
+static void check_for_fifo_preempt(void)
 {
-	struct cpu_entry *entry = task_entry(server);
+	int ret = 0, cpu;
+	struct cpu_entry *entry;
 
-	BUG_ON(!is_server(server));
+	for_each_online_cpu(cpu) {
+		entry = remote_entry(cpu);
 
-	requeue(server);
-	if (entry->scheduled) {
-		BUG_ON(!is_be(entry->scheduled));
+		raw_spin_lock(entry->lock);
 		raw_spin_lock(&fifo_lock);
-		requeue(entry->scheduled);
+
+		if (server_running(entry) && !entry->server.linked) {
+			litmus_reschedule(cpu);
+			ret = 1;
+		}
+
 		raw_spin_unlock(&fifo_lock);
+		raw_spin_unlock(entry->lock);
+
+		if (ret)
+			break;
 	}
 }
 
-static void unlink(struct task_struct *t)
+static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks)
 {
-	struct cpu_entry *entry = task_entry(t);
+	unsigned long flags;
 
-	TRACE_TASK(t, "Unlinking\n");
-	if (entry->linked == t)
-		entry->linked = NULL;
-	if (entry->scheduled == t)
-		entry->scheduled = NULL;
+	TRACE_TASK(bheap2task(bheap_peek(dom->order, tasks)),
+		   "Released set of FIFO tasks\n");
+
+	local_irq_save(flags);
+
+	raw_spin_lock(&fifo_lock);
+	__merge_ready(dom, tasks);
+	raw_spin_unlock(&fifo_lock);
+
+	check_for_fifo_preempt();
+
+	local_irq_restore(flags);
 }
 
 /*
- * Complete a process's job.
+ * Run top-level EDF scheduler.
  */
-static void real_job_completion(struct task_struct* t, int forced)
+static struct task_struct* schedule_edf(struct cpu_entry *entry,
+					struct task_struct *prev)
 {
-	sched_trace_task_completion(t,forced);
-	TRACE_TASK(t, "Job completed\n");
+	struct task_struct *next, *updated_link;
 
-	unlink(t);
-	set_rt_flags(t, RT_F_SLEEP);
-	prepare_for_next_period(t);
+	/* Update previously-running task */
+	if (prev) {
+		if (is_server(prev))
+			update_server(task_server(prev));
+		else
+			update_task(prev);
+	}
 
-	sched_trace_task_release(t);
+	/* Select next task to run */
+	next = entry->linked;
+	updated_link = next;
+	if (edf_preemption_needed(&entry->edf_domain, entry->linked)) {
+		if (entry->linked) {
+			requeue(entry->linked);
+			preempt(entry->linked);
+		}
+		next = __take_ready(&entry->edf_domain);
+		BUG_ON(next == updated_link);
+	}
 
-	if (is_running(t))
-		requeue(t);
+	if (next != prev || /* New task to run or */
+	    next != updated_link /* Same task, new job */) {
+		if (next) {
+			sched_trace_server_switch_to(entry->cpu + 1, 0,
+						     next->pid,
+						     get_rt_job(next));
+			TRACE_TASK(next, "Running on %d\n", entry->cpu);
+		} else {
+			TRACE("CPU %d going idle\n", entry->cpu);
+		}
+	}
+
+	entry->linked = next;
+
+	return next;
 }
 
 /*
- * Complete a server's job,requeing where appropriate.
+ * Run bottom-level fifo scheduler on entry running a server.
  */
-static void server_job_completion(struct task_struct *t)
+static struct task_struct* schedule_fifo(struct cpu_entry *entry,
+					 struct task_struct *prev_linked)
 {
-	sched_trace_server_completion(t->pid, get_rt_job(t));
-	TRACE_TASK(t, "Server job completed\n");
+	struct task_struct *task, *prev_fifo, *next_fifo = NULL;
+	struct fifo_server *server = &entry->server;
 
-	unlink(t);
-	set_rt_flags(t, RT_F_SLEEP);
-	prepare_for_next_period(t);
+	BUG_ON(server->linked && prev_linked != server->task);
+	BUG_ON(!server_running(entry));
 
-	sched_trace_server_release(t->pid, get_rt_job(t),
-				   get_release(t),
-				   get_deadline(t));
-	preempt_server(t);
+	task = server->task;
 
-}
+	/* Update previously-running task */
+	if (prev_linked == task && server->linked) {
+		update_task(server->linked);
+	}
 
-static void color_tick(struct task_struct *t)
-{
-	struct cpu_entry *entry = local_entry;
-	BUG_ON(is_realtime(t) && t != entry->scheduled);
-	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
-		if (!is_np(t)) {
-			litmus_reschedule_local();
-			TRACE("color_scheduler_tick: "
-			      "%d is preemptable "
-			      " => FORCE_RESCHED\n", t->pid);
-		} else if (is_user_np(t)) {
-			TRACE("color_scheduler_tick: "
-			      "%d is non-preemptable, "
-			      "preemption delayed.\n", t->pid);
-			request_exit_np(t);
+	/* Select next task to run */
+	next_fifo = server->linked;
+	prev_fifo = next_fifo;
+	if (fifo_preemption_needed(&fifo_domain, server->linked)) {
+		if (server->linked) {
+			raw_spin_lock(&fifo_lock);
+			requeue(server->linked);
+			preempt(server->linked);
+			raw_spin_unlock(&fifo_lock);
 		}
+		next_fifo = __take_ready(&fifo_domain);
+	}
+
+	/* Stop serving the NULL task */
+	if (server->running && !prev_fifo && prev_fifo != next_fifo) {
+		stop_serving(server, prev_fifo);
+	}
+
+	/* If the server was not running or wasn't running next_fifo */
+	if (!server->running || next_fifo != prev_fifo) {
+		start_serving(server, next_fifo);
 	}
+
+	server->linked = next_fifo;
+	server->start_time = litmus_clock();
+	arm_enforcement_timer(&server->timer, server->task);
+
+	return next_fifo;
 }
 
-static struct task_struct* color_schedule(struct task_struct * prev)
+/*
+ * Schedule hierarchically.
+ */
+static struct task_struct* color_schedule(struct task_struct *prev)
 {
-	struct cpu_entry *entry = local_entry;
-	rt_domain_t *edf = &entry->edf_domain;
-	struct task_struct *prev_linked;
-	int out_of_time, sleep,
-	    np, exists, blocks, server_exhausted;
+	unsigned long flags;
+	struct cpu_entry *entry = local_entry;
+	struct task_struct *next, *prev_linked = entry->linked;
 
-	raw_spin_lock(entry->lock);
+	TRACE("Reschedule on %d at %llu\n", entry->cpu, litmus_clock());
+
+	raw_spin_lock_irqsave(entry->lock, flags);
 
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
 	BUG_ON(entry->scheduled && !is_realtime(prev));
 
-	/* (0) Update state */
-	if (server_running(entry))
-		update_server(entry->linked);
-
-	/* (1) Determine state */
-	exists = entry->scheduled != NULL;
-	blocks = exists && !is_running(entry->scheduled);
-	out_of_time = exists && budget_enforced(entry->scheduled) &&
-		      budget_exhausted(entry->scheduled);
-	np = exists && is_np(entry->scheduled);
-	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
-	server_exhausted = server_running(entry) &&
-			   budget_exhausted(entry->server.task);
-	prev_linked = entry->linked;
-
-	TRACE("Schedule on %d, exists: %d, blocks: %d, out_of_time: %d, ",
-	      "np: %d, sleep: %d, server_exhausted: %d, sched: %d, linked: %d "
-	      "at %llu\n",
-	      entry->cpu, exists, blocks, out_of_time, np, sleep,
-	      server_exhausted, (exists) ? prev->pid : 0,
-	      (entry->linked) ? entry->linked->pid : 0, litmus_clock());
-
-
-	/* (2) Move previous tasks to proper locations */
-	if (blocks)
-		unlink(entry->scheduled);
-	if (server_exhausted && !np)
-		server_job_completion(entry->linked);
-	else if (!np && !blocks && (out_of_time || sleep))
-		real_job_completion(entry->scheduled, !sleep);
-
-	/* (3) Figure out what should be running at the top level */
-	if (edf_preemption_needed(edf, entry->linked)) {
-		if (entry->linked) {
-			if (is_server(entry->linked))
-				preempt_server(entry->linked);
-			else
-				requeue(entry->linked);
-		}
-		entry->linked = __take_ready(edf);
-	}
+	/* Top level */
+	next = schedule_edf(entry, prev_linked);
 
-	/* (4) Schedule bottom level */
-	if (is_server(entry->linked))
-		schedule_server(entry->linked, prev_linked);
-	else
-		entry->scheduled = entry->linked;
-
-	/* (5) Do top level tracing */
-	if (entry->linked != prev_linked) {
-		if (prev_linked)
-			sched_trace_server_switch_away(entry->cpu, 0,
-						       prev_linked->pid);
-		if (entry->linked) {
-			TRACE_TASK(entry->linked, "Run on %d\n", entry->cpu);
-			sched_trace_server_switch_to(entry->cpu, 0,
-						     entry->linked->pid);
-		} else
-			TRACE("Running (NULL) on %d\n", entry->cpu);
-	} else
-		TRACE("Running same task on %d\n", entry->cpu);
+	/* Bottom level */
+	if (next && is_server(next))
+		next = schedule_fifo(entry, prev_linked);
 
+	entry->scheduled = next;
 	sched_state_task_picked();
-	raw_spin_unlock(entry->lock);
+
+	raw_spin_unlock_irqrestore(entry->lock, flags);
 
 	return entry->scheduled;
 }
 
-
-/* Prepare a task for running in RT mode
- */
-static void color_task_new(struct task_struct * t, int on_rq, int running)
+static void color_task_new(struct task_struct *t, int on_rq, int running)
 {
-	struct cpu_entry* entry = task_entry(t);
+	struct cpu_entry* entry;
 	unsigned long flags;
 
-	TRACE_TASK(t, "psn edf: task new, cpu = %d\n",
-		   t->rt_param.task_params.cpu);
+	TRACE_TASK(t, "New colored task, cpu = %d, wcet = %llu, period = %llu\n",
+		   t->rt_param.task_params.cpu,
+		   get_exec_cost(t), get_rt_period(t));
+
+	local_irq_save(flags);
 
-	/* setup job parameters */
 	release_at(t, litmus_clock());
 
-	/* The task should be running in the queue, otherwise signal
-	 * code will try to wake it up with fatal consequences.
-	 */
-	raw_spin_lock_irqsave(entry->lock, flags);
-	if (running) {
-		/* there shouldn't be anything else running at the time */
+	if (is_be(t)) {
+		entry = local_entry;
+		raw_spin_lock(entry->lock);
+		raw_spin_lock(&fifo_lock);
+
+		if (running) {
 			BUG_ON(entry->scheduled);
 			entry->scheduled = t;
+		} else {
+			requeue(t);
+		}
+
+		raw_spin_unlock(&fifo_lock);
+		raw_spin_unlock(entry->lock);
 	} else {
-		requeue(t);
-		/* maybe we have to reschedule */
-		if (!is_be(t))
+		entry = task_entry(t);
+		raw_spin_lock(entry->lock);
+
+		if (running) {
+			BUG_ON(entry->scheduled);
+			entry->scheduled = t;
+		} else {
+			requeue(t);
 			preempt_if_preemptable(entry->scheduled, entry->cpu);
+		}
+
+		raw_spin_unlock(entry->lock);
 	}
-	raw_spin_unlock_irqrestore(entry->lock, flags);
+
+	local_irq_restore(flags);
 }
 
 static void color_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
 	struct cpu_entry* entry = task_entry(task);
-	lt_t now;
+	raw_spinlock_t *lock;
+	lt_t now = litmus_clock();
+
+	TRACE_TASK(task, "Wake up at %llu\n", now);
+
+	local_irq_save(flags);
+
+	lock = is_be(task) ? &fifo_lock : entry->lock;
 
-	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	raw_spin_lock_irqsave(entry->lock, flags);
-	BUG_ON(is_queued(task));
-	now = litmus_clock();
 	if (is_tardy(task, now)) {
-		/* new sporadic release */
 		release_at(task, now);
 		sched_trace_task_release(task);
 	}
 
-	if (entry->scheduled != task)
+	if (entry->scheduled != task) {
+		raw_spin_lock(lock);
 		requeue(task);
+		raw_spin_unlock(lock);
+	}
 
-	raw_spin_unlock_irqrestore(entry->lock, flags);
-	TRACE_TASK(task, "wake up done\n");
+	if (is_be(task))
+		check_for_fifo_preempt();
+
+	local_irq_restore(flags);
 }
 
 static void color_task_block(struct task_struct *t)
 {
-	/* only running tasks can block, thus t is in no queue */
-	TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);
-
+	TRACE_TASK(t, "Block at %llu, state=%d\n", litmus_clock(), t->state);
 	BUG_ON(!is_realtime(t));
 	BUG_ON(is_queued(t));
 }
@@ -433,8 +551,7 @@ static void color_task_exit(struct task_struct * t)
 		dom = task_edf(t);
 		remove(dom, t);
 	}
-	if (entry->scheduled == t)
-		entry->scheduled = NULL;
+	unlink(t);
 
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
@@ -442,59 +559,81 @@ static void color_task_exit(struct task_struct * t)
 	raw_spin_unlock_irqrestore(entry->lock, flags);
 }
 
-static long color_admit_task(struct task_struct* tsk)
+/*
+ * Non-be tasks must have migrated to the right CPU.
+ */
+static long color_admit_task(struct task_struct* t)
+{
+	return is_be(t) || task_cpu(t) == get_partition(t) ? 0 : -EINVAL;
+}
+
+static void color_tick(struct task_struct *t)
 {
-	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
+	struct cpu_entry *entry = local_entry;
+	BUG_ON(is_realtime(t) && t != entry->scheduled);
+	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
+		if (!is_np(t)) {
+			litmus_reschedule_local();
+			TRACE("color_scheduler_tick: "
+			      "%d is preemptable "
+			      " => FORCE_RESCHED\n", t->pid);
+		} else if (is_user_np(t)) {
+			TRACE("color_scheduler_tick: "
+			      "%d is non-preemptable, "
+			      "preemption delayed.\n", t->pid);
+			request_exit_np(t);
+		}
+	}
 }
 
+/*
+ * Load server parameters.
+ */
 static long color_activate_plugin(void)
 {
 	int cpu, ret = 0;
-	char name[TASK_COMM_LEN];
 	struct rt_task tp;
-	struct task_struct *server;
+	struct task_struct *server_task;
 	struct cpu_entry *entry;
-
-	strcpy(name, "server");
+	lt_t now = litmus_clock();
 
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
 		entry->scheduled = NULL;
 
-		server = kmalloc(sizeof(struct task_struct), GFP_ATOMIC);
-		memset(server, 0, sizeof(*server));
+		server_task = entry->server.task;
 
 		ret = color_server_params(cpu, &tp.exec_cost,
 					  &tp.period);
+
 		if (ret)
-			goto out;
+			continue;
 
 		/* Fill rt parameters */
 		tp.phase = 0;
 		tp.cpu = cpu;
 		tp.cls = RT_CLASS_SOFT;
 		tp.budget_policy = PRECISE_ENFORCEMENT;
-		server->rt_param.task_params = tp;
-
-		/* Fill linux parameters */
-		strcpy(server->comm, "server");
-		server->pid = cpu + num_online_cpus();
+		server_task->rt_param.task_params = tp;
 
-		/* Initialize rt_param data */
-		tsk_rt(server)->heap_node = bheap_node_alloc(GFP_ATOMIC);
-		tsk_rt(server)->rel_heap = release_heap_alloc(GFP_ATOMIC);
-		bheap_node_init(&tsk_rt(server)->heap_node, server);
-		tsk_rt(server)->server = 1;
+		release_at(server_task, now);
 
-		entry->server.task = server;
 		entry->server.start_time = 0;
+		entry->server.running = 1;
 
-		requeue(server);
+		TRACE_TASK(server_task,
+			   "Created server with wcet: %llu, period: %llu\n",
+			   tp.exec_cost, tp.period);
+
+		requeue(server_task);
 	}
-out:
+
 	return ret;
 }
 
+/*
+ * Mark servers for system exit.
+ */
 static long color_deactivate_plugin(void)
 {
 	int cpu;
@@ -509,6 +648,9 @@ static long color_deactivate_plugin(void)
 	return 0;
 }
 
+/*
+ * Dump container and server parameters for tracing.
+ */
 static void color_release_ts(lt_t time)
 {
 	int cpu, fifo_cid;
@@ -525,10 +667,15 @@ static void color_release_ts(lt_t time)
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
 		trace_litmus_container_param(cpu, cpu_name);
-		trace_litmus_server_param(cpu, cpu, 0, 0);
+		trace_litmus_server_param(cpu + 1, cpu, 0, 0);
 
 		server = entry->server.task;
 		BUG_ON(!server);
+
+		TRACE_TASK(server, "Server (%llu, %llu)\n",
+			   get_exec_cost(server),
+			   get_rt_period(server));
+
 		trace_litmus_server_param(server->pid, fifo_cid,
 					  get_exec_cost(server),
 					  get_rt_period(server));
@@ -556,6 +703,7 @@ static int __init init_color(void)
 {
 	int cpu;
 	struct cpu_entry *entry;
+	struct task_struct *server_task;
 
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
@@ -565,7 +713,22 @@ static int __init init_color(void)
 		entry->scheduled = NULL;
 		entry->lock = &entry->edf_domain.ready_lock;
 
-		entry->server.task = NULL;
+		server_task = kmalloc(sizeof(struct task_struct), GFP_ATOMIC);
+		memset(server_task, 0, sizeof(*server_task));
+		entry->server.task = server_task;
+
+		/* Fill linux parameters */
+		server_task->policy = SCHED_LITMUS;
+		strcpy(server_task->comm, "server");
+		server_task->pid = cpu + num_online_cpus() + 1;
+
+		/* Initialize rt_param data */
+		tsk_rt(server_task)->heap_node = bheap_node_alloc(GFP_ATOMIC);
+		tsk_rt(server_task)->rel_heap = release_heap_alloc(GFP_ATOMIC);
+		bheap_node_init(&tsk_rt(server_task)->heap_node, server_task);
+		tsk_rt(server_task)->server = 1;
+		tsk_rt(server_task)->present = 1;
+
 		init_enforcement_timer(&entry->server.timer);
 	}
 
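
[Taken together, the sched_color.c rewrite makes color_schedule() pick in two stages: schedule_edf() selects this CPU's highest-priority EDF entity, and if that pick is a server, schedule_fifo() selects the best-effort task the server actually runs. A minimal user-space model of that pick order follows; the queues and tasks here are hypothetical stand-ins, while the real code consults rt_domain_t ready queues under entry->lock and fifo_lock.]

#include <stdio.h>

struct task { int pid; int is_server; };

/* Hypothetical stand-ins for the per-CPU EDF ready queue head and the
 * global FIFO queue head. */
static struct task *edf_head;
static struct task *fifo_head;

/* Mirrors the two-level pick in color_schedule(): EDF chooses first,
 * then FIFO if the EDF level handed us a server rather than a task. */
static struct task *pick_next(void)
{
	struct task *next = edf_head;      /* top level: EDF */
	if (next && next->is_server)
		next = fifo_head;          /* bottom level: FIFO */
	return next;
}

int main(void)
{
	struct task server = { .pid = 9, .is_server = 1 };
	struct task be_task = { .pid = 1042, .is_server = 0 };
	struct task *next;

	edf_head = &server;
	fifo_head = &be_task;

	next = pick_next();
	printf("scheduled pid %d\n", next ? next->pid : 0);
	return 0;
}
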