author    Jonathan Herman <hermanjl@cs.unc.edu>    2011-04-06 10:52:09 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>    2011-04-06 10:52:09 -0400
commit    2a604f8edec24b291ba2c5491108808176c9020f (patch)
tree      a2954d7cbbb6127cd5168a039bf3d7de9776a719
parent    7b335ec69a6905c80ff3180a6e4dfac937e7d7f5 (diff)
Better memory management, removed unnecessary preemptions, moved data out of
rt_params and into plugin data, better list management logic.
-rw-r--r--  include/litmus/rt_param.h  |   5
-rw-r--r--  include/litmus/servers.h   |  28
-rw-r--r--  litmus/bheap.c             |   3
-rw-r--r--  litmus/litmus.c            |   9
-rw-r--r--  litmus/sched_edf_hsb.c     | 269
-rw-r--r--  litmus/servers.c           |  45
6 files changed, 229 insertions(+), 130 deletions(-)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 7a28e2b0e3e5..26da5f578a6c 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -190,11 +190,6 @@ struct rt_param {
 	 */
 	struct list_head list;
 
-	/* Used for slack stealing */
-	struct list_head slack_list;
-	struct server *slack_server;
-	int slack_candidate;
-
 	/* Pointer to the page shared between userspace and kernel. */
 	struct control_page * ctrl_page;
 
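The fields removed above do not disappear: they reappear in sched_edf_hsb.c as plugin-private state reached through the opaque plugin_data pointer, so only the EDF-HSB plugin pays for them. A minimal sketch of the pattern, using the task_data struct this patch introduces below (tsk_rt() is the usual LITMUS^RT accessor):

typedef struct task_data {
	server_t *srt_server;        /* if the task is SRT, its server */
	struct list_head slack_list; /* membership in slack_candidates */
	struct task_struct *owner;   /* back-pointer to the owning task */
} task_data_t;

/* every plugin-private field is now one indirection away */
#define task_data(task) ((task_data_t*)tsk_rt(task)->plugin_data)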
diff --git a/include/litmus/servers.h b/include/litmus/servers.h
index 3de9df6e804a..97fd0ce76207 100644
--- a/include/litmus/servers.h
+++ b/include/litmus/servers.h
@@ -97,6 +97,8 @@ typedef struct server_domain {
 	server_completed_t server_completed;
 	servers_released_t servers_released;
 
+	struct kmem_cache *server_release_cache;
+
 	raw_spinlock_t tobe_lock;
 	struct list_head tobe_released;
 } server_domain_t;
@@ -116,31 +118,41 @@ typedef struct server_release_heap {
 	server_domain_t *domain;
 } server_release_heap_t;
 
+/*
+ * Initialize and exit servers
+ */
 void server_init(server_t *server, int id,
 		 lt_t wcet, lt_t period, int grouped);
 void server_destroy(server_t *server);
 
+/*
+ * Memory manage servers on the module slabs.
+ */
+server_t* server_alloc(int gfp_flags);
+void server_free(server_t *server);
+
+/*
+ * Initialize and exit the server domain.
+ */
 void server_domain_init(server_domain_t *domain,
 			servers_released_t servers_released,
 			server_completed_t server_completed,
 			int release_master, raw_spinlock_t* timer_lock);
-void server_domain_exit(server_domain_t *domain);
+void server_domain_destroy(server_domain_t *domain);
 
+/*
+ * Adds the next release of the server to the domain's timer.
+ */
 int add_server_release(server_t *server, server_domain_t *server_domain);
 
 /*
- * Runs a task on the server. If the server runs out of budget
- * before the task finishes executing, the server will set its
- * budget to 0 and call post_completion. If task is NULL, the server
- * will just run on the current CPU.
+ * Runs a task on the server.
  */
 void server_run(server_t *server, struct task_struct *task,
 		server_domain_t *server_domain);
 
 /*
- * Stops server execution. This will decrement the budget
- * by the amount of time since server_run_task was called, based
- * on the task execution time or current time, if task was NULL.
+ * Stops server execution.
  */
 void server_stop(server_t *server, server_domain_t *domain);
 
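Taken together, the new declarations imply a two-step lifecycle: slab allocation (server_alloc/server_free) is now separate from logical setup and teardown (server_init/server_destroy). A sketch of the intended call sequence, assembled from the prototypes above (the S_SLACK type tag is plugin-defined in sched_edf_hsb.c; error handling omitted):

/* creation: allocate from the slab, then initialize */
server_t *slack = server_alloc(GFP_ATOMIC);
if (slack) {
	server_init(slack, id, budget, period, 0);
	slack->type = S_SLACK;
}

/* destruction: tear down internal state, then return memory to the slab */
server_destroy(slack);
server_free(slack);

This mirrors the bheap_node_alloc()/bheap_node_free() split already used for heap nodes in litmus.c.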
diff --git a/litmus/bheap.c b/litmus/bheap.c
index 528af97f18a6..cf9a0f842c17 100644
--- a/litmus/bheap.c
+++ b/litmus/bheap.c
@@ -276,6 +276,9 @@ void bheap_delete(bheap_prio_t higher_prio, struct bheap* heap,
 	pos = heap->head;
 	while (pos != node) {
 		prev = pos;
+		/* a dereferencing error here means that
+		 * the node was not in this heap
+		 */
 		pos = pos->next;
 	}
 	/* we have prev, now remove node */
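The new comment documents a debugging hint rather than a guard: bheap_delete() still assumes the node is in the heap, and walking off the end of the sibling list will oops. A defensive variant of the walk (hypothetical, not part of this patch) would terminate instead:

	pos = heap->head;
	while (pos && pos != node) {	/* stop at the list end rather than dereferencing NULL */
		prev = pos;
		pos = pos->next;
	}
	if (!pos)
		return;			/* node was not in this heap */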
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 6a75da08f0fb..c4f899510900 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -16,6 +16,7 @@
 #include <litmus/rt_domain.h>
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
+#include <litmus/servers.h>
 
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
@@ -33,7 +34,6 @@ atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
 
 static struct kmem_cache * bheap_node_cache;
 extern struct kmem_cache * release_heap_cache;
-extern struct kmem_cache * server_release_cache;
 
 struct bheap_node* bheap_node_alloc(int gfp_flags)
 {
@@ -327,8 +327,6 @@ long litmus_admit_task(struct task_struct* tsk)
 	}
 
 	INIT_LIST_HEAD(&tsk_rt(tsk)->list);
-	INIT_LIST_HEAD(&tsk_rt(tsk)->slack_list);
-	tsk_rt(tsk)->slack_server = NULL;
 
 	/* avoid scheduler plugin changing underneath us */
 	raw_spin_lock_irqsave(&task_transition_lock, flags);
@@ -336,7 +334,6 @@ long litmus_admit_task(struct task_struct* tsk)
 	/* allocate heap node for this task */
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
 	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
-	tsk_rt(tsk)->slack_candidate = 0;
 
 	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
 		printk(KERN_WARNING "litmus: no more heap node memory!?\n");
@@ -523,9 +520,8 @@ static int __init _init_litmus(void)
 
 	register_sched_plugin(&linux_sched_plugin);
 
 	bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
 	release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
-	server_release_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
 
 #ifdef CONFIG_MAGIC_SYSRQ
 	/* offer some debugging help */
@@ -545,7 +541,6 @@ static void _exit_litmus(void)
 	exit_litmus_proc();
 	kmem_cache_destroy(bheap_node_cache);
 	kmem_cache_destroy(release_heap_cache);
-	kmem_cache_destroy(server_release_cache);
 }
 
 module_init(_init_litmus);
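Worth noting: the removed initialization built server_release_cache with KMEM_CACHE(release_heap, ...), i.e. it appears to have been sized from the wrong struct. Moving the caches into litmus/servers.c (at the end of this patch) also corrects the sizing. For reference, the slab pairing used throughout the patch (a generic sketch; error handling omitted):

struct kmem_cache *server_cache;	/* created once at init, from the stored type */

server_t* server_alloc(int gfp_flags)
{
	/* objects come from a dedicated slab instead of kmalloc(),
	 * which avoids size-class waste and aids leak debugging */
	return kmem_cache_alloc(server_cache, gfp_flags);
}

void server_free(server_t *server)
{
	kmem_cache_free(server_cache, server);
}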
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index d75f8a04b362..e3cd78d29ce8 100644
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -20,7 +20,7 @@
  * TODO move slack completion into release
  * TODO fix concurrent arms
  * TODO slack and BE servers
- *
+ * TODO start servers should no longer be necessary
  */
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -63,21 +63,22 @@
  * Useful debugging macros. Remove for actual use as they cause
  * a lot of lock contention.
  */
-//#ifdef DEBUG_EDF_HSB
-atomic_t servers_running = ATOMIC_INIT(0);
-/* #define TRACE_SUB(fmt, args...) \ */
-/* 	sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \ */
-/* 	      TRACE_ARGS, ## args) */
-/* #define TRACE_TASK_SUB(t, fmt, args...) \ */
-/* 	TRACE_SUB("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ */
-/* 	      (t)->rt_param.job_params.job_no, ##args) */
-/* #define TRACE_SERVER_SUB(s, fmt, args...) \ */
-/* 	TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args) */
-/* #else */
+#ifdef DEBUG_EDF_HSB
+atomic_t servers_running = ATOMIC_INIT(0); /* TODO should be unnecessary */
+#define TRACE_SUB(fmt, args...) \
+	sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt "\n", \
+			TRACE_ARGS, ## args)
+#define TRACE_TASK_SUB(t, fmt, args...) \
+	TRACE_SUB("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \
+		  (t)->rt_param.job_params.job_no, ##args)
+#define TRACE_SERVER_SUB(s, fmt, args...) \
+	TRACE_SUB(SERVER_FMT " " fmt, SERVER_ARGS(s), ##args)
+#else
 #define TRACE_SUB(fmt, args...)
 #define TRACE_TASK_SUB(t, fmt, args...)
 #define TRACE_SERVER_SUB(s, fmt, args...)
-//#endif
+#endif
+
 typedef enum {
 	S_HRT,
 	S_SRT,
@@ -88,7 +89,7 @@ typedef enum {
 typedef struct {
 	server_t server;
 	rt_domain_t hrt_domain; /* EDF for HRT tasks assigned here */
-	int ready;
+	int ready; /* False if waiting for next release */
 	int no_slack;
 	struct hrtimer slack_timer; /* Server has no slack when:
 				     * (deadline - budget) <= current_time.
@@ -108,52 +109,74 @@ typedef struct {
 	struct bheap_node* hn; /* For the cpu_heap */
 } cpu_entry_t;
 
+typedef struct task_data {
+	server_t *srt_server; /* If the task is SRT, its server */
+	struct list_head slack_list; /* List of slack candidates */
+	struct task_struct *owner;
+} task_data_t;
+
 /* CPU state */
 DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, cpu_entries);
 static struct bheap cpu_heap;
 static struct bheap_node cpu_heap_node[NR_CPUS];
 
-static rt_domain_t srt_domain; /* SRT tasks (and servers) */
-static rt_domain_t be_domain; /* BE tasks */
+/* Task domains */
+static rt_domain_t srt_domain;
+static rt_domain_t be_domain;
 
-static server_domain_t server_domain; /* Useful tools for server scheduling */
+/* Useful tools for server scheduling */
+static server_domain_t server_domain;
 
-static struct list_head be_servers; /* All BE servers */
-static struct bheap be_ready_servers; /* Runnable BE servers */
+/* BE server support */
+static struct list_head be_servers;
+static struct bheap be_ready_servers;
 
+/* Slack support */
 static struct list_head slack_queue;
 static struct list_head slack_candidates;
 
-static int release_master; /* CPU which will release tasks and global servers */
+/* CPU which will release tasks and global servers */
+static int edf_hsb_release_master;
 
+static struct kmem_cache *task_data_cache;
 static struct proc_dir_entry *edf_hsb_proc_dir = NULL;
 static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp;
 
-#define local_cpu_entry (&__get_cpu_var(cpu_entries))
 #define task_sched_entry(task) (&per_cpu(cpu_entries, task_cpu(task)))
 #define task_linked_entry(task) (&per_cpu(cpu_entries, task->rt_param.linked_on))
 #define task_job_no(task) (tsk_rt(task)->job_params.job_no)
-#define task_srt_server(task) ((server_t*)tsk_rt(task)->plugin_data)
-#define task_slack_server(task) ((server_t*)tsk_rt(task)->slack_server)
+#define task_data(task) ((task_data_t*)tsk_rt(task)->plugin_data)
+#define task_srt_server(task) ((server_t*)task_data(task)->srt_server)
+#define local_cpu_entry (&__get_cpu_var(cpu_entries))
 #define global_lock (&srt_domain.ready_lock)
 #define is_active_plugin (litmus == &edf_hsb_plugin)
 
-static inline int head_in_list(struct list_head *list)
-{
-	if ((list->next == list->prev && list->prev == list) ||
-	    (list->next == LIST_POISON1 && list->prev == LIST_POISON2))
-		return 0;
-	else
-		return 1;
+static inline server_t* task_slack_server(struct task_struct *task)
+{
+	server_t *slack_server = NULL;
+	if (task->rt_param.linked_on != NO_CPU) {
+		slack_server = task_linked_entry(task)->linked_server;
+		if (slack_server->type != S_SLACK)
+			slack_server = NULL;
+	}
+	return slack_server;
+}
+
+static inline int head_in_list(struct list_head *node)
+{
+	return !(node->next == node->prev && node->prev == node);
+}
+
+static task_data_t* task_data_alloc(int gfp_flags)
+{
+	return kmem_cache_alloc(task_data_cache, gfp_flags);
+}
+
+static void task_data_free(task_data_t* data)
+{
+	kmem_cache_free(task_data_cache, data);
 }
 
-/* In the next methods check to see if have donated. If so, do what?
- * Server_stop. in check for hrt, need to make sure a check for global is
- * called. in the other one, just need to make sure we unlink and crap and
- * then call for it again.
- * What happens if an srt is released that is scheduled?
- * Then the effective deadline will mark it.
- */
 static server_t* next_eligible_slack(void)
 {
 	server_t *next_slack = NULL, *donator;
@@ -171,11 +194,11 @@ static server_t* next_eligible_slack(void)
 				   SERVER_ARGS(next_slack));
 		sched_trace_action(NULL, 7);
 
-		list_del(&next_slack->list);
+		list_del_init(&next_slack->list);
 		donator = (server_t*)next_slack->data;
 		donator->data = NULL;
 		server_destroy(next_slack);
-		kfree(next_slack);
+		server_free(next_slack);
 
 		next_slack = NULL;
 	}
@@ -197,8 +220,6 @@ static void add_slack(server_t *slack)
 	server_t *queued;
 	TRACE_SERVER_SUB(slack, "adding");
 
-	BUG_ON(head_in_list(&slack->list));
-
 	list_for_each_prev(pos, &slack_queue) {
 		queued = list_entry(pos, server_t, list);
 		if (lt_before_eq(queued->deadline, slack->deadline)) {
@@ -209,26 +230,33 @@ static void add_slack(server_t *slack)
 	list_add(&slack->list, &slack_queue);
 }
 
+static inline struct task_struct* get_candidate(struct list_head *pos)
+{
+	struct task_struct *task = NULL;
+	task_data_t *data;
+	if (!list_empty(pos)) {
+		data = list_entry(pos, task_data_t, slack_list);
+		task = data->owner;
+	}
+	return task;
+}
+
 static void add_slack_candidate(struct task_struct *task)
 {
 	struct list_head *pos;
 	struct task_struct *queued;
 
-	tsk_rt(task)->slack_candidate = 1;
-	INIT_LIST_HEAD(&tsk_rt(task)->slack_list);
-
 	list_for_each_prev(pos, &slack_candidates) {
-		queued = list_entry(pos, struct task_struct,
-				    rt_param.slack_list);
+		queued = get_candidate(pos);
 		if (lt_before_eq(get_deadline(queued), get_deadline(task))) {
 			TRACE_TASK_SUB(task, "adding after %d", queued->pid);
-			__list_add(&tsk_rt(task)->slack_list,
+			__list_add(&task_data(task)->slack_list,
 				   pos, pos->next);
 			return;
 		}
 	}
 	TRACE_TASK_SUB(task, "adding to end of list");
-	list_add(&tsk_rt(task)->slack_list, &slack_candidates);
+	list_add(&task_data(task)->slack_list, &slack_candidates);
 }
 
 static struct task_struct* next_eligible_hrt(hrt_server_t*);
@@ -264,7 +292,7 @@ static void donate_slack(server_t *donator, struct task_struct *was_scheduled)
 
 	TRACE_SERVER_SUB(donator, "donated %llu slack", TIME(donator->budget));
 	sched_trace_action(was_scheduled, 9);
-	slack = kmalloc(sizeof(server_t), GFP_ATOMIC);
+	slack = server_alloc(GFP_ATOMIC);
 	server_init(slack, donator->id, donator->budget,
 		    donator->period, 0);
 	slack->type = S_SLACK;
@@ -286,8 +314,7 @@ static struct task_struct* pick_next_slack(server_t *slack, cpu_entry_t *entry)
 	if (!slack)
 		goto out;
 	if (!list_empty(&slack_candidates)) {
-		rv = list_entry(slack_candidates.next, struct task_struct,
-				rt_param.slack_list);
+		rv = get_candidate(slack_candidates.next);
 		TRACE_TASK_SUB(rv, "is next slack");
 	} else if (entry && entry->linked &&
 		   entry->linked_server->type == S_SLACK) {
@@ -297,25 +324,29 @@ static struct task_struct* pick_next_slack(server_t *slack, cpu_entry_t *entry)
 	return rv;
 }
 
-static void take_next_slack(struct task_struct *task)
+static void take_slack_candidate(struct task_struct *task)
 {
-	if (tsk_rt(task)->slack_candidate) {
-		TRACE_TASK_SUB(task, "deleting slack");
-		list_del(&tsk_rt(task)->slack_list);
-		tsk_rt(task)->slack_candidate = 0;
+	if (head_in_list(&task_data(task)->slack_list)) {
+		TRACE_TASK_SUB(task, "deleting candidate");
+		list_del_init(&task_data(task)->slack_list);
 	} else {
-		TRACE_TASK_SUB(task, "can't delete slack");
+		TRACE_TASK_SUB(task, "can't delete candidate");
 	}
 }
 
 static void check_slack_candidate(struct task_struct *task)
 {
 	TRACE_TASK_SUB(task, "checking");
-	if (is_srt(task) && task_srt_server(task)->job_no >=
-	    task_job_no(task) && lt_after(get_release(task), litmus_clock()) &&
+	if (is_srt(task) &&
+	    /* The SRT task is not ahead of its server */
+	    task_srt_server(task)->job_no >= task_job_no(task) &&
+	    /* The task has yet to be released */
+	    lt_after(get_release(task), litmus_clock()) &&
+	    /* The task didn't just complete */
 	    get_rt_flags(task) != RT_F_SLEEP &&
-	    !tsk_rt(task)->slack_candidate)
-	{
+	    /* The task hasn't already been added to the list */
+	    !head_in_list(&task_data(task)->slack_list)) {
+
 		add_slack_candidate(task);
 		sched_trace_action(task, 8);
 	}
@@ -454,8 +485,8 @@ static void slack_timer_arm(hrt_server_t *hrt_server)
 	entry = container_of(hrt_server, cpu_entry_t, hrt_server);
 
 #ifdef SLACK_ON_MASTER
-	if (release_master != NO_CPU)
-		cpu = release_master;
+	if (edf_hsb_release_master != NO_CPU)
+		cpu = edf_hsb_release_master;
 	else
 #endif
 		cpu = entry->cpu;
@@ -548,12 +579,14 @@ static noinline void link_server(cpu_entry_t *entry,
 	if (next_server->type == S_SRT) {
 		TRACE_TASK_SUB(entry->linked, "resuming SRT server,"
 			       "budget*: %llu, exec_time: %llu, deadline: %llu,"
-			       "job_no: %d",
+			       "tjob_no: %d, sjob_no: %d sdead: %llu",
 			       next_server->budget, get_exec_time(entry->linked),
-			       get_deadline(entry->linked), next_server->job_no);
+			       get_deadline(entry->linked),
+			       task_job_no(entry->linked),
+			       next_server->job_no,
+			       next_server->deadline);
 		BUG_ON(lt_after(next_server->budget,
 				get_exec_cost(entry->linked)));
-
 		BUG_ON(next_server->job_no < task_job_no(entry->linked));
 		BUG_ON(lt_after(get_deadline(entry->linked),
 				next_server->deadline));
@@ -563,10 +596,10 @@ static noinline void link_server(cpu_entry_t *entry,
 		slack_timer_cancel(&entry->hrt_server);
 	} else if (next_server->type == S_SLACK) {
 		/* Should have already been removed */
-		BUG_ON(tsk_rt(entry->linked)->slack_candidate);
+		BUG_ON(head_in_list(&task_data(entry->linked)->slack_list));
+
 		TRACE_SERVER_SUB(next_server, "linking slack server");
 		sched_trace_action(entry->linked, 5);
-		tsk_rt(entry->linked)->slack_server = next_server;
 	} else { /* BE */
 		BUG_ON(bheap_node_in_heap(next_server->hn));
 		sched_trace_action(entry->linked, 200 + next_server->id);
@@ -610,10 +643,6 @@ static noinline void unlink_server(cpu_entry_t *entry,
 
 	}
 
-	if (entry->linked_server->type == S_SLACK) {
-		tsk_rt(entry->linked)->slack_server = NULL;
-	}
-
 	/* Requeue SRT servers, they will be garbage collected later */
 	if (entry->linked_server->type == S_SLACK &&
 	    next_server != entry->linked_server &&
@@ -864,17 +893,17 @@ static struct task_struct* next_eligible_srt(void)
 	 * to overutilization of the system.
 	 */
 	if (next_srt) {
-		srt_server = tsk_rt(next_srt)->plugin_data;
+		srt_server = task_srt_server(next_srt);
 		if (srt_server->deadline < get_deadline(next_srt)) {
 			TRACE_SUB("catching up SRT to %llu",
 				  get_release(next_srt));
 			server_release_at(srt_server, get_release(next_srt));
 			srt_server->job_no = task_job_no(next_srt);
 		}
-	}
 
-	if (next_srt && tsk_rt(next_srt)->slack_candidate)
-		take_next_slack(next_srt);
+		/* Ensure that no slack server will try to schedule this */
+		take_slack_candidate(next_srt);
+	}
 
 	return next_srt;
 }
@@ -1016,6 +1045,8 @@ static void preempt(cpu_entry_t *entry, struct task_struct *next,
 {
 	rt_domain_t *domain;
 
+	BUG_ON(next == entry->linked);
+
 	if (entry->linked) {
 		domain = get_rt_domain(entry, entry->linked);
 		requeue(entry->linked, domain, entry->linked_server);
@@ -1042,12 +1073,17 @@ static cpu_entry_t* check_for_slack_preempt(struct task_struct *task,
 					    cpu_entry_t *next_entry)
 {
 	cpu_entry_t *preempted = NULL;
-	cpu_entry_t *entry = task_linked_entry(task);
-	server_t *slack_server = task_slack_server(task);
+	cpu_entry_t *entry = NULL;
+	server_t *slack_server = NULL;
 
 	BUG_ON(!is_srt(task));
 
-	if (!slack_server || !slack_server->running)
+	if (tsk_rt(task)->linked_on != NO_CPU) {
+		entry = task_linked_entry(task);
+		if (entry->linked_server->type == S_SLACK)
+			slack_server = entry->linked_server;
+	}
+	if (!slack_server)
 		goto out;
 
 	TRACE_TASK_SUB(task, "slack preempt");
@@ -1056,7 +1092,7 @@ static cpu_entry_t* check_for_slack_preempt(struct task_struct *task,
 		preempted = entry;
 
 	unlink(task);
-	take_next_slack(task);
+	take_slack_candidate(task);
 out:
 	return preempted;
 }
@@ -1111,8 +1147,8 @@ static void edf_hsb_pick_next(cpu_entry_t *entry)
 	next_slack = pick_next_slack(slack_server, NULL);
 
 	if (next_slack) {
-		take_next_slack(next_slack);
-		list_del(&slack_server->list);
+		take_slack_candidate(next_slack);
+		list_del_init(&slack_server->list);
 		TRACE_TASK_SUB(next_slack, "taking");
 		link_task_to_cpu(entry, next_slack, slack_server);
 	}
@@ -1211,7 +1247,7 @@ static void check_for_global_preempt(void)
 		/* Swap two slack servers here */
 		if (slack_server && entry->linked_server->type == S_SLACK) {
 			TRACE_SUB("9.5");
-			list_del(&slack_server->list);
+			list_del_init(&slack_server->list);
 			unlink_server(entry, slack_server);
 			link_server(entry, slack_server);
 			update_cpu_position(entry);
@@ -1242,7 +1278,7 @@ static void check_for_global_preempt(void)
 		} else { /* Slack */
 			next_task = next_slack;
 			next_server = slack_server;
-			list_del(&slack_server->list);
+			list_del_init(&slack_server->list);
 		}
 		BUG_ON(!next_task);
 		TRACE_TASK_SUB(next_task, "preempting on P%d", entry->cpu);
@@ -1250,7 +1286,7 @@ static void check_for_global_preempt(void)
 		if (next_server != slack_server && is_queued(next_task))
 			remove(get_rt_domain(entry, next_task), next_task);
 		else if (next_slack)
-			take_next_slack(next_slack);
+			take_slack_candidate(next_slack);
 		preempt(entry, next_task, next_server);
 
 loop_end:
@@ -1522,8 +1558,8 @@ static void server_completed(server_t *server, struct task_struct *task)
 	requeue(task, get_rt_domain(entry, task), server);
 
 	/* Need to pick the next task to run */
-	check_for_global_preempt();
-	if (!entry->linked)
+	edf_hsb_pick_next(entry);
+	if (!entry->linked || entry->linked != task)
 		preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
@@ -1542,7 +1578,7 @@ static void hrt_server_released(server_t *server)
 	/* Boot off an HRT task which has become uneligible */
 	if (entry->linked && is_hrt(entry->linked) &&
 	    !is_eligible(entry->linked, hrt_server)) {
-
+		/* TODO: necessary?? */
 		requeue(entry->linked, &hrt_server->hrt_domain,
 			entry->linked_server);
 		unlink(entry->linked);
@@ -1622,7 +1658,7 @@ static int admit_be_server(unsigned long long wcet,
 		goto out;
 	}
 
-	be_server = kmalloc(sizeof(server_t), GFP_ATOMIC);
+	be_server = server_alloc(GFP_ATOMIC);
 	server_init(be_server, BE_SERVER_BASE + ++curr_be,
 		    wcet, period, 1);
 	be_server->type = S_BE;
@@ -1662,13 +1698,12 @@ static void stop_be_servers(void)
 	list_for_each_safe(pos, safe, &be_servers) {
 		be_server = list_entry(pos, server_t, list);
 
-		list_del(pos);
+		list_del_init(pos);
 		if (bheap_node_in_heap(be_server->hn))
 			bheap_delete(server_order, &be_ready_servers,
 				     be_server->hn);
-
-		kfree(be_server->hn);
-		kfree(be_server);
+		server_destroy(be_server);
+		server_free(be_server);
 	}
 }
 
@@ -1781,8 +1816,8 @@ static void start_servers(lt_t time)
 		   cpu, TIME(slack_fire), &entry->hrt_server.slack_timer);
 
 #ifdef SLACK_ON_MASTER
-	if (release_master != NO_CPU)
-		slack_cpu = release_master;
+	if (edf_hsb_release_master != NO_CPU)
+		slack_cpu = edf_hsb_release_master;
 	else
 #endif
 		slack_cpu = cpu;
@@ -1827,11 +1862,11 @@ static long edf_hsb_activate_plugin(void)
 #endif
 
 #ifdef CONFIG_RELEASE_MASTER
-	release_master = atomic_read(&release_master_cpu);
+	edf_hsb_release_master = atomic_read(&release_master_cpu);
 #else
-	release_master = NO_CPU;
+	edf_hsb_release_master = NO_CPU;
 #endif
-	server_domain.release_master = release_master;
+	server_domain.release_master = edf_hsb_release_master;
 
 	TRACE("activating EDF-HSB plugin.\n");
 	return 0;
@@ -1915,8 +1950,11 @@ static void edf_hsb_task_exit(struct task_struct *task)
 		entry->scheduled = NULL;
 		tsk_rt(task)->scheduled_on = NO_CPU;
 	}
-	if (is_srt(task))
-		kfree(tsk_rt(task)->plugin_data);
+	if (is_srt(task)) {
+		server_destroy(task_srt_server(task));
+		server_free(task_srt_server(task));
+		task_data_free(tsk_rt(task)->plugin_data);
+	}
 
 	raw_spin_unlock_irqrestore(global_lock, flags);
 }
@@ -1996,7 +2034,8 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
 static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
 {
 	unsigned long flags;
-	server_t *srt_server = NULL;
+	server_t *srt_server;
+	task_data_t *data = NULL;
 	cpu_entry_t *entry = task_sched_entry(task);
 
 	TRACE_TASK(task, "edf_hsb: task new at %llu\n", TIME(litmus_clock()));
@@ -2006,17 +2045,21 @@ static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
 	/* Setup job parameters */
 	release_at(task, litmus_clock());
 
-	/* Create struct to store SRT server state on suspension */
 	if (is_srt(task)) {
-		srt_server = kmalloc(sizeof(server_t), GFP_ATOMIC);
+		/* Create SRT server */
+		srt_server = server_alloc(GFP_ATOMIC);
 		server_init(srt_server, task->pid, get_exec_cost(task),
 			    get_rt_period(task), 0);
 		srt_server->type = S_SRT;
 		srt_server->data = task;
-		srt_server->job_no = tsk_rt(task)->job_params.job_no;
-		server_release_at(srt_server, get_release(task));
+		srt_server->job_no = 1;
+
+		data = task_data_alloc(GFP_ATOMIC);
+		data->owner = task;
+		data->srt_server = srt_server;
+		INIT_LIST_HEAD(&data->slack_list);
 	}
-	tsk_rt(task)->plugin_data = srt_server;
+	tsk_rt(task)->plugin_data = data;
 
 	/* Already running, update the cpu entry.
 	 * This tends to happen when the first tasks enter the system.
@@ -2122,6 +2165,8 @@ static int __init init_edf_hsb(void)
 				 admit_hrt_server, list_hrt_servers,
 				 stop_hrt_servers);
 
+	task_data_cache = KMEM_CACHE(task_data, SLAB_PANIC);
+
 	/* Global domains */
 	edf_domain_init(&srt_domain, NULL, release_srt_jobs);
 	rt_domain_init(&be_domain, be_ready_order,
@@ -2165,11 +2210,25 @@ static int __init init_edf_hsb(void)
 
 static void exit_edf_hsb(void)
 {
+	int cpu;
+	cpu_entry_t *entry;
+
+	stop_be_servers();
+	stop_hrt_servers();
+
 	server_proc_exit(edf_hsb_proc_dir, BE_PROC_NAME);
 	server_proc_exit(edf_hsb_proc_dir, HRT_PROC_NAME);
 
+	server_domain_destroy(&server_domain);
+
+	for_each_online_cpu(cpu) {
+		entry = &per_cpu(cpu_entries, cpu);
+		server_destroy(&entry->hrt_server.server);
+	}
+
 	if (edf_hsb_proc_dir) {
 		remove_plugin_proc_dir(&edf_hsb_plugin);
+		/* TODO: is this wrong? */
 		edf_hsb_proc_dir = NULL;
 	}
 }
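A recurring pattern in this file is the switch from list_del() to list_del_init(), paired with the rewritten head_in_list() test. list_del() poisons the node's pointers, so membership cannot be re-tested afterwards; list_del_init() relinks the node to itself, which is exactly the state head_in_list() now checks for. A small sketch of the invariant (assuming the usual include/linux/list.h semantics; some_queue is a hypothetical list head):

struct list_head node;

INIT_LIST_HEAD(&node);		/* node.next == node.prev == &node */
/* head_in_list(&node) == 0: self-linked means "not on any list" */

list_add(&node, &some_queue);
/* head_in_list(&node) == 1 */

list_del_init(&node);		/* unlink AND re-self-link */
/* head_in_list(&node) == 0 again, so a later take_slack_candidate()
 * can test membership safely; plain list_del() would have left
 * poisoned pointers behind and made the test undefined. */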
diff --git a/litmus/servers.c b/litmus/servers.c
index 25d0970abbd0..4011bc3c144b 100644
--- a/litmus/servers.c
+++ b/litmus/servers.c
@@ -13,7 +13,7 @@
 #include <litmus/sched_trace.h>
 #include <litmus/servers.h>
 
-//#define DEBUG_SERVERS
+#define DEBUG_SERVERS
 
 /* Not working */
 /* #define COMPLETION_ON_MASTER */
@@ -65,6 +65,10 @@
 /* Used to run a server on a remote CPU */
 DEFINE_PER_CPU(struct hrtimer_start_on_info, server_cpu_infos);
 
+/* Memory slabs for servers */
+struct kmem_cache *server_release_cache;
+struct kmem_cache *server_cache;
+
 typedef struct proc_read_args {
 	char *page;
 	int length;
@@ -240,6 +244,11 @@ static server_release_heap_t* release_heap_alloc(int gfp_flags)
 	return rh;
 }
 
+static void release_heap_free(server_release_heap_t* rh)
+{
+	kmem_cache_free(server_release_cache, rh);
+}
+
 void server_init(server_t *server, int id, lt_t wcet, lt_t period, int grouped)
 {
 	server->id = id;
@@ -257,7 +266,7 @@ void server_init(server_t *server, int id, lt_t wcet, lt_t period, int grouped)
 
 	server->data = NULL;
 
-	server->hn = kmalloc(sizeof(struct bheap_node), GFP_ATOMIC);
+	server->hn = bheap_node_alloc(GFP_ATOMIC);
 	bheap_node_init(&server->hn, server);
 	INIT_LIST_HEAD(&server->list);
 	server->release_heap = NULL;
@@ -270,12 +279,22 @@ void server_init(server_t *server, int id, lt_t wcet, lt_t period, int grouped)
 
 void server_destroy(server_t *server)
 {
-	kfree(server->hn);
+	bheap_node_free(server->hn);
 	if (server->release_heap) {
-		kfree(server->release_heap);
+		release_heap_free(server->release_heap);
 	}
 }
 
+server_t* server_alloc(int gfp_flags)
+{
+	return kmem_cache_alloc(server_cache, gfp_flags);
+}
+
+void server_free(server_t *server)
+{
+	kmem_cache_free(server_cache, server);
+}
+
 /*
  * Handles subtraction of lt_t without underflows.
  */
@@ -586,7 +605,7 @@ void server_domain_init(server_domain_t *domain,
 	}
 }
 
-void server_domain_exit(server_domain_t *domain)
+void server_domain_destroy(server_domain_t *domain)
 {
 	kfree(domain->completion_timers);
 	kfree(domain->running);
@@ -801,3 +820,19 @@ int add_server_release(server_t *server, server_domain_t *domain)
 	list_add(&server->release_list, &domain->tobe_released);
 	return arm_release_timer(domain);
 }
+
+static void init_servers(void)
+{
+	server_cache = KMEM_CACHE(server, SLAB_PANIC);
+	server_release_cache = KMEM_CACHE(server_release_heap, SLAB_PANIC);
+}
+
+static void exit_servers(void)
+{
+	kmem_cache_destroy(server_cache);
+	kmem_cache_destroy(server_release_cache);
+}
+
+
+module_init(init_servers);
+module_exit(exit_servers);
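One caveat with the new hooks: module_init() expects an initcall returning int, so a void init_servers() will not compile cleanly on most kernels. The conventional form of the pair would be the following (a sketch of the standard kernel idiom, not taken from this patch):

static int __init init_servers(void)
{
	server_cache = KMEM_CACHE(server, SLAB_PANIC);
	server_release_cache = KMEM_CACHE(server_release_heap, SLAB_PANIC);
	return 0;	/* module_init requires an int-returning initcall */
}

static void __exit exit_servers(void)
{
	kmem_cache_destroy(server_cache);
	kmem_cache_destroy(server_release_cache);
}

module_init(init_servers);
module_exit(exit_servers);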