author	Jonathan Herman <hermanjl@cs.unc.edu>	2010-11-27 02:21:09 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2010-11-27 02:21:09 -0500
commit	ea13c3eeabc82d1d44235dddee9fe65b16adb17e (patch)
tree	58d4b6c184191c6ebdc69f2ddfedeb8135fdaebd /litmus/sched_edf_hsb.c
parent	bdf07839f7a29dac9a8568d1fbd99985e1df2066 (diff)
Added code to handle HRT and SRT tasks. Fixed define in litmus.h
Diffstat (limited to 'litmus/sched_edf_hsb.c')
-rwxr-xr-x	litmus/sched_edf_hsb.c	564
1 file changed, 423 insertions, 141 deletions
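
Editor's note: this commit introduces a per-CPU hard real-time (HRT) server whose budget is replenished to its WCET once per period by a refill hrtimer, a second hrtimer that charges consumed execution time against the server, and an rt_domain_t parameter threaded through requeue()/unlink() so HRT and SRT tasks live in separate domains. The standalone userspace C sketch below illustrates the replenishment and eligibility rule the patch implements; it is an editor's illustration only, and the toy_* names are hypothetical stand-ins for the kernel structures, not LITMUS^RT API.

/* toy_hsb.c -- minimal sketch of the HRT server budget scheme (illustrative only) */
#include <stdio.h>

typedef unsigned long long lt_t;	/* time values, as in litmus */

struct toy_server {		/* hypothetical stand-in for struct hrt_server */
	lt_t wcet;		/* budget granted each period */
	lt_t period;
	lt_t budget;		/* budget left in the current period */
	lt_t deadline;		/* end of the current period */
};

/* What refill_timer_fire does: restore the budget, push the deadline out. */
static void toy_refill(struct toy_server *s)
{
	s->budget = s->wcet;
	s->deadline += s->period;
}

/* What next_eligible_task checks: budget remains and the task's deadline
 * falls before the server's own deadline. */
static int toy_eligible(const struct toy_server *s, lt_t task_deadline)
{
	return s->budget > 0 && task_deadline < s->deadline;
}

int main(void)
{
	struct toy_server s = { .wcet = 2, .period = 5, .budget = 2, .deadline = 5 };
	lt_t now;

	for (now = 0; now < 15; now++) {
		if (now == s.deadline)
			toy_refill(&s);
		if (toy_eligible(&s, now + 1)) {
			s.budget--;	/* one tick of HRT execution consumed */
			printf("t=%llu: HRT runs, server budget left %llu\n",
			       now, s.budget);
		} else {
			printf("t=%llu: server ineligible, SRT/background runs\n",
			       now);
		}
	}
	return 0;
}
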
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index a539c8426e97..19c8f9cd2501 100755
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -8,17 +8,23 @@
 #include <linux/time.h>
 #include <linux/string.h>
 #include <linux/sched.h>
+#include <linux/hrtimer.h>
 
 #include <litmus/litmus.h>
-#include <litmus/jobs.h>
 #include <litmus/bheap.h>
-#include <litmus/sched_plugin.h>
+#include <litmus/jobs.h>
 #include <litmus/litmus_proc.h>
+#include <litmus/sched_plugin.h>
 #include <litmus/edf_common.h>
 #include <litmus/preempt.h>
 #include <litmus/sched_trace.h>
 
-/* forward declaration so the below macro works */
+
+static noinline void requeue(struct task_struct*, rt_domain_t*);
+static noinline void unlink(struct task_struct*, rt_domain_t*);
+static enum hrtimer_restart budget_timer_fire(struct hrtimer*);
+static enum hrtimer_restart refill_timer_fire(struct hrtimer*);
+static void edf_hsb_release_jobs(rt_domain_t*, struct bheap*);
 static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp;
 #define is_active_plugin (litmus == &edf_hsb_plugin)
 
@@ -29,11 +35,23 @@ struct hrt_server {
 	lt_t wcet;
 	lt_t budget;
 	lt_t period;
+	lt_t deadline;
 
 	rt_domain_t domain;
 
 	/* lock for setting wcet and period */
 	rwlock_t param_lock;
+
+	/* timer for refilling the budget */
+	struct hrtimer refill_timer;
+	int refill_armed;
+
+	/* timer for decrementing the budget */
+	struct hrtimer budget_timer;
+	int budget_armed;
+
+	/* stores a task's initial remaining budget */
+	lt_t task_budget_rem;
 };
 
 struct cpu_entry {
@@ -43,7 +61,7 @@ struct cpu_entry {
 };
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct hrt_server, hrt_servers);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_entry, cpu_entries);
 static rt_domain_t srt_domain;
 
 /*
@@ -70,9 +88,10 @@ static int check_hrt_servers_initialized(void)
 	return !all_init;
 }
 
-/* link_task_to_cpu - Update the link of a CPU.
- * Handles the case where the to-be-linked task is already
- * scheduled on a different CPU.
+/*
+ * Update the link of a CPU.
+ * Handles the case where the to-be-linked task is already scheduled on
+ * a different CPU.
  */
 static noinline void link_task_to_cpu(struct task_struct *linked,
 				      struct cpu_entry *entry)
@@ -83,12 +102,12 @@ static noinline void link_task_to_cpu(struct task_struct *linked,
 
 	BUG_ON(linked && !is_realtime(linked));
 
-	/* Currently linked task is set to be unlinked. */
+	/* Currently linked task is set to be unlinked */
 	if (entry->linked) {
 		entry->linked->rt_param.linked_on = NO_CPU;
 	}
 
-	/* Link new task to CPU. */
+	/* Link new task to CPU */
 	if (linked) {
 		set_rt_flags(linked, RT_F_RUNNING);
 		/* Handle task is already scheduled somewhere! */
@@ -123,33 +142,102 @@ static noinline void link_task_to_cpu(struct task_struct *linked,
 #endif
 }
 
-static noinline void requeue_srt(struct task_struct* task)
+/*
+ * Updates a task's properties and requeues it, if necessary.
+ */
+static noinline void job_completion(struct task_struct *task,
+				    int forced, rt_domain_t *domain)
+{
+	sched_trace_task_completion(task, forced);
+	TRACE_TASK(task, "job_completion().\n");
+
+	BUG_ON(task == NULL);
+
+	set_rt_flags(task, RT_F_SLEEP);
+	prepare_for_next_period(task);
+
+	if (is_released(task, litmus_clock()))
+		sched_trace_task_release(task);
+
+	unlink(task, domain);
+
+	/* Requeue the task if it is not blocking */
+	if (is_running(task))
+		requeue(task, domain);
+}
+
+/*
+ * If the server is eligible, returns the next eligible task. If the
+ * server is ineligible or there are no eligible tasks, returns NULL.
+ */
+static struct task_struct* next_eligible_task(struct hrt_server *server)
+{
+	struct task_struct *task;
+
+	write_lock_irq(&server->param_lock);
+
+	task = NULL;
+	if (server->budget > 0)
+		task = __peek_ready(&server->domain);
+
+	if (task) {
+		if (get_deadline(task) >= server->deadline)
+			task = NULL;
+		else
+			remove(&server->domain, task);
+	}
+
+	write_unlock_irq(&server->param_lock);
+
+	return task;
+}
+
+/*
+ * Adds a task to the appropriate queue (ready / release) in a domain.
+ */
+static noinline void requeue(struct task_struct *task, rt_domain_t *domain)
 {
 	BUG_ON(!task);
 	BUG_ON(is_queued(task));
 
 	if (is_released(task, litmus_clock())) {
-		__add_ready(&srt_domain, task);
+		__add_ready(domain, task);
 	} else {
 		/* Task needs to wait until it is released */
-		add_release(&srt_domain, task);
+		add_release(domain, task);
 	}
 }
 
 void reset_hrt_servers(void)
 {
-	int cpu;
 	struct hrt_server *hrt_server;
+	struct hrtimer *budget_timer, *refill_timer;
+	int cpu;
+
+	printk(KERN_INFO "Resetting HRT servers\n");
 
 	for_each_online_cpu(cpu) {
 		hrt_server = &per_cpu(hrt_servers, cpu);
+		budget_timer = &hrt_server->budget_timer;
+		refill_timer = &hrt_server->refill_timer;
 		rwlock_init(&hrt_server->param_lock);
 
 		hrt_server->cpu = cpu;
 		hrt_server->wcet = hrt_server->period = hrt_server->budget = 0;
 
-		/* TODO correctly */
-		edf_domain_init(&hrt_server->domain, NULL, NULL);
+		edf_domain_init(&hrt_server->domain, NULL,
+				edf_hsb_release_jobs);
+
+		hrtimer_init(budget_timer,
+			     CLOCK_MONOTONIC,
+			     HRTIMER_MODE_ABS);
+		budget_timer->function = budget_timer_fire;
+
+		hrtimer_init(refill_timer,
+			     CLOCK_MONOTONIC,
+			     HRTIMER_MODE_ABS);
+		refill_timer->function = refill_timer_fire;
 	}
 }
 
@@ -195,6 +283,7 @@ int set_hrt_params(unsigned long long cpu, unsigned long long wcet,
 	hrt_server->wcet = wcet * NSEC_PER_MSEC;
 	hrt_server->period = period * NSEC_PER_MSEC;
 
+
 	/*
 	 * TODO probably need a function to reset hrtimers and stuff here
 	 */
@@ -204,9 +293,9 @@ out:
 	return rv;
 }
 
-static noinline void unlink(struct task_struct *task)
+static noinline void unlink(struct task_struct *task, rt_domain_t *domain)
 {
 	struct cpu_entry *entry;
 
 	BUG_ON(task == NULL);
 
@@ -222,74 +311,219 @@ static noinline void unlink(struct task_struct *task)
 		task->rt_param.linked_on = NO_CPU;
 		link_task_to_cpu(NULL, entry);
 	} else if (is_queued(task)) {
-		// if (srt_domain.ready_queue.head != NULL)
-		remove(&srt_domain, task);
+		/* This is an interesting situation: t is scheduled,
+		 * but was just recently unlinked. It cannot be
+		 * linked anywhere else (because then it would have
+		 * been relinked to this CPU), thus it must be in some
+		 * queue. We must remove it from the list in this
+		 * case.
+		 */
+		remove(domain, task);
 	}
 }
 
-static noinline void job_completion(struct task_struct *task, int forced)
+
+/******************************************************************************
+ * Timer methods
+ ******************************************************************************/
+
+/*
+ * Arms a timer to go off when a task will exhaust its budget or its server
+ * can no longer execute.
+ */
+static inline void budget_timer_arm(struct hrt_server *server,
+				    struct cpu_entry *entry)
 {
-	sched_trace_task_completion(task, forced);
-	TRACE_TASK(task, "job_completion(),\n");
+	lt_t deadline, remaining;
 
-	BUG_ON(task == NULL);
+	BUG_ON(server->budget_armed);
 
-	set_rt_flags(task, RT_F_SLEEP);
-	prepare_for_next_period(task);
+	/* Fire when the task would exhaust its own budget, but never
+	 * later than the server's deadline or budget allows. */
+	remaining = budget_remaining(entry->scheduled);
+	deadline = litmus_clock() + remaining;
+	if (deadline > server->deadline || remaining > server->budget)
+		deadline = server->deadline;
 
-	if (is_released(task, litmus_clock()))
-		sched_trace_task_release(task);
+	TRACE_TASK(entry->scheduled,
+		   "budget arm, deadline: %llu, remaining: %llu\n",
+		   deadline, remaining);
 
-	unlink(task);
+	server->task_budget_rem = remaining;
+	server->budget_armed = 1;
+	__hrtimer_start_range_ns(&server->budget_timer,
+				 ns_to_ktime(deadline),
+				 0 /* delta */,
+				 HRTIMER_MODE_ABS_PINNED,
+				 0 /* no wakeup */);
+}
 
-	/* Requeue the task if it is not blocking */
-	if (is_running(task))
-		requeue_srt(task);
+/*
+ * Decrements a server's budget by the time just consumed by a task.
+ * Do locks need to be involved here?
+ */
+static enum hrtimer_restart budget_timer_fire(struct hrtimer *timer)
+{
+	struct cpu_entry *entry;
+	struct hrt_server *server;
+	struct task_struct *curr;
+
+	server = &__get_cpu_var(hrt_servers);
+	entry = &__get_cpu_var(cpu_entries);
+	curr = entry->scheduled;
+
+	if (curr) {
+		server->budget -= server->task_budget_rem -
+			budget_remaining(curr);
+
+		TRACE_TASK(curr, "server budget: %llu\n", server->budget);
+	}
+
+	server->task_budget_rem = 0;
+	server->budget_armed = 0;
+	return HRTIMER_NORESTART;
 }
 
+/*
+ * Starts a periodic timer to replenish a server's budget.
+ */
+static inline void refill_timer_arm(struct hrt_server *server)
+{
+	lt_t deadline;
 
+	if (check_hrt_servers_initialized())
+		return;
+
+	BUG_ON(server->refill_armed);
+
+	server->refill_armed = 1;
+	deadline = litmus_clock() + server->period;
+	__hrtimer_start_range_ns(&server->refill_timer,
+				 ns_to_ktime(deadline),
+				 0 /* delta */,
+				 HRTIMER_MODE_ABS_PINNED,
+				 0 /* no wakeup */);
+}
+
+/*
+ * Refills a server's budget and rearms the timer for the next period.
+ */
+static enum hrtimer_restart refill_timer_fire(struct hrtimer *timer)
+{
+	struct hrt_server *server;
+
+	server = &__get_cpu_var(hrt_servers);
+	server->budget = server->wcet;
+	server->deadline += server->period;
+
+	hrtimer_set_expires(timer, ns_to_ktime(server->deadline));
+	return HRTIMER_RESTART;
+}
 
 /******************************************************************************
  * Plugin methods
  ******************************************************************************/
 
+/*
+ * There is a small race condition in checking that all the server parameters
+ * are valid, then having one of them change before setting the active plugin,
+ * but oh well.
+ */
+static long edf_hsb_activate_plugin(void)
+{
+	struct cpu_entry *entry;
+	struct hrt_server *server;
+	int cpu;
+	long rv = 0;
+
+	if (check_hrt_servers_initialized()) {
+		rv = -EINVAL;
+	} else {
+		for (cpu = 0; cpu < num_online_cpus(); cpu++) {
+			entry = &per_cpu(cpu_entries, cpu);
+			entry->cpu = cpu;
+
+			server = &per_cpu(hrt_servers, cpu);
+			/* refill_timer_arm(server); */
+		}
+
+		edf_domain_init(&srt_domain, NULL, edf_hsb_release_jobs);
+	}
+
+	return rv;
+}
 
 static long edf_hsb_admit_task(struct task_struct *task)
 {
 	return 0;
 }
 
-static void edf_hsb_release_jobs(rt_domain_t *rt, struct bheap *tasks)
+static long edf_hsb_deactivate_plugin(void)
+{
+	reset_hrt_servers();
+	return 0;
+}
+
+/*
+ * Simply merges a heap of tasks into a domain's ready queue.
+ * This is used by the EDF domains.
+ */
+static void edf_hsb_release_jobs(rt_domain_t *domain, struct bheap *tasks)
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
-	__merge_ready(rt, tasks);
+	raw_spin_lock_irqsave(&domain->ready_lock, flags);
+	__merge_ready(domain, tasks);
 	/* check_for_preemptions(); */
-	raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
+	raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
 }
 
+/*
+ * If the last task was HRT and so is the next, this should never touch the
+ * SRT domain lock.
+ */
 static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
 {
-	struct task_struct *next, *curr_srt;
 	struct cpu_entry *entry;
-	int blocks, exists, out_of_time, sleep;
-
-	entry = &__get_cpu_var(cpu_entries);
-	curr_srt = entry->scheduled;
-
-	raw_spin_lock(&srt_domain.ready_lock);
-
-	BUG_ON(curr_srt && curr_srt != prev);
-	BUG_ON(curr_srt && !is_realtime(prev));
-	BUG_ON(is_realtime(prev) && !entry->scheduled);
+	struct hrt_server *server;
+	struct task_struct *next, *curr, *next_ready;
+	rt_domain_t *hrt_domain, *domain;
+	int blocks, exists, sleep, need_srt_unlock, out_of_time;
+
+	entry = &__get_cpu_var(cpu_entries);
+	server = &__get_cpu_var(hrt_servers);
+	curr = entry->scheduled;
+	domain = NULL;
+	need_srt_unlock = 0;
+	hrt_domain = &server->domain;
+
+	BUG_ON(curr && curr != prev);
+	BUG_ON(curr && !is_realtime(prev));
+	BUG_ON(is_realtime(prev) && !curr);
+	exists = curr != NULL;
+
+	/* Lock the currently executing task's domain */
+	if (exists) {
+		if (is_hrt(curr))
+			domain = hrt_domain;
+		else if (is_srt(curr)) {
+			domain = &srt_domain;
+			need_srt_unlock = 1;
+		}
+		raw_spin_lock(&domain->ready_lock);
+	}
 
 	/* Determine state */
-	exists = curr_srt != NULL;
-	blocks = exists && !is_running(curr_srt);
-	sleep = exists && get_rt_flags(curr_srt) == RT_F_SLEEP;
-	out_of_time = exists && budget_enforced(curr_srt) &&
-		budget_exhausted(curr_srt);
+	sleep = exists && get_rt_flags(curr) == RT_F_SLEEP;
+	out_of_time = exists && budget_enforced(curr) &&
+		budget_exhausted(curr);
+	blocks = exists && !is_running(curr);
+	if (exists && is_hrt(curr))
+		blocks = out_of_time && server->budget > 0;
 
 	if (exists)
 		TRACE_TASK(prev,
@@ -300,39 +534,50 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
 
 	/* Must reschedule blocked task */
 	if (blocks)
-		// resched = 1;
-		unlink(entry->scheduled);
+		unlink(curr, domain);
 
 	if ((out_of_time || sleep) && !blocks)
-		job_completion(curr_srt, !sleep);
+		job_completion(curr, !sleep, domain);
+
+	/* If we weren't already in hrt mode, take the lock */
+	if (!exists || is_srt(curr))
+		raw_spin_lock(&hrt_domain->ready_lock);
 
 	/* If the cpu entry has no linked task, select the next ready task */
-	if (!entry->linked)
-		link_task_to_cpu(__take_ready(&srt_domain), entry);
+	if (!entry->linked) {
+		next_ready = next_eligible_task(server);
+
+		/* Select an SRT task if the server was ineligible */
+		if (!next_ready) {
+			if (!need_srt_unlock) {
+				raw_spin_lock(&srt_domain.ready_lock);
+				need_srt_unlock = 1;
+			}
+			next_ready = __take_ready(&srt_domain);
+		}
+		link_task_to_cpu(next_ready, entry);
+	}
 
 	/* Switch current task if we are in RT mode and have no task
 	 * or if linked is different from scheduled */
 	next = NULL;
-	if (curr_srt != entry->linked) {
-		/* Schedule an unscheduled linked job */
-		if (entry->linked) {
-			entry->linked->rt_param.scheduled_on = entry->cpu;
-			next = entry->linked;
-		}
-		/* Last scheduled task will not be scheduled soon */
-		if (curr_srt) {
-			curr_srt->rt_param.scheduled_on = NO_CPU;
-		}
-	} else {
-		if (exists)
-			next = prev;
+	if (curr != entry->linked && entry->linked) {
+		entry->linked->rt_param.scheduled_on = entry->cpu;
+		next = entry->linked;
+	} else if (exists) {
+		next = prev;
 	}
 
-	/* GSN-EDF does not do this, but it seems like it would make sense */
 	entry->scheduled = next;
-
 	sched_state_task_picked();
-	raw_spin_unlock(&srt_domain.ready_lock);
+
+	/* Timer to decrement server budget */
+	if (next && is_hrt(next))
+		budget_timer_arm(server, entry);
+
+	raw_spin_unlock(&hrt_domain->ready_lock);
+	if (need_srt_unlock)
+		raw_spin_unlock(&srt_domain.ready_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	if (next)
@@ -344,32 +589,68 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
 	return next;
 }
 
+/*
+ * If the blocked task was HRT, cancels the budget update timer
+ * and decrements the task's server's budget.
+ */
 static void edf_hsb_task_block(struct task_struct *task)
 {
-	unsigned long flags;
+	struct hrt_server *server;
+	rt_domain_t *domain;
+	unsigned long flags;
+	int ret;
 
 	TRACE_TASK(task, "block at %llu\n", litmus_clock());
 	BUG_ON(!is_realtime(task));
 
+	if (is_hrt(task)) {
+		server = &per_cpu(hrt_servers, task_cpu(task));
+		domain = &server->domain;
+
+		/* If an hrt task is blocked, we must do the work of the
+		 * budget timer ourselves
+		 */
+		server->budget -= server->task_budget_rem -
+			budget_remaining(task);
+		server->task_budget_rem = 0;
+
+		ret = hrtimer_try_to_cancel(&server->budget_timer);
+		BUG_ON(ret == 0);  /* inactive */
+		BUG_ON(ret == -1); /* running concurrently */
+	} else {
+		domain = &srt_domain;
+	}
+
 	/* Unlink if the task is linked to a CPU */
-	raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
-	unlink(task);
-	raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&domain->ready_lock, flags);
+	unlink(task, domain);
+	raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
 }
 
 static void edf_hsb_task_exit(struct task_struct *task)
 {
-	unsigned long flags;
+	struct hrt_server *server;
+	rt_domain_t *domain;
+	unsigned long flags;
 
 	TRACE_TASK(task, "RIP at %llu\n", litmus_clock());
 	BUG_ON(!is_realtime(task));
 
+	if (is_hrt(task)) {
+		server = &per_cpu(hrt_servers, task_cpu(task));
+		domain = &server->domain;
+	} else {
+		domain = &srt_domain;
+	}
+
 	/* Unlink if the task is linked to a CPU */
-	raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
-	unlink(task);
-	raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&domain->ready_lock, flags);
+	unlink(task, domain);
+	raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
 
 	/* Necessary to set the current entry scheduled to NULL ? */
+	/* entry->scheduled = NULL; */
 }
 
 /*
@@ -377,17 +658,27 @@ static void edf_hsb_task_exit(struct task_struct *task)
  */
 static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
 {
-	unsigned long flags;
 	struct cpu_entry *entry;
+	struct hrt_server *server;
+	rt_domain_t *domain;
+	unsigned long flags;
 
 	TRACE_TASK(task, "edf_hsb: task new, cpu = %d\n",
 		   task->rt_param.task_params.cpu);
 
-	raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
+	if (is_hrt(task)) {
+		server = &per_cpu(hrt_servers, task_cpu(task));
+		domain = &server->domain;
+	} else if (is_srt(task)) {
+		domain = &srt_domain;
+	} else {
+		return;
+	}
+
+	raw_spin_lock_irqsave(&domain->ready_lock, flags);
 
 	/* Setup job parameters */
 	release_at(task, litmus_clock());
-
 	if (running) {
 		entry = &per_cpu(cpu_entries, task_cpu(task));
 		BUG_ON(entry->scheduled);
@@ -400,19 +691,28 @@ static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
 
 	task->rt_param.linked_on = NO_CPU;
 
-	requeue_srt(task);
+	requeue(task, domain);
 
-	raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
+	raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
 }
 
 static void edf_hsb_task_wake_up(struct task_struct *task)
 {
-	unsigned long flags;
-	lt_t now;
+	struct hrt_server *server;
+	rt_domain_t *domain;
+	unsigned long flags;
+	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
+	if (is_hrt(task)) {
+		server = &per_cpu(hrt_servers, task_cpu(task));
+		domain = &server->domain;
+	} else {
+		domain = &srt_domain;
+	}
+
+	raw_spin_lock_irqsave(&domain->ready_lock, flags);
 
 	/* If job resumes after being suspended due to acquiring a semaphore,
 	 * it should never be treated as a job release */
@@ -423,6 +723,7 @@ static void edf_hsb_task_wake_up(struct task_struct *task)
 
 	if (is_tardy(task, now)) {
 		/* Re-release the task */
+		/* Is this a preemption?? */
 		release_at(task, now);
 		sched_trace_task_release(task);
 	} else {
@@ -433,17 +734,18 @@ static void edf_hsb_task_wake_up(struct task_struct *task)
 		}
 	}
 
-	requeue_srt(task);
+	requeue(task, domain);
 
-	raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
+	raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
 }
 
 /*
- * Checks if the current task has experiened and whether or not it needs to
+ * Checks if the current task has expired and whether or not it needs to
  * be preempted.
  */
 static void edf_hsb_tick(struct task_struct *t)
 {
+	/* Reschedule the task if it has run out of time */
 	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
 		if (!is_np(t)) {
 			/* Non-preemptable tasks will be preempted when they
@@ -462,37 +764,6 @@ static void edf_hsb_tick(struct task_struct *t)
 }
 
 
-/*
- * There is a small race condition in checking that all the server parameters
- * are valid, then having one of them change before setting the active plugin,
- * but oh well.
- */
-static long edf_hsb_activate_plugin(void)
-{
-	struct cpu_entry* entry;
-	int cpu;
-	long rv = 0;
-
-	if (check_hrt_servers_initialized())
-		rv = -EINVAL;
-
-	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
-		entry = &per_cpu(cpu_entries, cpu);
-		entry->cpu = cpu;
-	}
-
-
-	edf_domain_init(&srt_domain, NULL, edf_hsb_release_jobs);
-
-	return rv;
-}
-
-static long edf_hsb_deactivate_plugin(void)
-{
-	//reset_hrt_servers();
-	return 0;
-}
-
 /******************************************************************************
  * Proc methods
  ******************************************************************************/
@@ -502,7 +773,8 @@ static struct proc_dir_entry *edf_hsb_proc_dir = NULL, *hrt_server_proc = NULL;
 static int hrt_proc_read(char* page, char **start, off_t off, int count,
 			 int *eof, void *data)
 {
-	int cpu, wcet, period, len = 0;
+	lt_t wcet, period;
+	int cpu, len = 0;
 	struct hrt_server *hrt_server;
 
 	for_each_online_cpu(cpu) {
@@ -510,8 +782,14 @@ static int hrt_proc_read(char* page, char **start, off_t off, int count,
 		hrt_server = &per_cpu(hrt_servers, cpu);
 
 		read_lock_irq(&hrt_server->param_lock);
-		wcet = do_div(hrt_server->wcet, NSEC_PER_MSEC);
-		period = do_div(hrt_server->period, NSEC_PER_MSEC);
+
+		wcet = hrt_server->wcet;
+		period = hrt_server->period;
+
+		/* do_div(x, y) divides x by y in place, leaving the
+		 * quotient in x and returning the remainder */
+		do_div(wcet, NSEC_PER_MSEC);
+		do_div(period, NSEC_PER_MSEC);
+
 		read_unlock_irq(&hrt_server->param_lock);
 
 		len += snprintf(page + len, PAGE_SIZE - len,
@@ -599,7 +877,26 @@ loop_end:
 	return count;
 }
 
-/* actual plugin object */
+#define HRT_SERVER_PROC_NAME "hrt_server"
+
+static void exit_proc(void)
+{
+	if (hrt_server_proc) {
+		remove_proc_entry(HRT_SERVER_PROC_NAME, edf_hsb_proc_dir);
+		hrt_server_proc = NULL;
+	}
+
+	if (edf_hsb_proc_dir) {
+		remove_plugin_proc_dir(&edf_hsb_plugin);
+		edf_hsb_proc_dir = NULL;
+	}
+}
+
+
+/******************************************************************************
+ * Plugin
+ ******************************************************************************/
+
 /* static struct sched_plugin edf_hsb_plugin = { */
 static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp = {
 	.plugin_name		= "EDF-HSB",
@@ -629,21 +926,6 @@ static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp = {
 #endif
 };
 
-#define HRT_SERVER_PROC_NAME "hrt_server"
-
-static void exit_proc(void)
-{
-	if (hrt_server_proc) {
-		remove_proc_entry(HRT_SERVER_PROC_NAME, edf_hsb_proc_dir);
-		hrt_server_proc = NULL;
-	}
-
-	if (edf_hsb_proc_dir) {
-		remove_plugin_proc_dir(&edf_hsb_plugin);
-		edf_hsb_proc_dir = NULL;
-	}
-}
-
 static int __init init_edf_hsb(void)
 {
 	int rv;