aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorBjoern Brandenburg <bb@DS-12.(none)>2007-02-01 23:53:14 -0500
committerBjoern Brandenburg <bb@DS-12.(none)>2007-02-01 23:53:14 -0500
commit212eadc8a7acff695f1c699611681a490b477f65 (patch)
tree9cc68497db17d004af73cb92c9462ab40ce1571a /kernel
parent5ae937f307990e86e5050399ba520ff7e5e327fc (diff)
ported edf_hsb, compiles
Diffstat (limited to 'kernel')
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/litmus.c9
-rw-r--r--kernel/sched_edf_hsb.c1799
3 files changed, 1802 insertions, 8 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 84ce266148..13832e645e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,7 +11,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
11 hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \ 11 hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
12 sched_plugin.o litmus.o sched_trace.o \ 12 sched_plugin.o litmus.o sched_trace.o \
13 edf_common.o fifo_common.o \ 13 edf_common.o fifo_common.o \
14 sched_global_edf.o sched_part_edf.o 14 sched_global_edf.o sched_part_edf.o sched_edf_hsb.o
15 15
16obj-$(CONFIG_STACKTRACE) += stacktrace.o 16obj-$(CONFIG_STACKTRACE) += stacktrace.o
17obj-y += time/ 17obj-y += time/
diff --git a/kernel/litmus.c b/kernel/litmus.c
index 8eb37c8ccb..ac90c28330 100644
--- a/kernel/litmus.c
+++ b/kernel/litmus.c
@@ -349,19 +349,14 @@ static struct sysrq_key_op sysrq_kill_rt_tasks_op = {
349sched_plugin_t *init_global_edf_plugin(void); 349sched_plugin_t *init_global_edf_plugin(void);
350sched_plugin_t *init_global_edf_np_plugin(void); 350sched_plugin_t *init_global_edf_np_plugin(void);
351sched_plugin_t *init_part_edf_plugin(void); 351sched_plugin_t *init_part_edf_plugin(void);
352sched_plugin_t *init_edf_hsb_plugin(void);
352 353
353/* 354/*
354CLEANUP: Add init function when the plugin has been ported. 355CLEANUP: Add init function when the plugin has been ported.
355sched_plugin_t *init_pfair_plugin(void); 356sched_plugin_t *init_pfair_plugin(void);
356sched_plugin_t *init_desync_pfair_plugin(void); 357sched_plugin_t *init_desync_pfair_plugin(void);
357
358
359sched_plugin_t *init_edf_hsb_plugin(void);
360*/ 358*/
361 359
362
363
364
365/* keep everything needed to setup plugins in one place */ 360/* keep everything needed to setup plugins in one place */
366 361
367/* we are lazy, so we use a convention for function naming to fill 362/* we are lazy, so we use a convention for function naming to fill
@@ -381,10 +376,10 @@ static struct {
381 PLUGIN(GLOBAL_EDF_NP, global_edf_np), 376 PLUGIN(GLOBAL_EDF_NP, global_edf_np),
382 PLUGIN(GLOBAL_EDF, global_edf), 377 PLUGIN(GLOBAL_EDF, global_edf),
383 PLUGIN(PART_EDF, part_edf), 378 PLUGIN(PART_EDF, part_edf),
379 PLUGIN(EDF_HSB, edf_hsb)
384/* CLEANUP: Add when ported. 380/* CLEANUP: Add when ported.
385 PLUGIN(PFAIR, pfair), 381 PLUGIN(PFAIR, pfair),
386 PLUGIN(PFAIR_DESYNC, desync_pfair), 382 PLUGIN(PFAIR_DESYNC, desync_pfair),
387 PLUGIN(EDF_HSB, edf_hsb)
388*/ 383*/
389 384
390 /********************************************* 385 /*********************************************
diff --git a/kernel/sched_edf_hsb.c b/kernel/sched_edf_hsb.c
new file mode 100644
index 0000000000..5ba9be8acf
--- /dev/null
+++ b/kernel/sched_edf_hsb.c
@@ -0,0 +1,1799 @@
1/*
2 * kernel/sched_edf_hsb.c
3 *
4 * Implementation of the EDF-HSB scheduler plugin.
5 *
6 */
7
8#include <asm/uaccess.h>
9#include <linux/percpu.h>
10#include <linux/sched.h>
11#include <linux/list.h>
12
13#include <linux/litmus.h>
14#include <linux/sched_plugin.h>
15#include <linux/edf_common.h>
16#include <linux/fifo_common.h>
17#include <linux/sched_trace.h>
18
19/* undefine to remove capacity sharing */
20#define HSB_CAP_SHARE_ENABLED
21
22/* fake server PIDs */
23#define HRT_BASE_PID 50000
24#define SRT_BASE_PID 60000
25
26
27/******************************************************************************/
28/* Capacity queue */
29/******************************************************************************/
30
/* Forward declaration: checks whether a newly queued capacity with the
 * given deadline should preempt some CPU (defined further below). */
int cap_check_resched(jiffie_t deadline);

/* capacity_t - a slice of donated, unused budget.
 * Usable until 'deadline'; 'donor' records the PID of the donating
 * task (0 when donated by a server), used for tracing only.
 */
typedef struct {
	int budget;
	jiffie_t deadline;
	pid_t donor;

	struct list_head list;
} capacity_t;

/* capacity_queue_t - deadline-ordered list of spare capacities,
 * protected by 'lock'. Functions prefixed with "__" expect the caller
 * to hold the lock already.
 */
typedef struct {
	spinlock_t lock;
	struct list_head queue;
} capacity_queue_t;

/* earliest-deadline queued capacity; only valid if queue is non-empty */
#define next_cap(q) list_entry((q)->queue.next, capacity_t, list)
47
48void capacity_queue_init(capacity_queue_t* queue)
49{
50 queue->lock = SPIN_LOCK_UNLOCKED;
51 INIT_LIST_HEAD(&queue->queue);
52}
53
/* __add_capacity - insert cap into the queue in deadline (EDF) order.
 * Scans backwards from the tail so that capacities with equal deadlines
 * keep FIFO order. Caller must hold queue->lock.
 */
void __add_capacity(capacity_queue_t* queue, capacity_t *cap)
{
	struct list_head* pos;
	capacity_t* queued;

	/* find the last entry with an earlier-or-equal deadline and
	 * insert directly behind it */
	list_for_each_prev(pos, &queue->queue) {
		queued = list_entry(pos, capacity_t, list);
		if ( time_before_eq(queued->deadline, cap->deadline)) {
			__list_add(&cap->list, pos, pos->next);
			return;
		}
	}
	/* earliest deadline of all (or empty queue): new head */
	list_add(&cap->list, &queue->queue);
}
68
/* __capacity_available - test whether a usable spare capacity is queued.
 * As a side effect, expired capacities at the head of the queue are
 * unlinked and freed. Caller must hold queue->lock.
 * Returns nonzero iff a non-expired capacity remains queued.
 */
int __capacity_available(capacity_queue_t* queue)
{
	capacity_t *cap;

	while (!list_empty(&queue->queue)) {
		cap = list_entry(queue->queue.next, capacity_t, list);


		if (time_before_eq(cap->deadline, jiffies)) {
			/* deadline has passed: capacity is worthless */
			list_del(queue->queue.next);
			kfree(cap);
			cap = NULL;
		} else
			break;
	}

	return !list_empty(&queue->queue);
}
87
88void __return_capacity(capacity_queue_t* queue, capacity_t *cap)
89{
90 if (!cap->budget || time_before_eq(cap->deadline, jiffies))
91 kfree(cap);
92 else
93 __add_capacity(queue, cap);
94}
95
96
97void return_capacity(capacity_queue_t* queue, capacity_t *cap)
98
99{
100 unsigned long flags;
101
102 if (!cap->budget || time_before_eq(cap->deadline, jiffies))
103 kfree(cap);
104 else {
105 spin_lock_irqsave(&queue->lock, flags);
106 __add_capacity(queue, cap);
107 spin_unlock_irqrestore(&queue->lock, flags);
108 }
109}
110
111
112#define MIN_TIME_DELTA 1
113#define MIN_BUDGET 1
114
115#ifdef HSB_CAP_SHARE_ENABLED
/* release_capacity - donate unused budget to the capacity queue.
 * Allocates a capacity_t (GFP_ATOMIC, may run in scheduler context) and
 * inserts it in deadline order; then checks whether the earliest queued
 * capacity should preempt some CPU. Donations that are too small or
 * (nearly) expired are silently dropped.
 * t, if non-NULL, is the donating task (recorded for tracing).
 */
void release_capacity(capacity_queue_t* queue, unsigned int budget,
		      jiffie_t deadline, struct task_struct* t)
{
	capacity_t* cap;
	unsigned long flags;

	if (deadline >= jiffies + MIN_TIME_DELTA && budget >= MIN_BUDGET) {
		cap = kmalloc(sizeof(capacity_t), GFP_ATOMIC);
		if (cap) {
			cap->budget = budget;
			cap->deadline = deadline;
			if (t)
				cap->donor = t->pid;
			else
				cap->donor = 0;
			spin_lock_irqsave(&queue->lock, flags);
			__add_capacity(queue, cap);
			/* preempt a CPU if the (new) earliest capacity
			 * warrants it */
			cap_check_resched(next_cap(queue)->deadline);
			spin_unlock_irqrestore(&queue->lock, flags);
			if (t)
				sched_trace_capacity_release(t);
		}
	}
	/* allocation failure is tolerated: the capacity is simply lost */
}
140
/* __release_capacity - like release_capacity(), but without locking and
 * without the preemption check. Intended for callers that already hold
 * queue->lock and are inside schedule() (where a resched check would be
 * redundant).
 */
void __release_capacity(capacity_queue_t* queue, unsigned int budget,
			jiffie_t deadline, struct task_struct* t)
{
	capacity_t* cap;

	if (deadline >= jiffies + MIN_TIME_DELTA && budget >= MIN_BUDGET) {
		cap = kmalloc(sizeof(capacity_t), GFP_ATOMIC);
		if (cap) {
			cap->budget = budget;
			cap->deadline = deadline;
			if (t)
				cap->donor = t->pid;
			else
				cap->donor = 0;
			/* no locking, no resched check -- called from schedule */
			__add_capacity(queue, cap);
			if (t)
				sched_trace_capacity_release(t);
		}
	}
}
162
163
/* __take_capacity - remove and return the earliest usable capacity.
 * If deadline_matters is set, only a capacity whose deadline is not
 * later than 'deadline' is taken (returns NULL otherwise). Expired
 * capacities encountered at the head are unlinked and freed. The
 * returned capacity's budget is clipped so it cannot outlast its own
 * deadline. Caller must hold queue->lock.
 * Returns NULL if nothing suitable is queued.
 */
capacity_t* __take_capacity(capacity_queue_t* queue, jiffie_t deadline, int deadline_matters)
{
	capacity_t* cap = NULL;

	while (!list_empty(&queue->queue)) {
		cap = list_entry(queue->queue.next, capacity_t, list);

		if (deadline_matters && time_before(deadline, cap->deadline)) {
			/* earliest capacity is too late for this request */
			cap = NULL;
			break;
		}

		list_del(queue->queue.next);
		if (cap->deadline > jiffies) {
			/* clip budget to the time left until the deadline */
			if (cap->deadline - jiffies < cap->budget)
				cap->budget = cap->deadline - jiffies;
			break;
		}
		/* expired: free and keep looking */
		kfree(cap);
		cap = NULL;
	}

	return cap;
}
#else

/* Capacity sharing disabled: donating budget is a no-op and no spare
 * capacity can ever be taken. These stubs keep all call sites identical
 * in both configurations.
 */
void release_capacity(capacity_queue_t* queue, unsigned int budget,
		      jiffie_t deadline, struct task_struct* t)
{
}

capacity_t* __take_capacity(capacity_queue_t* queue, jiffie_t deadline, int deadline_matters)
{
	return NULL;
}
#endif
201
202
203/******************************************************************************/
204/* server abstractions */
205/******************************************************************************/
206
207
208/* hrt_server_t - Abstraction of a hard real-time server.
209 *
210 * One HRT server per CPU. If it is unused period and wcet may be zero.
211 * HRT servers are strictly periodic and retain their budget.
212 */
typedef struct {
	edf_domain_t domain;	/* per-CPU EDF queue of HRT client tasks */

	unsigned int period;	/* server period; 0 if server unused */
	unsigned int wcet;	/* budget replenished each period; 0 if unused */

	jiffie_t deadline;	/* current server deadline / replenishment time */
	int budget;		/* budget remaining in the current period */
} hrt_server_t;
222
223/* be_server_t - Abstraction of best-effort server.
224 *
225 * This is pretty much only an accounting abstraction.
226 */
typedef struct {
	unsigned int period;	/* server period */
	unsigned int wcet;	/* budget replenished each period */

	jiffie_t deadline;	/* deadline of the current server job */
	jiffie_t release;	/* release time of the current server job */
	int budget;		/* remaining budget of the current job */

	struct list_head list;	/* link in the be ready/release queues */
	pid_t pid;		/* fake PID (SRT_BASE_PID range) for tracing */
} be_server_t;

/* Slack of a server: time to deadline minus remaining budget.
 * Cast to int to allow for negative slack, i.e. tardiness. */
#define server_slack(srv) \
 ( ((int) (srv)->deadline - (int) jiffies) - (int) (srv)->budget )
242
/* cpu_state_t - per-CPU scheduling state; 'lock' protects the fields
 * that other CPUs read (exec_class, cur_deadline, be). */
typedef struct {
	int cpu;		/* CPU this state belongs to */

	hrt_server_t hrt;	/* this CPU's hard real-time server */
	be_server_t* be;	/* BE server currently executing here, if any */
	capacity_t* cap;	/* spare capacity being consumed, if any */

	task_class_t exec_class; /* class of what is currently running */
	jiffie_t cur_deadline;	/* deadline of the current job/server */
	atomic_t will_schedule;	/* set when a reschedule is already pending */

	struct list_head list;	/* position in hsb_cpu_queue (priority order) */
	spinlock_t lock;
} cpu_state_t;
257
258
/* one HSB state per CPU */
DEFINE_PER_CPU(cpu_state_t, hsb_cpu_state);

/* HRT EDF domain of a given CPU */
#define hrt_dom(cpu) (&per_cpu(hsb_cpu_state, cpu).hrt.domain)

/* will_schedule suppresses redundant reschedule IPIs: it is set when
 * this CPU has already decided to reschedule and cleared once the
 * scheduling decision has been made. */
#define set_will_schedule() \
 (atomic_set(&__get_cpu_var(hsb_cpu_state).will_schedule, 1))
#define clear_will_schedule() \
 (atomic_set(&__get_cpu_var(hsb_cpu_state).will_schedule, 0))
#define test_will_schedule(cpu) \
 (atomic_read(&per_cpu(hsb_cpu_state, cpu).will_schedule))
270
271static void prepare_hrt_release(hrt_server_t *srv, jiffie_t start)
272{
273 if (srv->period && srv->wcet) {
274 srv->deadline = start;
275 srv->budget = 0;
276 }
277}
278
/* check_for_hrt_release - replenish the HRT server if its current
 * period has ended: push the deadline one period ahead and refill the
 * budget. No-op for unused servers (zero wcet/period).
 */
static void check_for_hrt_release(hrt_server_t *srv) {
	if (srv->wcet && srv->period &&
	    time_before_eq(srv->deadline, jiffies)) {
		srv->deadline += srv->period;
		srv->budget = srv->wcet;
		sched_trace_server_release(HRT_BASE_PID + smp_processor_id(),
					   srv->budget, srv->period, RT_CLASS_HARD);
	}
}
288
289/* A HRT client is eligible if either its deadline is before the
290 * the server deadline or if the server has zero slack. The server
291 * must have budget left.
292 */
/* Returns nonzero iff the server has budget and either the earliest
 * ready HRT job's deadline precedes the server deadline, or the server
 * has no slack left (must run now to meet its guarantee).
 * Caller must hold the domain's ready lock for reading.
 */
static inline int hrt_client_eligible(hrt_server_t *srv)
{
	if (!list_empty(&srv->domain.ready_queue))
		return srv->budget && (
			time_before(get_deadline(next_ready(&srv->domain)),
				    srv->deadline)
			|| server_slack(srv) <= 0);
	else
		return 0;
}
303
304static void hsb_cpu_state_init(cpu_state_t* cpu_state,
305 edf_check_resched_needed_t check,
306 int cpu)
307{
308 edf_domain_init(&cpu_state->hrt.domain, check);
309 cpu_state->hrt.budget = 0;
310 cpu_state->hrt.deadline = 0;
311 cpu_state->hrt.period = 0;
312 cpu_state->hrt.wcet = 0;
313
314 cpu_state->be = NULL;
315 cpu_state->cap = NULL;
316
317 cpu_state->cur_deadline = 0;
318 cpu_state->cpu = cpu;
319 cpu_state->lock = SPIN_LOCK_UNLOCKED;
320 cpu_state->exec_class = RT_CLASS_BEST_EFFORT;
321
322 atomic_set(&cpu_state->will_schedule, 0);
323 INIT_LIST_HEAD(&cpu_state->list);
324}
325
326/******************************************************************************/
327/* BE queue functions - mostly like edf_common.c */
328/******************************************************************************/
329
330#define be_earlier_deadline(a, b) (time_before(\
331 (a)->deadline, (b)->deadline))
332#define be_earlier_release(a, b) (time_before(\
333 (a)->release, (b)->release))
334
335
/* be_add_ready - insert a BE server into the ready queue in deadline
 * order. Takes the domain's ready lock itself. A reschedule check is
 * triggered only when the server becomes the new queue head ('passed'
 * counts the entries with earlier deadlines), since only the head can
 * warrant a preemption.
 */
static void be_add_ready(edf_domain_t* edf, be_server_t *new)
{
	unsigned long flags;
	struct list_head *pos;
	be_server_t *queued;
	unsigned int passed = 0;

	BUG_ON(!new);
	/* first we need the write lock for edf_ready_queue */
	write_lock_irqsave(&edf->ready_lock, flags);
	/* find a spot where our deadline is earlier than the next */
	list_for_each(pos, &edf->ready_queue) {
		queued = list_entry(pos, be_server_t, list);
		if (unlikely(be_earlier_deadline(new, queued))) {
			__list_add(&new->list, pos->prev, pos);
			goto out;
		}
		passed++;
	}
	/* if we get to this point either the list is empty or new has the
	 * lowest priority. Let's add it to the end. */
	list_add_tail(&new->list, &edf->ready_queue);
 out:
	if (!passed)
		edf->check_resched(edf);
	write_unlock_irqrestore(&edf->ready_lock, flags);
}
363
364static be_server_t* be_take_ready(edf_domain_t* edf)
365{
366 be_server_t *t = NULL;
367
368 if (!list_empty(&edf->ready_queue)) {
369 t = list_entry(edf->ready_queue.next, be_server_t, list);
370 /* kick it out of the ready list */
371 list_del(&t->list);
372 }
373 return t;
374}
375
376/*static be_server_t* get_be_server(edf_domain_t* edf)
377{
378 be_server_t *t = NULL;
379
380 spin_lock(&edf->release_lock);
381 write_lock(&edf->ready_lock);
382 t = be_take_ready(edf);
383
384 if (!t && !list_empty(&edf->release_queue)) {
385 t = list_entry(edf->release_queue.next, be_server_t, list);
386
387 list_del(&t->list);
388 }
389
390 write_unlock(&edf->ready_lock);
391 spin_unlock(&edf->release_lock);
392 return t;
393}*/
394
/* be_add_release - queue a BE server for a future release, keeping the
 * release queue ordered by release time (scanned backwards from the
 * tail, so equal release times keep FIFO order). Takes release_lock.
 */
static void be_add_release(edf_domain_t* edf, be_server_t *srv)
{
	unsigned long flags;
	struct list_head *pos;
	be_server_t *queued;

	spin_lock_irqsave(&edf->release_lock, flags);
	list_for_each_prev(pos, &edf->release_queue) {
		queued = list_entry(pos, be_server_t, list);
		if ((unlikely(be_earlier_release(queued, srv)))) {
			/* the task at pos has an earlier release */
			/* insert the new task in behind it */
			__list_add(&srv->list, pos, pos->next);
			goto out;
		}
	}

	list_add(&srv->list, &edf->release_queue);
 out:
	spin_unlock_irqrestore(&edf->release_lock, flags);
}
416
/* be_try_release_pending - move all BE servers whose release time has
 * arrived from the release queue to the ready queue. Uses trylock so a
 * contended tick can skip the work (it will be retried on the next
 * tick). The release queue is ordered, so scanning stops at the first
 * not-yet-released entry.
 */
static void be_try_release_pending(edf_domain_t* edf)
{
	unsigned long flags;
	struct list_head *pos, *save;
	be_server_t *queued;

	if (spin_trylock_irqsave(&edf->release_lock, flags)) {
		list_for_each_safe(pos, save, &edf->release_queue) {
			queued = list_entry(pos, be_server_t, list);
			if (likely(time_before_eq(
					   queued->release,
					   jiffies))) {
				list_del(pos);
				be_add_ready(edf, queued);
				sched_trace_server_release(
					queued->pid, queued->budget,
					queued->period, RT_CLASS_BEST_EFFORT);
			} else
				/* the release queue is ordered */
				break;
		}
		spin_unlock_irqrestore(&edf->release_lock, flags);
	}
}
441
442static void be_prepare_new_release(be_server_t *t, jiffie_t start) {
443 t->release = start;
444 t->deadline = t->release + t->period;
445 t->budget = t->wcet;
446}
447
/* be_prepare_new_releases - restart all BE servers with a synchronous
 * release at time 'start' (used when real-time mode begins). Both the
 * release and ready queues are drained into a temporary list under
 * both locks; re-queuing then happens outside the locks, since
 * be_add_release() acquires release_lock itself.
 */
static void be_prepare_new_releases(edf_domain_t *edf, jiffie_t start)
{
	unsigned long flags;
	struct list_head tmp_list;
	struct list_head *pos, *n;
	be_server_t *t;

	INIT_LIST_HEAD(&tmp_list);

	spin_lock_irqsave(&edf->release_lock, flags);
	write_lock(&edf->ready_lock);


	while (!list_empty(&edf->release_queue)) {
		pos = edf->release_queue.next;
		list_del(pos);
		list_add(pos, &tmp_list);
	}

	while (!list_empty(&edf->ready_queue)) {
		pos = edf->ready_queue.next;
		list_del(pos);
		list_add(pos, &tmp_list);

	}

	write_unlock(&edf->ready_lock);
	spin_unlock_irqrestore(&edf->release_lock, flags);

	/* re-queue every server with its new release time */
	list_for_each_safe(pos, n, &tmp_list) {
		t = list_entry(pos, be_server_t, list);
		list_del(pos);
		be_prepare_new_release(t, start);
		be_add_release(edf, t);
	}

}
485
486static void be_prepare_for_next_period(be_server_t *t)
487{
488 BUG_ON(!t);
489 /* prepare next release */
490 t->release = t->deadline;
491 t->deadline += t->period;
492 t->budget = t->wcet;
493}
494
495#define be_next_ready(edf) \
496 list_entry((edf)->ready_queue.next, be_server_t, list)
497
498
499/* need_to_preempt - check whether the task t needs to be preempted by a
500 * best-effort server.
501 */
/* Returns nonzero iff the earliest ready BE server should preempt what
 * the CPU described by 'state' currently runs: always preempt
 * best-effort execution; for SRT execution compare against the
 * capacity deadline (if consuming one) or the current job deadline.
 * Caller must hold edf->ready_lock for reading.
 */
static inline int be_preemption_needed(edf_domain_t* edf, cpu_state_t* state)
{
	/* we need the read lock for edf_ready_queue */
	if (!list_empty(&edf->ready_queue))
	{

		if (state->exec_class == RT_CLASS_SOFT) {
			if (state->cap)
				return time_before(
					be_next_ready(edf)->deadline,
					state->cap->deadline);
			else
				return time_before(
					be_next_ready(edf)->deadline,
					state->cur_deadline);
		} else
			return 1;
	}
	return 0;
}
522
/* be_enqueue - put a BE server back into circulation. A server with
 * exhausted budget first gets a fresh job. Servers that are already
 * released (and we are in RT mode) go straight to the ready queue,
 * otherwise they wait in the release queue.
 */
static void be_enqueue(edf_domain_t* edf, be_server_t* srv)
{
	int new_release = 0;
	if (!srv->budget) {
		be_prepare_for_next_period(srv);
		new_release = 1;
	}

	if (time_before_eq(srv->release, jiffies) &&
	    get_rt_mode() == MODE_RT_RUN) {
		be_add_ready(edf, srv);
		/* only trace the release if a new job was just set up */
		if (new_release)
			sched_trace_server_release(
				srv->pid, srv->budget,
				srv->period, RT_CLASS_BEST_EFFORT);
	} else
		be_add_release(edf, srv);
}
541
/* be_preempt - detach the BE server currently assigned to 'state' (if
 * any) and re-queue it in the BE domain. The server is taken out under
 * state->lock but enqueued after dropping it, because be_enqueue()
 * acquires the domain locks and taking them nested under the CPU lock
 * would invert the locking order.
 */
static void be_preempt(edf_domain_t *be, cpu_state_t *state)
{
	be_server_t *srv;

	spin_lock(&state->lock);
	srv = state->be;
	state->be = NULL;
	spin_unlock(&state->lock);

	/* add outside of lock to avoid deadlock */
	if (srv)
		be_enqueue(be, srv);
}
555
556
557/******************************************************************************/
558/* Actual HSB implementation */
559/******************************************************************************/
560
561/* always acquire the cpu lock as the last lock to avoid deadlocks */
562static spinlock_t hsb_cpu_lock = SPIN_LOCK_UNLOCKED;
563/* the cpus queue themselves according to priority in here */
564static LIST_HEAD(hsb_cpu_queue);
565
566
567/* the global soft real-time domain */
568static edf_domain_t srt;
569/* the global best-effort server domain
570 * belongs conceptually to the srt domain, but has
571 * be_server_t* queued instead of tast_t*
572 */
573static edf_domain_t be;
574
575static fifo_domain_t hsb_fifo;
576
577static capacity_queue_t cap_queue;
578
579
580
581
582/* adjust_cpu_queue - Move the cpu entry to the correct place to maintain
583 * order in the cpu queue.
584 *
585 */
586static void adjust_cpu_queue(task_class_t class, jiffie_t deadline,
587 be_server_t *be)
588{
589 struct list_head *pos;
590 cpu_state_t *other;
591 cpu_state_t *entry;
592
593 spin_lock(&hsb_cpu_lock);
594
595 entry = &__get_cpu_var(hsb_cpu_state);
596
597 spin_lock(&entry->lock);
598 entry->exec_class = class;
599 entry->cur_deadline = deadline;
600 entry->be = be;
601
602 spin_unlock(&entry->lock);
603
604
605
606 if (be)
607 sched_trace_server_scheduled(
608 be->pid, RT_CLASS_BEST_EFFORT, be->budget,
609 be->deadline);
610 else if (class == RT_CLASS_HARD)
611 sched_trace_server_scheduled(
612 HRT_BASE_PID + smp_processor_id(), RT_CLASS_HARD,
613 entry->hrt.budget, entry->hrt.deadline);
614
615 list_del(&entry->list);
616 /* If we do not execute real-time jobs we just move
617 * to the end of the queue .
618 * If we execute hard real-time jobs we move the start
619 * of the queue.
620 */
621
622 switch (entry->exec_class) {
623 case RT_CLASS_HARD:
624 list_add(&entry->list, &hsb_cpu_queue);
625 break;
626
627 case RT_CLASS_SOFT:
628 list_for_each(pos, &hsb_cpu_queue) {
629 other = list_entry(pos, cpu_state_t, list);
630 if (other->exec_class > RT_CLASS_SOFT ||
631 time_before_eq(entry->cur_deadline,
632 other->cur_deadline))
633 {
634 __list_add(&entry->list, pos->prev, pos);
635 goto out;
636 }
637 }
638 /* possible fall through if lowest SRT priority */
639
640 case RT_CLASS_BEST_EFFORT:
641 list_add_tail(&entry->list, &hsb_cpu_queue);
642 break;
643
644 default:
645 /* something wrong in the variable */
646 BUG();
647 }
648 out:
649 spin_unlock(&hsb_cpu_lock);
650}
651
652
653/* hrt_check_resched - check whether the HRT server on given CPU needs to
654 * preempt the running task.
655 */
656static int hrt_check_resched(edf_domain_t *edf)
657{
658 hrt_server_t *srv = container_of(edf, hrt_server_t, domain);
659 cpu_state_t *state = container_of(srv, cpu_state_t, hrt);
660 int ret = 0;
661
662 spin_lock(&state->lock);
663
664 if (hrt_client_eligible(srv)) {
665 if (state->exec_class > RT_CLASS_HARD ||
666 time_before(
667 get_deadline(next_ready(edf)),
668 state->cur_deadline)
669 ) {
670 if (state->cpu == smp_processor_id())
671 set_tsk_need_resched(current);
672 else
673 smp_send_reschedule(state->cpu);
674 }
675 }
676
677 spin_unlock(&state->lock);
678 return ret;
679}
680
681
682/* srt_check_resched - Check whether another CPU needs to switch to a SRT task.
683 *
684 * The function only checks and kicks the last CPU. It will reschedule and
685 * kick the next if necessary, and so on. The caller is responsible for making
686 * sure that it is not the last entry or that a reschedule is not necessary.
687 *
688 * Caller must hold edf->ready_lock!
689 */
static int srt_check_resched(edf_domain_t *edf)
{
	cpu_state_t *last;
	int ret = 0;

	spin_lock(&hsb_cpu_lock);

	if (!list_empty(&srt.ready_queue)) {
		/* the tail of the CPU queue is the lowest-priority CPU */
		last = list_entry(hsb_cpu_queue.prev, cpu_state_t, list);
		/* guard against concurrent updates */
		spin_lock(&last->lock);
		/* preempt if the CPU runs best-effort work, or SRT work
		 * with a later deadline than the earliest ready SRT job */
		if (last->exec_class == RT_CLASS_BEST_EFFORT || (
			    last->exec_class == RT_CLASS_SOFT &&
			    time_before(get_deadline(next_ready(&srt)),
					last->cur_deadline)))
		{
			if (smp_processor_id() == last->cpu)
				set_tsk_need_resched(current);
			else
				/* skip the IPI if that CPU already
				 * decided to reschedule */
				if (!test_will_schedule(last->cpu))
					smp_send_reschedule(last->cpu);
			ret = 1;
		}
		spin_unlock(&last->lock);
	}

	spin_unlock(&hsb_cpu_lock);
	return ret;
}
719
720
721/* be_check_resched - Check whether another CPU needs to switch to a BE server..
722 *
723 * Caller must hold edf->ready_lock!
724 */
static int be_check_resched(edf_domain_t *edf)
{
	cpu_state_t *last;
	int soft, bg;
	int ret = 0;

	spin_lock(&hsb_cpu_lock);

	if (!list_empty(&be.ready_queue)) {
		/* tail of the CPU queue = lowest-priority CPU */
		last = list_entry(hsb_cpu_queue.prev, cpu_state_t, list);
		/* guard against concurrent updates */
		spin_lock(&last->lock);

		bg = last->exec_class == RT_CLASS_BEST_EFFORT;
		soft = last->exec_class == RT_CLASS_SOFT;

		/* preempt background work unconditionally; preempt SRT
		 * work only for an earlier-deadline BE server */
		if (bg || (soft && time_before(be_next_ready(&be)->deadline,
					       last->cur_deadline)))
		{
			if (smp_processor_id() == last->cpu)
				set_tsk_need_resched(current);
			else
				if (!test_will_schedule(last->cpu))
					smp_send_reschedule(last->cpu);
			ret = 1;
		}

		spin_unlock(&last->lock);
	}

	spin_unlock(&hsb_cpu_lock);
	return ret;
}
758
759
/* cap_check_resched - check whether a spare capacity with the given
 * deadline should preempt the lowest-priority CPU. Mirrors
 * be_check_resched(), but compares against an explicit deadline and
 * only acts in RT mode. Returns 1 iff a reschedule was triggered.
 */
int cap_check_resched(jiffie_t deadline)
{
	unsigned long flags;
	cpu_state_t *last;
	int soft, bg;
	int ret = 0;



	if (get_rt_mode() == MODE_RT_RUN) {
		spin_lock_irqsave(&hsb_cpu_lock, flags);

		/* tail of the CPU queue = lowest-priority CPU */
		last = list_entry(hsb_cpu_queue.prev, cpu_state_t, list);
		/* guard against concurrent updates */
		spin_lock(&last->lock);

		bg = last->exec_class == RT_CLASS_BEST_EFFORT;
		soft = last->exec_class == RT_CLASS_SOFT;

		if (bg || (soft && time_before(deadline,
					       last->cur_deadline)))
		{
			if (smp_processor_id() == last->cpu)
				set_tsk_need_resched(current);
			else
				if (!test_will_schedule(last->cpu))
					smp_send_reschedule(last->cpu);
			ret = 1;
		}

		spin_unlock(&last->lock);

		spin_unlock_irqrestore(&hsb_cpu_lock, flags);
	}
	return ret;
}
796
/* fifo_check_resched - check whether a pending background FIFO job
 * should preempt the lowest-priority CPU. Background jobs only ever
 * displace best-effort execution. Returns 1 iff a reschedule was
 * triggered.
 */
int fifo_check_resched(void)
{
	unsigned long flags;
	cpu_state_t *last;
	int ret = 0;

	if (get_rt_mode() == MODE_RT_RUN) {
		spin_lock_irqsave(&hsb_cpu_lock, flags);


		/* tail of the CPU queue = lowest-priority CPU */
		last = list_entry(hsb_cpu_queue.prev, cpu_state_t, list);
		/* guard against concurrent updates */

		spin_lock(&last->lock);

		if (last->exec_class == RT_CLASS_BEST_EFFORT)
		{
			if (smp_processor_id() == last->cpu)
				set_tsk_need_resched(current);
			else
				if (!test_will_schedule(last->cpu))
					smp_send_reschedule(last->cpu);
			ret = 1;
		}

		spin_unlock(&last->lock);

		spin_unlock_irqrestore(&hsb_cpu_lock, flags);
	}
	return ret;
}
828
829
830
/* Returns nonzero iff the earliest ready task in 'edf' should preempt
 * what this CPU runs: always preempt non-SRT execution; for SRT
 * execution compare deadlines against the consumed capacity (if any)
 * or the current job. Caller must hold edf->ready_lock for reading.
 */
static inline int hsb_preemption_needed(edf_domain_t* edf, cpu_state_t* state)
{
	/* we need the read lock for edf_ready_queue */
	if (!list_empty(&edf->ready_queue))
	{
		if (state->exec_class == RT_CLASS_SOFT) {
			if (state->cap)
				return time_before(get_deadline(next_ready(edf))
						   , state->cap->deadline);
			else
				return time_before(get_deadline(next_ready(edf))
						   , state->cur_deadline);
		} else
			return 1;
	}
	return 0;
}
848
/* Returns nonzero iff the earliest queued spare capacity should
 * preempt what this CPU runs; same comparison scheme as
 * hsb_preemption_needed(), but against the capacity queue head.
 * Caller must hold q->lock.
 */
static inline int cap_preemption_needed(capacity_queue_t* q, cpu_state_t* state)
{
	/* we need the read lock for edf_ready_queue */
	if (!list_empty(&q->queue))
	{
		if (state->exec_class == RT_CLASS_SOFT) {
			if (state->cap)
				return time_before(next_cap(q)->deadline
						   , state->cap->deadline);
			else
				return time_before(next_cap(q)->deadline
						   , state->cur_deadline);
		} else
			return 1;
	}
	return 0;
}
866
867/* hsb_scheduler_tick - this function is called for every local timer
868 * interrupt.
869 *
870 * checks whether the current task has expired and checks
871 * whether we need to preempt it if it has not expired
872 */
/* hsb_scheduler_tick - per-jiffy accounting and preemption check.
 * Charges the running BE server / spare capacity / HRT server / task
 * budget, releases any jobs and servers that are due, and then checks
 * whether a higher-priority activity (HRT job, capacity, SRT job, BE
 * server, or background job) should preempt the current execution.
 * Returns FORCE_RESCHED (and sets will_schedule) if so.
 */
static reschedule_check_t hsb_scheduler_tick(void)
{
	unsigned long flags;
	struct task_struct *t = current;
	int resched = 0;

	cpu_state_t *state = &__get_cpu_var(hsb_cpu_state);

	/* expire tasks even if not in real-time mode
	 * this makes sure that at the end of real-time mode
	 * no tasks "run away forever".
	 */

	/* charge BE server only if we are not running on a spare capacity */
	if (state->be && !state->cap && --state->be->budget <= 0) {
		sched_trace_server_completion(state->be->pid, 0,
					      state->be->deadline,
					      RT_CLASS_BEST_EFFORT);
		be_preempt(&be, state);
		resched = 1;
	}

	/* a consumed capacity expires when its budget runs out or its
	 * deadline passes */
	if (state->cap)
		if (--state->cap->budget <= 0 ||
		    time_before_eq(state->cap->deadline, jiffies)) {
			kfree(state->cap);
			state->cap = NULL;
			resched = 1;
		}

	if (is_realtime(t)) {
		/* HRT execution also drains the HRT server's budget */
		if (is_hrt(t) && (--state->hrt.budget <= 0)) {
			sched_trace_server_completion(
				HRT_BASE_PID + smp_processor_id(), 0,
				state->hrt.deadline, RT_CLASS_HARD);
			resched = 1;
		}

		/* account for received service... */
		t->rt_param.times.exec_time++;

		/* ...and charge current budget */
		if (!state->cap) {
			--t->time_slice;
			/* a task always should be able to finish its job */
			BUG_ON(!is_be(t) && !t->time_slice && !job_completed(t));
		}

		if (job_completed(t) || (is_be(t) && !t->time_slice)) {
			sched_trace_job_completion(t);
			set_rt_flags(t, RT_F_SLEEP);
			resched = 1;
		}
	}


	if (get_rt_mode() == MODE_RT_RUN)
	{
		/* release due jobs/servers in every domain */
		try_release_pending(&state->hrt.domain);
		check_for_hrt_release(&state->hrt);
		try_release_pending(&srt);
		be_try_release_pending(&be);

		/* if nothing forced a reschedule yet, look for a
		 * preemption in priority order (HRT > cap > SRT > BE >
		 * background) */
		if (!resched)
			switch (state->exec_class) {
			case RT_CLASS_HARD:
				read_lock_irqsave(&state->hrt.domain.ready_lock,
						  flags);
				resched = preemption_needed(&state->hrt.domain,
							    t);
				read_unlock_irqrestore(
					&state->hrt.domain.ready_lock, flags);
				break;

			case RT_CLASS_SOFT:
			case RT_CLASS_BEST_EFFORT:
				local_irq_save(flags);

				/* check for HRT jobs */
				read_lock(&state->hrt.domain.ready_lock);
				resched = hrt_client_eligible(&state->hrt);
				read_unlock(&state->hrt.domain.ready_lock);

				/* check for spare capacities */
				if (!resched) {
					spin_lock(&cap_queue.lock);
					resched =
						cap_preemption_needed(&cap_queue,
								      state);
					spin_unlock(&cap_queue.lock);
				}

				/* check for SRT jobs */
				if (!resched) {
					read_lock(&srt.ready_lock);
					resched = hsb_preemption_needed(
						&srt, state);
					read_unlock(&srt.ready_lock);
				}

				/* check for BE jobs */
				if (!resched) {
					read_lock(&be.ready_lock);
					resched = be_preemption_needed(
						&be, state);
					read_unlock(&be.ready_lock);
				}

				/* check for background jobs */
				if (!resched && !is_realtime(current))
					resched = fifo_jobs_pending(&hsb_fifo);
				local_irq_restore(flags);
				break;

			default:
				/* something wrong in the variable */
				BUG();
			}
	}

	if (resched) {
		/* suppress redundant IPIs until schedule() runs */
		set_will_schedule();
		return FORCE_RESCHED;
	} else
		return NO_RESCHED;
}
999
/* schedule_hrt - pick the next hard real-time task for this CPU.
 * Returns any held spare capacity (HRT execution never consumes
 * capacities), keeps prev if it is still the highest-priority released
 * HRT task, and otherwise takes the head of the per-CPU HRT queue.
 * Returns nonzero iff prev should be deactivated (i.e. prev != next).
 */
static int schedule_hrt(struct task_struct * prev,
			struct task_struct ** next, runqueue_t * rq)
{
	unsigned long flags;
	int deactivate = 1;
	cpu_state_t *state;


	state = &__get_cpu_var(hsb_cpu_state);

	write_lock_irqsave(&state->hrt.domain.ready_lock, flags);


	if (state->cap) {
		/* hrt_schedule does not have the cap_queue lock */
		return_capacity(&cap_queue, state->cap);
		state->cap = NULL;
	}

	if (is_hrt(prev) && is_released(prev) && is_running(prev)
	    && !preemption_needed(&state->hrt.domain, prev)) {
		/* This really should only happen if the task has
		 * 100% utilization or when we got a bogus/delayed
		 * resched IPI.
		 */
		TRACE("HRT: prev will be next, already released\n");
		*next = prev;
		deactivate = 0;
	} else {
		/* either not yet released, preempted, or non-rt */
		*next = __take_ready(&state->hrt.domain);
		/* the logic in hsb_schedule makes sure *next must exist
		 * if we get here */
		BUG_ON(!*next);
		/* stick the task into the runqueue */
		__activate_task(*next, rq);
		set_task_cpu(*next, smp_processor_id());
	}

	set_rt_flags(*next, RT_F_RUNNING);
	/* reposition this CPU at the head of the CPU queue */
	adjust_cpu_queue(RT_CLASS_HARD, get_deadline(*next), NULL);
	clear_will_schedule();

	write_unlock_irqrestore(&state->hrt.domain.ready_lock, flags);
	return deactivate;
}
1046
1047
/* find_min_slack_task - pick the ready task (or prev, if still
 * runnable) with the least slack, i.e. the one most likely to be
 * tardy. Returns NULL only if prev is not usable and the ready queue
 * is empty. Caller must hold the domain's ready lock.
 */
static struct task_struct* find_min_slack_task(struct task_struct *prev,
					       edf_domain_t* edf)
{
	struct list_head *pos;
	struct task_struct* tsk = NULL;
	struct task_struct* cur;

	if (is_realtime(prev) && is_running(prev) &&
	    get_rt_flags(prev) != RT_F_SLEEP)
		tsk = prev;
	list_for_each(pos, &edf->ready_queue) {
		cur = list_entry(pos, struct task_struct, rt_list);
		if (!tsk || task_slack(tsk) > task_slack(cur))
			tsk = cur;
	}
	return tsk;
}
1065
/* null_heuristic - trivial capacity-recipient heuristic: hand the
 * capacity to the earliest-deadline ready SRT task, unless background
 * FIFO jobs are pending.
 * NOTE(review): returning NULL while FIFO jobs are pending means spare
 * capacity goes to background work in preference to non-tardy SRT
 * tasks -- confirm this ordering is intended.
 */
static struct task_struct* null_heuristic(struct task_struct *prev,
					  edf_domain_t* edf,
					  fifo_domain_t* fifo)
{
	if (fifo_jobs_pending( fifo))
		return NULL;
	else if (!list_empty(&edf->ready_queue))
		return list_entry(edf->ready_queue.next,
				  struct task_struct, rt_list);
	else
		return NULL;
}
1078
1079/*static struct task_struct* history_heuristic(struct task_struct *prev, edf_domain_t* edf)
1080{
1081 struct list_head *pos;
1082 struct task_struct* tsk = NULL;
1083 struct task_struct* cur;
1084
1085 if (is_realtime(prev) && is_running(prev) &&
1086 get_rt_flags(prev) != RT_F_SLEEP)
1087 tsk = prev;
1088 list_for_each(pos, &edf->ready_queue) {
1089 cur = list_entry(pos, struct task_struct, rt_list);
1090 if (!tsk ||
1091 tsk->rt_param.stats.nontardy_jobs_ctr >
1092 cur->rt_param.stats.nontardy_jobs_ctr)
1093 tsk = cur;
1094 }
1095 if (tsk && tsk->rt_param.stats.nontardy_jobs_ctr < 5)
1096 return tsk;
1097 else
1098 return NULL;
1099}
1100*/
1101/* TODO: write slack heuristic.*/
1102/*static struct task_struct* slack_heuristic(struct task_struct *prev, edf_domain_t* edf)
1103{
1104 struct list_head *pos;
1105 struct task_struct* tsk = NULL;
1106 struct task_struct* cur;
1107
1108 if (is_realtime(prev) && is_running(prev) &&
1109 get_rt_flags(prev) != RT_F_SLEEP)
1110 tsk = prev;
1111 list_for_each(pos, &edf->ready_queue) {
1112 cur = list_entry(pos, struct task_struct, rt_list);
1113 if (!tsk ||
1114 tsk->rt_param.stats.nontardy_job_ctr >
1115 cur->rt_param.stats.nontardy_job_ctr)
1116 tsk = cur;
1117 }
1118 if (tsk && tsk->rt_param.stats.nontardy_job_ctr < 5)
1119 return tsk;
1120 else
1121 return NULL;
1122}*/
1123
1124
/* schedule_capacity - choose a task to consume spare capacity on this CPU.
 *
 * Caller holds all locks (srt/be ready locks and cap_queue.lock).
 *
 * First secures the best available capacity in state->cap, then picks a
 * consumer: the minimum-slack SRT task if one is about to be tardy,
 * otherwise (via null_heuristic) the earliest-deadline SRT task, and as a
 * last resort a BE job from the FIFO.  Returns nonzero iff *next != prev,
 * i.e. prev must be deactivated by the caller.
 */
static int schedule_capacity(struct task_struct *prev,
			     struct task_struct **next, runqueue_t *rq)
{
	cpu_state_t *state = &__get_cpu_var(hsb_cpu_state);
	capacity_t* old;

	/* Try to trade the capacity we already hold for one with an earlier
	 * deadline; keep the old one if nothing better is queued.
	 */
	if (state->cap) {
		old = state->cap;
		state->cap = __take_capacity(&cap_queue, old->deadline, 1);
		if (!state->cap)
			state->cap = old;
		else
			__return_capacity(&cap_queue, old);
	} else
		state->cap = __take_capacity(&cap_queue, 0, 0);


	/* pick a task likely to be tardy */
	*next = find_min_slack_task(prev, &srt);

	/* only give away spare capacities if there is no task that
	 * is going to be tardy
	 */
	if (*next && task_slack(*next) >= 0)
		*next = null_heuristic(prev, &srt, &hsb_fifo);
	if (*next && *next != prev)
		list_del(&(*next)->rt_list);


	/* if there is none pick a BE job */
	if (!*next) {
		if (is_realtime(prev) && is_be(prev) && is_running(prev) &&
		    get_rt_flags(prev) != RT_F_SLEEP)
			*next = prev;
		else
			*next = fifo_take(&hsb_fifo);
	}

	/* a BE server previously bound to this CPU loses it now */
	if (state->be)
		be_preempt(&be, state);
	BUG_ON(!state->cap);
	if (*next && state->cap->donor) {
		sched_trace_capacity_allocation(
			*next, state->cap->budget, state->cap->deadline,
			state->cap->donor);
	}

	return *next != prev;
}
1177
1178
1179
/* Work classes returned by what_first() and consumed by
 * schedule_srt_be_cap():
 *   BG  - nothing ready, run in background mode
 *   SRT - soft real-time EDF task is next
 *   BE  - best-effort server is next
 *   CAP - spare capacity should be consumed next
 */
#define BG 0
#define SRT 1
#define BE 2
#define CAP 3
1184
/* what_first - determine which class of pending work has the earliest
 * deadline: the head of the BE server queue, the head of the SRT ready
 * queue, or the earliest spare capacity.  Returns BG when nothing is
 * ready.  Deadline comparisons use time_before() to be safe against
 * jiffies wraparound.
 */
static inline int what_first(edf_domain_t *be, edf_domain_t *srt, capacity_queue_t* q)
{
	jiffie_t sdl = 0, bdl= 0, cdl = 0, cur;
	int _srt = !list_empty(&srt->ready_queue);
	int _be = !list_empty(&be->ready_queue);
	int _cap = __capacity_available(q);


	int ret = BG; /* nothing ready => background mode*/
	cur = 0;

	if (_srt)
		sdl = get_deadline(next_ready(srt));
	if (_be)
		bdl = be_next_ready(be)->deadline;
	if (_cap)
		cdl = next_cap(q)->deadline;



	if (_cap) {
		ret = CAP;
		cur = cdl;
	}
	/* "!ret" means ret is still BG, so the class wins unconditionally */
	if (_srt && (time_before(sdl, cur) || !ret)) {
		ret = SRT;
		cur = sdl;
	}
	if (_be && (time_before(bdl, cur) || !ret)) {
		ret = BE;
		cur = bdl;
	}
	return ret;
}
1219
1220
1221
1222static int schedule_srt_be_cap(struct task_struct *prev,
1223 struct task_struct **next, runqueue_t *rq)
1224{
1225 task_class_t class = RT_CLASS_BEST_EFFORT;
1226 jiffie_t deadline = 0;
1227 unsigned long flags;
1228 int deactivate = 1;
1229 be_server_t* bes;
1230 cpu_state_t* state;
1231 int type;
1232
1233reschedule:
1234 write_lock_irqsave(&srt.ready_lock, flags);
1235 write_lock(&be.ready_lock);
1236 spin_lock(&cap_queue.lock);
1237
1238
1239 state = &__get_cpu_var(hsb_cpu_state);
1240 bes = NULL;
1241
1242 clear_will_schedule();
1243
1244 if (is_realtime(prev) && (is_released(prev) || is_be(prev)) &&
1245 is_running(prev) && !hsb_preemption_needed(&srt, state) &&
1246 !be_preemption_needed(&be, state)
1247 ) {
1248 /* Our current task's next job has already been
1249 * released and has higher priority than the highest
1250 * prioriy waiting task; in other words: it is tardy.
1251 * We just keep it.
1252 */
1253 TRACE("prev will be next, already released\n");
1254 *next = prev;
1255 class = prev->rt_param.basic_params.class;
1256 deadline = get_deadline(*next);
1257 deactivate = 0;
1258 } else {
1259 /* either not yet released, preempted, or non-rt */
1260 type = what_first(&be, &srt, &cap_queue);
1261 switch (type) {
1262 case CAP:
1263 /* capacity */
1264 deactivate = schedule_capacity(prev, next, rq);
1265 deadline = state->cap->deadline;
1266 if (*next)
1267 class = RT_CLASS_SOFT;
1268 else
1269 class = RT_CLASS_BEST_EFFORT;
1270 break;
1271 case BE:
1272 /* be */
1273 *next = NULL;
1274 bes = be_take_ready(&be);
1275 if (bes) {
1276 class = RT_CLASS_SOFT;
1277 deadline = bes->deadline;
1278 *next = fifo_take(&hsb_fifo);
1279 if (!*next) {
1280 /* deactivate */
1281 __release_capacity(&cap_queue,
1282 bes->budget,
1283 bes->deadline, NULL);
1284 bes->budget = 0;
1285 barrier();
1286 spin_unlock(&cap_queue.lock);
1287 write_unlock(&be.ready_lock);
1288 write_unlock_irqrestore(&srt.ready_lock,
1289 flags);
1290 be_enqueue(&be, bes);
1291 goto reschedule;
1292 }
1293 }
1294 break;
1295 case SRT:
1296 /* srt */
1297 *next = __take_ready(&srt);
1298 if (*next) {
1299 class = RT_CLASS_SOFT;
1300 deadline = get_deadline(*next);
1301 }
1302 break;
1303 case BG:
1304 /* background server mode */
1305 class = RT_CLASS_BEST_EFFORT;
1306 deadline = 0;
1307 *next = fifo_take(&hsb_fifo);
1308 break;
1309 }
1310
1311
1312 /* give back capacities */
1313 if (type != CAP && state->cap) {
1314 __return_capacity(&cap_queue, state->cap);
1315 state->cap = NULL;
1316 }
1317 if (*next && deactivate) {
1318 /* mark the task as executing on this cpu */
1319 set_task_cpu(*next, smp_processor_id());
1320 /* stick the task into the runqueue */
1321 __activate_task(*next, rq);
1322 }
1323 }
1324
1325 adjust_cpu_queue(class, deadline, bes);
1326
1327 switch (type) {
1328 case BG:
1329 break;
1330 case BE:
1331 be.check_resched(&be);
1332 break;
1333 case SRT:
1334 srt.check_resched(&srt);
1335 break;
1336 case CAP:
1337 if (!list_empty(&cap_queue.queue))
1338 cap_check_resched(list_entry(cap_queue.queue.next,
1339 capacity_t, list)->deadline);
1340 break;
1341 }
1342
1343
1344 if(*next)
1345 set_rt_flags(*next, RT_F_RUNNING);
1346
1347 spin_unlock(&cap_queue.lock);
1348 write_unlock(&be.ready_lock);
1349 write_unlock_irqrestore(&srt.ready_lock, flags);
1350 return deactivate;
1351}
1352
1353
/* hsb_schedule - main scheduling entry point of the EDF-HSB plugin.
 *
 * Completed jobs donate their unused budget as a capacity before being
 * set up for their next period.  In RT mode, HRT work always takes
 * precedence over SRT/BE/capacity scheduling.  Always returns 0; the
 * selected task is passed back through *next.
 */
static int hsb_schedule(struct task_struct * prev, struct task_struct ** next,
			runqueue_t * rq)
{
	int need_deactivate = 1;
	cpu_state_t *state = NULL;

	preempt_disable();

	state = &__get_cpu_var(hsb_cpu_state);

	be_preempt(&be, state);


	/* prev finished its job: donate leftover budget, advance period */
	if (is_realtime(prev) && !is_be(prev) &&
	    get_rt_flags(prev) == RT_F_SLEEP)
	{
		TRACE("preparing %d for next period\n", prev->pid);
		release_capacity(&cap_queue, prev->time_slice,
				 prev->rt_param.times.deadline, prev);
		prepare_for_next_period(prev);
	}

	if (get_rt_mode() == MODE_RT_RUN) {
		/* we need to schedule hrt if a hrt job is pending or when
		 * we have a non expired hrt job on the cpu
		 */

		if (hrt_client_eligible(&state->hrt) ||
		    unlikely((is_hrt(prev) && is_running(prev) &&
			     get_rt_flags(prev) != RT_F_SLEEP))) {
			if (state->cap) {
				/* HRT preempts: give the held capacity back */
				return_capacity(&cap_queue, state->cap);
				state->cap = NULL;
			}
			need_deactivate = schedule_hrt(prev, next, rq);
		} else
			need_deactivate = schedule_srt_be_cap(prev, next, rq);

	}

	if (is_realtime(prev) && need_deactivate && prev->array) {
		/* take it out of the run queue */
		deactivate_task(prev, rq);
	}

	preempt_enable();

	return 0;
}
1403
/* hsb_add_release - put a task into the release queue that matches its
 * class (HRT: per-CPU domain, SRT: global domain, BE: FIFO).
 */
static inline void hsb_add_release(struct task_struct *t)
{
	if (is_hrt(t))
		add_release(hrt_dom(get_partition(t)), t);
	else if (is_srt(t))
		add_release(&srt, t);
	else if (is_be(t)) {
		/* BE jobs have no release time; they queue immediately */
		t->time_slice = 0;
		fifo_enqueue(&hsb_fifo, t);
		fifo_check_resched();
	} else
		BUG();

}
1419
/* hsb_add_ready - put a task into the ready queue that matches its
 * class (HRT: per-CPU domain, SRT: global domain, BE: FIFO).
 */
static inline void hsb_add_ready(struct task_struct *t)
{
	if (is_hrt(t))
		add_ready(hrt_dom(get_partition(t)), t);
	else if (is_srt(t))
		add_ready(&srt, t);
	else if (is_be(t)) {
		fifo_enqueue(&hsb_fifo, t);
		fifo_check_resched();
	}
	else
		BUG();
}
1434
1435
/* _finish_switch - we just finished the switch away from prev
 * it is now safe to requeue the task
 */
static void hsb_finish_switch(struct task_struct *prev)
{
	TRACE("finish switch for %d\n", prev->pid);

	/* BE tasks simply go back to the FIFO, regardless of state */
	if (is_be(prev)) {
		fifo_enqueue(&hsb_fifo, prev);
		return;
	}

	if (get_rt_flags(prev) == RT_F_SLEEP ||
	    get_rt_mode() != MODE_RT_RUN) {
		/* this task has expired
		 * _schedule has already taken care of updating
		 * the release and
		 * deadline. We just must check if has been released.
		 */
		if (is_released(prev) && get_rt_mode() == MODE_RT_RUN) {
			sched_trace_job_release(prev);
			hsb_add_ready(prev);
			TRACE("%d goes straight to ready queue\n", prev->pid);
		}
		else
			/* it has got to wait */
			hsb_add_release(prev);
	}
	else {
		/* this is a forced preemption
		 * thus the task stays in the ready_queue
		 * we only must make it available to other cpus
		 */
		hsb_add_ready(prev);
	}
}
1472
1473
1474/* Prepare a task for running in RT mode
1475 * Enqueues the task into master queue data structure
1476 * returns
1477 * -EPERM if task is not TASK_STOPPED
1478 */
1479static long hsb_prepare_task(struct task_struct * t)
1480{
1481 TRACE("edf-hsb: prepare task %d\n", t->pid);
1482
1483 if (t->state == TASK_STOPPED) {
1484 __setscheduler(t, SCHED_FIFO, MAX_RT_PRIO - 1);
1485
1486 if (get_rt_mode() == MODE_RT_RUN && !is_be(t))
1487 /* The action is already on.
1488 * Prepare immediate release
1489 */
1490 prepare_new_release(t);
1491 /* The task should be running in the queue, otherwise signal
1492 * code will try to wake it up with fatal consequences.
1493 */
1494 t->state = TASK_RUNNING;
1495 if (is_be(t))
1496 t->rt_param.times.deadline = 0;
1497 hsb_add_release(t);
1498 return 0;
1499 }
1500 else
1501 return -EPERM;
1502}
1503
static void hsb_wake_up_task(struct task_struct *task)
{
	/* We must determine whether task should go into the release
	 * queue or into the ready queue. It may enter the ready queue
	 * if it has credit left in its time slice and has not yet reached
	 * its deadline. If it is now passed its deadline we assume this the
	 * arrival of a new sporadic job and thus put it in the ready queue
	 * anyway. If it has zero budget and the next release is in the
	 * future it has to go to the release queue.
	 */
	TRACE("edf-hsb: wake up %d with budget=%d\n",
	      task->pid, task->time_slice);
	task->state = TASK_RUNNING;

	if (is_be(task)) {
		/* BE jobs always go through the FIFO via add_release */
		hsb_add_release(task);
	}
	else if (is_tardy(task)) {
		/* new sporadic release */
		prepare_new_release(task);
		sched_trace_job_release(task);
		hsb_add_ready(task);
	}
	else if (task->time_slice) {
		/* came back in time before deadline
		 * TODO: clip budget to fit into period, otherwise it could
		 * cause a deadline overrun in the next period, i.e.
		 * over allocation in the next period.
		 */
		set_rt_flags(task, RT_F_RUNNING);
		hsb_add_ready(task);
	}
	else {
		/* budget exhausted and deadline not reached: wait */
		hsb_add_release(task);
	}

}
1541
static void hsb_task_blocks(struct task_struct *t)
{
	/* CLEANUP: The BUG_ON actually triggered in a really weird case if a
	 * BEST_EFFORT gets caught in a migration right after execv
	 * The next version of Litmus should deal with this more gracefully.
	 */

	/*BUG_ON(!is_realtime(t));*/
	/* not really anything to do since it can only block if
	 * it is running, and when it is not running it is not in any
	 * queue anyway.
	 *
	 * TODO: Check whether the assumption is correct for SIGKILL and
	 * SIGSTOP.
	 */
	TRACE("task %d blocks with budget=%d\n", t->pid, t->time_slice);
	/*BUG_ON(t->rt_list.next != LIST_POISON1);*/
	/*BUG_ON(t->rt_list.prev != LIST_POISON2);*/

	/* a blocking BE task counts as having completed its job */
	if (is_be(t))
		sched_trace_job_completion(t);
}
1564
1565
/* When _tear_down is called the task has already blocked, so it cannot
 * be on any queue.  All per-task state lives in the task_struct itself,
 * hence there is nothing to release here.
 */
static long hsb_tear_down(struct task_struct * t)
{
	/* CLEANUP: sanity BUG_ONs disabled, see hsb_task_blocks */
	return 0;
}
1580
/* hsb_mode_change - (re)initialize global and per-CPU state when the
 * system enters MODE_RT_RUN.  All first releases are synchronized to a
 * common start time a few jiffies in the future.  Always returns 0.
 */
static int hsb_mode_change(int new_mode)
{
	int cpu;
	cpu_state_t *entry;
	jiffie_t start;

	TRACE("[%d] edf-hsb: mode changed to %d\n", smp_processor_id(),
	      new_mode);
	if (new_mode == MODE_RT_RUN) {
		/* common synchronous start time, 20 jiffies from now */
		start = jiffies + 20;
		prepare_new_releases(&srt, start);
		be_prepare_new_releases(&be, start);

		/* initialize per CPU state
		 * we can't do this at boot time because we don't know
		 * which CPUs will be online and we can't put non-existing
		 * cpus into the queue
		 */
		spin_lock(&hsb_cpu_lock);
		/* get old cruft out of the way in case we reenter real-time
		 * mode for a second time
		 */
		while (!list_empty(&hsb_cpu_queue))
			list_del(hsb_cpu_queue.next);
		/* reinitialize */
		for_each_online_cpu(cpu) {
			entry = &per_cpu(hsb_cpu_state, cpu);
			atomic_set(&entry->will_schedule, 0);
			entry->exec_class = RT_CLASS_BEST_EFFORT;
			entry->cur_deadline = 0;
			list_add(&entry->list, &hsb_cpu_queue);

			prepare_new_releases(&entry->hrt.domain, start);
			prepare_hrt_release(&entry->hrt, start);
		}
		spin_unlock(&hsb_cpu_lock);

	}
	TRACE("[%d] edf-hsb: mode change done\n", smp_processor_id());
	return 0;
}
1622
1623
/* Commands accepted by hsb_scheduler_setup() (user-space ABI). */
typedef enum {
	EDF_HSB_SET_HRT,	/* configure the HRT server of one CPU */
	EDF_HSB_GET_HRT,	/* query the HRT server of one CPU */
	EDF_HSB_CREATE_BE	/* create a new BE server */
} edf_hsb_setup_cmds_t;

/* argument for EDF_HSB_SET_HRT / EDF_HSB_GET_HRT */
typedef struct {
	int 		cpu;	/* CPU whose HRT server is addressed */
	unsigned int	wcet;	/* server budget */
	unsigned int	period;	/* server period */
} setup_hrt_param_t;

/* argument for EDF_HSB_CREATE_BE */
typedef struct {
	unsigned int	wcet;	/* server budget */
	unsigned int	period;	/* server period */
} create_be_param_t;

/* union of all command arguments as copied from user space */
typedef struct {
	union {
		setup_hrt_param_t setup_hrt;
		create_be_param_t create_be;
	};
} param_t;
1647
/* synthetic PID handed to the next BE server created via
 * EDF_HSB_CREATE_BE; only used for identification in traces
 */
static pid_t next_be_server_pid = SRT_BASE_PID;
1649
/* hsb_scheduler_setup - user-space configuration entry point.
 *
 * cmd selects one of edf_hsb_setup_cmds_t; up points to the matching
 * param struct in user space.  Returns 0 on success, -EFAULT on bad
 * user pointers, -EINVAL on invalid parameters or unknown commands,
 * -ENOMEM if a BE server cannot be allocated.
 */
static int hsb_scheduler_setup(int cmd, void __user* up)
{
	unsigned long flags;
	int error = -EINVAL;
	cpu_state_t* state;
	be_server_t* srv;
	param_t param;

	switch (cmd) {
	case EDF_HSB_SET_HRT:
		if (copy_from_user(&param, up, sizeof(setup_hrt_param_t))) {
			error = -EFAULT;
			goto out;
		}
		if (!cpu_online(param.setup_hrt.cpu)) {
			printk(KERN_WARNING "scheduler setup: "
			       "CPU %d is not online!\n", param.setup_hrt.cpu);
			error = -EINVAL;
			goto out;
		}
		/* a server needs period >= wcet to be feasible */
		if (param.setup_hrt.period < param.setup_hrt.wcet) {
			printk(KERN_WARNING "period < wcet!\n");
			error = -EINVAL;
			goto out;
		}

		state = &per_cpu(hsb_cpu_state, param.setup_hrt.cpu);
		spin_lock_irqsave(&state->lock, flags);

		state->hrt.wcet   = param.setup_hrt.wcet;
		state->hrt.period = param.setup_hrt.period;

		spin_unlock_irqrestore(&state->lock, flags);

		printk(KERN_WARNING "edf-hsb: set HRT #%d to (%d, %d)\n",
		       param.setup_hrt.cpu, param.setup_hrt.wcet,
		       param.setup_hrt.period);

		error = 0;

		break;

	case EDF_HSB_GET_HRT:
		if (copy_from_user(&param, up, sizeof(setup_hrt_param_t))) {
			error = -EFAULT;
			goto out;
		}
		if (!cpu_online(param.setup_hrt.cpu)) {
			error = -EINVAL;
			goto out;
		}
		state = &per_cpu(hsb_cpu_state, param.setup_hrt.cpu);
		spin_lock_irqsave(&state->lock, flags);

		param.setup_hrt.wcet = state->hrt.wcet;
		param.setup_hrt.period = state->hrt.period;

		spin_unlock_irqrestore(&state->lock, flags);

		if (copy_to_user(up, &param, sizeof(setup_hrt_param_t))) {
			error = -EFAULT;
			goto out;
		}
		error = 0;
		break;

	case EDF_HSB_CREATE_BE:
		if (copy_from_user(&param, up, sizeof(create_be_param_t))) {
			error = -EFAULT;
			goto out;
		}
		/* reject infeasible and zero-sized servers */
		if (param.create_be.period < param.create_be.wcet ||
		    !param.create_be.period || !param.create_be.wcet) {
			error = -EINVAL;
			goto out;
		}
		srv = (be_server_t*) kmalloc(sizeof(be_server_t), GFP_KERNEL);
		if (!srv) {
			error = -ENOMEM;
			goto out;
		}
		srv->wcet   = param.create_be.wcet;
		srv->period = param.create_be.period;
		srv->pid    = next_be_server_pid++;
		INIT_LIST_HEAD(&srv->list);
		be_prepare_new_release(srv, jiffies);
		be_enqueue(&be, srv);

		printk(KERN_WARNING "edf-hsb: created a BE with (%d, %d)\n",
		       param.create_be.wcet, param.create_be.period);

		error = 0;
		break;

	default:
		printk(KERN_WARNING "edf-hsb: unknown command %d\n", cmd);
	}

out:
	return error;
}
1751
/* Plugin object; filled in by init_edf_hsb_plugin() on first use */
static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
	.ready_to_use = 0
};
1756
1757
1758/*
1759 * Plugin initialization code.
1760 */
1761#define INIT_SCHED_PLUGIN (struct sched_plugin){\
1762 .plugin_name = "EDF-HSB",\
1763 .ready_to_use = 1,\
1764 .algo_scheduler_tick = hsb_scheduler_tick,\
1765 .scheduler_tick = rt_scheduler_tick,\
1766 .prepare_task = hsb_prepare_task,\
1767 .sleep_next_period = edf_sleep_next_period,\
1768 .tear_down = hsb_tear_down,\
1769 .shutdown_hook = 0,\
1770 .schedule = hsb_schedule,\
1771 .finish_switch = hsb_finish_switch,\
1772 .mode_change = hsb_mode_change,\
1773 .wake_up_task = hsb_wake_up_task,\
1774 .task_blocks = hsb_task_blocks, \
1775 .scheduler_setup = hsb_scheduler_setup \
1776}
1777
1778
/* init_edf_hsb_plugin - one-time construction of the EDF-HSB plugin.
 * Initializes the capacity queue, the SRT and BE domains, the BE FIFO,
 * and per-CPU HRT server state, then publishes the callback table.
 * Idempotent: a second call just returns the already-initialized object.
 */
sched_plugin_t *__init init_edf_hsb_plugin(void)
{
	int i;

	if (!s_plugin.ready_to_use)
	{
		set_sched_options(SCHED_NONE);
		capacity_queue_init(&cap_queue);
		edf_domain_init(&srt, srt_check_resched);
		edf_domain_init(&be, be_check_resched);
		fifo_domain_init(&hsb_fifo, 50);
		/* per-CPU state is set up for every possible CPU here;
		 * online CPUs are (re)linked in hsb_mode_change()
		 */
		for (i = 0; i < NR_CPUS; i++)
		{
			hsb_cpu_state_init(&per_cpu(hsb_cpu_state, i),
					   hrt_check_resched, i);
			printk("HRT server %d initialized.\n", i);
		}
		s_plugin = INIT_SCHED_PLUGIN;
	}
	return &s_plugin;
}