author		Namhoon Kim <namhoonk@cs.unc.edu>	2015-01-21 15:54:21 -0500
committer	Namhoon Kim <namhoonk@cs.unc.edu>	2015-01-21 15:54:21 -0500
commit		16a64e75ff5d1deeeb8adaaa0d11b1d6fe236bbe (patch)
tree		02c8bfade500a847e039ea340c1583d40d1e9fd6
parent		6583dcfbda43e420921e3adf7f2e46dc719e8d26 (diff)

Initial level-C impl. (wip-mc2)

-rw-r--r--	include/litmus/reservation.h	55
-rw-r--r--	litmus/polling_reservations.c	1
-rw-r--r--	litmus/reservation.c		303
-rw-r--r--	litmus/sched_mc2.c		453
4 files changed, 739 insertions(+), 73 deletions(-)
diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h
index 4eecd3f088e8..0345d3482916 100644
--- a/include/litmus/reservation.h
+++ b/include/litmus/reservation.h
@@ -126,6 +126,9 @@ struct reservation {
 	struct reservation_ops *ops;
 
 	struct list_head clients;
+
+	/* for global env. */
+	int scheduled_on;
 };
 
 void reservation_init(struct reservation *res);
@@ -191,4 +194,56 @@ struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
 struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 	unsigned int id);
 
+#define ENV_RESCHEDULE_NOW (0)
+#define ENV_NO_SCHEDULER_UPDATE (ULLONG_MAX)
+
+struct next_timer_event {
+	lt_t next_event;
+	int timer_armed_on;
+	struct list_head list;
+};
+
+/* A global multiprocessor reservation environment.
+ */
+struct gmp_reservation_environment {
+	//raw_spinlock_t lock;
+	struct reservation_environment env;
+
+	/* ordered by priority */
+	struct list_head active_reservations;
+
+	/* ordered by next_replenishment */
+	struct list_head depleted_reservations;
+
+	/* unordered */
+	struct list_head inactive_reservations;
+
+	/* - ENV_RESCHEDULE_NOW means call gmp_dispatch() now
+	 * - ENV_NO_SCHEDULER_UPDATE means nothing to do
+	 * - any other value means program a timer for the given time
+	 */
+	struct list_head next_events;
+	raw_spinlock_t event_lock;
+	bool schedule_now;
+	/* set to true if a call to gmp_dispatch() is imminent */
+	bool will_schedule;
+};
+
+/* Contract:
+ * - before calling into gmp_ code, or any reservation methods,
+ *   update the time with gmp_update_time(); and
+ * - after calling into gmp_ code, or any reservation methods,
+ *   check next_events and schedule_now and program a timer or
+ *   trigger a scheduler invocation accordingly.
+ */
+
+void gmp_init(struct gmp_reservation_environment* gmp_env);
+void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
+	struct reservation* new_res);
+void gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
+struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
+
+struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
+	unsigned int id);
+
 #endif
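
Note: the intended call pattern for the new gmp_* API mirrors the sup_* contract
documented above. A minimal caller sketch follows (hypothetical code, not part of
this patch; example_gmp_tick is an illustrative name):

	/* Hypothetical caller sketch. Per the contract: update time before
	 * touching the environment, then dispatch; afterwards, consult
	 * next_events (under event_lock) to arm a timer for the earliest
	 * pending event, or reschedule at once if schedule_now was set. */
	static void example_gmp_tick(struct gmp_reservation_environment *gmp_env)
	{
		struct task_struct *tsk;

		raw_spin_lock(&gmp_env->event_lock);

		gmp_update_time(gmp_env, litmus_clock());
		tsk = gmp_dispatch(gmp_env);

		raw_spin_unlock(&gmp_env->event_lock);

		if (tsk)
			TRACE_TASK(tsk, "dispatched from global environment\n");
	}
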
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
index 4c07ee74bf39..81f8e0f54ffd 100644
--- a/litmus/polling_reservations.c
+++ b/litmus/polling_reservations.c
@@ -72,6 +72,7 @@ static void periodic_polling_client_departs(
 		/* do nothing */
 		break;
 	}
+	res->scheduled_on = NO_CPU;
 }
 
 static void periodic_polling_on_replenishment(
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 0e43479ff2e1..35b3b5de5d0c 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -1,4 +1,5 @@
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/reservation.h>
@@ -203,8 +204,7 @@ static void sup_charge_budget(
 		{
 			/* make sure scheduler is invoked when this reservation expires
 			 * its remaining budget */
-			TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n",
-				res->id, res->cur_budget);
+			//TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
 			sup_scheduler_update_after(sup_env, res->cur_budget);
 		}
 		if (encountered_active)
@@ -317,3 +317,302 @@ void sup_init(struct sup_reservation_environment* sup_env)
 
 	sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
 }
+
+static void gmp_scheduler_update_at(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t when)
+{
+	struct next_timer_event *nevent;
+	struct list_head *pos;
+	struct next_timer_event *queued;
+	int found = 0;
+
+	/* may be called with event_lock held; must not sleep */
+	nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
+	if (!nevent)
+		return;
+	nevent->next_event = when;
+	nevent->timer_armed_on = NO_CPU;
+
+	list_for_each(pos, &gmp_env->next_events) {
+		queued = list_entry(pos, struct next_timer_event, list);
+		if (queued->next_event > nevent->next_event) {
+			list_add(&nevent->list, pos->prev);
+			found = 1;
+			TRACE("NEXT_EVENT ADDED after %llu\n", queued->next_event);
+			break;
+		}
+	}
+
+	if (!found) {
+		list_add_tail(&nevent->list, &gmp_env->next_events);
+		TRACE("NEXT_EVENT ADDED at TAIL\n");
+	}
+
+	TRACE("GMP_SCHEDULER_UPDATE_AT update_time: %llu\n", nevent->next_event);
+}
+
+static void gmp_scheduler_update_after(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t timeout)
+{
+	gmp_scheduler_update_at(gmp_env, gmp_env->env.current_time + timeout);
+}
+
+static int _gmp_queue_depleted(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	struct list_head *pos;
+	struct reservation *queued;
+	int passed_earlier = 0;
+
+	list_for_each(pos, &gmp_env->depleted_reservations) {
+		queued = list_entry(pos, struct reservation, list);
+		if (queued->next_replenishment > res->next_replenishment) {
+			list_add(&res->list, pos->prev);
+			return passed_earlier;
+		} else
+			passed_earlier = 1;
+	}
+
+	list_add_tail(&res->list, &gmp_env->depleted_reservations);
+
+	return passed_earlier;
+}
+
+static void gmp_queue_depleted(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	int passed_earlier = _gmp_queue_depleted(gmp_env, res);
+
+	/* check for updated replenishment time */
+	if (!passed_earlier)
+		gmp_scheduler_update_at(gmp_env, res->next_replenishment);
+}
+
+static int _gmp_queue_active(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	struct list_head *pos;
+	struct reservation *queued;
+	int passed_active = 0;
+
+	list_for_each(pos, &gmp_env->active_reservations) {
+		queued = list_entry(pos, struct reservation, list);
+		if (queued->priority > res->priority) {
+			list_add(&res->list, pos->prev);
+			return passed_active;
+		} else if (queued->state == RESERVATION_ACTIVE)
+			passed_active = 1;
+	}
+
+	list_add_tail(&res->list, &gmp_env->active_reservations);
+	return passed_active;
+}
+
+static void gmp_queue_active(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	int passed_active = _gmp_queue_active(gmp_env, res);
+
+	/* check for possible preemption */
+	if (res->state == RESERVATION_ACTIVE && !passed_active) {
+		gmp_env->schedule_now = true;
+	} else {
+		/* Active means this reservation is draining budget => make sure
+		 * the scheduler is called to notice when the reservation budget has been
+		 * drained completely. */
+		gmp_scheduler_update_after(gmp_env, res->cur_budget);
+	}
+}
+
+static void gmp_queue_reservation(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	TRACE("GMP_QUEUE_RES state (%d)\n", res->state);
+	switch (res->state) {
+		case RESERVATION_INACTIVE:
+			list_add(&res->list, &gmp_env->inactive_reservations);
+			break;
+
+		case RESERVATION_DEPLETED:
+			gmp_queue_depleted(gmp_env, res);
+			break;
+
+		case RESERVATION_ACTIVE_IDLE:
+		case RESERVATION_ACTIVE:
+			gmp_queue_active(gmp_env, res);
+			break;
+	}
+}
+
+void gmp_add_new_reservation(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation* new_res)
+{
+	new_res->env = &gmp_env->env;
+	TRACE("GMP_ADD_NEW_RES\n");
+	gmp_queue_reservation(gmp_env, new_res);
+}
+
+struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
+	unsigned int id)
+{
+	struct reservation *res;
+
+	list_for_each_entry(res, &gmp_env->active_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+	list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+	list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+
+	return NULL;
+}
+
+static void gmp_charge_budget(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t delta)
+{
+	struct list_head *pos, *next;
+	struct reservation *res;
+
+	//int encountered_active = 0;
+
+	list_for_each_safe(pos, next, &gmp_env->active_reservations) {
+		/* charge all ACTIVE and ACTIVE_IDLE reservations; unlike the
+		 * sup_ variant, we do not stop at the first ACTIVE one */
+		res = list_entry(pos, struct reservation, list);
+		if (res->state == RESERVATION_ACTIVE) {
+			res->ops->drain_budget(res, delta);
+			//encountered_active = 1;
+		} else {
+			BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
+			res->ops->drain_budget(res, delta);
+		}
+		if (res->state == RESERVATION_ACTIVE ||
+			res->state == RESERVATION_ACTIVE_IDLE)
+		{
+			/* make sure scheduler is invoked when this reservation expires
+			 * its remaining budget */
+			TRACE("[GMP] requesting scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
+			gmp_scheduler_update_after(gmp_env, res->cur_budget);
+		}
+//		if (encountered_active)
+			/* stop at the first ACTIVE reservation */
+//			break;
+	}
+	//TRACE("finished charging budgets\n");
+}
+
+static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
+{
+	struct list_head *pos, *next;
+	struct reservation *res;
+
+	list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
+		res = list_entry(pos, struct reservation, list);
+		if (res->next_replenishment <= gmp_env->env.current_time) {
+			res->ops->replenish(res);
+		} else {
+			/* list is ordered by increasing depletion times */
+			break;
+		}
+	}
+	//TRACE("finished replenishing budgets\n");
+
+	/* request a scheduler update at the next replenishment instant */
+	res = list_first_entry_or_null(&gmp_env->depleted_reservations,
+		struct reservation, list);
+	if (res)
+		gmp_scheduler_update_at(gmp_env, res->next_replenishment);
+}
+
+void gmp_update_time(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t now)
+{
+	lt_t delta;
+
+	/* If the time didn't advance, there is nothing to do.
+	 * This check makes it safe to call gmp_update_time() potentially
+	 * multiple times (e.g., via different code paths). */
+	//TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
+	if (unlikely(now <= gmp_env->env.current_time))
+		return;
+
+	delta = now - gmp_env->env.current_time;
+	gmp_env->env.current_time = now;
+
+	/* deplete budgets by passage of time */
+	gmp_charge_budget(gmp_env, delta);
+
+	/* check if any budgets were replenished */
+	gmp_replenish_budgets(gmp_env);
+}
+
+struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env)
+{
+	struct reservation *res, *next;
+	struct task_struct *tsk = NULL;
+	lt_t time_slice;
+
+	list_for_each_entry_safe(res, next, &gmp_env->active_reservations, list) {
+		if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
+			tsk = res->ops->dispatch_client(res, &time_slice);
+			if (likely(tsk)) {
+				if (time_slice)
+					gmp_scheduler_update_after(gmp_env, time_slice);
+				gmp_scheduler_update_after(gmp_env, res->cur_budget);
+				return tsk;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+static void gmp_res_change_state(
+	struct reservation_environment* env,
+	struct reservation *res,
+	reservation_state_t new_state)
+{
+	struct gmp_reservation_environment* gmp_env;
+
+	gmp_env = container_of(env, struct gmp_reservation_environment, env);
+
+	TRACE("[GMP] reservation R%d state %d->%d at %llu\n",
+		res->id, res->state, new_state, env->current_time);
+
+	list_del(&res->list);
+	/* check if we need to reschedule because we lost an active reservation */
+	if (res->state == RESERVATION_ACTIVE && !gmp_env->will_schedule)
+		gmp_env->schedule_now = true;
+	res->state = new_state;
+	gmp_queue_reservation(gmp_env, res);
+}
+
+void gmp_init(struct gmp_reservation_environment* gmp_env)
+{
+	memset(gmp_env, 0, sizeof(*gmp_env));
+
+	INIT_LIST_HEAD(&gmp_env->active_reservations);
+	INIT_LIST_HEAD(&gmp_env->depleted_reservations);
+	INIT_LIST_HEAD(&gmp_env->inactive_reservations);
+
+	gmp_env->env.change_state = gmp_res_change_state;
+
+	gmp_env->schedule_now = false;
+
+	//raw_spin_lock_init(&gmp_env->lock);
+	INIT_LIST_HEAD(&gmp_env->next_events);
+
+	raw_spin_lock_init(&gmp_env->event_lock);
+}
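
The three ordered queues above (next_events, depleted_reservations, and
active_reservations) all rely on the same sorted-insert idiom:
list_add(new, pos->prev) links the new node immediately before the first
queued element that compares later, so each list stays sorted in ascending
order. A standalone sketch of the pattern (illustrative only; struct event
is a hypothetical type, not part of this patch):

	#include <linux/list.h>

	struct event {
		unsigned long long time;
		struct list_head list;
	};

	static void insert_sorted(struct list_head *head, struct event *ev)
	{
		struct list_head *pos;
		struct event *queued;

		list_for_each(pos, head) {
			queued = list_entry(pos, struct event, list);
			if (queued->time > ev->time) {
				/* insert directly before the first later event */
				list_add(&ev->list, pos->prev);
				return;
			}
		}
		/* every queued event is earlier (or the list is empty) */
		list_add_tail(&ev->list, head);
	}
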
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index b9f05238461b..3c8aa739345d 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -15,6 +15,8 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+struct gmp_reservation_environment _global_env;
+
 struct mc2_task_state {
 	struct task_client res_info;
 	int cpu;
@@ -26,11 +28,18 @@ struct mc2_cpu_state {
 	raw_spinlock_t lock;
 
 	struct sup_reservation_environment sup_env;
+	struct gmp_reservation_environment* gmp_env;
 	struct hrtimer timer;
 
 	int cpu;
 	struct task_struct* scheduled;
+	struct task_struct* will_schedule;
+	struct task_struct* linked; // for level C
 	enum crit_level run_level;
+	struct task_struct* crit_entry[NUM_CRIT_LEVELS]; // mc2_task_state (get_mc2_state)
+
+	// indicates whether the current timer event is global
+	bool is_global_event;
 };
 
 static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
@@ -74,12 +83,14 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 {
 	int local;
 	lt_t update, now;
+	struct next_timer_event *n_event = NULL;
+	int global_found = 0;
 
 	update = state->sup_env.next_scheduler_update;
 	now = state->sup_env.env.current_time;
 
 	/* Be sure we're actually running on the right core,
-	 * as pres_update_timer() is also called from pres_task_resume(),
+	 * as mc2_update_timer() is also called from mc2_task_resume(),
 	 * which might be called on any CPU when a thread resumes.
 	 */
 	local = local_cpu_state() == state;
@@ -87,16 +98,50 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	/* Must drop state lock before calling into hrtimer_start(), which
 	 * may raise a softirq, which in turn may wake ksoftirqd. */
 	raw_spin_unlock(&state->lock);
+
+	raw_spin_lock(&(_global_env.event_lock));
+	list_for_each_entry(n_event, &state->gmp_env->next_events, list) {
+		TRACE("G_EVENT time: %llu, timer_armed_on: %d\n", n_event->next_event, n_event->timer_armed_on == NO_CPU ? (-1) : n_event->timer_armed_on);
+		if (n_event->timer_armed_on == NO_CPU) {
+			global_found = 1;
+			break;
+		}
+	}
+
+	if (global_found == 1) {
+		if (update >= n_event->next_event) {
+			update = n_event->next_event;
+			now = _global_env.env.current_time;
+			//state->is_global_event = true;
+			//n_event->timer_armed_on = state->cpu;
+		} else { // next event is sup
+			global_found = 0;
+		}
+	}
 
+	raw_spin_unlock(&(_global_env.event_lock));
+
 	if (update <= now) {
 		litmus_reschedule(state->cpu);
 	} else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
 		/* Reprogram only if not already set correctly. */
 		if (!hrtimer_active(&state->timer) ||
 		    ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
+
+			if ((ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) && (state->is_global_event == true)) {
+				struct next_timer_event *prev_event = NULL;
+				raw_spin_lock(&(_global_env.event_lock));
+				list_for_each_entry(prev_event, &state->gmp_env->next_events, list) {
+					if (prev_event->timer_armed_on == state->cpu) {
+						prev_event->timer_armed_on = NO_CPU;
+						break;
+					}
+				}
+				raw_spin_unlock(&(_global_env.event_lock));
+			}
 			TRACE("canceling timer...\n");
 			hrtimer_cancel(&state->timer);
-			TRACE("setting scheduler timer for %llu\n", update);
+			TRACE("setting scheduler (global: %d) timer for %llu\n", state->is_global_event, update);
 			/* We cannot use hrtimer_start() here because the
 			 * wakeup flag must be set to zero. */
 			__hrtimer_start_range_ns(&state->timer,
@@ -104,6 +149,16 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 				0 /* timer coalescing slack */,
 				HRTIMER_MODE_ABS_PINNED,
 				0 /* wakeup */);
+			if (global_found) {
+				raw_spin_lock(&(_global_env.event_lock));
+				state->is_global_event = true;
+				n_event->timer_armed_on = state->cpu;
+				raw_spin_unlock(&(_global_env.event_lock));
+			} else {
+				state->is_global_event = false;
+			}
+
+			/* n_event is only valid if a global event was armed */
+			TRACE("set scheduler (global: %d) timer for %llu on P%d\n",
+				state->is_global_event, update,
+				state->is_global_event ? n_event->timer_armed_on : state->cpu);
 		}
 	} else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
 		/* Poke remote core only if timer needs to be set earlier than
@@ -132,6 +187,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	unsigned long flags;
 	enum hrtimer_restart restart = HRTIMER_NORESTART;
 	struct mc2_cpu_state *state;
+	struct next_timer_event *n_event, *next;
 	lt_t update, now;
 
 	state = container_of(timer, struct mc2_cpu_state, timer);
@@ -144,16 +200,47 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	 */
 	BUG_ON(state->cpu != raw_smp_processor_id());
 
+	TRACE("TIMER fired at %llu\n", litmus_clock());
+
+	if (state->is_global_event == true) {
+
+		raw_spin_lock_irqsave(&(_global_env.event_lock), flags);
+
+		TRACE("GLOBAL EVENT FIRED\n");
+		/* only read and free n_event if the armed entry is found */
+		update = ENV_NO_SCHEDULER_UPDATE;
+		list_for_each_entry_safe(n_event, next, &state->gmp_env->next_events, list) {
+			if (n_event->timer_armed_on == state->cpu) {
+				list_del(&n_event->list);
+				TRACE("EVENT ENTRY IS DELETED\n");
+				update = n_event->next_event;
+				kfree(n_event);
+				break;
+			}
+		}
+		gmp_update_time(state->gmp_env, litmus_clock());
+
+		now = state->gmp_env->env.current_time;
+
+		TRACE("ON TIMER UPDATE = %llu, NOW = %llu\n", update, now);
+		raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+	}
+
 	raw_spin_lock_irqsave(&state->lock, flags);
-	sup_update_time(&state->sup_env, litmus_clock());
 
-	update = state->sup_env.next_scheduler_update;
-	now = state->sup_env.env.current_time;
+	if (state->is_global_event != true) {
+		sup_update_time(&state->sup_env, litmus_clock());
+
+		update = state->sup_env.next_scheduler_update;
+		now = state->sup_env.env.current_time;
+	} else {
+		state->is_global_event = false;
+	}
 
 	TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d)\n",
 		now, update, state->cpu);
 
-	if (update <= now) {
+	if (update <= now || state->gmp_env->schedule_now == true) {
 		litmus_reschedule_local();
 	} else if (update != SUP_NO_SCHEDULER_UPDATE) {
 		hrtimer_set_expires(timer, ns_to_ktime(update));
@@ -161,7 +248,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	}
 
 	raw_spin_unlock_irqrestore(&state->lock, flags);
 
 	return restart;
 }
 
@@ -176,10 +263,20 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	BUG_ON(state->scheduled && state->scheduled != prev);
 	BUG_ON(state->scheduled && !is_realtime(prev));
 
+	tinfo = get_mc2_state(prev);
+	if (state->scheduled != NULL) {
+		struct reservation* res;
+		if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+			res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+			res->scheduled_on = NO_CPU;
+			prev->rt_param.scheduled_on = NO_CPU;
+		}
+	}
+
 	/* update time */
 	state->sup_env.will_schedule = true;
 	sup_update_time(&state->sup_env, litmus_clock());
 
 	/* remove task from reservation if it blocks */
 	if (is_realtime(prev) && !is_running(prev))
 		task_departs(prev, is_completed(prev));
@@ -187,6 +284,17 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	/* figure out what to schedule next */
 	state->scheduled = sup_dispatch(&state->sup_env);
 
+	if (!state->scheduled) {
+		raw_spin_lock(&(_global_env.event_lock));
+
+		state->gmp_env->will_schedule = true;
+		gmp_update_time(state->gmp_env, litmus_clock());
+		//state->scheduled = gmp_dispatch(&_global_env);
+		state->gmp_env->will_schedule = false;
+
+		raw_spin_unlock(&(_global_env.event_lock));
+	}
+
 	/* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
 	sched_state_task_picked();
 
@@ -196,13 +304,27 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	mc2_update_timer_and_unlock(state);
 
 	if (prev != state->scheduled && is_realtime(prev)) {
+		struct reservation* res;
 		TRACE_TASK(prev, "descheduled.\n");
+		TRACE_TASK(state->scheduled, "SCHEDULED.\n");
 		state->run_level = NUM_CRIT_LEVELS;
+		tinfo = get_mc2_state(prev);
+		if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+			res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+			res->scheduled_on = NO_CPU;
+			prev->rt_param.scheduled_on = NO_CPU;
+		}
 	}
 	if (state->scheduled) {
+		struct reservation* res;
 		TRACE_TASK(state->scheduled, "scheduled.\n");
-		//tinfo = get_mc2_state(state->scheduled);
-		//state->run_level = tinfo->mc2_param.crit;
+		tinfo = get_mc2_state(state->scheduled);
+		state->run_level = tinfo->mc2_param.crit;
+		if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+			res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+			res->scheduled_on = state->cpu;
+			state->scheduled->rt_param.scheduled_on = state->cpu;
+		}
 	}
 
 	return state->scheduled;
@@ -230,10 +352,16 @@ static void mc2_task_resume(struct task_struct *tsk)
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+	struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
 
 	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
+	if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+		state = cpu_state_for(tinfo->cpu);
+	} else {
+		state = local_cpu_state();
+	}
+
 	raw_spin_lock_irqsave(&state->lock, flags);
 	/* Requeue only if self-suspension was already processed. */
 	if (tinfo->has_departed)
@@ -241,8 +369,16 @@ static void mc2_task_resume(struct task_struct *tsk)
 		/* Assumption: litmus_clock() is synchronized across cores,
 		 * since we might not actually be executing on tinfo->cpu
 		 * at the moment. */
-		sup_update_time(&state->sup_env, litmus_clock());
-		task_arrives(tsk);
+		if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+			sup_update_time(&state->sup_env, litmus_clock());
+			task_arrives(tsk);
+		} else if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+			raw_spin_lock(&(_global_env.event_lock));
+			gmp_update_time(state->gmp_env, litmus_clock());
+			task_arrives(tsk);
+			raw_spin_unlock(&(_global_env.event_lock));
+		}
+
 	/* NOTE: drops state->lock */
 	TRACE("mc2_resume()\n");
 	mc2_update_timer_and_unlock(state);
@@ -255,24 +391,40 @@ static void mc2_task_resume(struct task_struct *tsk)
 	resume_legacy_task_model_updates(tsk);
 }
 
+static void mc2_task_block(struct task_struct *task)
+{
+	struct mc2_task_state *tinfo;
+
+	tinfo = get_mc2_state(task);
+
+	TRACE_TASK(task, "TASK BLOCK\n");
+	if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+		struct reservation *res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
+		res->scheduled_on = NO_CPU;
+		task->rt_param.scheduled_on = NO_CPU;
+	}
+}
 /* syscall backend for job completions */
 static long mc2_complete_job(void)
 {
 	ktime_t next_release;
 	long err;
 	struct mc2_cpu_state *state = local_cpu_state();
-	struct reservation_environment *env = &(state->sup_env.env);
+	struct reservation_environment *env = NULL;
 	struct mc2_task_state *tinfo = get_mc2_state(current);
 
+	if (tinfo->mc2_param.crit == CRIT_LEVEL_C)
+		env = &(_global_env.env);
+	else
+		env = &(state->sup_env.env);
 
-	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
-		get_deadline(current));
+	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(), get_deadline(current));
 
 	tsk_rt(current)->completed = 1;
 
 	if (tsk_rt(current)->sporadic_release) {
 		env->time_zero = tsk_rt(current)->sporadic_release_time;
-
+		hrtimer_cancel(&state->timer);
 		if (tinfo->mc2_param.crit == CRIT_LEVEL_A) {
 			struct reservation *res;
 			struct table_driven_reservation *tdres;
@@ -286,7 +438,7 @@ static long mc2_complete_job(void)
 			res->next_replenishment += tdres->intervals[0].start;
 			res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-			TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+			TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
 		}
 
 	}
@@ -327,32 +479,55 @@ static long mc2_admit_task(struct task_struct *tsk)
 	}
 
 	preempt_disable();
-
-	state = cpu_state_for(task_cpu(tsk));
-	raw_spin_lock_irqsave(&state->lock, flags);
-
-	res = sup_find_by_id(&state->sup_env, mp->res_id);
-
-	/* found the appropriate reservation (or vCPU) */
-	if (res) {
-		TRACE_TASK(tsk, "FOUND RES ID\n");
-		tinfo->mc2_param.crit = mp->crit;
-		tinfo->mc2_param.res_id = mp->res_id;
-
-		kfree(tsk_rt(tsk)->plugin_state);
-		tsk_rt(tsk)->plugin_state = NULL;
-
-		err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
-		tinfo->cpu = task_cpu(tsk);
-		tinfo->has_departed = true;
-		tsk_rt(tsk)->plugin_state = tinfo;
-
-		/* disable LITMUS^RT's per-thread budget enforcement */
-		tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
-	}
-
-	raw_spin_unlock_irqrestore(&state->lock, flags);
+	if (mp->crit == CRIT_LEVEL_C) {
+		raw_spin_lock_irqsave(&(_global_env.event_lock), flags);
+
+		res = gmp_find_by_id(&_global_env, mp->res_id);
+		if (res) {
+			TRACE_TASK(tsk, "FOUND GMP RES ID\n");
+			tinfo->mc2_param.crit = mp->crit;
+			tinfo->mc2_param.res_id = mp->res_id;
+
+			kfree(tsk_rt(tsk)->plugin_state);
+			tsk_rt(tsk)->plugin_state = NULL;
+
+			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+			tinfo->cpu = -1;
+			tinfo->has_departed = true;
+			tsk_rt(tsk)->plugin_state = tinfo;
+
+			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+		}
+
+		raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+
+	} else {
+		state = cpu_state_for(task_cpu(tsk));
+		raw_spin_lock_irqsave(&state->lock, flags);
+
+		res = sup_find_by_id(&state->sup_env, mp->res_id);
+
+		/* found the appropriate reservation (or vCPU) */
+		if (res) {
+			TRACE_TASK(tsk, "FOUND SUP RES ID\n");
+			tinfo->mc2_param.crit = mp->crit;
+			tinfo->mc2_param.res_id = mp->res_id;
+
+			kfree(tsk_rt(tsk)->plugin_state);
+			tsk_rt(tsk)->plugin_state = NULL;
+
+			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+			tinfo->cpu = task_cpu(tsk);
+			tinfo->has_departed = true;
+			tsk_rt(tsk)->plugin_state = tinfo;
+
+			/* disable LITMUS^RT's per-thread budget enforcement */
+			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+		}
+
+		raw_spin_unlock_irqrestore(&state->lock, flags);
+	}
 
 	preempt_enable();
 
 	if (err)
@@ -366,15 +541,29 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+	struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
 	struct reservation *res;
 
 	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
 		litmus_clock(), on_runqueue, is_running);
 
+	if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+		state = cpu_state_for(tinfo->cpu);
+	} else {
+		state = local_cpu_state();
+	}
+
 	/* acquire the lock protecting the state and disable interrupts */
 	raw_spin_lock_irqsave(&state->lock, flags);
 
+	if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+		res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+	} else {
+		raw_spin_lock(&(_global_env.event_lock));
+		res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
+		raw_spin_unlock(&(_global_env.event_lock));
+	}
+
 	if (is_running) {
 		state->scheduled = tsk;
 		/* make sure this task should actually be running */
@@ -384,7 +573,14 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	if (on_runqueue || is_running) {
 		/* Assumption: litmus_clock() is synchronized across cores
 		 * [see comment in pres_task_resume()] */
-		sup_update_time(&state->sup_env, litmus_clock());
+		if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+			sup_update_time(&state->sup_env, litmus_clock());
+		} else if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+			raw_spin_lock(&(_global_env.event_lock));
+			TRACE_TASK(tsk, "CALL GMP_UPDATE_TIME in task_new at %llu\n", litmus_clock());
+			gmp_update_time(state->gmp_env, litmus_clock());
+			raw_spin_unlock(&(_global_env.event_lock));
+		}
 		task_arrives(tsk);
 		/* NOTE: drops state->lock */
 		TRACE("mc2_new()\n");
@@ -393,7 +589,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	} else
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 
-	res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
-	release_at(tsk, res->next_replenishment);
-	if (res)
-		TRACE_TASK(tsk, "next_replenishment = %llu\n", res->next_replenishment);
+	/* res was looked up above; guard the dereference */
+	if (res) {
+		release_at(tsk, res->next_replenishment);
+		TRACE_TASK(tsk, "next_replenishment = %llu\n", res->next_replenishment);
+	}
@@ -407,9 +602,14 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 	struct mc2_cpu_state *state;
 	struct reservation *res = NULL, *next;
 	struct sup_reservation_environment *sup_env;
+	struct gmp_reservation_environment *gmp_env;
 	int found = 0;
 
-	state = cpu_state_for(cpu);
+	if (cpu != -1)
+		state = cpu_state_for(cpu);
+	else
+		state = local_cpu_state();
+
 	raw_spin_lock(&state->lock);
 
 //	res = sup_find_by_id(&state->sup_env, reservation_id);
@@ -447,6 +647,43 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 
 	raw_spin_unlock(&state->lock);
 
+	raw_spin_lock(&(_global_env.event_lock));
+
+	gmp_env = &_global_env;
+	//if (!res) {
+	if (!found) {
+		list_for_each_entry_safe(res, next, &gmp_env->depleted_reservations, list) {
+			if (res->id == reservation_id) {
+				list_del(&res->list);
+				//kfree(res);
+				found = 1;
+				ret = 0;
+			}
+		}
+	}
+	if (!found) {
+		list_for_each_entry_safe(res, next, &gmp_env->inactive_reservations, list) {
+			if (res->id == reservation_id) {
+				list_del(&res->list);
+				//kfree(res);
+				found = 1;
+				ret = 0;
+			}
+		}
+	}
+	if (!found) {
+		list_for_each_entry_safe(res, next, &gmp_env->active_reservations, list) {
+			if (res->id == reservation_id) {
+				list_del(&res->list);
+				//kfree(res);
+				found = 1;
+				ret = 0;
+			}
+		}
+	}
+
+	raw_spin_unlock(&(_global_env.event_lock));
+
 	TRACE("RESERVATION_DESTROY ret = %d\n", ret);
 	return ret;
 }
@@ -455,8 +692,14 @@ static void mc2_task_exit(struct task_struct *tsk)
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+	struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
 
+	if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+		state = cpu_state_for(tinfo->cpu);
+	} else {
+		state = local_cpu_state();
+	}
+
 	raw_spin_lock_irqsave(&state->lock, flags);
 
 	if (state->scheduled == tsk)
@@ -466,7 +709,13 @@ static void mc2_task_exit(struct task_struct *tsk)
 	if (is_running(tsk)) {
 		/* Assumption: litmus_clock() is synchronized across cores
 		 * [see comment in pres_task_resume()] */
-		sup_update_time(&state->sup_env, litmus_clock());
+		if (tinfo->mc2_param.crit != CRIT_LEVEL_C)
+			sup_update_time(&state->sup_env, litmus_clock());
+		else {
+			raw_spin_lock(&(_global_env.event_lock));
+			gmp_update_time(state->gmp_env, litmus_clock());
+			raw_spin_unlock(&(_global_env.event_lock));
+		}
 		task_departs(tsk, 0);
 		/* NOTE: drops state->lock */
 		TRACE("mc2_exit()\n");
@@ -505,7 +754,7 @@ static long create_polling_reservation(
 	int use_edf = config->priority == LITMUS_NO_PRIORITY;
 	int periodic = res_type == PERIODIC_POLLING;
 	long err = -EINVAL;
 
 	if (config->polling_params.budget >
 	    config->polling_params.period) {
 		printk(KERN_ERR "invalid polling reservation (%u): "
@@ -533,26 +782,48 @@ static long create_polling_reservation(
 	if (!pres)
 		return -ENOMEM;
 
-	state = cpu_state_for(config->cpu);
-	raw_spin_lock_irqsave(&state->lock, flags);
+	if (config->cpu != -1) {
+		state = cpu_state_for(config->cpu);
+		raw_spin_lock_irqsave(&state->lock, flags);
 
-	res = sup_find_by_id(&state->sup_env, config->id);
-	if (!res) {
-		polling_reservation_init(pres, use_edf, periodic,
-			config->polling_params.budget,
-			config->polling_params.period,
-			config->polling_params.relative_deadline,
-			config->polling_params.offset);
-		pres->res.id = config->id;
-		if (!use_edf)
-			pres->res.priority = config->priority;
-		sup_add_new_reservation(&state->sup_env, &pres->res);
-		err = config->id;
-	} else {
-		err = -EEXIST;
-	}
+		res = sup_find_by_id(&state->sup_env, config->id);
+		if (!res) {
+			polling_reservation_init(pres, use_edf, periodic,
+				config->polling_params.budget,
+				config->polling_params.period,
+				config->polling_params.relative_deadline,
+				config->polling_params.offset);
+			pres->res.id = config->id;
+			if (!use_edf)
+				pres->res.priority = config->priority;
+			sup_add_new_reservation(&state->sup_env, &pres->res);
+			err = config->id;
+		} else {
+			err = -EEXIST;
+		}
 
-	raw_spin_unlock_irqrestore(&state->lock, flags);
+		raw_spin_unlock_irqrestore(&state->lock, flags);
+	} else if (config->cpu == -1) {
+		raw_spin_lock_irqsave(&(_global_env.event_lock), flags);
+
+		res = gmp_find_by_id(&_global_env, config->id);
+		if (!res) {
+			polling_reservation_init(pres, use_edf, periodic,
+				config->polling_params.budget,
+				config->polling_params.period,
+				config->polling_params.relative_deadline,
+				config->polling_params.offset);
+			pres->res.id = config->id;
+			if (!use_edf)
+				pres->res.priority = config->priority;
+			gmp_add_new_reservation(&_global_env, &pres->res);
+			err = config->id;
+		} else {
+			err = -EEXIST;
+		}
+
+		raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+	}
 
 	if (err < 0)
 		kfree(pres);
@@ -671,10 +942,12 @@ static long mc2_reservation_create(int res_type, void* __user _config)
 	if (copy_from_user(&config, _config, sizeof(config)))
 		return -EFAULT;
 
-	if (config.cpu < 0 || !cpu_online(config.cpu)) {
-		printk(KERN_ERR "invalid polling reservation (%u): "
-		       "CPU %d offline\n", config.id, config.cpu);
-		return -EINVAL;
+	if (config.cpu != -1) {
+		if (config.cpu < 0 || !cpu_online(config.cpu)) {
+			printk(KERN_ERR "invalid polling reservation (%u): "
+			       "CPU %d offline\n", config.id, config.cpu);
+			return -EINVAL;
+		}
 	}
 
 	switch (res_type) {
@@ -732,6 +1005,8 @@ static long mc2_activate_plugin(void)
 	int cpu;
 	struct mc2_cpu_state *state;
 
+	gmp_init(&_global_env);
+
 	for_each_online_cpu(cpu) {
 		TRACE("Initializing CPU%d...\n", cpu);
 
@@ -740,7 +1015,11 @@ static long mc2_activate_plugin(void)
 		raw_spin_lock_init(&state->lock);
 		state->cpu = cpu;
 		state->scheduled = NULL;
-
+		state->will_schedule = NULL;
+		state->linked = NULL;
+		state->gmp_env = &_global_env;
+		state->is_global_event = false;
+
 		sup_init(&state->sup_env);
 
 		hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
@@ -794,6 +1073,37 @@ static long mc2_deactivate_plugin(void)
 		raw_spin_unlock(&state->lock);
 	}
 
+	raw_spin_lock(&(_global_env.event_lock));
+
+	/* Delete all reservations --- assumes struct reservation
+	 * is prefix of containing struct. */
+
+	while (!list_empty(&_global_env.active_reservations)) {
+		res = list_first_entry(
+			&_global_env.active_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	while (!list_empty(&_global_env.inactive_reservations)) {
+		res = list_first_entry(
+			&_global_env.inactive_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	while (!list_empty(&_global_env.depleted_reservations)) {
+		res = list_first_entry(
+			&_global_env.depleted_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	raw_spin_unlock(&(_global_env.event_lock));
+
 	destroy_domain_proc_info(&mc2_domain_proc_info);
 	return 0;
 }
@@ -802,6 +1112,7 @@ static struct sched_plugin mc2_plugin = {
 	.plugin_name		= "MC2",
 	.schedule		= mc2_schedule,
 	.task_wake_up		= mc2_task_resume,
+	.task_block		= mc2_task_block,
 	.admit_task		= mc2_admit_task,
 	.task_new		= mc2_task_new,
 	.task_exit		= mc2_task_exit,
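
With this change, a reservation created with cpu == -1 is placed in the global
(level-C) environment rather than a per-CPU sup environment. A hypothetical
userspace sketch (assumes the liblitmus reservation_create() wrapper and the
ms2ns() helper; field names are taken from the reservation_config usage above):

	#include <litmus.h>

	int create_level_c_reservation(unsigned int id)
	{
		struct reservation_config config = {0};

		config.id = id;
		config.cpu = -1;	/* -1 selects the global environment */
		config.priority = LITMUS_NO_PRIORITY;	/* use EDF ordering */
		config.polling_params.budget = ms2ns(10);
		config.polling_params.period = ms2ns(100);
		config.polling_params.relative_deadline = 0;
		config.polling_params.offset = 0;

		return reservation_create(PERIODIC_POLLING, &config);
	}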