path: root/litmus/sched_mc2.c
author    Namhoon Kim <namhoonk@cs.unc.edu>    2015-01-28 09:26:59 -0500
committer Namhoon Kim <namhoonk@cs.unc.edu>    2015-01-28 09:26:59 -0500
commit    5ba38eb6290a0c1767932c03b15edb0627ffd6b2
tree      8b1221cf821755ff7de26bf3fe375596e26d64d4 /litmus/sched_mc2.c
parent    ca538aafd7cebfd09a47af0a628647620a6bba35
Diffstat (limited to 'litmus/sched_mc2.c')
-rw-r--r--  litmus/sched_mc2.c  729
1 file changed, 609 insertions(+), 120 deletions(-)
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 0c260190f287..6dee1ec2c99c 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -15,6 +15,23 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+struct gmp_reservation_environment _global_env;
+
+struct cpu_entry {
+	struct task_struct *scheduled;
+	lt_t deadline;
+	int cpu;
+	enum crit_level lv;
+	bool will_schedule;
+};
+
+struct cpu_priority {
+	raw_spinlock_t lock;
+	struct cpu_entry cpu_entries[NR_CPUS];
+};
+
+struct cpu_priority _lowest_prio_cpu;
+
 struct mc2_task_state {
 	struct task_client res_info;
 	int cpu;
@@ -51,11 +68,39 @@ static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
 }
 static enum crit_level get_task_crit_level(struct task_struct *tsk)
 {
-	struct mc2_task_state *tinfo = get_mc2_state(tsk);
-	if (!tinfo)
+	//struct mc2_task_state *tinfo = get_mc2_state(tsk);
+	struct mc2_task *mp;
+
+	if (!tsk || !is_realtime(tsk))
+		return NUM_CRIT_LEVELS;
+
+	mp = tsk_rt(tsk)->mc2_data;
+
+	if (!mp)
 		return NUM_CRIT_LEVELS;
 	else
-		return tinfo->mc2_param.crit;
+		return mp->crit;
+}
+
+static struct reservation* res_find_by_id(struct mc2_cpu_state *state, unsigned int id)
+{
+	struct reservation *res;
+
+	res = sup_find_by_id(&state->sup_env, id);
+	if (!res)
+		res = gmp_find_by_id(&_global_env, id);
+
+	return res;
+}
+
+static void mc2_update_time(enum crit_level lv, struct mc2_cpu_state *state, lt_t time)
+{
+	if (lv < CRIT_LEVEL_C)
+		sup_update_time(&state->sup_env, time);
+	else if (lv == CRIT_LEVEL_C)
+		gmp_update_time(&_global_env, time);
+	else
+		TRACE("update_time(): Criticality level error!!!!\n");
 }
 
 static void task_departs(struct task_struct *tsk, int job_complete)
@@ -78,6 +123,7 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 
 	ce = &state->crit_entries[lv];
 	ce->running = tsk;
+	res->is_ghost = 1;
 	TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
 
 	//BUG_ON(hrtimer_active(&ce->ghost_timer));
@@ -107,11 +153,44 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 	}
 }
 
+/* return: NO_CPU - all CPUs are running tasks with higher priority than Level C */
+static int get_lowest_prio_cpu(void)
+{
+	struct cpu_entry *ce;
+	int cpu, ret = NO_CPU;
+	lt_t latest_deadline = 0;
+
+	raw_spin_lock(&_lowest_prio_cpu.lock);
+	for_each_online_cpu(cpu) {
+		ce = &_lowest_prio_cpu.cpu_entries[cpu];
+		if (!ce->will_schedule) {
+			if (!ce->scheduled) {
+				raw_spin_unlock(&_lowest_prio_cpu.lock);
+				return ce->cpu;
+			} else if (ce->lv == CRIT_LEVEL_C && ce->deadline > latest_deadline) {
+				latest_deadline = ce->deadline;
+				ret = ce->cpu;
+			}
+		}
+	}
+
+	raw_spin_unlock(&_lowest_prio_cpu.lock);
+
+	return ret;
+}
+
 /* NOTE: drops state->lock */
 static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 {
 	int local;
 	lt_t update, now;
+	enum crit_level lv = get_task_crit_level(state->scheduled);
+	struct next_timer_event *event, *next;
+	int found_event = 0;
+
+	//TRACE_TASK(state->scheduled, "update_timer!\n");
+	if (lv != NUM_CRIT_LEVELS)
+		TRACE_TASK(state->scheduled, "UPDATE_TIMER LV = %d\n", lv);
 
 	update = state->sup_env.next_scheduler_update;
 	now = state->sup_env.env.current_time;
@@ -163,6 +242,37 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			litmus_reschedule(state->cpu);
 		}
 	}
+
+	raw_spin_lock(&_global_env.lock);
+	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
+		if (event->timer_armed_on == NO_CPU) {
+			found_event = 1;
+			if (event->next_update < litmus_clock()) {
+				int cpu = get_lowest_prio_cpu();
+				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
+				list_del(&event->list);
+				kfree(event);
+				if (cpu != NO_CPU) {
+					raw_spin_lock(&_lowest_prio_cpu.lock);
+					_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
+					raw_spin_unlock(&_lowest_prio_cpu.lock);
+					litmus_reschedule(cpu);
+				}
+			} else if (!hrtimer_active(&state->g_timer)) {
+				int ret;
+				TRACE("setting global scheduler timer for %llu\n", event->next_update);
+				ret = __hrtimer_start_range_ns(&state->g_timer,
+						ns_to_ktime(event->next_update),
+						0 /* timer coalescing slack */,
+						HRTIMER_MODE_ABS_PINNED,
+						0 /* wakeup */);
+				if (!ret) {
+					event->timer_armed_on = state->cpu;
+				}
+			}
+		}
+	}
+	raw_spin_unlock(&_global_env.lock);
 }
 
 static void mc2_update_ghost_state(struct mc2_cpu_state *state)
@@ -176,16 +286,20 @@ static void mc2_update_ghost_state(struct mc2_cpu_state *state)
 		ce = &state->crit_entries[lv];
 		if (ce->running != NULL) {
 			tinfo = get_mc2_state(ce->running);
+			/*
 			if (lv != CRIT_LEVEL_C)
 				res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
 			else
 				continue;
+			*/
+			res = res_find_by_id(state, tinfo->mc2_param.res_id);
 			TRACE("LV %d running id %d budget %llu\n", lv, tinfo->mc2_param.res_id, res->cur_budget);
 			if (!res->cur_budget) {
 				struct sup_reservation_environment* sup_env = &state->sup_env;
 
 				TRACE("GHOST FINISH id %d at %llu\n", tinfo->mc2_param.res_id, litmus_clock());
 				ce->running = NULL;
+				res->is_ghost = 0;
 				res = list_first_entry_or_null(&sup_env->active_reservations, struct reservation, list);
 				if (res)
 					litmus_reschedule_local();
@@ -215,6 +329,95 @@ static enum hrtimer_restart on_ghost_timer(struct hrtimer *timer)
 }
 */
 
+static void update_cpu_prio(struct mc2_cpu_state *state)
+{
+	struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu];
+	enum crit_level lv = get_task_crit_level(state->scheduled);
+
+	if (!state->scheduled) {
+		// cpu is idle.
+		ce->scheduled = NULL;
+		ce->deadline = ULLONG_MAX;
+		ce->lv = NUM_CRIT_LEVELS;
+	} else if (lv == CRIT_LEVEL_C) {
+		ce->scheduled = state->scheduled;
+		ce->deadline = get_deadline(state->scheduled);
+		ce->lv = lv;
+	} else if (lv < CRIT_LEVEL_C) {
+		ce->scheduled = state->scheduled;
+		ce->deadline = 0;
+		ce->lv = lv;
+	}
+};
+
+static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer)
+{
+	unsigned long flags;
+	enum hrtimer_restart restart = HRTIMER_NORESTART;
+	struct mc2_cpu_state *state;
+	struct next_timer_event *event, *next;
+	bool schedule_now;
+	lt_t update, now;
+	int found_event = 0;
+
+	state = container_of(timer, struct mc2_cpu_state, g_timer);
+
+	/* The scheduling timer should only fire on the local CPU, because
+	 * otherwise deadlocks via timer_cancel() are possible.
+	 * Note: this does not interfere with dedicated interrupt handling, as
+	 * even under dedicated interrupt handling scheduling timers for
+	 * budget enforcement must occur locally on each CPU.
+	 */
+	//BUG_ON(state->cpu != raw_smp_processor_id());
+	if (state->cpu != raw_smp_processor_id())
+		TRACE("BUG!!!!!!!!!!!!! TIMER FIRED ON THE OTHER CPU\n");
+
+	raw_spin_lock_irqsave(&_global_env.lock, flags);
+
+	update = litmus_clock();
+	TRACE("GLOBAL TIMER FIRED at %llu\n", update);
+
+	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
+		if (event->next_update < update) {
+			found_event = 1;
+			list_del(&event->list);
+			TRACE("EVENT at %llu IS DELETED\n", event->next_update);
+			kfree(event);
+		}
+	}
+
+	if (!found_event) {
+		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+		return restart;
+	}
+
+	schedule_now = gmp_update_time(&_global_env, update);
+
+	raw_spin_lock(&state->lock);
+	mc2_update_ghost_state(state);
+	raw_spin_unlock(&state->lock);
+
+	now = _global_env.env.current_time;
+
+	TRACE_CUR("on_global_scheduling_timer at %llu, upd:%llu (for cpu=%d) SCHEDULE_NOW = %d\n",
+		now, update, state->cpu, schedule_now);
+
+	if (schedule_now) {
+		int cpu = get_lowest_prio_cpu();
+		if (cpu != NO_CPU) {
+			raw_spin_lock(&_lowest_prio_cpu.lock);
+			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
+			raw_spin_unlock(&_lowest_prio_cpu.lock);
+			TRACE("LOWEST CPU = P%d\n", cpu);
+			litmus_reschedule(cpu);
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+
+	return restart;
+}
+
 static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 {
 	unsigned long flags;
@@ -276,6 +479,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 				if (likely(!ce->running)) {
 					sup_scheduler_update_after(sup_env, res->cur_budget);
 					res->blocked_by_ghost = 0;
+					res->is_ghost = 0;
 					return tsk;
 				} else {
 					res->blocked_by_ghost = 1;
@@ -284,7 +488,34 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 			}
 		}
 	}
-
+	// no level A or B tasks
+
+	list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
+		if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
+			tsk = res->ops->dispatch_client(res, &time_slice);
+			if (likely(tsk)) {
+				lv = get_task_crit_level(tsk);
+				if (lv == NUM_CRIT_LEVELS) {
+					gmp_scheduler_update_after(&_global_env, res->cur_budget);
+					//raw_spin_unlock(&_global_env.lock);
+					return tsk;
+				} else {
+					ce = &state->crit_entries[lv];
+					if (likely(!ce->running)) {
+						gmp_scheduler_update_after(&_global_env, res->cur_budget);
+						res->blocked_by_ghost = 0;
+						res->is_ghost = 0;
+						res->scheduled_on = state->cpu;
+						//raw_spin_unlock(&_global_env.lock);
+						return tsk;
+					} else {
+						res->blocked_by_ghost = 1;
+					}
+				}
+			}
+		}
+	}
+
 	return NULL;
 }
 
@@ -292,17 +523,30 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 {
 	/* next == NULL means "schedule background work". */
 	struct mc2_cpu_state *state = local_cpu_state();
 
+	raw_spin_lock(&_lowest_prio_cpu.lock);
+	if (_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule == true)
+		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+	raw_spin_unlock(&_lowest_prio_cpu.lock);
+
 	raw_spin_lock(&state->lock);
 
-	BUG_ON(state->scheduled && state->scheduled != prev);
-	BUG_ON(state->scheduled && !is_realtime(prev));
+	//BUG_ON(state->scheduled && state->scheduled != prev);
+	//BUG_ON(state->scheduled && !is_realtime(prev));
+	if (state->scheduled && state->scheduled != prev)
+		TRACE("BUG1!!!!!!!!\n");
+	if (state->scheduled && !is_realtime(prev))
+		TRACE("BUG2!!!!!!!!\n");
 
 	/* update time */
 	state->sup_env.will_schedule = true;
-	TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time ####\n");
+	//TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time ####\n");
 	sup_update_time(&state->sup_env, litmus_clock());
-	TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time !!!!\n");
+
+	raw_spin_lock(&_global_env.lock);
+	gmp_update_time(&_global_env, litmus_clock());
+
+	//TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time !!!!\n");
 	mc2_update_ghost_state(state);
 
 	/* remove task from reservation if it blocks */
@@ -311,16 +555,29 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 
 	/* figure out what to schedule next */
 	state->scheduled = mc2_dispatch(&state->sup_env, state);
-
+	if (state->scheduled && is_realtime(state->scheduled))
+		TRACE_TASK(state->scheduled, "mc2_dispatch picked me!\n");
+
+	raw_spin_lock(&_lowest_prio_cpu.lock);
+	update_cpu_prio(state);
+	raw_spin_unlock(&_lowest_prio_cpu.lock);
+
 	/* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
 	sched_state_task_picked();
 
 	/* program scheduler timer */
 	state->sup_env.will_schedule = false;
+
+	raw_spin_unlock(&_global_env.lock);
+
 	/* NOTE: drops state->lock */
 	mc2_update_timer_and_unlock(state);
 
 	if (prev != state->scheduled && is_realtime(prev)) {
+		struct mc2_task_state* tinfo = get_mc2_state(prev);
+		struct reservation* res = tinfo->res_info.client.reservation;
+		TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
+		res->scheduled_on = NO_CPU;
 		TRACE_TASK(prev, "descheduled.\n");
 	}
 	if (state->scheduled) {
@@ -354,10 +611,15 @@ static void mc2_task_resume(struct task_struct *tsk)
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+	struct mc2_cpu_state *state;
 
 	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
+	if (tinfo->cpu != -1)
+		state = cpu_state_for(tinfo->cpu);
+	else
+		state = local_cpu_state();
+
 	raw_spin_lock_irqsave(&state->lock, flags);
 	/* Requeue only if self-suspension was already processed. */
 	if (tinfo->has_departed)
@@ -365,7 +627,16 @@ static void mc2_task_resume(struct task_struct *tsk)
 		/* Assumption: litmus_clock() is synchronized across cores,
 		 * since we might not actually be executing on tinfo->cpu
 		 * at the moment. */
-		sup_update_time(&state->sup_env, litmus_clock());
+		if (tinfo->cpu != -1) {
+			sup_update_time(&state->sup_env, litmus_clock());
+		} else {
+			raw_spin_lock(&_global_env.lock);
+			TRACE("RESUME UPDATE ####\n");
+			gmp_update_time(&_global_env, litmus_clock());
+			TRACE("RESUME UPDATE $$$$\n");
+			raw_spin_unlock(&_global_env.lock);
+		}
+
 		mc2_update_ghost_state(state);
 		task_arrives(state, tsk);
 		/* NOTE: drops state->lock */
@@ -385,37 +656,55 @@ static long mc2_complete_job(void)
 {
 	ktime_t next_release;
 	long err;
-	struct mc2_cpu_state *state = local_cpu_state();
-	struct reservation_environment *env = &(state->sup_env.env);
-	struct mc2_task_state *tinfo = get_mc2_state(current);
-	struct reservation *res;
-
-	res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
-	if (!res)
-		; // find in global env
-
-	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu) (cur->budget: %llu)\n", litmus_clock(),
-		get_deadline(current), res->cur_budget);
+
+	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
+		get_deadline(current));
 
 	tsk_rt(current)->completed = 1;
 
 	if (tsk_rt(current)->sporadic_release) {
-		env->time_zero = tsk_rt(current)->sporadic_release_time;
+		struct mc2_cpu_state *state;
+		struct reservation_environment *env;
+		struct mc2_task_state *tinfo;
+		struct reservation *res;
+		unsigned long flags;
+
+		local_irq_save(flags);
+
+		state = local_cpu_state();
+		env = &(state->sup_env.env);
+		tinfo = get_mc2_state(current);
+
+		res = res_find_by_id(state, tsk_rt(current)->mc2_data->res_id);
+
+		if (get_task_crit_level(current) < CRIT_LEVEL_C) {
+			raw_spin_lock(&state->lock);
+			env->time_zero = tsk_rt(current)->sporadic_release_time;
+		} else {
+			raw_spin_lock(&_global_env.lock);
+			_global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
+		}
+
 		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
-		res->cur_budget = 0;
-		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-		if (tinfo->mc2_param.crit == CRIT_LEVEL_A) {
+		if (get_task_crit_level(current) == CRIT_LEVEL_A) {
 			struct table_driven_reservation *tdres;
-
-			//sup_update_time(&state->sup_env, litmus_clock());
-			//res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
 			tdres = container_of(res, struct table_driven_reservation, res);
 			tdres->next_interval = 0;
 			tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
 			res->next_replenishment += tdres->intervals[0].start;
 		}
-		TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+		res->cur_budget = 0;
+		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
+
+		//TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+		if (get_task_crit_level(current) < CRIT_LEVEL_C) {
+			raw_spin_unlock(&state->lock);
+		} else {
+			raw_spin_unlock(&_global_env.lock);
+		}
+
+		local_irq_restore(flags);
 	}
 
 	prepare_for_next_period(current);
@@ -443,8 +732,9 @@ static long mc2_admit_task(struct task_struct *tsk)
 	struct reservation *res;
 	struct mc2_cpu_state *state;
 	struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
-	struct mc2_task *mp = tsk_rt(tsk)->plugin_state;
-
+	struct mc2_task *mp = tsk_rt(tsk)->mc2_data;
+	enum crit_level lv;
+
 	if (!tinfo)
 		return -ENOMEM;
 
@@ -453,33 +743,61 @@ static long mc2_admit_task(struct task_struct *tsk)
 		return err;
 	}
 
+	lv = mp->crit;
 	preempt_disable();
 
-	state = cpu_state_for(task_cpu(tsk));
-	raw_spin_lock_irqsave(&state->lock, flags);
+	if (lv < CRIT_LEVEL_C) {
+		state = cpu_state_for(task_cpu(tsk));
+		raw_spin_lock_irqsave(&state->lock, flags);
 
 		res = sup_find_by_id(&state->sup_env, mp->res_id);
 
 		/* found the appropriate reservation (or vCPU) */
 		if (res) {
-			TRACE_TASK(tsk, "FOUND RES ID\n");
+			TRACE_TASK(tsk, "SUP FOUND RES ID\n");
 			tinfo->mc2_param.crit = mp->crit;
 			tinfo->mc2_param.res_id = mp->res_id;
 
-			kfree(tsk_rt(tsk)->plugin_state);
-			tsk_rt(tsk)->plugin_state = NULL;
+			//kfree(tsk_rt(tsk)->plugin_state);
+			//tsk_rt(tsk)->plugin_state = NULL;
+
+			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+			tinfo->cpu = task_cpu(tsk);
+			tinfo->has_departed = true;
+			tsk_rt(tsk)->plugin_state = tinfo;
+
+			/* disable LITMUS^RT's per-thread budget enforcement */
+			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+		}
+
+		raw_spin_unlock_irqrestore(&state->lock, flags);
+	} else if (lv == CRIT_LEVEL_C) {
+		raw_spin_lock_irqsave(&_global_env.lock, flags);
 
-	err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
-	tinfo->cpu = task_cpu(tsk);
-	tinfo->has_departed = true;
-	tsk_rt(tsk)->plugin_state = tinfo;
+		res = gmp_find_by_id(&_global_env, mp->res_id);
 
-	/* disable LITMUS^RT's per-thread budget enforcement */
-	tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
-	}
+		/* found the appropriate reservation (or vCPU) */
+		if (res) {
+			TRACE_TASK(tsk, "GMP FOUND RES ID\n");
+			tinfo->mc2_param.crit = mp->crit;
+			tinfo->mc2_param.res_id = mp->res_id;
+
+			//kfree(tsk_rt(tsk)->plugin_state);
+			//tsk_rt(tsk)->plugin_state = NULL;
+
+			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+			tinfo->cpu = -1;
+			tinfo->has_departed = true;
+			tsk_rt(tsk)->plugin_state = tinfo;
 
-	raw_spin_unlock_irqrestore(&state->lock, flags);
+			/* disable LITMUS^RT's per-thread budget enforcement */
+			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+		}
 
+		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+
+	}
+
 	preempt_enable();
 
 	if (err)
@@ -493,12 +811,18 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+	struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
 	struct reservation *res;
-
+	enum crit_level lv = get_task_crit_level(tsk);
+
 	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
 		litmus_clock(), on_runqueue, is_running);
 
+	if (tinfo->cpu == -1)
+		state = local_cpu_state();
+	else
+		state = cpu_state_for(tinfo->cpu);
+
 	/* acquire the lock protecting the state and disable interrupts */
 	raw_spin_lock_irqsave(&state->lock, flags);
 
@@ -511,7 +835,9 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	if (on_runqueue || is_running) {
 		/* Assumption: litmus_clock() is synchronized across cores
 		 * [see comment in pres_task_resume()] */
-		sup_update_time(&state->sup_env, litmus_clock());
+		raw_spin_lock(&_global_env.lock);
+		mc2_update_time(lv, state, litmus_clock());
+		raw_spin_unlock(&_global_env.lock);
 		mc2_update_ghost_state(state);
 		task_arrives(state, tsk);
 		/* NOTE: drops state->lock */
@@ -521,12 +847,14 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	} else
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 
-	res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
-	release_at(tsk, res->next_replenishment);
-	if (res)
-		TRACE_TASK(tsk, "next_replenishment = %llu\n", res->next_replenishment);
+	res = res_find_by_id(state, tinfo->mc2_param.res_id);
+
+	if (res) {
+		TRACE_TASK(tsk, "mc2_task_new() next_replenishment = %llu\n", res->next_replenishment);
+		release_at(tsk, res->next_replenishment);
+	}
 	else
-		TRACE_TASK(tsk, "next_replenishment = NULL\n");
+		TRACE_TASK(tsk, "mc2_task_new() next_replenishment = NULL\n");
 }
 
 static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
@@ -537,43 +865,71 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 	struct sup_reservation_environment *sup_env;
 	int found = 0;
 	enum crit_level lv = get_task_crit_level(current);
-
-	state = cpu_state_for(cpu);
-	raw_spin_lock(&state->lock);
 
-//	res = sup_find_by_id(&state->sup_env, reservation_id);
-	sup_env = &state->sup_env;
-	//if (!res) {
-	list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
-		if (res->id == reservation_id) {
-			if (lv == CRIT_LEVEL_A) {
-				struct table_driven_reservation *tdres;
-				tdres = container_of(res, struct table_driven_reservation, res);
-				kfree(tdres->intervals);
-			}
-			list_del(&res->list);
-			kfree(res);
-			found = 1;
-			ret = 0;
-		}
-	}
-	if (!found) {
-		list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
+	if (cpu == -1) {
+		raw_spin_lock(&_global_env.lock);
+
+		list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
 			if (res->id == reservation_id) {
-				if (lv == CRIT_LEVEL_A) {
-					struct table_driven_reservation *tdres;
-					tdres = container_of(res, struct table_driven_reservation, res);
-					kfree(tdres->intervals);
-				}
+				TRACE("DESTROY RES FOUND!!!\n");
 				list_del(&res->list);
 				kfree(res);
 				found = 1;
 				ret = 0;
 			}
 		}
-	}
-	if (!found) {
-		list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
+		if (!found) {
+			list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) {
+				if (res->id == reservation_id) {
+					TRACE("DESTROY RES FOUND!!!\n");
+					list_del(&res->list);
+					kfree(res);
+					found = 1;
+					ret = 0;
+				}
+			}
+		}
+		if (!found) {
+			list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
+				if (res->id == reservation_id) {
+					TRACE("DESTROY RES FOUND!!!\n");
+					list_del(&res->list);
+					kfree(res);
+					found = 1;
+					ret = 0;
+				}
+			}
+		}
+
+/*
+list_for_each_entry(res, &_global_env.depleted_reservations, list) {
+	TRACE("DEPLETED LIST R%d\n", res->id);
+}
+list_for_each_entry(res, &_global_env.inactive_reservations, list) {
+	TRACE("INACTIVE LIST R%d\n", res->id);
+}
+list_for_each_entry(res, &_global_env.active_reservations, list) {
+	TRACE("ACTIVE LIST R%d\n", res->id);
+}
+*/
+		if (list_empty(&_global_env.active_reservations))
+			INIT_LIST_HEAD(&_global_env.active_reservations);
+		if (list_empty(&_global_env.depleted_reservations))
+			INIT_LIST_HEAD(&_global_env.depleted_reservations);
+		if (list_empty(&_global_env.inactive_reservations))
+			INIT_LIST_HEAD(&_global_env.inactive_reservations);
+		if (list_empty(&_global_env.next_events))
+			INIT_LIST_HEAD(&_global_env.next_events);
+
+		raw_spin_unlock(&_global_env.lock);
+	} else {
+		state = cpu_state_for(cpu);
+		raw_spin_lock(&state->lock);
+
+		// res = sup_find_by_id(&state->sup_env, reservation_id);
+		sup_env = &state->sup_env;
+		//if (!res) {
+		list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
 			if (res->id == reservation_id) {
 				if (lv == CRIT_LEVEL_A) {
 					struct table_driven_reservation *tdres;
@@ -586,10 +942,40 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 				ret = 0;
 			}
 		}
-	}
-	//}
+		if (!found) {
+			list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
+				if (res->id == reservation_id) {
+					if (lv == CRIT_LEVEL_A) {
+						struct table_driven_reservation *tdres;
+						tdres = container_of(res, struct table_driven_reservation, res);
+						kfree(tdres->intervals);
+					}
+					list_del(&res->list);
+					kfree(res);
+					found = 1;
+					ret = 0;
+				}
+			}
+		}
+		if (!found) {
+			list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
+				if (res->id == reservation_id) {
+					if (lv == CRIT_LEVEL_A) {
+						struct table_driven_reservation *tdres;
+						tdres = container_of(res, struct table_driven_reservation, res);
+						kfree(tdres->intervals);
+					}
+					list_del(&res->list);
+					kfree(res);
+					found = 1;
+					ret = 0;
+				}
+			}
+		}
+		//}
 
 		raw_spin_unlock(&state->lock);
+	}
 
 	TRACE("RESERVATION_DESTROY ret = %d\n", ret);
 	return ret;
@@ -599,10 +985,15 @@ static void mc2_task_exit(struct task_struct *tsk)
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+	struct mc2_cpu_state *state;
 	enum crit_level lv = tinfo->mc2_param.crit;
 	struct crit_entry* ce;
 
+	if (tinfo->cpu != -1)
+		state = cpu_state_for(tinfo->cpu);
+	else
+		state = local_cpu_state();
+
 	raw_spin_lock_irqsave(&state->lock, flags);
 
 	if (state->scheduled == tsk)
@@ -616,7 +1007,11 @@ static void mc2_task_exit(struct task_struct *tsk)
 	if (is_running(tsk)) {
 		/* Assumption: litmus_clock() is synchronized across cores
 		 * [see comment in pres_task_resume()] */
-		sup_update_time(&state->sup_env, litmus_clock());
+		//if (lv < CRIT_LEVEL_C)
+		//	sup_update_time(&state->sup_env, litmus_clock());
+		raw_spin_lock(&_global_env.lock);
+		mc2_update_time(lv, state, litmus_clock());
+		raw_spin_unlock(&_global_env.lock);
 		mc2_update_ghost_state(state);
 		task_departs(tsk, 0);
 
@@ -644,6 +1039,8 @@ static void mc2_task_exit(struct task_struct *tsk)
 */
 	kfree(tsk_rt(tsk)->plugin_state);
 	tsk_rt(tsk)->plugin_state = NULL;
+	kfree(tsk_rt(tsk)->mc2_data);
+	tsk_rt(tsk)->mc2_data = NULL;
 }
 
 static long create_polling_reservation(
@@ -685,28 +1082,54 @@ static long create_polling_reservation(
 	if (!pres)
 		return -ENOMEM;
 
-	state = cpu_state_for(config->cpu);
-	raw_spin_lock_irqsave(&state->lock, flags);
+	if (config->cpu != -1) {
+		state = cpu_state_for(config->cpu);
+		raw_spin_lock_irqsave(&state->lock, flags);
+
+		res = sup_find_by_id(&state->sup_env, config->id);
+		if (!res) {
+			polling_reservation_init(pres, use_edf, periodic,
+				config->polling_params.budget,
+				config->polling_params.period,
+				config->polling_params.relative_deadline,
+				config->polling_params.offset);
+			pres->res.id = config->id;
+			pres->res.blocked_by_ghost = 0;
+			pres->res.is_ghost = 0;
+			if (!use_edf)
+				pres->res.priority = config->priority;
+			sup_add_new_reservation(&state->sup_env, &pres->res);
+			err = config->id;
+		} else {
+			err = -EEXIST;
+		}
 
-	res = sup_find_by_id(&state->sup_env, config->id);
-	if (!res) {
-		polling_reservation_init(pres, use_edf, periodic,
-			config->polling_params.budget,
-			config->polling_params.period,
-			config->polling_params.relative_deadline,
-			config->polling_params.offset);
-		pres->res.id = config->id;
-		pres->res.blocked_by_ghost = 0;
-		if (!use_edf)
-			pres->res.priority = config->priority;
-		sup_add_new_reservation(&state->sup_env, &pres->res);
-		err = config->id;
+		raw_spin_unlock_irqrestore(&state->lock, flags);
 	} else {
-		err = -EEXIST;
+		raw_spin_lock_irqsave(&_global_env.lock, flags);
+
+		res = gmp_find_by_id(&_global_env, config->id);
+		if (!res) {
+			polling_reservation_init(pres, use_edf, periodic,
+				config->polling_params.budget,
+				config->polling_params.period,
+				config->polling_params.relative_deadline,
+				config->polling_params.offset);
+			pres->res.id = config->id;
+			pres->res.blocked_by_ghost = 0;
+			pres->res.scheduled_on = NO_CPU;
+			pres->res.is_ghost = 0;
+			if (!use_edf)
+				pres->res.priority = config->priority;
+			gmp_add_new_reservation(&_global_env, &pres->res);
+			TRACE("GMP_ADD_NEW_RESERVATION R%d\n", pres->res.id);
+			err = config->id;
+		} else {
+			err = -EEXIST;
+		}
+		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
 	}
 
-	raw_spin_unlock_irqrestore(&state->lock, flags);
-
 	if (err < 0)
 		kfree(pres);
 
@@ -825,10 +1248,12 @@ static long mc2_reservation_create(int res_type, void* __user _config)
 	if (copy_from_user(&config, _config, sizeof(config)))
 		return -EFAULT;
 
-	if (config.cpu < 0 || !cpu_online(config.cpu)) {
-		printk(KERN_ERR "invalid polling reservation (%u): "
-		       "CPU %d offline\n", config.id, config.cpu);
-		return -EINVAL;
+	if (config.cpu != -1) {
+		if (config.cpu < 0 || !cpu_online(config.cpu)) {
+			printk(KERN_ERR "invalid polling reservation (%u): "
+			       "CPU %d offline\n", config.id, config.cpu);
+			return -EINVAL;
+		}
 	}
 
 	switch (res_type) {
@@ -885,19 +1310,30 @@ static long mc2_activate_plugin(void)
 {
 	int cpu, lv;
 	struct mc2_cpu_state *state;
+	struct cpu_entry *ce;
 
+	gmp_init(&_global_env);
+	raw_spin_lock_init(&_lowest_prio_cpu.lock);
+
 	for_each_online_cpu(cpu) {
 		TRACE("Initializing CPU%d...\n", cpu);
 
 		state = cpu_state_for(cpu);
+		ce = &_lowest_prio_cpu.cpu_entries[cpu];
+
+		ce->cpu = cpu;
+		ce->scheduled = NULL;
+		ce->deadline = ULLONG_MAX;
+		ce->lv = NUM_CRIT_LEVELS;
+		ce->will_schedule = false;
 
 		raw_spin_lock_init(&state->lock);
 		state->cpu = cpu;
 		state->scheduled = NULL;
 		for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
-			struct crit_entry *ce = &state->crit_entries[lv];
-			ce->level = lv;
-			ce->running = NULL;
+			struct crit_entry *cr_entry = &state->crit_entries[lv];
+			cr_entry->level = lv;
+			cr_entry->running = NULL;
 			//hrtimer_init(&ce->ghost_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 			//ce->ghost_timer.function = on_ghost_timer;
 		}
@@ -905,6 +1341,9 @@ static long mc2_activate_plugin(void)
 
 		hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 		state->timer.function = on_scheduling_timer;
+
+		hrtimer_init(&state->g_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+		state->g_timer.function = on_global_scheduling_timer;
 	}
 
 	mc2_setup_domain_proc();
@@ -912,17 +1351,26 @@ static long mc2_activate_plugin(void)
 	return 0;
 }
 
+static void mc2_finish_switch(struct task_struct *prev)
+{
+	struct mc2_cpu_state *state = local_cpu_state();
+
+	state->scheduled = is_realtime(current) ? current : NULL;
+}
+
 static long mc2_deactivate_plugin(void)
 {
 	int cpu;
 	struct mc2_cpu_state *state;
 	struct reservation *res;
+	struct next_timer_event *event;
 
 	for_each_online_cpu(cpu) {
 		state = cpu_state_for(cpu);
 		raw_spin_lock(&state->lock);
 
 		hrtimer_cancel(&state->timer);
+		hrtimer_cancel(&state->g_timer);
 
 		/* Delete all reservations --- assumes struct reservation
 		 * is prefix of containing struct. */
@@ -954,6 +1402,46 @@ static long mc2_deactivate_plugin(void)
 		raw_spin_unlock(&state->lock);
 	}
 
+	raw_spin_lock(&_global_env.lock);
+
+	while (!list_empty(&_global_env.active_reservations)) {
+		TRACE("RES FOUND!!!\n");
+		res = list_first_entry(
+			&_global_env.active_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	while (!list_empty(&_global_env.inactive_reservations)) {
+		TRACE("RES FOUND!!!\n");
+		res = list_first_entry(
+			&_global_env.inactive_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	while (!list_empty(&_global_env.depleted_reservations)) {
+		TRACE("RES FOUND!!!\n");
+		res = list_first_entry(
+			&_global_env.depleted_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	while (!list_empty(&_global_env.next_events)) {
+		TRACE("EVENT FOUND!!!\n");
+		event = list_first_entry(
+			&_global_env.next_events,
+			struct next_timer_event, list);
+		list_del(&event->list);
+		kfree(event);
+	}
+
+	raw_spin_unlock(&_global_env.lock);
+
 	destroy_domain_proc_info(&mc2_domain_proc_info);
 	return 0;
 }
@@ -961,6 +1449,7 @@ static long mc2_deactivate_plugin(void)
 static struct sched_plugin mc2_plugin = {
 	.plugin_name = "MC2",
 	.schedule = mc2_schedule,
+	.finish_switch = mc2_finish_switch,
 	.task_wake_up = mc2_task_resume,
 	.admit_task = mc2_admit_task,
 	.task_new = mc2_task_new,