author    Namhoon Kim <namhoonk@cs.unc.edu>  2015-03-01 17:58:29 -0500
committer Namhoon Kim <namhoonk@cs.unc.edu>  2015-03-01 17:58:29 -0500
commit    0a62a98d4cbd2f1cb0ecee6669f708a3e83afcb3 (patch)
tree      09ca1d2e7ba589a2cde80bcef3e0bd2b8eaf92ad
parent    f34d9982907644ade66b8689460cf0f414e88ce7 (diff)
MC2 scheduling infrastructure
-rw-r--r--  include/litmus/reservation.h    2
-rw-r--r--  litmus/reservation.c           78
-rw-r--r--  litmus/sched_mc2.c            196
3 files changed, 204 insertions(+), 72 deletions(-)
diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h
index 0b9c08d6051e..7e022b34470f 100644
--- a/include/litmus/reservation.h
+++ b/include/litmus/reservation.h
@@ -129,6 +129,7 @@ struct reservation {
129 129
130 /* for global env. */ 130 /* for global env. */
131 int scheduled_on; 131 int scheduled_on;
132 int event_added;
132 /* for blocked by ghost. Do not charge budget when ACTIVE */ 133 /* for blocked by ghost. Do not charge budget when ACTIVE */
133 int blocked_by_ghost; 134 int blocked_by_ghost;
134 /* ghost_job. If it is clear, do not charge budget when ACTIVE_IDLE */ 135 /* ghost_job. If it is clear, do not charge budget when ACTIVE_IDLE */
@@ -244,6 +245,7 @@ void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
244 struct reservation* new_res); 245 struct reservation* new_res);
245void gmp_add_event_after(struct gmp_reservation_environment* gmp_env, 246void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
246 lt_t timeout, unsigned int id, event_type_t type); 247 lt_t timeout, unsigned int id, event_type_t type);
248void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
247int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now); 249int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
248struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env); 250struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
249struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id); 251struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
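The header now carries an event_added flag per reservation and exposes gmp_print_events() for debugging. The flag lets callers queue at most one pending timer event per reservation. A minimal userspace sketch of that guard, with simplified types and an illustrative queue_replenish_event() helper (not the kernel API):

#include <stdio.h>

struct reservation {
	unsigned int id;
	unsigned long long next_replenishment;
	int event_added; /* 1 if a timer event is already queued for this reservation */
};

/* stand-in for gmp_add_event(&_global_env, when, id, EVENT_REPLENISH) */
static void add_replenish_event(unsigned int id, unsigned long long when)
{
	printf("EVENT_REPLENISH for R%u at %llu\n", id, when);
}

static void queue_replenish_event(struct reservation *res)
{
	if (res->event_added)
		return; /* already queued; avoid a duplicate event */
	add_replenish_event(res->id, res->next_replenishment);
	res->event_added = 1;
}

int main(void)
{
	struct reservation r = { .id = 3, .next_replenishment = 1000, .event_added = 0 };
	queue_replenish_event(&r);
	queue_replenish_event(&r); /* second call is a no-op */
	return 0;
}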
diff --git a/litmus/reservation.c b/litmus/reservation.c
index b0b13a9916ef..3ec18a23f588 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -428,7 +428,7 @@ static void gmp_add_event(
428 428
429 nevent = gmp_find_event_by_id(gmp_env, id); 429 nevent = gmp_find_event_by_id(gmp_env, id);
430 430
431 if (!nevent) { 431 if (!nevent || nevent->type != type) {
432 nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC); 432 nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
433 nevent->next_update = when; 433 nevent->next_update = when;
434 nevent->id = id; 434 nevent->id = id;
@@ -440,18 +440,22 @@ static void gmp_add_event(
440 if (queued->next_update > nevent->next_update) { 440 if (queued->next_update > nevent->next_update) {
441 list_add(&nevent->list, pos->prev); 441 list_add(&nevent->list, pos->prev);
442 found = 1; 442 found = 1;
443 TRACE("NEXT_EVENT at %llu ADDED before %llu\n", nevent->next_update, queued->next_update); 443 TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at before %llu\n", nevent->id, nevent->type, nevent->next_update, queued->next_update);
444 break; 444 break;
445 } 445 }
446 } 446 }
447 447
448 if (!found) { 448 if (!found) {
449 list_add_tail(&nevent->list, &gmp_env->next_events); 449 list_add_tail(&nevent->list, &gmp_env->next_events);
450 TRACE("NEXT_EVENT ADDED at %llu ADDED at HEAD\n", nevent->next_update); 450 TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
451 } 451 }
452 } else { 452 } else {
453 TRACE("EVENT FOUND type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->type, nevent->next_update, type, when); 453 TRACE("EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
454 } 454 }
455
456 TRACE("======START PRINTING EVENT LIST======\n");
457 gmp_print_events(gmp_env, litmus_clock());
458 TRACE("======FINISH PRINTING EVENT LIST======\n");
455} 459}
456 460
457void gmp_add_event_after( 461void gmp_add_event_after(
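The gmp_add_event() change keeps the event list ordered by next_update and reuses an existing entry only when both id and type match. A minimal userspace sketch of that insertion policy, using a plain singly linked list instead of the kernel list API:

#include <stdlib.h>

struct event {
	unsigned long long next_update;
	unsigned int id;
	int type;
	struct event *next;
};

static void add_event_sorted(struct event **head, unsigned long long when,
                             unsigned int id, int type)
{
	struct event *e, **pos;

	for (e = *head; e; e = e->next)
		if (e->id == id && e->type == type)
			return; /* an event with this id and type is already queued */

	e = calloc(1, sizeof(*e));
	if (!e)
		return;
	e->next_update = when;
	e->id = id;
	e->type = type;

	/* walk to the first entry with a later update time, keeping the list sorted */
	for (pos = head; *pos && (*pos)->next_update <= when; pos = &(*pos)->next)
		;
	e->next = *pos;
	*pos = e;
}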
@@ -478,8 +482,9 @@ static void gmp_queue_depleted(
478 482
479 if (!found) 483 if (!found)
480 list_add_tail(&res->list, &gmp_env->depleted_reservations); 484 list_add_tail(&res->list, &gmp_env->depleted_reservations);
481 485 TRACE("R%d queued to depleted_list\n", res->id);
482 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH); 486 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
487 res->event_added = 1;
483} 488}
484 489
485static void gmp_queue_active( 490static void gmp_queue_active(
@@ -508,6 +513,7 @@ static void gmp_queue_active(
508 gmp_env->schedule_now++; 513 gmp_env->schedule_now++;
509 514
510 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN); 515 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
516 res->event_added = 1;
511} 517}
512 518
513static void gmp_queue_reservation( 519static void gmp_queue_reservation(
@@ -554,12 +560,14 @@ static void gmp_charge_budget(
554 /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */ 560 /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
555 res = list_entry(pos, struct reservation, list); 561 res = list_entry(pos, struct reservation, list);
556 if (res->state == RESERVATION_ACTIVE) { 562 if (res->state == RESERVATION_ACTIVE) {
557 TRACE("gmp_charge_budget ACTIVE R%u drain %llu\n", res->id, delta); 563 TRACE("gmp_charge_budget ACTIVE R%u scheduled_on=%d drain %llu\n", res->id, res->scheduled_on, delta);
558 if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) { 564 if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) {
559 TRACE("DRAIN !!\n"); 565 TRACE("DRAIN !!\n");
560 drained = 1; 566 drained = 1;
561 res->ops->drain_budget(res, delta); 567 res->ops->drain_budget(res, delta);
562 } 568 } else {
569 TRACE("NO DRAIN (not scheduled)!!\n");
570 }
563 } else { 571 } else {
564 //BUG_ON(res->state != RESERVATION_ACTIVE_IDLE); 572 //BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
565 if (res->state != RESERVATION_ACTIVE_IDLE) 573 if (res->state != RESERVATION_ACTIVE_IDLE)
@@ -579,6 +587,7 @@ static void gmp_charge_budget(
579 TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", 587 TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n",
580 res->id, res->cur_budget); 588 res->id, res->cur_budget);
581 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN); 589 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
590 res->event_added = 1;
582 } 591 }
583 //if (encountered_active == 2) 592 //if (encountered_active == 2)
584 /* stop at the first ACTIVE reservation */ 593 /* stop at the first ACTIVE reservation */
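The charging loop now drains budget only from an ACTIVE reservation that is actually executing, i.e. assigned to a CPU and not blocked by a ghost job. A simplified sketch of that rule (field names follow the patch; the charge() helper is illustrative):

#define NO_CPU (-1)

enum { RESERVATION_ACTIVE = 1 };

struct reservation {
	int state;             /* RESERVATION_ACTIVE, ... */
	int scheduled_on;      /* CPU running the client, or NO_CPU */
	int blocked_by_ghost;  /* 1 while a ghost job holds the criticality entry */
	unsigned long long cur_budget;
};

static void charge(struct reservation *res, unsigned long long delta)
{
	if (res->state != RESERVATION_ACTIVE)
		return;
	if (res->scheduled_on == NO_CPU || res->blocked_by_ghost)
		return; /* runnable but not executing: do not drain budget */
	res->cur_budget = (delta < res->cur_budget) ? res->cur_budget - delta : 0;
}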
@@ -601,33 +610,49 @@ static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
601 break; 610 break;
602 } 611 }
603 } 612 }
604 //TRACE("finished replenishing budgets\n"); 613 TRACE("finished replenishing budgets\n");
605 614
606 /* request a scheduler update at the next replenishment instant */ 615 /* request a scheduler update at the next replenishment instant */
616 list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
617 res = list_entry(pos, struct reservation, list);
618 if (res->event_added == 0) {
619 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
620 res->event_added = 1;
621 }
622 }
623
624/*
607 res = list_first_entry_or_null(&gmp_env->depleted_reservations, 625 res = list_first_entry_or_null(&gmp_env->depleted_reservations,
608 struct reservation, list); 626 struct reservation, list);
609 if (res) 627 if (res && res->event_added == 0) {
610 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH); 628 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
629 res->event_added = 1;
630 }
631*/
611} 632}
612 633
634#define EPSILON 50
635
613/* return schedule_now */ 636/* return schedule_now */
614int gmp_update_time( 637int gmp_update_time(
615 struct gmp_reservation_environment* gmp_env, 638 struct gmp_reservation_environment* gmp_env,
616 lt_t now) 639 lt_t now)
617{ 640{
618 lt_t delta; 641 struct next_timer_event *event, *next;
642 lt_t delta, ret;
619 643
620 /* If the time didn't advance, there is nothing to do. 644 /* If the time didn't advance, there is nothing to do.
621 * This check makes it safe to call sup_advance_time() potentially 645 * This check makes it safe to call sup_advance_time() potentially
622 * multiple times (e.g., via different code paths. */ 646 * multiple times (e.g., via different code paths. */
623 //TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time); 647 TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
624 if (unlikely(now <= gmp_env->env.current_time)) 648 if (unlikely(now <= gmp_env->env.current_time + EPSILON))
625 return min(gmp_env->schedule_now, NR_CPUS); 649 return 0;
626 650
627 delta = now - gmp_env->env.current_time; 651 delta = now - gmp_env->env.current_time;
628 gmp_env->env.current_time = now; 652 gmp_env->env.current_time = now;
629 653
630 654
655 //gmp_print_events(gmp_env, now);
631 /* deplete budgets by passage of time */ 656 /* deplete budgets by passage of time */
632 //TRACE("CHARGE###\n"); 657 //TRACE("CHARGE###\n");
633 gmp_charge_budget(gmp_env, delta); 658 gmp_charge_budget(gmp_env, delta);
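gmp_update_time() now returns immediately when the clock has advanced by less than EPSILON (50 ns), filtering out back-to-back calls from different code paths that would otherwise each re-walk the global lists. A sketch of the guard with userspace types:

#define EPSILON 50 /* ns; time updates closer together than this are ignored */

struct env {
	unsigned long long current_time;
};

/* returns the time actually charged, or 0 if the update was skipped */
static unsigned long long update_time(struct env *env, unsigned long long now)
{
	unsigned long long delta;

	if (now <= env->current_time + EPSILON)
		return 0;

	delta = now - env->current_time;
	env->current_time = now;
	/* ...charge budgets by delta and replenish depleted reservations... */
	return delta;
}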
@@ -636,7 +661,30 @@ int gmp_update_time(
636 //TRACE("REPLENISH###\n"); 661 //TRACE("REPLENISH###\n");
637 gmp_replenish_budgets(gmp_env); 662 gmp_replenish_budgets(gmp_env);
638 663
639 return min(gmp_env->schedule_now, NR_CPUS); 664 list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
665 if (event->next_update < now) {
666 list_del(&event->list);
667 TRACE("EVENT at %llu IS DELETED\n", event->next_update);
668 kfree(event);
669 }
670 }
671
672 //gmp_print_events(gmp_env, litmus_clock());
673
674 ret = min(gmp_env->schedule_now, NR_CPUS);
675 gmp_env->schedule_now = 0;
676
677 return ret;
678}
679
680void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now)
681{
682 struct next_timer_event *event, *next;
683
684 TRACE("GLOBAL EVENTS now=%llu\n", now);
685 list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
686 TRACE("at %llu type=%d id=%d armed_on=%d\n", event->next_update, event->type, event->id, event->timer_armed_on);
687 }
640} 688}
641 689
642static void gmp_res_change_state( 690static void gmp_res_change_state(
@@ -653,7 +701,7 @@ static void gmp_res_change_state(
653 701
654 list_del(&res->list); 702 list_del(&res->list);
655 /* check if we need to reschedule because we lost an active reservation */ 703 /* check if we need to reschedule because we lost an active reservation */
656 if (res->state == RESERVATION_ACTIVE && !gmp_env->will_schedule) 704 if (res->state == RESERVATION_ACTIVE)
657 gmp_env->schedule_now++; 705 gmp_env->schedule_now++;
658 res->state = new_state; 706 res->state = new_state;
659 gmp_queue_reservation(gmp_env, res); 707 gmp_queue_reservation(gmp_env, res);
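The new tail of gmp_update_time() also prunes queued events whose firing time has already passed and hands out the accumulated schedule_now count (capped at NR_CPUS) exactly once, resetting it afterwards. A minimal sketch of both steps, with a reduced list type:

#include <stdlib.h>

struct event {
	unsigned long long next_update;
	struct event *next;
};

/* drop every queued event whose time has already passed */
static void prune_stale_events(struct event **head, unsigned long long now)
{
	struct event **pos = head;

	while (*pos) {
		if ((*pos)->next_update < now) {
			struct event *stale = *pos;
			*pos = stale->next;
			free(stale);
		} else {
			pos = &(*pos)->next;
		}
	}
}

/* consume the pending reschedule count so it does not accumulate across calls */
static int take_schedule_now(int *schedule_now, int nr_cpus)
{
	int ret = (*schedule_now < nr_cpus) ? *schedule_now : nr_cpus;
	*schedule_now = 0;
	return ret;
}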
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 79fecd49080a..b3390dc87d47 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -74,7 +74,7 @@ struct mc2_cpu_state {
74 74
75 struct sup_reservation_environment sup_env; 75 struct sup_reservation_environment sup_env;
76 struct hrtimer timer; 76 struct hrtimer timer;
77 struct hrtimer g_timer; 77 //struct hrtimer g_timer;
78 78
79 int cpu; 79 int cpu;
80 struct task_struct* scheduled; 80 struct task_struct* scheduled;
@@ -209,21 +209,33 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
209 * If all CPUs are running tasks which has 209 * If all CPUs are running tasks which has
210 * higher priority than level C, return NO_CPU. 210 * higher priority than level C, return NO_CPU.
211 */ 211 */
212static int get_lowest_prio_cpu(void) 212static int get_lowest_prio_cpu(lt_t priority)
213{ 213{
214 struct cpu_entry *ce; 214 struct cpu_entry *ce;
215 int cpu, ret = NO_CPU; 215 int cpu, ret = NO_CPU;
216 lt_t latest_deadline = 0; 216 lt_t latest_deadline = 0;
217 217
218 raw_spin_lock(&_lowest_prio_cpu.lock); 218 raw_spin_lock(&_lowest_prio_cpu.lock);
219 ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
220 if (!ce->will_schedule && !ce->scheduled) {
221 raw_spin_unlock(&_lowest_prio_cpu.lock);
222 TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
223 return ce->cpu;
224 }
225
219 for_each_online_cpu(cpu) { 226 for_each_online_cpu(cpu) {
220 ce = &_lowest_prio_cpu.cpu_entries[cpu]; 227 ce = &_lowest_prio_cpu.cpu_entries[cpu];
221 /* If a CPU will call schedule() in the near future, we don't 228 /* If a CPU will call schedule() in the near future, we don't
222 return that CPU. */ 229 return that CPU. */
230 TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
231 ce->scheduled ? (ce->scheduled)->comm : "null",
232 ce->scheduled ? (ce->scheduled)->pid : 0,
233 ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
223 if (!ce->will_schedule) { 234 if (!ce->will_schedule) {
224 if (!ce->scheduled) { 235 if (!ce->scheduled) {
225 /* Idle cpu, return this. */ 236 /* Idle cpu, return this. */
226 raw_spin_unlock(&_lowest_prio_cpu.lock); 237 raw_spin_unlock(&_lowest_prio_cpu.lock);
238 TRACE("CPU %d is the lowest!\n", ce->cpu);
227 return ce->cpu; 239 return ce->cpu;
228 } else if (ce->lv == CRIT_LEVEL_C && 240 } else if (ce->lv == CRIT_LEVEL_C &&
229 ce->deadline > latest_deadline) { 241 ce->deadline > latest_deadline) {
@@ -234,7 +246,12 @@ static int get_lowest_prio_cpu(void)
234 } 246 }
235 247
236 raw_spin_unlock(&_lowest_prio_cpu.lock); 248 raw_spin_unlock(&_lowest_prio_cpu.lock);
249
250 if (priority >= latest_deadline)
251 ret = NO_CPU;
237 252
253 TRACE("CPU %d is the lowest!\n", ret);
254
238 return ret; 255 return ret;
239} 256}
240 257
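get_lowest_prio_cpu() now takes the priority (deadline) of the job looking for a CPU: an idle CPU is returned immediately, otherwise the CPU running the level-C job with the latest deadline is the candidate, and NO_CPU is returned when the caller's deadline is not earlier than that. A userspace sketch of the selection rule (the patch additionally short-circuits on an idle local CPU, which this sketch omits; the array, field names, and NR_CPUS value are illustrative):

#define NR_CPUS 4
#define NO_CPU  (-1)
#define CRIT_LEVEL_C 2

struct cpu_entry {
	int will_schedule;           /* CPU is already on its way into schedule() */
	int busy;                    /* 1 if a real-time task is running there */
	int lv;                      /* criticality level of the running task */
	unsigned long long deadline; /* deadline of the running task */
};

static struct cpu_entry cpu_entries[NR_CPUS];

static int lowest_prio_cpu(unsigned long long priority)
{
	unsigned long long latest_deadline = 0;
	int cpu, ret = NO_CPU;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct cpu_entry *ce = &cpu_entries[cpu];

		if (ce->will_schedule)
			continue;   /* about to reschedule anyway; skip it */
		if (!ce->busy)
			return cpu; /* an idle CPU always wins */
		if (ce->lv == CRIT_LEVEL_C && ce->deadline > latest_deadline) {
			latest_deadline = ce->deadline;
			ret = cpu;
		}
	}

	/* never preempt a job whose deadline is not later than the caller's */
	if (priority >= latest_deadline)
		ret = NO_CPU;

	return ret;
}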
@@ -253,7 +270,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
253 lt_t update, now; 270 lt_t update, now;
254 enum crit_level lv = get_task_crit_level(state->scheduled); 271 enum crit_level lv = get_task_crit_level(state->scheduled);
255 struct next_timer_event *event, *next; 272 struct next_timer_event *event, *next;
256 int found_event = 0;
257 273
258 //TRACE_TASK(state->scheduled, "update_timer!\n"); 274 //TRACE_TASK(state->scheduled, "update_timer!\n");
259 if (lv != NUM_CRIT_LEVELS) 275 if (lv != NUM_CRIT_LEVELS)
@@ -268,10 +284,35 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
268 */ 284 */
269 local = local_cpu_state() == state; 285 local = local_cpu_state() == state;
270 286
287 list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
288 /* If the event time is already passed, we call schedule() on
289 the lowest priority cpu */
290 if (event->next_update < litmus_clock()) {
291 if (event->timer_armed_on == NO_CPU) {
292 struct reservation *res = gmp_find_by_id(&_global_env, event->id);
293 int cpu = get_lowest_prio_cpu(res?res->priority:0);
294 TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
295 list_del(&event->list);
296 kfree(event);
297 if (cpu != NO_CPU) {
298 raw_spin_lock(&_lowest_prio_cpu.lock);
299 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
300 raw_spin_unlock(&_lowest_prio_cpu.lock);
301 litmus_reschedule(cpu);
302 }
303 }
304 } else if (event->next_update < update && event->timer_armed_on == NO_CPU) {
305 event->timer_armed_on = state->cpu;
306 update = event->next_update;
307 break;
308 }
309 }
310
271 /* Must drop state lock before calling into hrtimer_start(), which 311 /* Must drop state lock before calling into hrtimer_start(), which
272 * may raise a softirq, which in turn may wake ksoftirqd. */ 312 * may raise a softirq, which in turn may wake ksoftirqd. */
273 raw_spin_unlock(&state->lock); 313 raw_spin_unlock(&state->lock);
274 314 raw_spin_unlock(&_global_env.lock);
315
275 if (update <= now) { 316 if (update <= now) {
276 litmus_reschedule(state->cpu); 317 litmus_reschedule(state->cpu);
277 } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) { 318 } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
@@ -310,7 +351,8 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
310 litmus_reschedule(state->cpu); 351 litmus_reschedule(state->cpu);
311 } 352 }
312 } 353 }
313 354
355#if 0
314 raw_spin_lock(&_global_env.lock); 356 raw_spin_lock(&_global_env.lock);
315 list_for_each_entry_safe(event, next, &_global_env.next_events, list) { 357 list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
316 if (event->timer_armed_on == NO_CPU) { 358 if (event->timer_armed_on == NO_CPU) {
@@ -349,6 +391,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
349 } 391 }
350 if (found_event == 0) 392 if (found_event == 0)
351 raw_spin_unlock(&_global_env.lock); 393 raw_spin_unlock(&_global_env.lock);
394#endif
352} 395}
353 396
354/* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs 397/* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs
@@ -396,6 +439,9 @@ static void mc2_update_ghost_state(struct mc2_cpu_state *state)
396 if (res) 439 if (res)
397 litmus_reschedule(state->cpu); 440 litmus_reschedule(state->cpu);
398 } 441 }
442 } else {
443 TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget);
444 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
399 } 445 }
400 } 446 }
401 } 447 }
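In mc2_update_ghost_state(), a ghost job that still has budget left now re-arms a DRAIN event for its remaining budget rather than dropping off the timer queue. A small sketch of the two outcomes (the event helper is an illustrative stub for gmp_add_event_after()):

struct reservation {
	unsigned int id;
	unsigned long long cur_budget;
};

#define EVENT_DRAIN 0

/* stub standing in for gmp_add_event_after(&_global_env, timeout, id, type) */
static void add_event_after(unsigned long long timeout, unsigned int id, int type)
{
	(void)timeout; (void)id; (void)type;
}

static void update_ghost(struct reservation *res, unsigned long long consumed)
{
	if (consumed >= res->cur_budget) {
		res->cur_budget = 0;
		/* ghost job finished: clear the crit entry and reschedule if needed */
	} else {
		res->cur_budget -= consumed;
		/* ghost job not finished: keep charging its remaining budget */
		add_event_after(res->cur_budget, res->id, EVENT_DRAIN);
	}
}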
@@ -428,6 +474,7 @@ static void update_cpu_prio(struct mc2_cpu_state *state)
428 } 474 }
429}; 475};
430 476
477#if 0
431/* on_global_scheduling_timer - Process the budget accounting (replenish 478/* on_global_scheduling_timer - Process the budget accounting (replenish
432 * and charge) 479 * and charge)
433 */ 480 */
@@ -503,6 +550,7 @@ unlock:
503 550
504 return restart; 551 return restart;
505} 552}
553#endif
506 554
507/* on_scheduling_timer - timer event for partitioned tasks 555/* on_scheduling_timer - timer event for partitioned tasks
508 */ 556 */
@@ -512,6 +560,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
512 enum hrtimer_restart restart = HRTIMER_NORESTART; 560 enum hrtimer_restart restart = HRTIMER_NORESTART;
513 struct mc2_cpu_state *state; 561 struct mc2_cpu_state *state;
514 lt_t update, now; 562 lt_t update, now;
563 int global_schedule_now;
515 564
516 state = container_of(timer, struct mc2_cpu_state, timer); 565 state = container_of(timer, struct mc2_cpu_state, timer);
517 566
@@ -524,16 +573,19 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
524 BUG_ON(state->cpu != raw_smp_processor_id()); 573 BUG_ON(state->cpu != raw_smp_processor_id());
525 574
526 TRACE("TIMER FIRED at %llu\n", litmus_clock()); 575 TRACE("TIMER FIRED at %llu\n", litmus_clock());
527 raw_spin_lock_irqsave(&state->lock, flags); 576 raw_spin_lock_irqsave(&_global_env.lock, flags);
577 raw_spin_lock(&state->lock);
578
528 sup_update_time(&state->sup_env, litmus_clock()); 579 sup_update_time(&state->sup_env, litmus_clock());
529 raw_spin_lock(&_global_env.lock); 580 global_schedule_now = gmp_update_time(&_global_env, litmus_clock());
581
530 mc2_update_ghost_state(state); 582 mc2_update_ghost_state(state);
531 raw_spin_unlock(&_global_env.lock); 583
532 update = state->sup_env.next_scheduler_update; 584 update = state->sup_env.next_scheduler_update;
533 now = state->sup_env.env.current_time; 585 now = state->sup_env.env.current_time;
534 586
535 TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d)\n", 587 TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d\n",
536 now, update, state->cpu); 588 now, update, state->cpu, global_schedule_now);
537 589
538 if (update <= now) { 590 if (update <= now) {
539 litmus_reschedule_local(); 591 litmus_reschedule_local();
@@ -542,7 +594,20 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
542 restart = HRTIMER_RESTART; 594 restart = HRTIMER_RESTART;
543 } 595 }
544 596
545 raw_spin_unlock_irqrestore(&state->lock, flags); 597 /* Find the lowest cpu, and call reschedule */
598 while (global_schedule_now--) {
599 int cpu = get_lowest_prio_cpu(0);
600 if (cpu != NO_CPU) {
601 raw_spin_lock(&_lowest_prio_cpu.lock);
602 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
603 raw_spin_unlock(&_lowest_prio_cpu.lock);
604 TRACE("LOWEST CPU = P%d\n", cpu);
605 litmus_reschedule(cpu);
606 }
607 }
608
609 raw_spin_unlock(&state->lock);
610 raw_spin_unlock_irqrestore(&_global_env.lock, flags);
546 611
547 return restart; 612 return restart;
548} 613}
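A pattern repeated throughout this file is the revised lock ordering: _global_env.lock is now acquired before the per-CPU state->lock and released after it, so every path that touches both the global and the partitioned environment nests the two locks identically. A schematic sketch of the convention, using pthread mutexes in place of the kernel's raw spinlocks:

#include <pthread.h>

static pthread_mutex_t global_env_lock = PTHREAD_MUTEX_INITIALIZER; /* _global_env.lock */
static pthread_mutex_t cpu_state_lock  = PTHREAD_MUTEX_INITIALIZER; /* state->lock */

/* every path that needs both locks takes them in this order ... */
static void lock_both(void)
{
	pthread_mutex_lock(&global_env_lock); /* outer: global environment */
	pthread_mutex_lock(&cpu_state_lock);  /* inner: per-CPU state */
}

/* ... and releases them in reverse, which rules out lock-order deadlocks */
static void unlock_both(void)
{
	pthread_mutex_unlock(&cpu_state_lock);
	pthread_mutex_unlock(&global_env_lock);
}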
@@ -555,7 +620,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
555 struct task_struct *tsk = NULL; 620 struct task_struct *tsk = NULL;
556 struct crit_entry *ce; 621 struct crit_entry *ce;
557 enum crit_level lv; 622 enum crit_level lv;
558 lt_t time_slice; 623 lt_t time_slice, cur_priority;
559 624
560 list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { 625 list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
561 if (res->state == RESERVATION_ACTIVE) { 626 if (res->state == RESERVATION_ACTIVE) {
@@ -578,6 +643,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
578 because the ghost job exists. Set blocked_by_ghost 643 because the ghost job exists. Set blocked_by_ghost
579 flag not to charge budget */ 644 flag not to charge budget */
580 res->blocked_by_ghost = 1; 645 res->blocked_by_ghost = 1;
646 TRACE_TASK(ce->running, " is GHOST\n");
581 } 647 }
582 } 648 }
583 } 649 }
@@ -585,24 +651,32 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
585 } 651 }
586 652
587 /* no eligible level A or B tasks exists */ 653 /* no eligible level A or B tasks exists */
654 cur_priority = _lowest_prio_cpu.cpu_entries[state->cpu].deadline;
655
656 TRACE("****** ACTIVE LIST ******\n");
657 TRACE_TASK(_lowest_prio_cpu.cpu_entries[state->cpu].scheduled, "** CURRENT JOB deadline %llu **\n", cur_priority);
588 list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { 658 list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
659 TRACE("R%d deadline=%llu, scheduled_on=%d\n", res->id, res->priority, res->scheduled_on);
589 if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { 660 if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
590 tsk = res->ops->dispatch_client(res, &time_slice); 661 tsk = res->ops->dispatch_client(res, &time_slice);
591 if (likely(tsk)) { 662 if (likely(tsk)) {
592 lv = get_task_crit_level(tsk); 663 lv = get_task_crit_level(tsk);
593 if (lv == NUM_CRIT_LEVELS) { 664 if (lv == NUM_CRIT_LEVELS) {
594 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); 665 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
666 res->event_added = 1;
595 return tsk; 667 return tsk;
596 } else { 668 } else {
597 ce = &state->crit_entries[lv]; 669 ce = &state->crit_entries[lv];
598 if (likely(!ce->running)) { 670 if (likely(!ce->running)) {
599 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); 671 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
672 res->event_added = 1;
600 res->blocked_by_ghost = 0; 673 res->blocked_by_ghost = 0;
601 res->is_ghost = 0; 674 res->is_ghost = 0;
602 res->scheduled_on = state->cpu; 675 res->scheduled_on = state->cpu;
603 return tsk; 676 return tsk;
604 } else { 677 } else {
605 res->blocked_by_ghost = 1; 678 res->blocked_by_ghost = 1;
679 TRACE_TASK(ce->running, " is GHOST\n");
606 } 680 }
607 } 681 }
608 } 682 }
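For level-C work, mc2_dispatch() now walks the global active list, skips reservations whose client is already running on another CPU, arms a DRAIN event for the chosen one, and records the CPU in scheduled_on. A compressed sketch of that selection (ghost-job handling is omitted, and arm_drain_event() is an illustrative stub):

#define NO_CPU (-1)

enum { RESERVATION_ACTIVE = 1 };

struct task; /* opaque here; only passed around by pointer */

struct reservation {
	int state;
	int scheduled_on;  /* CPU currently running this reservation's client */
	int event_added;
	unsigned long long cur_budget;
	struct task *client;
	struct reservation *next;
};

/* stub for gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN) */
static void arm_drain_event(struct reservation *res) { (void)res; }

static struct task *dispatch_global(struct reservation *active, int this_cpu)
{
	struct reservation *res;

	for (res = active; res; res = res->next) {
		if (res->state != RESERVATION_ACTIVE || res->scheduled_on != NO_CPU)
			continue; /* not runnable, or already running elsewhere */
		arm_drain_event(res); /* start charging its budget */
		res->event_added = 1;
		res->scheduled_on = this_cpu;
		return res->client;
	}
	return NULL; /* nothing eligible at level C */
}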
@@ -641,13 +715,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
641 715
642 pre_schedule(prev); 716 pre_schedule(prev);
643 717
644 raw_spin_lock(&_lowest_prio_cpu.lock);
645 if (_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule == true)
646 _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
647 raw_spin_unlock(&_lowest_prio_cpu.lock);
648
649 raw_spin_lock(&state->lock);
650 raw_spin_lock(&_global_env.lock); 718 raw_spin_lock(&_global_env.lock);
719 raw_spin_lock(&state->lock);
651 720
652 //BUG_ON(state->scheduled && state->scheduled != prev); 721 //BUG_ON(state->scheduled && state->scheduled != prev);
653 //BUG_ON(state->scheduled && !is_realtime(prev)); 722 //BUG_ON(state->scheduled && !is_realtime(prev));
@@ -668,12 +737,14 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
668 if (is_realtime(prev) && !is_running(prev)) 737 if (is_realtime(prev) && !is_running(prev))
669 task_departs(prev, is_completed(prev)); 738 task_departs(prev, is_completed(prev));
670 739
740 raw_spin_lock(&_lowest_prio_cpu.lock);
741 _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
742
671 /* figure out what to schedule next */ 743 /* figure out what to schedule next */
672 state->scheduled = mc2_dispatch(&state->sup_env, state); 744 state->scheduled = mc2_dispatch(&state->sup_env, state);
673 if (state->scheduled && is_realtime(state->scheduled)) 745 if (state->scheduled && is_realtime(state->scheduled))
674 TRACE_TASK(state->scheduled, "mc2_dispatch picked me!\n"); 746 TRACE_TASK(state->scheduled, "mc2_dispatch picked me!\n");
675 747
676 raw_spin_lock(&_lowest_prio_cpu.lock);
677 update_cpu_prio(state); 748 update_cpu_prio(state);
678 raw_spin_unlock(&_lowest_prio_cpu.lock); 749 raw_spin_unlock(&_lowest_prio_cpu.lock);
679 750
@@ -682,18 +753,29 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
682 753
683 /* program scheduler timer */ 754 /* program scheduler timer */
684 state->sup_env.will_schedule = false; 755 state->sup_env.will_schedule = false;
685 756
686 raw_spin_unlock(&_global_env.lock);
687
688 /* NOTE: drops state->lock */ 757 /* NOTE: drops state->lock */
689 mc2_update_timer_and_unlock(state); 758 mc2_update_timer_and_unlock(state);
690 759
760
761
691 if (prev != state->scheduled && is_realtime(prev)) { 762 if (prev != state->scheduled && is_realtime(prev)) {
692 struct mc2_task_state* tinfo = get_mc2_state(prev); 763 struct mc2_task_state* tinfo = get_mc2_state(prev);
693 struct reservation* res = tinfo->res_info.client.reservation; 764 struct reservation* res = tinfo->res_info.client.reservation;
694 TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on); 765 TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
695 res->scheduled_on = NO_CPU; 766 res->scheduled_on = NO_CPU;
696 TRACE_TASK(prev, "descheduled.\n"); 767 TRACE_TASK(prev, "descheduled.\n");
768 /* if prev is preempted and a global task, find the lowest cpu and reschedule */
769 if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
770 int cpu = get_lowest_prio_cpu(res?res->priority:0);
771 TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
772 if (cpu != NO_CPU) {
773 raw_spin_lock(&_lowest_prio_cpu.lock);
774 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
775 raw_spin_unlock(&_lowest_prio_cpu.lock);
776 litmus_reschedule(cpu);
777 }
778 }
697 } 779 }
698 if (state->scheduled) { 780 if (state->scheduled) {
699 TRACE_TASK(state->scheduled, "scheduled.\n"); 781 TRACE_TASK(state->scheduled, "scheduled.\n");
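If the task that was just descheduled is a level-C (globally scheduled) job that was preempted rather than suspended, mc2_schedule() now pokes the lowest-priority CPU so the job can resume there. A hedged sketch of that hand-off, with stubs standing in for get_lowest_prio_cpu(), the will_schedule bookkeeping, and litmus_reschedule():

#define NO_CPU (-1)
#define CRIT_LEVEL_C 2

/* illustrative stubs for the calls made by the patch */
static int  pick_lowest_prio_cpu(unsigned long long priority) { (void)priority; return NO_CPU; }
static void mark_will_schedule(int cpu) { (void)cpu; }
static void send_reschedule(int cpu) { (void)cpu; }

static void hand_off_preempted(int crit_level, int has_departed,
                               unsigned long long priority)
{
	int cpu;

	if (has_departed || crit_level != CRIT_LEVEL_C)
		return; /* suspended, or not a globally scheduled task */

	cpu = pick_lowest_prio_cpu(priority);
	if (cpu != NO_CPU) {
		mark_will_schedule(cpu); /* keep others from also targeting this CPU */
		send_reschedule(cpu);    /* force it through schedule() */
	}
}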
@@ -737,34 +819,31 @@ static void mc2_task_resume(struct task_struct *tsk)
737 else 819 else
738 state = local_cpu_state(); 820 state = local_cpu_state();
739 821
740 raw_spin_lock_irqsave(&state->lock, flags); 822 raw_spin_lock_irqsave(&_global_env.lock, flags);
741 /* Requeue only if self-suspension was already processed. */ 823 /* Requeue only if self-suspension was already processed. */
742 if (tinfo->has_departed) 824 if (tinfo->has_departed)
743 { 825 {
744 raw_spin_lock(&_global_env.lock); 826 raw_spin_lock(&state->lock);
745 /* Assumption: litmus_clock() is synchronized across cores, 827 /* Assumption: litmus_clock() is synchronized across cores,
746 * since we might not actually be executing on tinfo->cpu 828 * since we might not actually be executing on tinfo->cpu
747 * at the moment. */ 829 * at the moment. */
748 if (tinfo->cpu != -1) { 830 if (tinfo->cpu != -1) {
749 sup_update_time(&state->sup_env, litmus_clock()); 831 sup_update_time(&state->sup_env, litmus_clock());
750 } else { 832 } else {
751 //raw_spin_lock(&_global_env.lock);
752 TRACE("RESUME UPDATE ####\n"); 833 TRACE("RESUME UPDATE ####\n");
753 gmp_update_time(&_global_env, litmus_clock()); 834 gmp_update_time(&_global_env, litmus_clock());
754 TRACE("RESUME UPDATE $$$$\n"); 835 TRACE("RESUME UPDATE $$$$\n");
755 //raw_spin_unlock(&_global_env.lock);
756 } 836 }
757 837
758 mc2_update_ghost_state(state); 838 mc2_update_ghost_state(state);
759 task_arrives(state, tsk); 839 task_arrives(state, tsk);
760 /* NOTE: drops state->lock */ 840 /* NOTE: drops state->lock */
761 TRACE_TASK(tsk, "mc2_resume()\n"); 841 TRACE_TASK(tsk, "mc2_resume()\n");
762 raw_spin_unlock(&_global_env.lock);
763 mc2_update_timer_and_unlock(state); 842 mc2_update_timer_and_unlock(state);
764 local_irq_restore(flags); 843 local_irq_restore(flags);
765 } else { 844 } else {
766 TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); 845 TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
767 raw_spin_unlock_irqrestore(&state->lock, flags); 846 raw_spin_unlock_irqrestore(&_global_env.lock, flags);
768 } 847 }
769 848
770 resume_legacy_task_model_updates(tsk); 849 resume_legacy_task_model_updates(tsk);
@@ -800,7 +879,9 @@ static long mc2_complete_job(void)
800 else 879 else
801 state = local_cpu_state(); 880 state = local_cpu_state();
802 881
882 raw_spin_lock(&_global_env.lock);
803 raw_spin_lock(&state->lock); 883 raw_spin_lock(&state->lock);
884
804 env = &(state->sup_env.env); 885 env = &(state->sup_env.env);
805 886
806 res = res_find_by_id(state, tinfo->mc2_param.res_id); 887 res = res_find_by_id(state, tinfo->mc2_param.res_id);
@@ -808,7 +889,6 @@ static long mc2_complete_job(void)
808 if (get_task_crit_level(current) < CRIT_LEVEL_C) { 889 if (get_task_crit_level(current) < CRIT_LEVEL_C) {
809 env->time_zero = tsk_rt(current)->sporadic_release_time; 890 env->time_zero = tsk_rt(current)->sporadic_release_time;
810 } else { 891 } else {
811 raw_spin_lock(&_global_env.lock);
812 _global_env.env.time_zero = tsk_rt(current)->sporadic_release_time; 892 _global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
813 } 893 }
814 894
@@ -826,10 +906,9 @@ static long mc2_complete_job(void)
826 res->env->change_state(res->env, res, RESERVATION_DEPLETED); 906 res->env->change_state(res->env, res, RESERVATION_DEPLETED);
827 907
828 TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update); 908 TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
829 if (get_task_crit_level(current) == CRIT_LEVEL_C) { 909
830 raw_spin_unlock(&_global_env.lock);
831 }
832 raw_spin_unlock(&state->lock); 910 raw_spin_unlock(&state->lock);
911 raw_spin_unlock(&_global_env.lock);
833 912
834 local_irq_restore(flags); 913 local_irq_restore(flags);
835 } 914 }
@@ -908,10 +987,10 @@ static long mc2_admit_task(struct task_struct *tsk)
908 987
909 raw_spin_unlock_irqrestore(&state->lock, flags); 988 raw_spin_unlock_irqrestore(&state->lock, flags);
910 } else if (lv == CRIT_LEVEL_C) { 989 } else if (lv == CRIT_LEVEL_C) {
911 local_irq_save(flags); 990 raw_spin_lock_irqsave(&_global_env.lock, flags);
912 state = local_cpu_state(); 991 state = local_cpu_state();
992
913 raw_spin_lock(&state->lock); 993 raw_spin_lock(&state->lock);
914 raw_spin_lock(&_global_env.lock);
915 994
916 res = gmp_find_by_id(&_global_env, mp->res_id); 995 res = gmp_find_by_id(&_global_env, mp->res_id);
917 996
@@ -931,9 +1010,8 @@ static long mc2_admit_task(struct task_struct *tsk)
931 tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; 1010 tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
932 } 1011 }
933 1012
934 raw_spin_unlock(&_global_env.lock);
935 raw_spin_unlock(&state->lock); 1013 raw_spin_unlock(&state->lock);
936 local_irq_restore(flags); 1014 raw_spin_unlock_irqrestore(&_global_env.lock, flags);
937 } 1015 }
938 1016
939 preempt_enable(); 1017 preempt_enable();
@@ -965,8 +1043,9 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
965 state = cpu_state_for(tinfo->cpu); 1043 state = cpu_state_for(tinfo->cpu);
966 1044
967 /* acquire the lock protecting the state and disable interrupts */ 1045 /* acquire the lock protecting the state and disable interrupts */
968 raw_spin_lock_irqsave(&state->lock, flags); 1046 raw_spin_lock_irqsave(&_global_env.lock, flags);
969 1047 raw_spin_lock(&state->lock);
1048
970 if (is_running) { 1049 if (is_running) {
971 state->scheduled = tsk; 1050 state->scheduled = tsk;
972 /* make sure this task should actually be running */ 1051 /* make sure this task should actually be running */
@@ -976,18 +1055,16 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
976 if (on_runqueue || is_running) { 1055 if (on_runqueue || is_running) {
977 /* Assumption: litmus_clock() is synchronized across cores 1056 /* Assumption: litmus_clock() is synchronized across cores
978 * [see comment in pres_task_resume()] */ 1057 * [see comment in pres_task_resume()] */
979 raw_spin_lock(&_global_env.lock);
980 mc2_update_time(lv, state, litmus_clock()); 1058 mc2_update_time(lv, state, litmus_clock());
981 mc2_update_ghost_state(state); 1059 mc2_update_ghost_state(state);
982 task_arrives(state, tsk); 1060 task_arrives(state, tsk);
983 /* NOTE: drops state->lock */ 1061 /* NOTE: drops state->lock */
984 TRACE("mc2_new()\n"); 1062 TRACE("mc2_new()\n");
985 raw_spin_unlock(&_global_env.lock);
986 1063
987 mc2_update_timer_and_unlock(state); 1064 mc2_update_timer_and_unlock(state);
988 local_irq_restore(flags); 1065 local_irq_restore(flags);
989 } else 1066 } else
990 raw_spin_unlock_irqrestore(&state->lock, flags); 1067 raw_spin_unlock_irqrestore(&_global_env.lock, flags);
991 1068
992 res = res_find_by_id(state, tinfo->mc2_param.res_id); 1069 res = res_find_by_id(state, tinfo->mc2_param.res_id);
993 1070
@@ -1015,9 +1092,9 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
1015 /* if the reservation is global reservation */ 1092 /* if the reservation is global reservation */
1016 local_irq_save(flags); 1093 local_irq_save(flags);
1017 state = local_cpu_state(); 1094 state = local_cpu_state();
1018 raw_spin_lock(&state->lock);
1019 raw_spin_lock(&_global_env.lock); 1095 raw_spin_lock(&_global_env.lock);
1020 1096 raw_spin_lock(&state->lock);
1097
1021 list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) { 1098 list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
1022 if (res->id == reservation_id) { 1099 if (res->id == reservation_id) {
1023 TRACE("DESTROY RES FOUND!!!\n"); 1100 TRACE("DESTROY RES FOUND!!!\n");
@@ -1050,8 +1127,8 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
1050 } 1127 }
1051 } 1128 }
1052 1129
1053 raw_spin_unlock(&_global_env.lock);
1054 raw_spin_unlock(&state->lock); 1130 raw_spin_unlock(&state->lock);
1131 raw_spin_unlock(&_global_env.lock);
1055 local_irq_restore(flags); 1132 local_irq_restore(flags);
1056 } else { 1133 } else {
1057 /* if the reservation is partitioned reservation */ 1134 /* if the reservation is partitioned reservation */
@@ -1125,9 +1202,10 @@ static void mc2_task_exit(struct task_struct *tsk)
1125 state = cpu_state_for(tinfo->cpu); 1202 state = cpu_state_for(tinfo->cpu);
1126 else 1203 else
1127 state = local_cpu_state(); 1204 state = local_cpu_state();
1205
1206 raw_spin_lock_irqsave(&_global_env.lock, flags);
1207 raw_spin_lock(&state->lock);
1128 1208
1129 raw_spin_lock_irqsave(&state->lock, flags);
1130
1131 if (state->scheduled == tsk) 1209 if (state->scheduled == tsk)
1132 state->scheduled = NULL; 1210 state->scheduled = NULL;
1133 1211
@@ -1140,20 +1218,18 @@ static void mc2_task_exit(struct task_struct *tsk)
1140 /* Assumption: litmus_clock() is synchronized across cores 1218 /* Assumption: litmus_clock() is synchronized across cores
1141 * [see comment in pres_task_resume()] */ 1219 * [see comment in pres_task_resume()] */
1142 1220
1143 raw_spin_lock(&_global_env.lock);
1144 /* update both global and partitioned */ 1221 /* update both global and partitioned */
1145 mc2_update_time(lv, state, litmus_clock()); 1222 mc2_update_time(lv, state, litmus_clock());
1146
1147 mc2_update_ghost_state(state); 1223 mc2_update_ghost_state(state);
1148 task_departs(tsk, 0); 1224 task_departs(tsk, 0);
1149 1225
1150 /* NOTE: drops state->lock */ 1226 /* NOTE: drops state->lock */
1151 TRACE("mc2_exit()\n"); 1227 TRACE("mc2_exit()\n");
1152 raw_spin_unlock(&_global_env.lock); 1228
1153 mc2_update_timer_and_unlock(state); 1229 mc2_update_timer_and_unlock(state);
1154 local_irq_restore(flags); 1230 local_irq_restore(flags);
1155 } else 1231 } else
1156 raw_spin_unlock_irqrestore(&state->lock, flags); 1232 raw_spin_unlock_irqrestore(&_global_env.lock, flags);
1157 1233
1158 kfree(tsk_rt(tsk)->plugin_state); 1234 kfree(tsk_rt(tsk)->plugin_state);
1159 tsk_rt(tsk)->plugin_state = NULL; 1235 tsk_rt(tsk)->plugin_state = NULL;
@@ -1204,8 +1280,10 @@ static long create_polling_reservation(
1204 return -ENOMEM; 1280 return -ENOMEM;
1205 1281
1206 if (config->cpu != -1) { 1282 if (config->cpu != -1) {
1283
1284 raw_spin_lock_irqsave(&_global_env.lock, flags);
1207 state = cpu_state_for(config->cpu); 1285 state = cpu_state_for(config->cpu);
1208 raw_spin_lock_irqsave(&state->lock, flags); 1286 raw_spin_lock(&state->lock);
1209 1287
1210 res = sup_find_by_id(&state->sup_env, config->id); 1288 res = sup_find_by_id(&state->sup_env, config->id);
1211 if (!res) { 1289 if (!res) {
@@ -1225,7 +1303,9 @@ static long create_polling_reservation(
1225 err = -EEXIST; 1303 err = -EEXIST;
1226 } 1304 }
1227 1305
1228 raw_spin_unlock_irqrestore(&state->lock, flags); 1306 raw_spin_unlock(&state->lock);
1307 raw_spin_unlock_irqrestore(&_global_env.lock, flags);
1308
1229 } else { 1309 } else {
1230 raw_spin_lock_irqsave(&_global_env.lock, flags); 1310 raw_spin_lock_irqsave(&_global_env.lock, flags);
1231 1311
@@ -1467,8 +1547,8 @@ static long mc2_activate_plugin(void)
1467 hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 1547 hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1468 state->timer.function = on_scheduling_timer; 1548 state->timer.function = on_scheduling_timer;
1469 1549
1470 hrtimer_init(&state->g_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 1550// hrtimer_init(&state->g_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1471 state->g_timer.function = on_global_scheduling_timer; 1551// state->g_timer.function = on_global_scheduling_timer;
1472 } 1552 }
1473 1553
1474 mc2_setup_domain_proc(); 1554 mc2_setup_domain_proc();
@@ -1481,6 +1561,7 @@ static void mc2_finish_switch(struct task_struct *prev)
1481 struct mc2_cpu_state *state = local_cpu_state(); 1561 struct mc2_cpu_state *state = local_cpu_state();
1482 1562
1483 state->scheduled = is_realtime(current) ? current : NULL; 1563 state->scheduled = is_realtime(current) ? current : NULL;
1564 TRACE_TASK(prev, "FINISH CXS! complete=%d\n", tsk_rt(prev)->completed);
1484} 1565}
1485 1566
1486static long mc2_deactivate_plugin(void) 1567static long mc2_deactivate_plugin(void)
@@ -1490,12 +1571,14 @@ static long mc2_deactivate_plugin(void)
1490 struct reservation *res; 1571 struct reservation *res;
1491 struct next_timer_event *event; 1572 struct next_timer_event *event;
1492 1573
1574 raw_spin_lock(&_global_env.lock);
1575
1493 for_each_online_cpu(cpu) { 1576 for_each_online_cpu(cpu) {
1494 state = cpu_state_for(cpu); 1577 state = cpu_state_for(cpu);
1495 raw_spin_lock(&state->lock); 1578 raw_spin_lock(&state->lock);
1496 1579
1497 hrtimer_cancel(&state->timer); 1580 hrtimer_cancel(&state->timer);
1498 hrtimer_cancel(&state->g_timer); 1581// hrtimer_cancel(&state->g_timer);
1499 1582
1500 /* Delete all reservations --- assumes struct reservation 1583 /* Delete all reservations --- assumes struct reservation
1501 * is prefix of containing struct. */ 1584 * is prefix of containing struct. */
@@ -1527,8 +1610,7 @@ static long mc2_deactivate_plugin(void)
1527 raw_spin_unlock(&state->lock); 1610 raw_spin_unlock(&state->lock);
1528 } 1611 }
1529 1612
1530 raw_spin_lock(&_global_env.lock); 1613
1531
1532 while (!list_empty(&_global_env.active_reservations)) { 1614 while (!list_empty(&_global_env.active_reservations)) {
1533 TRACE("RES FOUND!!!\n"); 1615 TRACE("RES FOUND!!!\n");
1534 res = list_first_entry( 1616 res = list_first_entry(