author     Namhoon Kim <namhoonk@cs.unc.edu>  2015-03-02 15:57:54 -0500
committer  Namhoon Kim <namhoonk@cs.unc.edu>  2015-03-02 15:57:54 -0500
commit     d9f5d5edbda26349cf6bf4e7d371d6e91660fe0f (patch)
tree       12ea1c5453fb9e545a96d7a2ddf72f0b2588a694
parent     0a62a98d4cbd2f1cb0ecee6669f708a3e83afcb3 (diff)
Working version
-rw-r--r--  kernel/sched/litmus.c          |   2
-rw-r--r--  litmus/polling_reservations.c  |  28
-rw-r--r--  litmus/reservation.c           |  98
-rw-r--r--  litmus/sched_mc2.c             | 337
4 files changed, 187 insertions, 278 deletions
diff --git a/kernel/sched/litmus.c b/kernel/sched/litmus.c
index b84361f03f60..29cd69d5aa0c 100644
--- a/kernel/sched/litmus.c
+++ b/kernel/sched/litmus.c
@@ -73,7 +73,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 	if (next->rt_param.stack_in_use == NO_CPU)
 		TRACE_TASK(next,"descheduled. Proceeding.\n");
 
-	if (lt_before(_maybe_deadlock + 1000000000L,
+	if (lt_before(_maybe_deadlock + 5000000000L,
 		      litmus_clock())) {
 		/* We've been spinning for 1s.
 		 * Something can't be right!
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
index a3125ebf5273..df1aeb0a113f 100644
--- a/litmus/polling_reservations.c
+++ b/litmus/polling_reservations.c
@@ -383,11 +383,7 @@ static lt_t td_time_remaining_until_end(struct table_driven_reservation *tdres)
 {
 	lt_t now = tdres->res.env->current_time;
 	lt_t end = tdres->cur_interval.end;
-	TRACE("td_remaining(%u): start=%llu now=%llu end=%llu state=%d\n",
-		tdres->res.id,
-		tdres->cur_interval.start,
-		now, end,
-		tdres->res.state);
+	//TRACE("td_remaining(%u): start=%llu now=%llu end=%llu state=%d\n", tdres->res.id, tdres->cur_interval.start, now, end, tdres->res.state);
 	if (now >= end)
 		return 0;
 	else
@@ -400,24 +396,22 @@ static void td_replenish(
 	struct table_driven_reservation *tdres =
 		container_of(res, struct table_driven_reservation, res);
 
-	TRACE("td_replenish(%u): expected_replenishment=%llu\n", res->id,
-		res->next_replenishment);
+	//TRACE("td_replenish(%u): expected_replenishment=%llu\n", res->id, res->next_replenishment);
 
 	/* figure out current interval */
 	tdres->cur_interval.start = tdres->major_cycle_start +
 		tdres->intervals[tdres->next_interval].start;
 	tdres->cur_interval.end = tdres->major_cycle_start +
 		tdres->intervals[tdres->next_interval].end;
-	TRACE("major_cycle_start=%llu => [%llu, %llu]\n",
+/*	TRACE("major_cycle_start=%llu => [%llu, %llu]\n",
 		tdres->major_cycle_start,
 		tdres->cur_interval.start,
 		tdres->cur_interval.end);
-
+*/
 	/* reset budget */
 	res->cur_budget = td_time_remaining_until_end(tdres);
 	res->budget_consumed = 0;
-	TRACE("td_replenish(%u): %s budget=%llu\n", res->id,
-		res->cur_budget ? "" : "WARNING", res->cur_budget);
+	//TRACE("td_replenish(%u): %s budget=%llu\n", res->id, res->cur_budget ? "" : "WARNING", res->cur_budget);
 
 	/* prepare next slot */
 	tdres->next_interval = (tdres->next_interval + 1) % tdres->num_intervals;
@@ -428,8 +422,7 @@ static void td_replenish(
 	/* determine next time this reservation becomes eligible to execute */
 	res->next_replenishment = tdres->major_cycle_start;
 	res->next_replenishment += tdres->intervals[tdres->next_interval].start;
-	TRACE("td_replenish(%u): next_replenishment=%llu\n", res->id,
-		res->next_replenishment);
+	//TRACE("td_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
 
 
 	switch (res->state) {
@@ -465,7 +458,7 @@ static void td_drain_budget(
 	 * how much time is left in this allocation interval. */
 
 	/* sanity check: we should never try to drain from future slots */
-	TRACE("TD_DRAIN STATE(%d) [%llu,%llu] %llu ?\n", res->state, tdres->cur_interval.start, tdres->cur_interval.end, res->env->current_time);
+	//TRACE("TD_DRAIN STATE(%d) [%llu,%llu] %llu ?\n", res->state, tdres->cur_interval.start, tdres->cur_interval.end, res->env->current_time);
 	//BUG_ON(tdres->cur_interval.start > res->env->current_time);
 	if (tdres->cur_interval.start > res->env->current_time)
 		TRACE("TD_DRAIN BUG!!!!!!!!!!\n");
@@ -480,8 +473,7 @@ static void td_drain_budget(
 	case RESERVATION_ACTIVE_IDLE:
 	case RESERVATION_ACTIVE:
 		res->cur_budget = td_time_remaining_until_end(tdres);
-		TRACE("td_drain_budget(%u): drained to budget=%llu\n",
-			res->id, res->cur_budget);
+		//TRACE("td_drain_budget(%u): drained to budget=%llu\n", res->id, res->cur_budget);
 		if (!res->cur_budget) {
 			res->env->change_state(res->env, res,
 				RESERVATION_DEPLETED);
@@ -489,6 +481,10 @@ static void td_drain_budget(
 			/* sanity check budget calculation */
 			//BUG_ON(res->env->current_time >= tdres->cur_interval.end);
 			//BUG_ON(res->env->current_time < tdres->cur_interval.start);
+			if (res->env->current_time >= tdres->cur_interval.end)
+				printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING1\n");
+			if (res->env->current_time < tdres->cur_interval.start)
+				printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING2\n");
 		}
 
 		break;
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 3ec18a23f588..86d2f6e65382 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -4,6 +4,9 @@
 #include <litmus/litmus.h>
 #include <litmus/reservation.h>
 
+#define TRACE(fmt, args...) do {} while (false)
+#define TRACE_TASK(fmt, args...) do {} while (false)
+
 void reservation_init(struct reservation *res)
 {
 	memset(res, sizeof(*res), 0);
@@ -376,43 +379,6 @@ struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environme
 	return NULL;
 }
 
-/*
-static void gmp_scheduler_update_at(
-	struct gmp_reservation_environment* gmp_env, unsigned int id,
-	event_type_t type, lt_t when)
-{
-	struct next_timer_event *nevent, *queued;
-	struct list_head *pos;
-	int found = 0;
-
-	nevent = gmp_find_event_by_id(gmp_env, id);
-
-	if (!nevent) {
-		nevent = kzalloc(sizeof(*nevent), GFP_KERNEL);
-		nevent->next_update = when;
-		nevent->id = id;
-		nevent->timer_armed_on = NO_CPU;
-		nevent->type = type;
-
-		list_for_each(pos, &gmp_env->next_events) {
-			queued = list_entry(pos, struct next_timer_event, list);
-			if (queued->next_update > nevent->next_update) {
-				list_add(&nevent->list, pos->prev);
-				found = 1;
-				TRACE("NEXT_EVENT ADDED after %llu\n", queued->next_update);
-				break;
-			}
-		}
-
-		if (!found) {
-			list_add_tail(&nevent->list, &gmp_env->next_events);
-			TRACE("NEXT_EVENT ADDED at [0]\n");
-		}
-	} else {
-		TRACE("EVENT FOUND at %llu T(%d), NEW EVENT %llu T(%d)\n", nevent->next_update, nevent->type, when, type);
-	}
-}
-*/
 #define TIMER_RESOLUTION 100000L
 
 static void gmp_add_event(
@@ -425,11 +391,12 @@ static void gmp_add_event(
 
 	//when = div64_u64(when, TIMER_RESOLUTION);
 	//when *= TIMER_RESOLUTION;
-
+//printk(KERN_ALERT "GMP_ADD id=%d type=%d when=%llu\n", id, type, when);
 	nevent = gmp_find_event_by_id(gmp_env, id);
 
 	if (!nevent || nevent->type != type) {
 		nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
+		BUG_ON(!nevent);
 		nevent->next_update = when;
 		nevent->id = id;
 		nevent->type = type;
@@ -450,17 +417,19 @@ static void gmp_add_event(
 			TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
 		}
 	} else {
-		TRACE("EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
+		//TRACE("EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
+; //printk(KERN_ALERT "EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
 	}
 
-	TRACE("======START PRINTING EVENT LIST======\n");
-	gmp_print_events(gmp_env, litmus_clock());
-	TRACE("======FINISH PRINTING EVENT LIST======\n");
+	//TRACE("======START PRINTING EVENT LIST======\n");
+	//gmp_print_events(gmp_env, litmus_clock());
+	//TRACE("======FINISH PRINTING EVENT LIST======\n");
 }
 
 void gmp_add_event_after(
 	struct gmp_reservation_environment* gmp_env, lt_t timeout, unsigned int id, event_type_t type)
 {
+	//printk(KERN_ALERT "ADD_EVENT_AFTER id = %d\n", id);
 	gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
 }
 
@@ -472,19 +441,24 @@ static void gmp_queue_depleted(
 	struct reservation *queued;
 	int found = 0;
 
+//printk(KERN_ALERT "R%d request to enqueue depleted_list\n", res->id);
+
 	list_for_each(pos, &gmp_env->depleted_reservations) {
 		queued = list_entry(pos, struct reservation, list);
-		if (queued && queued->next_replenishment > res->next_replenishment) {
+		if (queued && (queued->next_replenishment > res->next_replenishment)) {
+//printk(KERN_ALERT "QUEUED R%d %llu\n", queued->id, queued->next_replenishment);
 			list_add(&res->list, pos->prev);
 			found = 1;
+			break;
 		}
 	}
 
 	if (!found)
 		list_add_tail(&res->list, &gmp_env->depleted_reservations);
+
 	TRACE("R%d queued to depleted_list\n", res->id);
+//printk(KERN_ALERT "R%d queued to depleted_list\n", res->id);
 	gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
-	res->event_added = 1;
 }
 
 static void gmp_queue_active(
@@ -520,10 +494,8 @@ static void gmp_queue_reservation(
 	struct gmp_reservation_environment* gmp_env,
 	struct reservation *res)
 {
-	if (res == NULL) {
-		BUG();
-		return;
-	}
+
+//printk(KERN_ALERT "DEBUG: Passed %s %d %p R%d STATE %d\n",__FUNCTION__,__LINE__, gmp_env, res->id, res->state);
 	switch (res->state) {
 	case RESERVATION_INACTIVE:
 		list_add(&res->list, &gmp_env->inactive_reservations);
@@ -584,8 +556,7 @@ static void gmp_charge_budget(
 		{
 			/* make sure scheduler is invoked when this reservation expires
 			 * its remaining budget */
-			TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n",
-				res->id, res->cur_budget);
+			TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
 			gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
 			res->event_added = 1;
 		}
@@ -593,7 +564,7 @@ static void gmp_charge_budget(
 		/* stop at the first ACTIVE reservation */
 		// break;
 	}
-	//TRACE("finished charging budgets\n");
+	TRACE("finished charging budgets\n");
 }
 
 static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
@@ -611,24 +582,6 @@ static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
 		}
 	}
 	TRACE("finished replenishing budgets\n");
-
-	/* request a scheduler update at the next replenishment instant */
-	list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
-		res = list_entry(pos, struct reservation, list);
-		if (res->event_added == 0) {
-			gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
-			res->event_added = 1;
-		}
-	}
-
-/*
-	res = list_first_entry_or_null(&gmp_env->depleted_reservations,
-		struct reservation, list);
-	if (res && res->event_added == 0) {
-		gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
-		res->event_added = 1;
-	}
-*/
 }
 
 #define EPSILON 50
@@ -644,7 +597,7 @@ int gmp_update_time(
 	/* If the time didn't advance, there is nothing to do.
 	 * This check makes it safe to call sup_advance_time() potentially
 	 * multiple times (e.g., via different code paths. */
-	TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
+	//TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
 	if (unlikely(now <= gmp_env->env.current_time + EPSILON))
 		return 0;
 
@@ -660,12 +613,15 @@ int gmp_update_time(
 	/* check if any budgets where replenished */
 	//TRACE("REPLENISH###\n");
 	gmp_replenish_budgets(gmp_env);
+
 
 	list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
 		if (event->next_update < now) {
 			list_del(&event->list);
 			TRACE("EVENT at %llu IS DELETED\n", event->next_update);
 			kfree(event);
+		} else {
+			break;
 		}
 	}
 
@@ -673,7 +629,7 @@ int gmp_update_time(
 
 	ret = min(gmp_env->schedule_now, NR_CPUS);
 	gmp_env->schedule_now = 0;
 
 	return ret;
 }
 
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index b3390dc87d47..f7758f2aff58 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -62,7 +62,6 @@ struct mc2_task_state {
 struct crit_entry {
 	enum crit_level level;
 	struct task_struct *running;
-	//struct hrtimer ghost_timer;
 };
 
 /* mc2_cpu_state - maintain the scheduled state and ghost jobs
@@ -74,7 +73,6 @@ struct mc2_cpu_state {
 
 	struct sup_reservation_environment sup_env;
 	struct hrtimer timer;
-	//struct hrtimer g_timer;
 
 	int cpu;
 	struct task_struct* scheduled;
@@ -221,7 +219,9 @@ static int get_lowest_prio_cpu(lt_t priority)
 		raw_spin_unlock(&_lowest_prio_cpu.lock);
 		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
 		return ce->cpu;
-	}
+	} else {
+		TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
+	}
 
 	for_each_online_cpu(cpu) {
 		ce = &_lowest_prio_cpu.cpu_entries[cpu];
@@ -287,6 +287,10 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
 		/* If the event time is already passed, we call schedule() on
 		   the lowest priority cpu */
+		if (event->next_update >= update) {
+			break;
+		}
+
 		if (event->next_update < litmus_clock()) {
 			if (event->timer_armed_on == NO_CPU) {
 				struct reservation *res = gmp_find_by_id(&_global_env, event->id);
@@ -351,70 +355,33 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			litmus_reschedule(state->cpu);
 		}
 	}
-
-#if 0
-	raw_spin_lock(&_global_env.lock);
-	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
-		if (event->timer_armed_on == NO_CPU) {
-			/* If the event time is already passed, we call schedule() on
-			   the lowest priority cpu */
-			if (event->next_update < litmus_clock()) {
-				int cpu = get_lowest_prio_cpu();
-				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
-				list_del(&event->list);
-				kfree(event);
-				if (cpu != NO_CPU) {
-					raw_spin_lock(&_lowest_prio_cpu.lock);
-					_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-					raw_spin_unlock(&_lowest_prio_cpu.lock);
-					litmus_reschedule(cpu);
-				}
-			} else if (!hrtimer_active(&state->g_timer)) {
-				int ret;
-
-				raw_spin_unlock(&_global_env.lock);
-				found_event = 1;
-
-				TRACE("setting global scheduler timer for %llu\n",
-					event->next_update);
-				ret = __hrtimer_start_range_ns(&state->g_timer,
-					ns_to_ktime(event->next_update),
-					0 /* timer coalescing slack */,
-					HRTIMER_MODE_ABS_PINNED,
-					0 /* wakeup */);
-				if (!ret) {
-					event->timer_armed_on = state->cpu;
-					break;
-				}
-			}
-		}
-	}
-	if (found_event == 0)
-		raw_spin_unlock(&_global_env.lock);
-#endif
 }
 
 /* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs
  * If the budget of a ghost is exhausted,
  * clear is_ghost and reschedule
  */
-static void mc2_update_ghost_state(struct mc2_cpu_state *state)
+static lt_t mc2_update_ghost_state(struct mc2_cpu_state *state)
 {
 	int lv = 0;
 	struct crit_entry* ce;
 	struct reservation *res;
 	struct mc2_task_state *tinfo;
-
+	lt_t ret = ULLONG_MAX;
+
 	BUG_ON(!state);
 
 	for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
 		ce = &state->crit_entries[lv];
 		if (ce->running != NULL) {
+//printk(KERN_ALERT "P%d ce->running : %s/%d\n", state->cpu, ce->running ? (ce->running)->comm : "null", ce->running ? (ce->running)->pid : 0);
 			tinfo = get_mc2_state(ce->running);
 			if (!tinfo)
 				continue;
 
 			res = res_find_by_id(state, tinfo->mc2_param.res_id);
+			BUG_ON(!res);
+//printk(KERN_ALERT "R%d found!\n", res->id);
 			TRACE("LV %d running id %d budget %llu\n",
 				lv, tinfo->mc2_param.res_id, res->cur_budget);
 			/* If the budget is exhausted, clear is_ghost and reschedule */
@@ -432,7 +399,7 @@ static void mc2_update_ghost_state(struct mc2_cpu_state *state)
 						struct reservation, list);
 					if (res)
 						litmus_reschedule_local();
-				} else {
+				} else if (lv == CRIT_LEVEL_C) {
 					res = list_first_entry_or_null(
 						&_global_env.active_reservations,
 						struct reservation, list);
@@ -440,11 +407,16 @@ static void mc2_update_ghost_state(struct mc2_cpu_state *state)
 						litmus_reschedule(state->cpu);
 				}
 			} else {
-				TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget);
-				gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
+				//TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget);
+				//gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
+				if (ret > res->cur_budget) {
+					ret = res->cur_budget;
+				}
 			}
 		}
 	}
+
+	return ret;
 }
 
 /* update_cpu_prio - Update cpu's priority
@@ -474,84 +446,6 @@ static void update_cpu_prio(struct mc2_cpu_state *state)
 	}
 };
 
-#if 0
-/* on_global_scheduling_timer - Process the budget accounting (replenish
- * and charge)
- */
-static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer)
-{
-	unsigned long flags;
-	enum hrtimer_restart restart = HRTIMER_NORESTART;
-	struct mc2_cpu_state *state;
-	struct next_timer_event *event, *next;
-	int schedule_now;
-	lt_t update, now;
-	int found_event = 0;
-
-	state = container_of(timer, struct mc2_cpu_state, g_timer);
-
-	raw_spin_lock_irqsave(&state->lock, flags);
-
-	/* The scheduling timer should only fire on the local CPU, because
-	 * otherwise deadlocks via timer_cancel() are possible.
-	 * Note: this does not interfere with dedicated interrupt handling, as
-	 * even under dedicated interrupt handling scheduling timers for
-	 * budget enforcement must occur locally on each CPU.
-	 */
-	//BUG_ON(state->cpu != raw_smp_processor_id());
-	if (state->cpu != raw_smp_processor_id())
-		TRACE("BUG!!!!!!!!!!!!! TIMER FIRED ON THE OTHER CPU\n");
-
-	raw_spin_lock(&_global_env.lock);
-
-	update = litmus_clock();
-	TRACE("GLOBAL TIMER FIRED at %llu\n", update);
-
-	/* The event can be processed by the other cpus. So, if there is no
-	   events to process, we do nothing */
-	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
-		if (event->next_update < update) {
-			found_event = 1;
-			list_del(&event->list);
-			TRACE("EVENT at %llu IS DELETED\n", event->next_update);
-			kfree(event);
-		}
-	}
-
-	if (!found_event) {
-		goto unlock;
-	}
-
-	/* gmp_update_timer returns how many tasks become ACTIVE */
-	schedule_now = gmp_update_time(&_global_env, update);
-
-	mc2_update_ghost_state(state);
-
-	now = _global_env.env.current_time;
-
-	TRACE_CUR("on_global_scheduling_timer at %llu, upd:%llu (for cpu=%d) SCHEDULE_NOW = %d\n",
-		now, update, state->cpu, schedule_now);
-
-	/* Find the lowest cpu, and call reschedule */
-	while (schedule_now--) {
-		int cpu = get_lowest_prio_cpu();
-		if (cpu != NO_CPU) {
-			raw_spin_lock(&_lowest_prio_cpu.lock);
-			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-			raw_spin_unlock(&_lowest_prio_cpu.lock);
-			TRACE("LOWEST CPU = P%d\n", cpu);
-			litmus_reschedule(cpu);
-		}
-	}
-
-unlock:
-	raw_spin_unlock(&_global_env.lock);
-	raw_spin_unlock_irqrestore(&state->lock, flags);
-
-	return restart;
-}
-#endif
-
 /* on_scheduling_timer - timer event for partitioned tasks
  */
 static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
@@ -561,7 +455,8 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	struct mc2_cpu_state *state;
 	lt_t update, now;
 	int global_schedule_now;
-
+	lt_t remain_budget;
+
 	state = container_of(timer, struct mc2_cpu_state, timer);
 
 	/* The scheduling timer should only fire on the local CPU, because
@@ -575,18 +470,22 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	TRACE("TIMER FIRED at %llu\n", litmus_clock());
 	raw_spin_lock_irqsave(&_global_env.lock, flags);
 	raw_spin_lock(&state->lock);
-
-	sup_update_time(&state->sup_env, litmus_clock());
-	global_schedule_now = gmp_update_time(&_global_env, litmus_clock());
-
-	mc2_update_ghost_state(state);
+//printk(KERN_ALERT "P%d on_scheduling_timer() hold lock %s/%d\n", state->cpu, current ? (current)->comm : "null", current ? (current)->pid : 0);
+	now = litmus_clock();
+	sup_update_time(&state->sup_env, now);
+	global_schedule_now = gmp_update_time(&_global_env, now);
+//printk(KERN_ALERT "P%d update_time in timer() %s/%d\n", state->cpu, current ? (current)->comm : "null", current ? (current)->pid : 0);
+	remain_budget = mc2_update_ghost_state(state);
 
 	update = state->sup_env.next_scheduler_update;
 	now = state->sup_env.env.current_time;
 
-	TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d\n",
-		now, update, state->cpu, global_schedule_now);
-
+	if (remain_budget != ULLONG_MAX && update > now + remain_budget) {
+		update = now + remain_budget;
+	}
+
+	//TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d\n", now, update, state->cpu, global_schedule_now);
+//printk(KERN_ALERT "on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d\n", now, update, state->cpu, global_schedule_now);
 	if (update <= now) {
 		litmus_reschedule_local();
 	} else if (update != SUP_NO_SCHEDULER_UPDATE) {
@@ -594,6 +493,8 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 		restart = HRTIMER_RESTART;
 	}
 
+	BUG_ON(global_schedule_now < 0 || global_schedule_now > 4);
+
 	/* Find the lowest cpu, and call reschedule */
 	while (global_schedule_now--) {
 		int cpu = get_lowest_prio_cpu(0);
@@ -601,14 +502,14 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 			raw_spin_lock(&_lowest_prio_cpu.lock);
 			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 			raw_spin_unlock(&_lowest_prio_cpu.lock);
-			TRACE("LOWEST CPU = P%d\n", cpu);
+			//TRACE("LOWEST CPU = P%d\n", cpu);
 			litmus_reschedule(cpu);
 		}
 	}
 
 	raw_spin_unlock(&state->lock);
 	raw_spin_unlock_irqrestore(&_global_env.lock, flags);
-
+//printk(KERN_ALERT "P%d on_scheduling_timer() release lock %s/%d\n", state->cpu, current ? (current)->comm : "null", current ? (current)->pid : 0);
 	return restart;
 }
 
@@ -651,6 +552,13 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 	}
 
 	/* no eligible level A or B tasks exists */
+	/* check the ghost job */
+	ce = &state->crit_entries[CRIT_LEVEL_C];
+	if (ce->running) {
+		TRACE_TASK(ce->running," is GHOST\n");
+		return NULL;
+	}
+
 	cur_priority = _lowest_prio_cpu.cpu_entries[state->cpu].deadline;
 
 	TRACE("****** ACTIVE LIST ******\n");
@@ -663,21 +571,24 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 			lv = get_task_crit_level(tsk);
 			if (lv == NUM_CRIT_LEVELS) {
 				gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
-				res->event_added = 1;
+				//res->event_added = 1;
 				return tsk;
-			} else {
-				ce = &state->crit_entries[lv];
-				if (likely(!ce->running)) {
+			} else if (lv == CRIT_LEVEL_C) {
+				//ce = &state->crit_entries[lv];
+				//if (likely(!ce->running)) {
 					gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
 					res->event_added = 1;
 					res->blocked_by_ghost = 0;
 					res->is_ghost = 0;
 					res->scheduled_on = state->cpu;
 					return tsk;
-				} else {
-					res->blocked_by_ghost = 1;
-					TRACE_TASK(ce->running, " is GHOST\n");
-				}
+				//} else {
+				//	res->blocked_by_ghost = 1;
+				//	TRACE_TASK(ce->running, " is GHOST\n");
+				//	return NULL;
+				//}
+			} else {
+				BUG();
 			}
 		}
 	}
@@ -711,8 +622,9 @@ static void post_schedule(struct task_struct *next)
 static struct task_struct* mc2_schedule(struct task_struct * prev)
 {
 	/* next == NULL means "schedule background work". */
+	lt_t now;
 	struct mc2_cpu_state *state = local_cpu_state();
 
 	pre_schedule(prev);
 
 	raw_spin_lock(&_global_env.lock);
@@ -721,17 +633,18 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	//BUG_ON(state->scheduled && state->scheduled != prev);
 	//BUG_ON(state->scheduled && !is_realtime(prev));
 	if (state->scheduled && state->scheduled != prev)
-		TRACE("BUG1!!!!!!!!\n");
+		printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
 	if (state->scheduled && !is_realtime(prev))
-		TRACE("BUG2!!!!!!!!\n");
+		printk(KERN_ALERT "BUG2!!!!!!!! \n");
 
 	/* update time */
 	state->sup_env.will_schedule = true;
 
-	sup_update_time(&state->sup_env, litmus_clock());
-	gmp_update_time(&_global_env, litmus_clock());
-
-	mc2_update_ghost_state(state);
+	now = litmus_clock();
+	sup_update_time(&state->sup_env, now);
+	gmp_update_time(&_global_env, now);
+
+	mc2_update_ghost_state(state);
 
 	/* remove task from reservation if it blocks */
 	if (is_realtime(prev) && !is_running(prev))
@@ -767,14 +680,17 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		TRACE_TASK(prev, "descheduled.\n");
 		/* if prev is preempted and a global task, find the lowest cpu and reschedule */
 		if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
-			int cpu = get_lowest_prio_cpu(res?res->priority:0);
-			TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
+			int cpu;
+			raw_spin_lock(&_global_env.lock);
+			cpu = get_lowest_prio_cpu(res?res->priority:0);
+			//TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
 			if (cpu != NO_CPU) {
 				raw_spin_lock(&_lowest_prio_cpu.lock);
 				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 				raw_spin_unlock(&_lowest_prio_cpu.lock);
 				litmus_reschedule(cpu);
 			}
+			raw_spin_unlock(&_global_env.lock);
 		}
 	}
 	if (state->scheduled) {
@@ -814,12 +730,14 @@ static void mc2_task_resume(struct task_struct *tsk)
 
 	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
+	local_irq_save(flags);
 	if (tinfo->cpu != -1)
 		state = cpu_state_for(tinfo->cpu);
 	else
 		state = local_cpu_state();
 
-	raw_spin_lock_irqsave(&_global_env.lock, flags);
+	raw_spin_lock(&_global_env.lock);
+//printk(KERN_ALERT "P%d resume() hold lock\n", state->cpu);
 	/* Requeue only if self-suspension was already processed. */
 	if (tinfo->has_departed)
 	{
@@ -830,22 +748,25 @@ static void mc2_task_resume(struct task_struct *tsk)
 		if (tinfo->cpu != -1) {
 			sup_update_time(&state->sup_env, litmus_clock());
 		} else {
-			TRACE("RESUME UPDATE ####\n");
+			//TRACE("RESUME UPDATE ####\n");
 			gmp_update_time(&_global_env, litmus_clock());
-			TRACE("RESUME UPDATE $$$$\n");
+			//TRACE("RESUME UPDATE $$$$\n");
 		}
 
 		mc2_update_ghost_state(state);
 		task_arrives(state, tsk);
 		/* NOTE: drops state->lock */
 		TRACE_TASK(tsk, "mc2_resume()\n");
 		mc2_update_timer_and_unlock(state);
-		local_irq_restore(flags);
+//printk(KERN_ALERT "P%d resume() dropped lock\n", state->cpu);
 	} else {
 		TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
-		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+		raw_spin_unlock(&_global_env.lock);
+//printk(KERN_ALERT "P%d resume() release lock\n", state->cpu);
 	}
 
+	local_irq_restore(flags);
+
 	resume_legacy_task_model_updates(tsk);
 }
 
@@ -870,6 +791,7 @@ static long mc2_complete_job(void)
 	struct reservation *res;
 	unsigned long flags;
 
+	preempt_disable();
 	local_irq_save(flags);
 
 	tinfo = get_mc2_state(current);
@@ -881,7 +803,7 @@ static long mc2_complete_job(void)
 
 		raw_spin_lock(&_global_env.lock);
 		raw_spin_lock(&state->lock);
-
+//printk(KERN_ALERT "P%d complete() hold lock\n", state->cpu);
 		env = &(state->sup_env.env);
 
 		res = res_find_by_id(state, tinfo->mc2_param.res_id);
@@ -905,12 +827,13 @@ static long mc2_complete_job(void)
 		res->cur_budget = 0;
 		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-		TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+		//TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
 
 		raw_spin_unlock(&state->lock);
 		raw_spin_unlock(&_global_env.lock);
-
+//printk(KERN_ALERT "P%d complete() release lock\n", state->cpu);
 		local_irq_restore(flags);
+		preempt_enable();
 	}
 	sched_trace_task_completion(current, 0);
 
@@ -937,6 +860,7 @@ static long mc2_complete_job(void)
 	}
 
 	TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
+
 	return err;
 }
 
@@ -988,6 +912,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	} else if (lv == CRIT_LEVEL_C) {
 		raw_spin_lock_irqsave(&_global_env.lock, flags);
+//printk(KERN_ALERT "admit() hold lock\n");
 		state = local_cpu_state();
 
 		raw_spin_lock(&state->lock);
@@ -1012,6 +937,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 
 		raw_spin_unlock(&state->lock);
 		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+//printk(KERN_ALERT "admit() release lock\n");
 	}
 
 	preempt_enable();
@@ -1033,25 +959,30 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
 	struct reservation *res;
 	enum crit_level lv = get_task_crit_level(tsk);
+	lt_t release = 0;
 
 	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
 		   litmus_clock(), on_runqueue, is_running);
 
+	local_irq_save(flags);
 	if (tinfo->cpu == -1)
 		state = local_cpu_state();
 	else
 		state = cpu_state_for(tinfo->cpu);
 
 	/* acquire the lock protecting the state and disable interrupts */
-	raw_spin_lock_irqsave(&_global_env.lock, flags);
+	raw_spin_lock(&_global_env.lock);
 	raw_spin_lock(&state->lock);
-
+//printk(KERN_ALERT "new() hold lock R%d\n", tinfo->mc2_param.res_id);
 	if (is_running) {
 		state->scheduled = tsk;
 		/* make sure this task should actually be running */
 		litmus_reschedule_local();
 	}
 
+	res = res_find_by_id(state, tinfo->mc2_param.res_id);
+	release = res->next_replenishment;
+
 	if (on_runqueue || is_running) {
 		/* Assumption: litmus_clock() is synchronized across cores
 		 * [see comment in pres_task_resume()] */
@@ -1062,18 +993,20 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 		TRACE("mc2_new()\n");
 
 		mc2_update_timer_and_unlock(state);
-		local_irq_restore(flags);
-	} else
-		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
-
-	res = res_find_by_id(state, tinfo->mc2_param.res_id);
+//printk(KERN_ALERT "new() dropped lock R%d\n",tinfo->mc2_param.res_id);
+	} else {
+		raw_spin_unlock(&state->lock);
+		raw_spin_unlock(&_global_env.lock);
+//printk(KERN_ALERT "new() release lock R%d\n",tinfo->mc2_param.res_id);
+	}
+	local_irq_restore(flags);
 
-	if (res) {
-		TRACE_TASK(tsk, "mc2_task_new() next_replenishment = %llu\n", res->next_replenishment);
-		release_at(tsk, res->next_replenishment);
+	if (!release) {
+		TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
+		release_at(tsk, release);
 	}
 	else
-		TRACE_TASK(tsk, "mc2_task_new() next_replenishment = NULL\n");
+		TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
 }
 
 /* mc2_reservation_destroy - reservation_destroy system call backend
@@ -1196,14 +1129,16 @@ static void mc2_task_exit(struct task_struct *tsk)
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
 	struct mc2_cpu_state *state;
 	enum crit_level lv = tinfo->mc2_param.crit;
 	struct crit_entry* ce;
+	int cpu;
 
+	local_irq_save(flags);
 	if (tinfo->cpu != -1)
 		state = cpu_state_for(tinfo->cpu);
 	else
 		state = local_cpu_state();
 
-	raw_spin_lock_irqsave(&_global_env.lock, flags);
+	raw_spin_lock(&_global_env.lock);
 	raw_spin_lock(&state->lock);
 
 	if (state->scheduled == tsk)
@@ -1226,11 +1161,30 @@ static void mc2_task_exit(struct task_struct *tsk)
 		/* NOTE: drops state->lock */
 		TRACE("mc2_exit()\n");
 
 		mc2_update_timer_and_unlock(state);
-		local_irq_restore(flags);
-	} else
-		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+	} else {
+		raw_spin_unlock(&state->lock);
+		raw_spin_unlock(&_global_env.lock);
+	}
 
+	for_each_online_cpu(cpu) {
+		state = cpu_state_for(cpu);
+		if (state == local_cpu_state())
+			continue;
+		raw_spin_lock(&state->lock);
+
+		if (state->scheduled == tsk)
+			state->scheduled = NULL;
+
+		ce = &state->crit_entries[lv];
+		if (ce->running == tsk)
+			ce->running = NULL;
+
+		raw_spin_unlock(&state->lock);
+	}
+
+	local_irq_restore(flags);
+
 	kfree(tsk_rt(tsk)->plugin_state);
 	tsk_rt(tsk)->plugin_state = NULL;
 	kfree(tsk_rt(tsk)->mc2_data);
@@ -1539,16 +1493,11 @@ static long mc2_activate_plugin(void)
 		struct crit_entry *cr_entry = &state->crit_entries[lv];
 		cr_entry->level = lv;
 		cr_entry->running = NULL;
-		//hrtimer_init(&ce->ghost_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
-		//ce->ghost_timer.function = on_ghost_timer;
 		}
 		sup_init(&state->sup_env);
 
 		hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 		state->timer.function = on_scheduling_timer;
-
-//		hrtimer_init(&state->g_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
-//		state->g_timer.function = on_global_scheduling_timer;
 	}
 
 	mc2_setup_domain_proc();
@@ -1561,7 +1510,7 @@ static void mc2_finish_switch(struct task_struct *prev)
 	struct mc2_cpu_state *state = local_cpu_state();
 
 	state->scheduled = is_realtime(current) ? current : NULL;
-	TRACE_TASK(prev, "FINISH CXS! complete=%d\n", tsk_rt(prev)->completed);
+	TRACE("FINISH CXS! from %s/%d to %s/%d\n", prev ? (prev)->comm : "null", prev ? (prev)->pid : 0, current ? (current)->comm : "null", current ? (current)->pid : 0);
 }
 
 static long mc2_deactivate_plugin(void)
@@ -1570,7 +1519,8 @@ static long mc2_deactivate_plugin(void)
 	struct mc2_cpu_state *state;
 	struct reservation *res;
 	struct next_timer_event *event;
-
+	struct cpu_entry *ce;
+
 	raw_spin_lock(&_global_env.lock);
 
 	for_each_online_cpu(cpu) {
@@ -1578,7 +1528,14 @@ static long mc2_deactivate_plugin(void)
 		raw_spin_lock(&state->lock);
 
 		hrtimer_cancel(&state->timer);
-//		hrtimer_cancel(&state->g_timer);
+
+		ce = &_lowest_prio_cpu.cpu_entries[cpu];
+
+		ce->cpu = cpu;
+		ce->scheduled = NULL;
+		ce->deadline = ULLONG_MAX;
+		ce->lv = NUM_CRIT_LEVELS;
+		ce->will_schedule = false;
 
 		/* Delete all reservations --- assumes struct reservation
 		 * is prefix of containing struct. */