aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/reservation.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/reservation.c')
-rw-r--r--litmus/reservation.c369
1 files changed, 364 insertions, 5 deletions
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 0bc551e2e67b..08c74f9005b3 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -1,4 +1,5 @@
1#include <linux/sched.h> 1#include <linux/sched.h>
2#include <linux/slab.h>
2 3
3#include <litmus/litmus.h> 4#include <litmus/litmus.h>
4#include <litmus/reservation.h> 5#include <litmus/reservation.h>
@@ -52,7 +53,8 @@ static void sup_scheduler_update_at(
52 sup_env->next_scheduler_update = when; 53 sup_env->next_scheduler_update = when;
53} 54}
54 55
55static void sup_scheduler_update_after( 56/* changed from static to non-static because it's now used outside this file */
57void sup_scheduler_update_after(
56 struct sup_reservation_environment* sup_env, 58 struct sup_reservation_environment* sup_env,
57 lt_t timeout) 59 lt_t timeout)
58{ 60{
@@ -192,8 +194,12 @@ static void sup_charge_budget(
192 /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */ 194 /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
193 res = list_entry(pos, struct reservation, list); 195 res = list_entry(pos, struct reservation, list);
194 if (res->state == RESERVATION_ACTIVE) { 196 if (res->state == RESERVATION_ACTIVE) {
195 res->ops->drain_budget(res, delta); 197 TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
196 encountered_active = 1; 198 if (encountered_active == 0 && res->blocked_by_ghost == 0) {
199 TRACE("DRAIN !!\n");
200 res->ops->drain_budget(res, delta);
201 encountered_active = 1;
202 }
197 } else { 203 } else {
198 BUG_ON(res->state != RESERVATION_ACTIVE_IDLE); 204 BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
199 res->ops->drain_budget(res, delta); 205 res->ops->drain_budget(res, delta);
@@ -207,9 +213,9 @@ static void sup_charge_budget(
207 res->id, res->cur_budget); 213 res->id, res->cur_budget);
208 sup_scheduler_update_after(sup_env, res->cur_budget); 214 sup_scheduler_update_after(sup_env, res->cur_budget);
209 } 215 }
210 if (encountered_active) 216 //if (encountered_active)
211 /* stop at the first ACTIVE reservation */ 217 /* stop at the first ACTIVE reservation */
212 break; 218 //break;
213 } 219 }
214 TRACE("finished charging budgets\n"); 220 TRACE("finished charging budgets\n");
215} 221}
@@ -317,3 +323,356 @@ void sup_init(struct sup_reservation_environment* sup_env)
317 323
318 sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE; 324 sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
319} 325}
326
327/* NOTE: Namhoon's implementation starts below. */
328struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
329 unsigned int id)
330{
331 struct reservation *res;
332
333 list_for_each_entry(res, &gmp_env->active_reservations, list) {
334 if (res->id == id)
335 return res;
336 }
337 list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
338 if (res->id == id)
339 return res;
340 }
341 list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
342 if (res->id == id)
343 return res;
344 }
345
346 return NULL;
347}
348
349
350struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
351 unsigned int id)
352{
353 struct next_timer_event *event;
354
355 list_for_each_entry(event, &gmp_env->next_events, list) {
356 if (event->id == id)
357 return event;
358 }
359
360 return NULL;
361}
362
363
364struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
365 lt_t when)
366{
367 struct next_timer_event *event;
368
369 list_for_each_entry(event, &gmp_env->next_events, list) {
370 if (event->next_update == when)
371 return event;
372 }
373
374 return NULL;
375}
376
377#define TIMER_RESOLUTION 100000L
378
379static void gmp_add_event(
380 struct gmp_reservation_environment* gmp_env,
381 lt_t when, unsigned int id, event_type_t type)
382{
383 struct next_timer_event *nevent, *queued;
384 struct list_head *pos;
385 int found = 0;
386
387 //when = div64_u64(when, TIMER_RESOLUTION);
388 //when *= TIMER_RESOLUTION;
389//printk(KERN_ALERT "GMP_ADD id=%d type=%d when=%llu\n", id, type, when);
390 nevent = gmp_find_event_by_id(gmp_env, id);
391
392 if (!nevent || nevent->type != type) {
393 nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
394 BUG_ON(!nevent);
395 nevent->next_update = when;
396 nevent->id = id;
397 nevent->type = type;
398 nevent->timer_armed_on = NO_CPU;
399
400 list_for_each(pos, &gmp_env->next_events) {
401 queued = list_entry(pos, struct next_timer_event, list);
402 if (queued->next_update > nevent->next_update) {
403 list_add(&nevent->list, pos->prev);
404 found = 1;
405 TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at before %llu\n", nevent->id, nevent->type, nevent->next_update, queued->next_update);
406 break;
407 }
408 }
409
410 if (!found) {
411 list_add_tail(&nevent->list, &gmp_env->next_events);
412 TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
413 }
414 } else {
415 //TRACE("EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
416; //printk(KERN_ALERT "EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
417 }
418
419 //TRACE("======START PRINTING EVENT LIST======\n");
420 //gmp_print_events(gmp_env, litmus_clock());
421 //TRACE("======FINISH PRINTING EVENT LIST======\n");
422}
423
424void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
425 lt_t timeout, unsigned int id, event_type_t type)
426{
427 //printk(KERN_ALERT "ADD_EVENT_AFTER id = %d\n", id);
428 gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
429}
430
431static void gmp_queue_depleted(
432 struct gmp_reservation_environment* gmp_env,
433 struct reservation *res)
434{
435 struct list_head *pos;
436 struct reservation *queued;
437 int found = 0;
438
439//printk(KERN_ALERT "R%d request to enqueue depleted_list\n", res->id);
440
441 list_for_each(pos, &gmp_env->depleted_reservations) {
442 queued = list_entry(pos, struct reservation, list);
443 if (queued && (queued->next_replenishment > res->next_replenishment)) {
444//printk(KERN_ALERT "QUEUED R%d %llu\n", queued->id, queued->next_replenishment);
445 list_add(&res->list, pos->prev);
446 found = 1;
447 break;
448 }
449 }
450
451 if (!found)
452 list_add_tail(&res->list, &gmp_env->depleted_reservations);
453
454 TRACE("R%d queued to depleted_list\n", res->id);
455//printk(KERN_ALERT "R%d queued to depleted_list\n", res->id);
456 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
457}
458
459static void gmp_queue_active(
460 struct gmp_reservation_environment* gmp_env,
461 struct reservation *res)
462{
463 struct list_head *pos;
464 struct reservation *queued;
465 int check_preempt = 1, found = 0;
466
467 list_for_each(pos, &gmp_env->active_reservations) {
468 queued = list_entry(pos, struct reservation, list);
469 if (queued->priority > res->priority) {
470 list_add(&res->list, pos->prev);
471 found = 1;
472 break;
473 } else if (queued->scheduled_on == NO_CPU)
474 check_preempt = 0;
475 }
476
477 if (!found)
478 list_add_tail(&res->list, &gmp_env->active_reservations);
479
480 /* check for possible preemption */
481 if (res->state == RESERVATION_ACTIVE && check_preempt)
482 gmp_env->schedule_now++;
483
484 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
485 res->event_added = 1;
486}
487
488static void gmp_queue_reservation(
489 struct gmp_reservation_environment* gmp_env,
490 struct reservation *res)
491{
492
493//printk(KERN_ALERT "DEBUG: Passed %s %d %p R%d STATE %d\n",__FUNCTION__,__LINE__, gmp_env, res->id, res->state);
494 switch (res->state) {
495 case RESERVATION_INACTIVE:
496 list_add(&res->list, &gmp_env->inactive_reservations);
497 break;
498
499 case RESERVATION_DEPLETED:
500 gmp_queue_depleted(gmp_env, res);
501 break;
502
503 case RESERVATION_ACTIVE_IDLE:
504 case RESERVATION_ACTIVE:
505 gmp_queue_active(gmp_env, res);
506 break;
507 }
508}
509
510void gmp_add_new_reservation(
511 struct gmp_reservation_environment* gmp_env,
512 struct reservation* new_res)
513{
514 new_res->env = &gmp_env->env;
515 gmp_queue_reservation(gmp_env, new_res);
516}
517
518static void gmp_charge_budget(
519 struct gmp_reservation_environment* gmp_env,
520 lt_t delta)
521{
522 struct list_head *pos, *next;
523 struct reservation *res;
524
525 list_for_each_safe(pos, next, &gmp_env->active_reservations) {
526 int drained = 0;
527 /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
528 res = list_entry(pos, struct reservation, list);
529 if (res->state == RESERVATION_ACTIVE) {
530 TRACE("gmp_charge_budget ACTIVE R%u scheduled_on=%d drain %llu\n", res->id, res->scheduled_on, delta);
531 if (res->scheduled_on != NO_CPU
532 && res->blocked_by_ghost == 0) {
533 TRACE("DRAIN !!\n");
534 drained = 1;
535 res->ops->drain_budget(res, delta);
536 } else {
537 TRACE("NO DRAIN (not scheduled)!!\n");
538 }
539 } else {
540 //BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
541 if (res->state != RESERVATION_ACTIVE_IDLE)
542 TRACE("BUG!!!!!!!!!!!! gmp_charge_budget()\n");
543 TRACE("gmp_charge_budget INACTIVE R%u drain %llu\n",
544 res->id, delta);
545 //if (res->is_ghost == 1) {
546 TRACE("DRAIN !!\n");
547 drained = 1;
548 res->ops->drain_budget(res, delta);
549 //}
550 }
551 if ((res->state == RESERVATION_ACTIVE ||
552 res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1))
553 {
554 /* make sure scheduler is invoked when this reservation expires
555 * its remaining budget */
556 TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
557 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
558 res->event_added = 1;
559 }
560 //if (encountered_active == 2)
561 /* stop at the first ACTIVE reservation */
562 // break;
563 }
564 TRACE("finished charging budgets\n");
565}
566
567static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
568{
569 struct list_head *pos, *next;
570 struct reservation *res;
571
572 list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
573 res = list_entry(pos, struct reservation, list);
574 if (res->next_replenishment <= gmp_env->env.current_time) {
575 res->ops->replenish(res);
576 } else {
577 /* list is ordered by increasing depletion times */
578 break;
579 }
580 }
581 TRACE("finished replenishing budgets\n");
582}
583
584#define EPSILON 50
585
586/* return schedule_now */
587int gmp_update_time(
588 struct gmp_reservation_environment* gmp_env,
589 lt_t now)
590{
591 struct next_timer_event *event, *next;
592 lt_t delta, ret;
593
594 /* If the time didn't advance, there is nothing to do.
595 * This check makes it safe to call sup_advance_time() potentially
596 * multiple times (e.g., via different code paths. */
597 //TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
598 if (unlikely(now <= gmp_env->env.current_time + EPSILON))
599 return 0;
600
601 delta = now - gmp_env->env.current_time;
602 gmp_env->env.current_time = now;
603
604
605 //gmp_print_events(gmp_env, now);
606 /* deplete budgets by passage of time */
607 //TRACE("CHARGE###\n");
608 gmp_charge_budget(gmp_env, delta);
609
610 /* check if any budgets where replenished */
611 //TRACE("REPLENISH###\n");
612 gmp_replenish_budgets(gmp_env);
613
614
615 list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
616 if (event->next_update < now) {
617 list_del(&event->list);
618 TRACE("EVENT at %llu IS DELETED\n", event->next_update);
619 kfree(event);
620 } else {
621 break;
622 }
623 }
624
625 //gmp_print_events(gmp_env, litmus_clock());
626
627 ret = min(gmp_env->schedule_now, NR_CPUS);
628 gmp_env->schedule_now = 0;
629
630 return ret;
631}
632
633void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now)
634{
635 struct next_timer_event *event, *next;
636
637 TRACE("GLOBAL EVENTS now=%llu\n", now);
638 list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
639 TRACE("at %llu type=%d id=%d armed_on=%d\n", event->next_update, event->type, event->id, event->timer_armed_on);
640 }
641}
642
643static void gmp_res_change_state(
644 struct reservation_environment* env,
645 struct reservation *res,
646 reservation_state_t new_state)
647{
648 struct gmp_reservation_environment* gmp_env;
649
650 gmp_env = container_of(env, struct gmp_reservation_environment, env);
651
652 TRACE("GMP reservation R%d state %d->%d at %llu\n",
653 res->id, res->state, new_state, env->current_time);
654
655 list_del(&res->list);
656 /* check if we need to reschedule because we lost an active reservation */
657 if (res->state == RESERVATION_ACTIVE)
658 gmp_env->schedule_now++;
659 res->state = new_state;
660 gmp_queue_reservation(gmp_env, res);
661}
662
663void gmp_init(struct gmp_reservation_environment* gmp_env)
664{
665 memset(gmp_env, sizeof(*gmp_env), 0);
666
667 INIT_LIST_HEAD(&gmp_env->active_reservations);
668 INIT_LIST_HEAD(&gmp_env->depleted_reservations);
669 INIT_LIST_HEAD(&gmp_env->inactive_reservations);
670 INIT_LIST_HEAD(&gmp_env->next_events);
671
672 gmp_env->env.change_state = gmp_res_change_state;
673
674 gmp_env->schedule_now = 0;
675 gmp_env->will_schedule = false;
676
677 raw_spin_lock_init(&gmp_env->lock);
678}