 include/litmus/reservation.h  |  22
 litmus/mc2_common.c           |   3
 litmus/polling_reservations.c |   7
 litmus/reservation.c          | 356
 litmus/sched_mc2.c            | 729
 5 files changed, 985 insertions(+), 132 deletions(-)
diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h
index 0e656ad2667e..fc7e31918a60 100644
--- a/include/litmus/reservation.h
+++ b/include/litmus/reservation.h
@@ -129,8 +129,10 @@ struct reservation {
 
 	/* for global env. */
 	int scheduled_on;
-	/* for blocked by ghost */
+	/* for blocked by ghost. Do not charge budget when ACTIVE */
 	int blocked_by_ghost;
+	/* ghost_job. If it is clear, do not charge budget when ACTIVE_IDLE */
+	int is_ghost;
 };
 
 void reservation_init(struct reservation *res);
@@ -199,11 +201,19 @@ struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 	unsigned int id);
 
 /* A global multiprocessor reservation environment. */
+/*
+typedef enum {
+	EVENT_REPLENISH = 0,
+	EVENT_DRAIN,
+	EVENT_OTHERS,
+} event_type_t;
+*/
 
 struct next_timer_event {
 	lt_t next_update;
 	int timer_armed_on;
-	unsigned int id;
+	//unsigned int id;
+	//event_type_t type;
 	struct list_head list;
 };
 
@@ -222,6 +232,7 @@ struct gmp_reservation_environment {
 
 	/* timer event ordered by next_update */
 	struct list_head next_events;
+
 	/* (schedule_now == true) means call gmp_dispatch() now */
 	bool schedule_now;
 	/* set to true if a call to gmp_dispatch() is imminent */
@@ -231,9 +242,12 @@ struct gmp_reservation_environment {
 void gmp_init(struct gmp_reservation_environment* gmp_env);
 void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
 	struct reservation* new_res);
-void gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
+void gmp_scheduler_update_after(struct gmp_reservation_environment* gmp_env,
+	lt_t timeout);
+bool gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
 struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
-struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
+//struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
+struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
 struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
 	unsigned int id);
 
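Note on this header hunk: a reservation now carries three pieces of scheduling state, gmp_update_time() returns the environment's schedule_now flag, and timer events are looked up by expiry time rather than by id. A summary of the flag semantics as I read this hunk together with the sched_mc2.c hunks below (a sketch, not text from the patch):

	/* Per-reservation scheduling state after this change (sketch):
	 *   scheduled_on     - CPU the reservation's client runs on, or NO_CPU
	 *   blocked_by_ghost - a ghost job occupies this criticality slot; the
	 *                      reservation is not charged while ACTIVE
	 *   is_ghost         - the reservation currently runs a ghost job; when
	 *                      clear, no budget is charged in ACTIVE_IDLE
	 */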
diff --git a/litmus/mc2_common.c b/litmus/mc2_common.c
index d0a42c69d65c..a8ea5d9889f3 100644
--- a/litmus/mc2_common.c
+++ b/litmus/mc2_common.c
@@ -67,7 +67,8 @@ asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param
 		goto out_unlock;
 	}
 
-	target->rt_param.plugin_state = mp;
+	//target->rt_param.plugin_state = mp;
+	target->rt_param.mc2_data = mp;
 
 	retval = 0;
 out_unlock:
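With this change the user-supplied MC^2 parameters move to the dedicated mc2_data field, freeing plugin_state for the plugin's own per-task state (see the mc2_admit_task hunk below). The resulting layout, assuming the rt_param fields as used throughout this series:

	struct mc2_task *mp = tsk_rt(tsk)->mc2_data;              /* user params: crit, res_id */
	struct mc2_task_state *tinfo = tsk_rt(tsk)->plugin_state; /* scheduler-internal state */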
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
index ec5cadd19b4f..d2c54c46442c 100644
--- a/litmus/polling_reservations.c
+++ b/litmus/polling_reservations.c
@@ -32,8 +32,8 @@ static void periodic_polling_client_arrives(
 	}
 
 	TRACE("ENV_TIME_ZERO %llu\n", res->env->time_zero);
-	TRACE("pol-res: activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n",
-		tmp, instances, pres->period, res->next_replenishment,
+	TRACE("pol-res: R%d activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n",
+		res->id, tmp, instances, pres->period, res->next_replenishment,
 		res->env->current_time);
 
 	res->env->change_state(res->env, res,
@@ -147,7 +147,8 @@ static void common_drain_budget(
 	switch (res->state) {
 		case RESERVATION_DEPLETED:
 		case RESERVATION_INACTIVE:
-			BUG();
+			//BUG();
+			TRACE("!!!!!!!!!!!!!!!STATE ERROR R%d STATE(%d)\n", res->id, res->state);
 			break;
 
 		case RESERVATION_ACTIVE_IDLE:
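The second hunk demotes a hard BUG() to a trace so a bad drain no longer panics the machine. A gentler standard-kernel idiom for the same intent would be WARN_ONCE(), which logs one stack trace and keeps going; a sketch of that alternative (not part of this patch):

	case RESERVATION_DEPLETED:
	case RESERVATION_INACTIVE:
		/* hypothetical alternative to the all-caps trace above */
		WARN_ONCE(1, "reservation R%u drained in state %d\n",
		          res->id, res->state);
		break;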
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 16b3a4818e1e..e30892c72f4a 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -1,4 +1,5 @@
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/reservation.h>
@@ -48,7 +49,7 @@ static void sup_scheduler_update_at(
 	struct sup_reservation_environment* sup_env,
 	lt_t when)
 {
-	TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when);
+	//TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when);
 	if (sup_env->next_scheduler_update > when)
 		sup_env->next_scheduler_update = when;
 }
@@ -252,7 +253,7 @@ void sup_update_time(
 	/* If the time didn't advance, there is nothing to do.
 	 * This check makes it safe to call sup_advance_time() potentially
 	 * multiple times (e.g., via different code paths. */
-	TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time);
+	//TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time);
 	if (unlikely(now <= sup_env->env.current_time))
 		return;
 
@@ -264,11 +265,11 @@ void sup_update_time(
 	sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
 
 	/* deplete budgets by passage of time */
-	TRACE("CHARGE###\n");
+	//TRACE("CHARGE###\n");
 	sup_charge_budget(sup_env, delta);
 
 	/* check if any budgets where replenished */
-	TRACE("REPLENISH###\n");
+	//TRACE("REPLENISH###\n");
 	sup_replenish_budgets(sup_env);
 }
 
@@ -325,3 +326,350 @@ void sup_init(struct sup_reservation_environment* sup_env)
 
 	sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
 }
+
+struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
+	unsigned int id)
+{
+	struct reservation *res;
+
+	list_for_each_entry(res, &gmp_env->active_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+	list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+	list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
+		if (res->id == id)
+			return res;
+	}
+
+	return NULL;
+}
+
+/*
+struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
+	unsigned int id)
+{
+	struct next_timer_event *event;
+
+	list_for_each_entry(event, &gmp_env->next_events, list) {
+		if (event->id == id)
+			return event;
+	}
+
+	return NULL;
+}
+*/
+
+struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
+	lt_t when)
+{
+	struct next_timer_event *event;
+
+	list_for_each_entry(event, &gmp_env->next_events, list) {
+		if (event->next_update == when)
+			return event;
+	}
+
+	return NULL;
+}
+
+/*
+static void gmp_scheduler_update_at(
+	struct gmp_reservation_environment* gmp_env, unsigned int id,
+	event_type_t type, lt_t when)
+{
+	struct next_timer_event *nevent, *queued;
+	struct list_head *pos;
+	int found = 0;
+
+	nevent = gmp_find_event_by_id(gmp_env, id);
+
+	if (!nevent) {
+		nevent = kzalloc(sizeof(*nevent), GFP_KERNEL);
+		nevent->next_update = when;
+		nevent->id = id;
+		nevent->timer_armed_on = NO_CPU;
+		nevent->type = type;
+
+		list_for_each(pos, &gmp_env->next_events) {
+			queued = list_entry(pos, struct next_timer_event, list);
+			if (queued->next_update > nevent->next_update) {
+				list_add(&nevent->list, pos->prev);
+				found = 1;
+				TRACE("NEXT_EVENT ADDED after %llu\n", queued->next_update);
+				break;
+			}
+		}
+
+		if (!found) {
+			list_add_tail(&nevent->list, &gmp_env->next_events);
+			TRACE("NEXT_EVENT ADDED at [0]\n");
+		}
+	} else {
+		TRACE("EVENT FOUND at %llu T(%d), NEW EVENT %llu T(%d)\n", nevent->next_update, nevent->type, when, type);
+	}
+}
+*/
+#define TIMER_RESOLUTION 100000L
+
+static void gmp_scheduler_update_at(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t when)
+{
+	struct next_timer_event *nevent, *queued;
+	struct list_head *pos;
+	int found = 0;
+
+	//when = div64_u64(when, TIMER_RESOLUTION);
+	//when *= TIMER_RESOLUTION;
+
+	nevent = gmp_find_event_by_time(gmp_env, when);
+
+	if (!nevent) {
+		/* atomic allocation: callers hold gmp_env->lock */
+		nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
+		if (!nevent)
+			return;
+		nevent->next_update = when;
+		nevent->timer_armed_on = NO_CPU;
+
+		list_for_each(pos, &gmp_env->next_events) {
+			queued = list_entry(pos, struct next_timer_event, list);
+			if (queued->next_update > nevent->next_update) {
+				list_add(&nevent->list, pos->prev);
+				found = 1;
+				TRACE("NEXT_EVENT at %llu ADDED before %llu\n", nevent->next_update, queued->next_update);
+				break;
+			}
+		}
+
+		if (!found) {
+			list_add_tail(&nevent->list, &gmp_env->next_events);
+			TRACE("NEXT_EVENT ADDED at %llu ADDED at HEAD\n", nevent->next_update);
+		}
+	} else {
+		; //TRACE("EVENT FOUND at %llu, NEW EVENT %llu\n", nevent->next_update, when);
+	}
+}
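Two observations on this helper: events are deduplicated by exact timestamp via gmp_find_event_by_time(), and the TIMER_RESOLUTION rounding that would coalesce near-simultaneous events is left commented out. If the rounding were enabled, events within 100us of each other would collapse onto one timer; a sketch of the commented-out intent:

	when = div64_u64(when, TIMER_RESOLUTION) * TIMER_RESOLUTION;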
+
+void gmp_scheduler_update_after(
+	struct gmp_reservation_environment* gmp_env, lt_t timeout)
+{
+	gmp_scheduler_update_at(gmp_env, gmp_env->env.current_time + timeout);
+}
+
+static void gmp_queue_depleted(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	struct list_head *pos;
+	struct reservation *queued;
+	int found = 0;
+
+	list_for_each(pos, &gmp_env->depleted_reservations) {
+		queued = list_entry(pos, struct reservation, list);
+		if (queued->next_replenishment > res->next_replenishment) {
+			list_add(&res->list, pos->prev);
+			found = 1;
+			break;	/* insert exactly once, as in gmp_queue_active() */
+		}
+	}
+
+	if (!found)
+		list_add_tail(&res->list, &gmp_env->depleted_reservations);
+
+	gmp_scheduler_update_at(gmp_env, res->next_replenishment);
+}
+
+static void gmp_queue_active(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	struct list_head *pos;
+	struct reservation *queued;
+	int check_preempt = 1, found = 0;
+
+	list_for_each(pos, &gmp_env->active_reservations) {
+		queued = list_entry(pos, struct reservation, list);
+		if (queued->priority > res->priority) {
+			list_add(&res->list, pos->prev);
+			found = 1;
+			break;
+		} else if (queued->scheduled_on == NO_CPU)
+			check_preempt = 0;
+	}
+
+	if (!found)
+		list_add_tail(&res->list, &gmp_env->active_reservations);
+
+	/* check for possible preemption */
+	if (res->state == RESERVATION_ACTIVE && !check_preempt)
+		gmp_env->schedule_now = true;
+
+	gmp_scheduler_update_after(gmp_env, res->cur_budget);
+}
+
+static void gmp_queue_reservation(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	switch (res->state) {
+		case RESERVATION_INACTIVE:
+			list_add(&res->list, &gmp_env->inactive_reservations);
+			break;
+
+		case RESERVATION_DEPLETED:
+			gmp_queue_depleted(gmp_env, res);
+			break;
+
+		case RESERVATION_ACTIVE_IDLE:
+		case RESERVATION_ACTIVE:
+			gmp_queue_active(gmp_env, res);
+			break;
+	}
+}
+
+void gmp_add_new_reservation(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation* new_res)
+{
+	new_res->env = &gmp_env->env;
+	gmp_queue_reservation(gmp_env, new_res);
+}
+
+static void gmp_charge_budget(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t delta)
+{
+	struct list_head *pos, *next;
+	struct reservation *res;
+
+	list_for_each_safe(pos, next, &gmp_env->active_reservations) {
+		int drained = 0;
+		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
+		res = list_entry(pos, struct reservation, list);
+		if (res->state == RESERVATION_ACTIVE) {
+			TRACE("gmp_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
+			if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) {
+				TRACE("DRAIN !!\n");
+				drained = 1;
+				res->ops->drain_budget(res, delta);
+			}
+		} else {
+			//BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
+			if (res->state != RESERVATION_ACTIVE_IDLE)
+				TRACE("BUG!!!!!!!!!!!! gmp_charge_budget()\n");
+			TRACE("gmp_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
+			//if (res->is_ghost == 1) {
+				TRACE("DRAIN !!\n");
+				drained = 1;
+				res->ops->drain_budget(res, delta);
+			//}
+		}
+		if ((res->state == RESERVATION_ACTIVE ||
+			res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1))
+		{
+			/* make sure scheduler is invoked when this reservation expires
+			 * its remaining budget */
+			TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n",
+				res->id, res->cur_budget);
+			gmp_scheduler_update_after(gmp_env, res->cur_budget);
+		}
+		//if (encountered_active == 2)
+			/* stop at the first ACTIVE reservation */
+		//	break;
+	}
+	//TRACE("finished charging budgets\n");
+}
+
+static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
+{
+	struct list_head *pos, *next;
+	struct reservation *res;
+
+	list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
+		res = list_entry(pos, struct reservation, list);
+		if (res->next_replenishment <= gmp_env->env.current_time) {
+			res->ops->replenish(res);
+		} else {
+			/* list is ordered by increasing depletion times */
+			break;
+		}
+	}
+	//TRACE("finished replenishing budgets\n");
+
+	/* request a scheduler update at the next replenishment instant */
+	res = list_first_entry_or_null(&gmp_env->depleted_reservations,
+		struct reservation, list);
+	if (res)
+		gmp_scheduler_update_at(gmp_env, res->next_replenishment);
+}
+
+/* return schedule_now */
+bool gmp_update_time(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t now)
+{
+	lt_t delta;
+
+	if (!gmp_env) {
+		TRACE("BUG****************************************\n");
+		return false;
+	}
+	/* If the time didn't advance, there is nothing to do.
+	 * This check makes it safe to call gmp_update_time() potentially
+	 * multiple times (e.g., via different code paths). */
+	//TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
+	if (unlikely(now <= gmp_env->env.current_time))
+		return gmp_env->schedule_now;
+
+	delta = now - gmp_env->env.current_time;
+	gmp_env->env.current_time = now;
+
+	/* deplete budgets by passage of time */
+	//TRACE("CHARGE###\n");
+	gmp_charge_budget(gmp_env, delta);
+
+	/* check if any budgets were replenished */
+	//TRACE("REPLENISH###\n");
+	gmp_replenish_budgets(gmp_env);
+
+	return gmp_env->schedule_now;
+}
+
+static void gmp_res_change_state(
+	struct reservation_environment* env,
+	struct reservation *res,
+	reservation_state_t new_state)
+{
+	struct gmp_reservation_environment* gmp_env;
+
+	gmp_env = container_of(env, struct gmp_reservation_environment, env);
+
+	TRACE("GMP reservation R%d state %d->%d at %llu\n",
+		res->id, res->state, new_state, env->current_time);
+
+	list_del(&res->list);
+	/* check if we need to reschedule because we lost an active reservation */
+	if (res->state == RESERVATION_ACTIVE && !gmp_env->will_schedule)
+		gmp_env->schedule_now = true;
+	res->state = new_state;
+	gmp_queue_reservation(gmp_env, res);
+}
+
+void gmp_init(struct gmp_reservation_environment* gmp_env)
+{
+	/* zero the whole environment before wiring it up */
+	memset(gmp_env, 0, sizeof(*gmp_env));
+
+	INIT_LIST_HEAD(&gmp_env->active_reservations);
+	INIT_LIST_HEAD(&gmp_env->depleted_reservations);
+	INIT_LIST_HEAD(&gmp_env->inactive_reservations);
+	INIT_LIST_HEAD(&gmp_env->next_events);
+
+	gmp_env->env.change_state = gmp_res_change_state;
+
+	gmp_env->schedule_now = false;
+	gmp_env->will_schedule = false;
+
+	raw_spin_lock_init(&gmp_env->lock);
+}
\ No newline at end of file
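Unlike its uniprocessor counterpart sup_update_time(), gmp_update_time() reports the environment's schedule_now flag back to the caller. The gmp_* helpers take no locks themselves, so callers serialize on _global_env.lock. A minimal caller sketch following the convention in the sched_mc2.c hunks below (get_lowest_prio_cpu() is defined there):

	bool schedule_now;

	raw_spin_lock(&_global_env.lock);
	schedule_now = gmp_update_time(&_global_env, litmus_clock());
	raw_spin_unlock(&_global_env.lock);

	if (schedule_now) {
		int cpu = get_lowest_prio_cpu();
		if (cpu != NO_CPU)
			litmus_reschedule(cpu);
	}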
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 0c260190f287..6dee1ec2c99c 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -15,6 +15,23 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+struct gmp_reservation_environment _global_env;
+
+struct cpu_entry {
+	struct task_struct *scheduled;
+	lt_t deadline;
+	int cpu;
+	enum crit_level lv;
+	bool will_schedule;
+};
+
+struct cpu_priority {
+	raw_spinlock_t lock;
+	struct cpu_entry cpu_entries[NR_CPUS];
+};
+
+struct cpu_priority _lowest_prio_cpu;
+
 struct mc2_task_state {
 	struct task_client res_info;
 	int cpu;
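These globals back the new level-C machinery: _global_env is the shared reservation environment, and _lowest_prio_cpu caches what each CPU is running so a preemption target can be picked without touching run queues. How an entry is meant to be read, a sketch based on update_cpu_prio() and get_lowest_prio_cpu() later in this file:

	struct cpu_entry *ce;

	raw_spin_lock(&_lowest_prio_cpu.lock);
	ce = &_lowest_prio_cpu.cpu_entries[cpu];
	/* ce->scheduled:     task running there (NULL = idle)
	 * ce->deadline:      its deadline; ULLONG_MAX when idle, 0 for level A/B
	 * ce->lv:            criticality level; only CRIT_LEVEL_C is preemptable
	 * ce->will_schedule: a reschedule has already been requested */
	raw_spin_unlock(&_lowest_prio_cpu.lock);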
@@ -51,11 +68,39 @@ static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
 }
 static enum crit_level get_task_crit_level(struct task_struct *tsk)
 {
-	struct mc2_task_state *tinfo = get_mc2_state(tsk);
-	if (!tinfo)
+	//struct mc2_task_state *tinfo = get_mc2_state(tsk);
+	struct mc2_task *mp;
+
+	if (!tsk || !is_realtime(tsk))
+		return NUM_CRIT_LEVELS;
+
+	mp = tsk_rt(tsk)->mc2_data;
+
+	if (!mp)
 		return NUM_CRIT_LEVELS;
 	else
-		return tinfo->mc2_param.crit;
+		return mp->crit;
+}
+
+static struct reservation* res_find_by_id(struct mc2_cpu_state *state, unsigned int id)
+{
+	struct reservation *res;
+
+	res = sup_find_by_id(&state->sup_env, id);
+	if (!res)
+		res = gmp_find_by_id(&_global_env, id);
+
+	return res;
+}
+
+static void mc2_update_time(enum crit_level lv, struct mc2_cpu_state *state, lt_t time)
+{
+	if (lv < CRIT_LEVEL_C)
+		sup_update_time(&state->sup_env, time);
+	else if (lv == CRIT_LEVEL_C)
+		gmp_update_time(&_global_env, time);
+	else
+		TRACE("update_time(): Criticality level error!!!!\n");
 }
 
 static void task_departs(struct task_struct *tsk, int job_complete)
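mc2_update_time() becomes the single time-advancing entry point: levels A and B charge the per-CPU sup_env, level C the shared _global_env. Call sites later in this patch wrap it like this (the global lock is taken unconditionally there, even for A/B tasks):

	raw_spin_lock(&_global_env.lock);
	mc2_update_time(get_task_crit_level(tsk), state, litmus_clock());
	raw_spin_unlock(&_global_env.lock);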
@@ -78,6 +123,7 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 
 	ce = &state->crit_entries[lv];
 	ce->running = tsk;
+	res->is_ghost = 1;
 	TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
 
 	//BUG_ON(hrtimer_active(&ce->ghost_timer));
@@ -107,11 +153,44 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 	}
 }
 
+/* return: NO_CPU - all CPUs are running tasks with higher priority than Level C */
+static int get_lowest_prio_cpu(void)
+{
+	struct cpu_entry *ce;
+	int cpu, ret = NO_CPU;
+	lt_t latest_deadline = 0;
+
+	raw_spin_lock(&_lowest_prio_cpu.lock);
+	for_each_online_cpu(cpu) {
+		ce = &_lowest_prio_cpu.cpu_entries[cpu];
+		if (!ce->will_schedule) {
+			if (!ce->scheduled) {
+				raw_spin_unlock(&_lowest_prio_cpu.lock);
+				return ce->cpu;
+			} else if (ce->lv == CRIT_LEVEL_C && ce->deadline > latest_deadline) {
+				latest_deadline = ce->deadline;
+				ret = ce->cpu;
+			}
+		}
+	}
+
+	raw_spin_unlock(&_lowest_prio_cpu.lock);
+
+	return ret;
+}
+
 /* NOTE: drops state->lock */
 static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 {
 	int local;
 	lt_t update, now;
+	enum crit_level lv = get_task_crit_level(state->scheduled);
+	struct next_timer_event *event, *next;
+	int found_event = 0;
+
+	//TRACE_TASK(state->scheduled, "update_timer!\n");
+	if (lv != NUM_CRIT_LEVELS)
+		TRACE_TASK(state->scheduled, "UPDATE_TIMER LV = %d\n", lv);
 
 	update = state->sup_env.next_scheduler_update;
 	now = state->sup_env.env.current_time;
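get_lowest_prio_cpu() prefers an idle CPU; failing that it returns the CPU whose level-C task has the latest deadline, skipping CPUs already flagged will_schedule (a reschedule is on the way there anyway). Callers in this patch use it in this fixed sequence, taken from the hunks below:

	int cpu = get_lowest_prio_cpu();
	if (cpu != NO_CPU) {
		raw_spin_lock(&_lowest_prio_cpu.lock);
		_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
		raw_spin_unlock(&_lowest_prio_cpu.lock);
		litmus_reschedule(cpu);
	}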
@@ -163,6 +242,37 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			litmus_reschedule(state->cpu);
 		}
 	}
+
+	raw_spin_lock(&_global_env.lock);
+	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
+		if (event->timer_armed_on == NO_CPU) {
+			found_event = 1;
+			if (event->next_update < litmus_clock()) {
+				int cpu = get_lowest_prio_cpu();
+				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
+				list_del(&event->list);
+				kfree(event);
+				if (cpu != NO_CPU) {
+					raw_spin_lock(&_lowest_prio_cpu.lock);
+					_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
+					raw_spin_unlock(&_lowest_prio_cpu.lock);
+					litmus_reschedule(cpu);
+				}
+			} else if (!hrtimer_active(&state->g_timer)) {
+				int ret;
+				TRACE("setting global scheduler timer for %llu\n", event->next_update);
+				ret = __hrtimer_start_range_ns(&state->g_timer,
+						ns_to_ktime(event->next_update),
+						0 /* timer coalescing slack */,
+						HRTIMER_MODE_ABS_PINNED,
+						0 /* wakeup */);
+				if (!ret) {
+					event->timer_armed_on = state->cpu;
+				}
+			}
+		}
+	}
+	raw_spin_unlock(&_global_env.lock);
 }
 
 static void mc2_update_ghost_state(struct mc2_cpu_state *state)
@@ -176,16 +286,20 @@ static void mc2_update_ghost_state(struct mc2_cpu_state *state)
 		ce = &state->crit_entries[lv];
 		if (ce->running != NULL) {
 			tinfo = get_mc2_state(ce->running);
+			/*
 			if (lv != CRIT_LEVEL_C)
 				res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
 			else
 				continue;
+			*/
+			res = res_find_by_id(state, tinfo->mc2_param.res_id);
 			TRACE("LV %d running id %d budget %llu\n", lv, tinfo->mc2_param.res_id, res->cur_budget);
 			if (!res->cur_budget) {
 				struct sup_reservation_environment* sup_env = &state->sup_env;
 
 				TRACE("GHOST FINISH id %d at %llu\n", tinfo->mc2_param.res_id, litmus_clock());
 				ce->running = NULL;
+				res->is_ghost = 0;
 				res = list_first_entry_or_null(&sup_env->active_reservations, struct reservation, list);
 				if (res)
 					litmus_reschedule_local();
@@ -215,6 +329,95 @@ static enum hrtimer_restart on_ghost_timer(struct hrtimer *timer)
 }
 */
 
+static void update_cpu_prio(struct mc2_cpu_state *state)
+{
+	struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu];
+	enum crit_level lv = get_task_crit_level(state->scheduled);
+
+	if (!state->scheduled) {
+		// cpu is idle.
+		ce->scheduled = NULL;
+		ce->deadline = ULLONG_MAX;
+		ce->lv = NUM_CRIT_LEVELS;
+	} else if (lv == CRIT_LEVEL_C) {
+		ce->scheduled = state->scheduled;
+		ce->deadline = get_deadline(state->scheduled);
+		ce->lv = lv;
+	} else if (lv < CRIT_LEVEL_C) {
+		ce->scheduled = state->scheduled;
+		ce->deadline = 0;
+		ce->lv = lv;
+	}
+};
+
+static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer)
+{
+	unsigned long flags;
+	enum hrtimer_restart restart = HRTIMER_NORESTART;
+	struct mc2_cpu_state *state;
+	struct next_timer_event *event, *next;
+	bool schedule_now;
+	lt_t update, now;
+	int found_event = 0;
+
+	state = container_of(timer, struct mc2_cpu_state, g_timer);
+
+	/* The scheduling timer should only fire on the local CPU, because
+	 * otherwise deadlocks via timer_cancel() are possible.
+	 * Note: this does not interfere with dedicated interrupt handling, as
+	 * even under dedicated interrupt handling scheduling timers for
+	 * budget enforcement must occur locally on each CPU.
+	 */
+	//BUG_ON(state->cpu != raw_smp_processor_id());
+	if (state->cpu != raw_smp_processor_id())
+		TRACE("BUG!!!!!!!!!!!!! TIMER FIRED ON THE OTHER CPU\n");
+
+	raw_spin_lock_irqsave(&_global_env.lock, flags);
+
+	update = litmus_clock();
+	TRACE("GLOBAL TIMER FIRED at %llu\n", update);
+
+	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
+		if (event->next_update < update) {
+			found_event = 1;
+			list_del(&event->list);
+			TRACE("EVENT at %llu IS DELETED\n", event->next_update);
+			kfree(event);
+		}
+	}
+
+	if (!found_event) {
+		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+		return restart;
+	}
+
+	schedule_now = gmp_update_time(&_global_env, update);
+
+	raw_spin_lock(&state->lock);
+	mc2_update_ghost_state(state);
+	raw_spin_unlock(&state->lock);
+
+	now = _global_env.env.current_time;
+
+	TRACE_CUR("on_global_scheduling_timer at %llu, upd:%llu (for cpu=%d) SCHEDULE_NOW = %d\n",
+		now, update, state->cpu, schedule_now);
+
+	if (schedule_now) {
+		int cpu = get_lowest_prio_cpu();
+		if (cpu != NO_CPU) {
+			raw_spin_lock(&_lowest_prio_cpu.lock);
+			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
+			raw_spin_unlock(&_lowest_prio_cpu.lock);
+			TRACE("LOWEST CPU = P%d\n", cpu);
+			litmus_reschedule(cpu);
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+
+	return restart;
+}
+
 static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 {
 	unsigned long flags;
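A locking observation on the paths above, inferred from this patch and worth verifying against the full tree: on_global_scheduling_timer() nests state->lock inside _global_env.lock, while mc2_schedule() below nests _global_env.lock inside state->lock. If both paths can run concurrently on different CPUs, that is opposite nesting:

	/* Apparent lock orders (sketch; derived from this patch, not stated in it):
	 *   on_global_scheduling_timer():  _global_env.lock -> state->lock
	 *   mc2_schedule():                state->lock      -> _global_env.lock
	 * Opposite nesting on two paths is the classic ABBA deadlock pattern; a
	 * single agreed order (e.g., _global_env.lock first) would rule it out.
	 */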
@@ -276,6 +479,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 			if (likely(!ce->running)) {
 				sup_scheduler_update_after(sup_env, res->cur_budget);
 				res->blocked_by_ghost = 0;
+				res->is_ghost = 0;
 				return tsk;
 			} else {
 				res->blocked_by_ghost = 1;
@@ -284,7 +488,34 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 			}
 		}
 	}
-
+	// no level A or B tasks
+
+	list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
+		if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
+			tsk = res->ops->dispatch_client(res, &time_slice);
+			if (likely(tsk)) {
+				lv = get_task_crit_level(tsk);
+				if (lv == NUM_CRIT_LEVELS) {
+					gmp_scheduler_update_after(&_global_env, res->cur_budget);
+					//raw_spin_unlock(&_global_env.lock);
+					return tsk;
+				} else {
+					ce = &state->crit_entries[lv];
+					if (likely(!ce->running)) {
+						gmp_scheduler_update_after(&_global_env, res->cur_budget);
+						res->blocked_by_ghost = 0;
+						res->is_ghost = 0;
+						res->scheduled_on = state->cpu;
+						//raw_spin_unlock(&_global_env.lock);
+						return tsk;
+					} else {
+						res->blocked_by_ghost = 1;
+					}
+				}
+			}
+		}
+	}
+
 	return NULL;
 }
 
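The resulting dispatch policy is worth spelling out. A condensed paraphrase (mine, not patch text):

	/* 1. Scan the per-CPU (level A/B) active reservations first.
	 * 2. Only if none yields a task, scan _global_env's active (level C)
	 *    reservations, skipping any already running elsewhere
	 *    (res->scheduled_on != NO_CPU).
	 * In both passes, a task whose criticality slot is still held by a
	 * ghost job is not dispatched; its reservation is marked
	 * blocked_by_ghost and the scan continues. A successful level-C pick
	 * records res->scheduled_on = state->cpu.
	 */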
@@ -292,17 +523,30 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 {
 	/* next == NULL means "schedule background work". */
 	struct mc2_cpu_state *state = local_cpu_state();
 
+	raw_spin_lock(&_lowest_prio_cpu.lock);
+	if (_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule == true)
+		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+	raw_spin_unlock(&_lowest_prio_cpu.lock);
+
 	raw_spin_lock(&state->lock);
 
-	BUG_ON(state->scheduled && state->scheduled != prev);
-	BUG_ON(state->scheduled && !is_realtime(prev));
+	//BUG_ON(state->scheduled && state->scheduled != prev);
+	//BUG_ON(state->scheduled && !is_realtime(prev));
+	if (state->scheduled && state->scheduled != prev)
+		TRACE("BUG1!!!!!!!!\n");
+	if (state->scheduled && !is_realtime(prev))
+		TRACE("BUG2!!!!!!!!\n");
 
 	/* update time */
 	state->sup_env.will_schedule = true;
-	TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time ####\n");
+	//TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time ####\n");
 	sup_update_time(&state->sup_env, litmus_clock());
-	TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time !!!!\n");
+
+	raw_spin_lock(&_global_env.lock);
+	gmp_update_time(&_global_env, litmus_clock());
+
+	//TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time !!!!\n");
 	mc2_update_ghost_state(state);
 
 	/* remove task from reservation if it blocks */
@@ -311,16 +555,29 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 
 	/* figure out what to schedule next */
 	state->scheduled = mc2_dispatch(&state->sup_env, state);
-
+	if (state->scheduled && is_realtime(state->scheduled))
+		TRACE_TASK(state->scheduled, "mc2_dispatch picked me!\n");
+
+	raw_spin_lock(&_lowest_prio_cpu.lock);
+	update_cpu_prio(state);
+	raw_spin_unlock(&_lowest_prio_cpu.lock);
+
 	/* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
 	sched_state_task_picked();
 
 	/* program scheduler timer */
 	state->sup_env.will_schedule = false;
+
+	raw_spin_unlock(&_global_env.lock);
+
 	/* NOTE: drops state->lock */
 	mc2_update_timer_and_unlock(state);
 
 	if (prev != state->scheduled && is_realtime(prev)) {
+		struct mc2_task_state* tinfo = get_mc2_state(prev);
+		struct reservation* res = tinfo->res_info.client.reservation;
+		TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
+		res->scheduled_on = NO_CPU;
 		TRACE_TASK(prev, "descheduled.\n");
 	}
 	if (state->scheduled) {
@@ -354,10 +611,15 @@ static void mc2_task_resume(struct task_struct *tsk)
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+	struct mc2_cpu_state *state;
 
 	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
+	if (tinfo->cpu != -1)
+		state = cpu_state_for(tinfo->cpu);
+	else
+		state = local_cpu_state();
+
 	raw_spin_lock_irqsave(&state->lock, flags);
 	/* Requeue only if self-suspension was already processed. */
 	if (tinfo->has_departed)
@@ -365,7 +627,16 @@ static void mc2_task_resume(struct task_struct *tsk)
 		/* Assumption: litmus_clock() is synchronized across cores,
 		 * since we might not actually be executing on tinfo->cpu
 		 * at the moment. */
-		sup_update_time(&state->sup_env, litmus_clock());
+		if (tinfo->cpu != -1) {
+			sup_update_time(&state->sup_env, litmus_clock());
+		} else {
+			raw_spin_lock(&_global_env.lock);
+			TRACE("RESUME UPDATE ####\n");
+			gmp_update_time(&_global_env, litmus_clock());
+			TRACE("RESUME UPDATE $$$$\n");
+			raw_spin_unlock(&_global_env.lock);
+		}
+
 		mc2_update_ghost_state(state);
 		task_arrives(state, tsk);
 		/* NOTE: drops state->lock */
@@ -385,37 +656,55 @@ static long mc2_complete_job(void)
 {
 	ktime_t next_release;
 	long err;
-	struct mc2_cpu_state *state = local_cpu_state();
-	struct reservation_environment *env = &(state->sup_env.env);
-	struct mc2_task_state *tinfo = get_mc2_state(current);
-	struct reservation *res;
-
-	res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
-	if (!res)
-		; // find in global env
-
-	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu) (cur->budget: %llu)\n", litmus_clock(),
-		get_deadline(current), res->cur_budget);
+
+	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
+		get_deadline(current));
 
 	tsk_rt(current)->completed = 1;
 
 	if (tsk_rt(current)->sporadic_release) {
-		env->time_zero = tsk_rt(current)->sporadic_release_time;
+		struct mc2_cpu_state *state;
+		struct reservation_environment *env;
+		struct mc2_task_state *tinfo;
+		struct reservation *res;
+		unsigned long flags;
+
+		local_irq_save(flags);
+
+		state = local_cpu_state();
+		env = &(state->sup_env.env);
+		tinfo = get_mc2_state(current);
+
+		res = res_find_by_id(state, tsk_rt(current)->mc2_data->res_id);
+
+		if (get_task_crit_level(current) < CRIT_LEVEL_C) {
+			raw_spin_lock(&state->lock);
+			env->time_zero = tsk_rt(current)->sporadic_release_time;
+		} else {
+			raw_spin_lock(&_global_env.lock);
+			_global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
+		}
+
 		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
-		res->cur_budget = 0;
-		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-		if (tinfo->mc2_param.crit == CRIT_LEVEL_A) {
+		if (get_task_crit_level(current) == CRIT_LEVEL_A) {
 			struct table_driven_reservation *tdres;
-
-			//sup_update_time(&state->sup_env, litmus_clock());
-			//res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
 			tdres = container_of(res, struct table_driven_reservation, res);
 			tdres->next_interval = 0;
 			tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
 			res->next_replenishment += tdres->intervals[0].start;
 		}
-		TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+		res->cur_budget = 0;
+		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
+
+		//TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+		if (get_task_crit_level(current) < CRIT_LEVEL_C) {
+			raw_spin_unlock(&state->lock);
+		} else {
+			raw_spin_unlock(&_global_env.lock);
+		}
+
+		local_irq_restore(flags);
 	}
 
 	prepare_for_next_period(current);
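The sporadic-release path now picks its lock by criticality and keeps interrupts off across the whole update. Condensed from the hunk above:

	unsigned long flags;

	local_irq_save(flags);
	if (get_task_crit_level(current) < CRIT_LEVEL_C)
		raw_spin_lock(&state->lock);        /* levels A/B: per-CPU env */
	else
		raw_spin_lock(&_global_env.lock);   /* level C: shared env */

	/* ... rewrite time_zero and next_replenishment, zero the budget,
	 * and push the reservation to RESERVATION_DEPLETED ... */

	if (get_task_crit_level(current) < CRIT_LEVEL_C)
		raw_spin_unlock(&state->lock);
	else
		raw_spin_unlock(&_global_env.lock);
	local_irq_restore(flags);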
@@ -443,8 +732,9 @@ static long mc2_admit_task(struct task_struct *tsk)
 	struct reservation *res;
 	struct mc2_cpu_state *state;
 	struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
-	struct mc2_task *mp = tsk_rt(tsk)->plugin_state;
-
+	struct mc2_task *mp = tsk_rt(tsk)->mc2_data;
+	enum crit_level lv;
+
 	if (!tinfo)
 		return -ENOMEM;
 
@@ -453,33 +743,61 @@ static long mc2_admit_task(struct task_struct *tsk)
 		return err;
 	}
 
+	lv = mp->crit;
 	preempt_disable();
 
-	state = cpu_state_for(task_cpu(tsk));
-	raw_spin_lock_irqsave(&state->lock, flags);
-
-	res = sup_find_by_id(&state->sup_env, mp->res_id);
-
-	/* found the appropriate reservation (or vCPU) */
-	if (res) {
-		TRACE_TASK(tsk, "FOUND RES ID\n");
-		tinfo->mc2_param.crit = mp->crit;
-		tinfo->mc2_param.res_id = mp->res_id;
-
-		kfree(tsk_rt(tsk)->plugin_state);
-		tsk_rt(tsk)->plugin_state = NULL;
-
-		err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
-		tinfo->cpu = task_cpu(tsk);
-		tinfo->has_departed = true;
-		tsk_rt(tsk)->plugin_state = tinfo;
-
-		/* disable LITMUS^RT's per-thread budget enforcement */
-		tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
-	}
-
-	raw_spin_unlock_irqrestore(&state->lock, flags);
+	if (lv < CRIT_LEVEL_C) {
+		state = cpu_state_for(task_cpu(tsk));
+		raw_spin_lock_irqsave(&state->lock, flags);
+
+		res = sup_find_by_id(&state->sup_env, mp->res_id);
+
+		/* found the appropriate reservation (or vCPU) */
+		if (res) {
+			TRACE_TASK(tsk, "SUP FOUND RES ID\n");
+			tinfo->mc2_param.crit = mp->crit;
+			tinfo->mc2_param.res_id = mp->res_id;
+
+			//kfree(tsk_rt(tsk)->plugin_state);
+			//tsk_rt(tsk)->plugin_state = NULL;
+
+			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+			tinfo->cpu = task_cpu(tsk);
+			tinfo->has_departed = true;
+			tsk_rt(tsk)->plugin_state = tinfo;
+
+			/* disable LITMUS^RT's per-thread budget enforcement */
+			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+		}
+
+		raw_spin_unlock_irqrestore(&state->lock, flags);
+	} else if (lv == CRIT_LEVEL_C) {
+		raw_spin_lock_irqsave(&_global_env.lock, flags);
+
+		res = gmp_find_by_id(&_global_env, mp->res_id);
+
+		/* found the appropriate reservation (or vCPU) */
+		if (res) {
+			TRACE_TASK(tsk, "GMP FOUND RES ID\n");
+			tinfo->mc2_param.crit = mp->crit;
+			tinfo->mc2_param.res_id = mp->res_id;
+
+			//kfree(tsk_rt(tsk)->plugin_state);
+			//tsk_rt(tsk)->plugin_state = NULL;
+
+			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+			tinfo->cpu = -1;
+			tinfo->has_departed = true;
+			tsk_rt(tsk)->plugin_state = tinfo;
+
+			/* disable LITMUS^RT's per-thread budget enforcement */
+			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+		}
+
+		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
+	}
 
 	preempt_enable();
 
 	if (err)
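Admission now branches on criticality, and tinfo->cpu doubles as the "where does this task live" marker. A summary sketch (my reading of this hunk and the ones that follow):

	/* tinfo->cpu after admission:
	 *   >= 0 : level A/B, partitioned to that CPU's sup_env
	 *   -1   : level C, served from the shared _global_env
	 * Later hunks (resume, task_new, exit, create_polling_reservation) test
	 * against -1 to pick the matching environment and lock. */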
@@ -493,12 +811,18 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+	struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
 	struct reservation *res;
-
+	enum crit_level lv = get_task_crit_level(tsk);
+
 	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
 		litmus_clock(), on_runqueue, is_running);
 
+	if (tinfo->cpu == -1)
+		state = local_cpu_state();
+	else
+		state = cpu_state_for(tinfo->cpu);
+
 	/* acquire the lock protecting the state and disable interrupts */
 	raw_spin_lock_irqsave(&state->lock, flags);
 
@@ -511,7 +835,9 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	if (on_runqueue || is_running) {
 		/* Assumption: litmus_clock() is synchronized across cores
 		 * [see comment in pres_task_resume()] */
-		sup_update_time(&state->sup_env, litmus_clock());
+		raw_spin_lock(&_global_env.lock);
+		mc2_update_time(lv, state, litmus_clock());
+		raw_spin_unlock(&_global_env.lock);
 		mc2_update_ghost_state(state);
 		task_arrives(state, tsk);
 		/* NOTE: drops state->lock */
@@ -521,12 +847,14 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	} else
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 
-	res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
-	release_at(tsk, res->next_replenishment);
-	if (res)
-		TRACE_TASK(tsk, "next_replenishment = %llu\n", res->next_replenishment);
+	res = res_find_by_id(state, tinfo->mc2_param.res_id);
+
+	if (res) {
+		TRACE_TASK(tsk, "mc2_task_new() next_replenishment = %llu\n", res->next_replenishment);
+		release_at(tsk, res->next_replenishment);
+	}
 	else
-		TRACE_TASK(tsk, "next_replenishment = NULL\n");
+		TRACE_TASK(tsk, "mc2_task_new() next_replenishment = NULL\n");
 }
 
 static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
@@ -537,43 +865,71 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
537 | struct sup_reservation_environment *sup_env; | 865 | struct sup_reservation_environment *sup_env; |
538 | int found = 0; | 866 | int found = 0; |
539 | enum crit_level lv = get_task_crit_level(current); | 867 | enum crit_level lv = get_task_crit_level(current); |
540 | |||
541 | state = cpu_state_for(cpu); | ||
542 | raw_spin_lock(&state->lock); | ||
543 | 868 | ||
544 | // res = sup_find_by_id(&state->sup_env, reservation_id); | 869 | if (cpu == -1) { |
545 | sup_env = &state->sup_env; | 870 | raw_spin_lock(&_global_env.lock); |
546 | //if (!res) { | 871 | |
547 | list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { | 872 | list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) { |
548 | if (res->id == reservation_id) { | ||
549 | if (lv == CRIT_LEVEL_A) { | ||
550 | struct table_driven_reservation *tdres; | ||
551 | tdres = container_of(res, struct table_driven_reservation, res); | ||
552 | kfree(tdres->intervals); | ||
553 | } | ||
554 | list_del(&res->list); | ||
555 | kfree(res); | ||
556 | found = 1; | ||
557 | ret = 0; | ||
558 | } | ||
559 | } | ||
560 | if (!found) { | ||
561 | list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) { | ||
562 | if (res->id == reservation_id) { | 873 | if (res->id == reservation_id) { |
563 | if (lv == CRIT_LEVEL_A) { | 874 | TRACE("DESTROY RES FOUND!!!\n"); |
564 | struct table_driven_reservation *tdres; | ||
565 | tdres = container_of(res, struct table_driven_reservation, res); | ||
566 | kfree(tdres->intervals); | ||
567 | } | ||
568 | list_del(&res->list); | 875 | list_del(&res->list); |
569 | kfree(res); | 876 | kfree(res); |
570 | found = 1; | 877 | found = 1; |
571 | ret = 0; | 878 | ret = 0; |
572 | } | 879 | } |
573 | } | 880 | } |
574 | } | 881 | if (!found) { |
575 | if (!found) { | 882 | list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) { |
576 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | 883 | if (res->id == reservation_id) { |
884 | TRACE("DESTROY RES FOUND!!!\n"); | ||
885 | list_del(&res->list); | ||
886 | kfree(res); | ||
887 | found = 1; | ||
888 | ret = 0; | ||
889 | } | ||
890 | } | ||
891 | } | ||
892 | if (!found) { | ||
893 | list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { | ||
894 | if (res->id == reservation_id) { | ||
895 | TRACE("DESTROY RES FOUND!!!\n"); | ||
896 | list_del(&res->list); | ||
897 | kfree(res); | ||
898 | found = 1; | ||
899 | ret = 0; | ||
900 | } | ||
901 | } | ||
902 | } | ||
903 | |||
904 | /* | ||
905 | list_for_each_entry(res, &_global_env.depleted_reservations, list) { | ||
906 | TRACE("DEPLETED LIST R%d\n", res->id); | ||
907 | } | ||
908 | list_for_each_entry(res, &_global_env.inactive_reservations, list) { | ||
909 | TRACE("INACTIVE LIST R%d\n", res->id); | ||
910 | } | ||
911 | list_for_each_entry(res, &_global_env.active_reservations, list) { | ||
912 | TRACE("ACTIVE LIST R%d\n", res->id); | ||
913 | } | ||
914 | */ | ||
915 | if (list_empty(&_global_env.active_reservations)) | ||
916 | INIT_LIST_HEAD(&_global_env.active_reservations); | ||
917 | if (list_empty(&_global_env.depleted_reservations)) | ||
918 | INIT_LIST_HEAD(&_global_env.depleted_reservations); | ||
919 | if (list_empty(&_global_env.inactive_reservations)) | ||
920 | INIT_LIST_HEAD(&_global_env.inactive_reservations); | ||
921 | if (list_empty(&_global_env.next_events)) | ||
922 | INIT_LIST_HEAD(&_global_env.next_events); | ||
923 | |||
924 | raw_spin_unlock(&_global_env.lock); | ||
925 | } else { | ||
926 | state = cpu_state_for(cpu); | ||
927 | raw_spin_lock(&state->lock); | ||
928 | |||
929 | // res = sup_find_by_id(&state->sup_env, reservation_id); | ||
930 | sup_env = &state->sup_env; | ||
931 | //if (!res) { | ||
932 | list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { | ||
577 | if (res->id == reservation_id) { | 933 | if (res->id == reservation_id) { |
578 | if (lv == CRIT_LEVEL_A) { | 934 | if (lv == CRIT_LEVEL_A) { |
579 | struct table_driven_reservation *tdres; | 935 | struct table_driven_reservation *tdres; |
@@ -586,10 +942,40 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
586 | ret = 0; | 942 | ret = 0; |
587 | } | 943 | } |
588 | } | 944 | } |
589 | } | 945 | if (!found) { |
590 | //} | 946 | list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) { |
947 | if (res->id == reservation_id) { | ||
948 | if (lv == CRIT_LEVEL_A) { | ||
949 | struct table_driven_reservation *tdres; | ||
950 | tdres = container_of(res, struct table_driven_reservation, res); | ||
951 | kfree(tdres->intervals); | ||
952 | } | ||
953 | list_del(&res->list); | ||
954 | kfree(res); | ||
955 | found = 1; | ||
956 | ret = 0; | ||
957 | } | ||
958 | } | ||
959 | } | ||
960 | if (!found) { | ||
961 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | ||
962 | if (res->id == reservation_id) { | ||
963 | if (lv == CRIT_LEVEL_A) { | ||
964 | struct table_driven_reservation *tdres; | ||
965 | tdres = container_of(res, struct table_driven_reservation, res); | ||
966 | kfree(tdres->intervals); | ||
967 | } | ||
968 | list_del(&res->list); | ||
969 | kfree(res); | ||
970 | found = 1; | ||
971 | ret = 0; | ||
972 | } | ||
973 | } | ||
974 | } | ||
975 | //} | ||
591 | 976 | ||
592 | raw_spin_unlock(&state->lock); | 977 | raw_spin_unlock(&state->lock); |
978 | } | ||
593 | 979 | ||
594 | TRACE("RESERVATION_DESTROY ret = %d\n", ret); | 980 | TRACE("RESERVATION_DESTROY ret = %d\n", ret); |
595 | return ret; | 981 | return ret; |
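The per-CPU branch repeats the same three-list search against state->sup_env, with one extra step: level-A reservations are table-driven and carry a separately allocated interval table that must be freed before the reservation itself. A sketch of that cleanup step, using the same container_of() pattern as the hunk above (the helper name is illustrative; the patch inlines this logic in each loop):

    /* Free one partitioned reservation; level-A (table-driven)
     * reservations own an intervals array allocated at creation time. */
    static void free_partitioned_res(struct reservation *res, enum crit_level lv)
    {
        if (lv == CRIT_LEVEL_A) {
            struct table_driven_reservation *tdres =
                container_of(res, struct table_driven_reservation, res);
            kfree(tdres->intervals);
        }
        list_del(&res->list);
        kfree(res);
    }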
@@ -599,10 +985,15 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
599 | { | 985 | { |
600 | unsigned long flags; | 986 | unsigned long flags; |
601 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | 987 | struct mc2_task_state* tinfo = get_mc2_state(tsk); |
602 | struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu); | 988 | struct mc2_cpu_state *state; |
603 | enum crit_level lv = tinfo->mc2_param.crit; | 989 | enum crit_level lv = tinfo->mc2_param.crit; |
604 | struct crit_entry* ce; | 990 | struct crit_entry* ce; |
605 | 991 | ||
992 | if (tinfo->cpu != -1) | ||
993 | state = cpu_state_for(tinfo->cpu); | ||
994 | else | ||
995 | state = local_cpu_state(); | ||
996 | |||
606 | raw_spin_lock_irqsave(&state->lock, flags); | 997 | raw_spin_lock_irqsave(&state->lock, flags); |
607 | 998 | ||
608 | if (state->scheduled == tsk) | 999 | if (state->scheduled == tsk) |
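mc2_task_exit() can no longer assume a partitioned task: tinfo->cpu == -1 marks a globally scheduled (level-C) task, whose bookkeeping is reached through the local CPU's state instead. The lookup convention, as a sketch (hypothetical helper; the patch open-codes it):

    static struct mc2_cpu_state *state_for_task(struct mc2_task_state *tinfo)
    {
        /* -1 denotes a level-C task in the global environment */
        return (tinfo->cpu != -1) ? cpu_state_for(tinfo->cpu)
                                  : local_cpu_state();
    }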
@@ -616,7 +1007,11 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
616 | if (is_running(tsk)) { | 1007 | if (is_running(tsk)) { |
617 | /* Assumption: litmus_clock() is synchronized across cores | 1008 | /* Assumption: litmus_clock() is synchronized across cores |
618 | * [see comment in pres_task_resume()] */ | 1009 | * [see comment in pres_task_resume()] */ |
619 | sup_update_time(&state->sup_env, litmus_clock()); | 1010 | //if (lv < CRIT_LEVEL_C) |
1011 | // sup_update_time(&state->sup_env, litmus_clock()); | ||
1012 | raw_spin_lock(&_global_env.lock); | ||
1013 | mc2_update_time(lv, state, litmus_clock()); | ||
1014 | raw_spin_unlock(&_global_env.lock); | ||
620 | mc2_update_ghost_state(state); | 1015 | mc2_update_ghost_state(state); |
621 | task_departs(tsk, 0); | 1016 | task_departs(tsk, 0); |
622 | 1017 | ||
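The exit path now takes _global_env.lock and calls mc2_update_time() instead of updating the per-CPU environment unconditionally; the commented-out lines suggest the new function dispatches on criticality level. Its body is outside this hunk; a plausible shape, stated as an assumption:

    /* Assumed dispatcher: levels A/B live in the per-CPU sup_env,
     * level C in the shared global environment. The real function may
     * also use gmp_update_time()'s boolean result to request a
     * reschedule. */
    static void mc2_update_time(enum crit_level lv,
                                struct mc2_cpu_state *state, lt_t time)
    {
        if (lv < CRIT_LEVEL_C)
            sup_update_time(&state->sup_env, time);
        else
            gmp_update_time(&_global_env, time);
    }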
@@ -644,6 +1039,8 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
644 | */ | 1039 | */ |
645 | kfree(tsk_rt(tsk)->plugin_state); | 1040 | kfree(tsk_rt(tsk)->plugin_state); |
646 | tsk_rt(tsk)->plugin_state = NULL; | 1041 | tsk_rt(tsk)->plugin_state = NULL; |
1042 | kfree(tsk_rt(tsk)->mc2_data); | ||
1043 | tsk_rt(tsk)->mc2_data = NULL; | ||
647 | } | 1044 | } |
648 | 1045 | ||
649 | static long create_polling_reservation( | 1046 | static long create_polling_reservation( |
@@ -685,28 +1082,54 @@ static long create_polling_reservation( | |||
685 | if (!pres) | 1082 | if (!pres) |
686 | return -ENOMEM; | 1083 | return -ENOMEM; |
687 | 1084 | ||
688 | state = cpu_state_for(config->cpu); | 1085 | if (config->cpu != -1) { |
689 | raw_spin_lock_irqsave(&state->lock, flags); | 1086 | state = cpu_state_for(config->cpu); |
1087 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1088 | |||
1089 | res = sup_find_by_id(&state->sup_env, config->id); | ||
1090 | if (!res) { | ||
1091 | polling_reservation_init(pres, use_edf, periodic, | ||
1092 | config->polling_params.budget, | ||
1093 | config->polling_params.period, | ||
1094 | config->polling_params.relative_deadline, | ||
1095 | config->polling_params.offset); | ||
1096 | pres->res.id = config->id; | ||
1097 | pres->res.blocked_by_ghost = 0; | ||
1098 | pres->res.is_ghost = 0; | ||
1099 | if (!use_edf) | ||
1100 | pres->res.priority = config->priority; | ||
1101 | sup_add_new_reservation(&state->sup_env, &pres->res); | ||
1102 | err = config->id; | ||
1103 | } else { | ||
1104 | err = -EEXIST; | ||
1105 | } | ||
690 | 1106 | ||
691 | res = sup_find_by_id(&state->sup_env, config->id); | 1107 | raw_spin_unlock_irqrestore(&state->lock, flags); |
692 | if (!res) { | ||
693 | polling_reservation_init(pres, use_edf, periodic, | ||
694 | config->polling_params.budget, | ||
695 | config->polling_params.period, | ||
696 | config->polling_params.relative_deadline, | ||
697 | config->polling_params.offset); | ||
698 | pres->res.id = config->id; | ||
699 | pres->res.blocked_by_ghost = 0; | ||
700 | if (!use_edf) | ||
701 | pres->res.priority = config->priority; | ||
702 | sup_add_new_reservation(&state->sup_env, &pres->res); | ||
703 | err = config->id; | ||
704 | } else { | 1108 | } else { |
705 | err = -EEXIST; | 1109 | raw_spin_lock_irqsave(&_global_env.lock, flags); |
1110 | |||
1111 | res = gmp_find_by_id(&_global_env, config->id); | ||
1112 | if (!res) { | ||
1113 | polling_reservation_init(pres, use_edf, periodic, | ||
1114 | config->polling_params.budget, | ||
1115 | config->polling_params.period, | ||
1116 | config->polling_params.relative_deadline, | ||
1117 | config->polling_params.offset); | ||
1118 | pres->res.id = config->id; | ||
1119 | pres->res.blocked_by_ghost = 0; | ||
1120 | pres->res.scheduled_on = NO_CPU; | ||
1121 | pres->res.is_ghost = 0; | ||
1122 | if (!use_edf) | ||
1123 | pres->res.priority = config->priority; | ||
1124 | gmp_add_new_reservation(&_global_env, &pres->res); | ||
1125 | TRACE("GMP_ADD_NEW_RESERVATION R%d\n", pres->res.id); | ||
1126 | err = config->id; | ||
1127 | } else { | ||
1128 | err = -EEXIST; | ||
1129 | } | ||
1130 | raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
706 | } | 1131 | } |
707 | 1132 | ||
708 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
709 | |||
710 | if (err < 0) | 1133 | if (err < 0) |
711 | kfree(pres); | 1134 | kfree(pres); |
712 | 1135 | ||
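create_polling_reservation() now branches on config->cpu: a concrete CPU registers the reservation in that CPU's sup_env under the state lock, while -1 registers it in _global_env under the global lock, additionally clearing is_ghost and setting scheduled_on to NO_CPU. From userspace, requesting a global reservation is then just a matter of passing cpu = -1; a sketch assuming liblitmus' reservation_create() wrapper and the reservation_config layout used by this file:

    struct reservation_config config = {
        .id  = 42,
        .cpu = -1,                    /* -1 = global (level-C) reservation */
        .polling_params = {
            .budget            = 10000000ULL,   /* 10 ms, in ns */
            .period            = 100000000ULL,  /* 100 ms */
            .relative_deadline = 100000000ULL,
            .offset            = 0,
        },
    };
    int err = reservation_create(PERIODIC_POLLING, &config);
    /* on success err == config.id; -EEXIST if the id is already in use */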
@@ -825,10 +1248,12 @@ static long mc2_reservation_create(int res_type, void* __user _config) | |||
825 | if (copy_from_user(&config, _config, sizeof(config))) | 1248 | if (copy_from_user(&config, _config, sizeof(config))) |
826 | return -EFAULT; | 1249 | return -EFAULT; |
827 | 1250 | ||
828 | if (config.cpu < 0 || !cpu_online(config.cpu)) { | 1251 | if (config.cpu != -1) { |
829 | printk(KERN_ERR "invalid polling reservation (%u): " | 1252 | if (config.cpu < 0 || !cpu_online(config.cpu)) { |
830 | "CPU %d offline\n", config.id, config.cpu); | 1253 | printk(KERN_ERR "invalid polling reservation (%u): " |
831 | return -EINVAL; | 1254 | "CPU %d offline\n", config.id, config.cpu); |
1255 | return -EINVAL; | ||
1256 | } | ||
832 | } | 1257 | } |
833 | 1258 | ||
834 | switch (res_type) { | 1259 | switch (res_type) { |
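The admission check is relaxed accordingly: only a concrete CPU number is validated against cpu_online(); -1 passes through to the global path. Equivalently, as a hypothetical predicate:

    static bool valid_reservation_cpu(int cpu)
    {
        return cpu == -1 || (cpu >= 0 && cpu_online(cpu));
    }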
@@ -885,19 +1310,30 @@ static long mc2_activate_plugin(void) | |||
885 | { | 1310 | { |
886 | int cpu, lv; | 1311 | int cpu, lv; |
887 | struct mc2_cpu_state *state; | 1312 | struct mc2_cpu_state *state; |
1313 | struct cpu_entry *ce; | ||
888 | 1314 | ||
1315 | gmp_init(&_global_env); | ||
1316 | raw_spin_lock_init(&_lowest_prio_cpu.lock); | ||
1317 | |||
889 | for_each_online_cpu(cpu) { | 1318 | for_each_online_cpu(cpu) { |
890 | TRACE("Initializing CPU%d...\n", cpu); | 1319 | TRACE("Initializing CPU%d...\n", cpu); |
891 | 1320 | ||
892 | state = cpu_state_for(cpu); | 1321 | state = cpu_state_for(cpu); |
1322 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | ||
1323 | |||
1324 | ce->cpu = cpu; | ||
1325 | ce->scheduled = NULL; | ||
1326 | ce->deadline = ULLONG_MAX; | ||
1327 | ce->lv = NUM_CRIT_LEVELS; | ||
1328 | ce->will_schedule = false; | ||
893 | 1329 | ||
894 | raw_spin_lock_init(&state->lock); | 1330 | raw_spin_lock_init(&state->lock); |
895 | state->cpu = cpu; | 1331 | state->cpu = cpu; |
896 | state->scheduled = NULL; | 1332 | state->scheduled = NULL; |
897 | for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { | 1333 | for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { |
898 | struct crit_entry *ce = &state->crit_entries[lv]; | 1334 | struct crit_entry *cr_entry = &state->crit_entries[lv]; |
899 | ce->level = lv; | 1335 | cr_entry->level = lv; |
900 | ce->running = NULL; | 1336 | cr_entry->running = NULL; |
901 | //hrtimer_init(&ce->ghost_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | 1337 | //hrtimer_init(&ce->ghost_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); |
902 | //ce->ghost_timer.function = on_ghost_timer; | 1338 | //ce->ghost_timer.function = on_ghost_timer; |
903 | } | 1339 | } |
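Plugin activation now also initializes _global_env and the _lowest_prio_cpu table used to pick a CPU for level-C dispatch. The structure definitions sit outside this hunk; inferred from the fields initialized above, they presumably look like:

    /* Assumed layout, reconstructed from usage in this hunk. */
    struct cpu_entry {
        int                 cpu;
        struct task_struct *scheduled;
        lt_t                deadline;      /* ULLONG_MAX = idle */
        enum crit_level     lv;
        bool                will_schedule;
    };

    struct {
        raw_spinlock_t   lock;
        struct cpu_entry cpu_entries[NR_CPUS];
    } _lowest_prio_cpu;

Renaming the loop-local crit_entry pointer from ce to cr_entry avoids shadowing the new cpu_entry pointer of the same name.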
@@ -905,6 +1341,9 @@ static long mc2_activate_plugin(void) | |||
905 | 1341 | ||
906 | hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | 1342 | hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); |
907 | state->timer.function = on_scheduling_timer; | 1343 | state->timer.function = on_scheduling_timer; |
1344 | |||
1345 | hrtimer_init(&state->g_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | ||
1346 | state->g_timer.function = on_global_scheduling_timer; | ||
908 | } | 1347 | } |
909 | 1348 | ||
910 | mc2_setup_domain_proc(); | 1349 | mc2_setup_domain_proc(); |
@@ -912,17 +1351,26 @@ static long mc2_activate_plugin(void) | |||
912 | return 0; | 1351 | return 0; |
913 | } | 1352 | } |
914 | 1353 | ||
1354 | static void mc2_finish_switch(struct task_struct *prev) | ||
1355 | { | ||
1356 | struct mc2_cpu_state *state = local_cpu_state(); | ||
1357 | |||
1358 | state->scheduled = is_realtime(current) ? current : NULL; | ||
1359 | } | ||
1360 | |||
915 | static long mc2_deactivate_plugin(void) | 1361 | static long mc2_deactivate_plugin(void) |
916 | { | 1362 | { |
917 | int cpu; | 1363 | int cpu; |
918 | struct mc2_cpu_state *state; | 1364 | struct mc2_cpu_state *state; |
919 | struct reservation *res; | 1365 | struct reservation *res; |
1366 | struct next_timer_event *event; | ||
920 | 1367 | ||
921 | for_each_online_cpu(cpu) { | 1368 | for_each_online_cpu(cpu) { |
922 | state = cpu_state_for(cpu); | 1369 | state = cpu_state_for(cpu); |
923 | raw_spin_lock(&state->lock); | 1370 | raw_spin_lock(&state->lock); |
924 | 1371 | ||
925 | hrtimer_cancel(&state->timer); | 1372 | hrtimer_cancel(&state->timer); |
1373 | hrtimer_cancel(&state->g_timer); | ||
926 | 1374 | ||
927 | /* Delete all reservations --- assumes struct reservation | 1375 | /* Delete all reservations --- assumes struct reservation |
928 | * is prefix of containing struct. */ | 1376 | * is prefix of containing struct. */ |
@@ -954,6 +1402,46 @@ static long mc2_deactivate_plugin(void) | |||
954 | raw_spin_unlock(&state->lock); | 1402 | raw_spin_unlock(&state->lock); |
955 | } | 1403 | } |
956 | 1404 | ||
1405 | raw_spin_lock(&_global_env.lock); | ||
1406 | |||
1407 | while (!list_empty(&_global_env.active_reservations)) { | ||
1408 | TRACE("RES FOUND!!!\n"); | ||
1409 | res = list_first_entry( | ||
1410 | &_global_env.active_reservations, | ||
1411 | struct reservation, list); | ||
1412 | list_del(&res->list); | ||
1413 | kfree(res); | ||
1414 | } | ||
1415 | |||
1416 | while (!list_empty(&_global_env.inactive_reservations)) { | ||
1417 | TRACE("RES FOUND!!!\n"); | ||
1418 | res = list_first_entry( | ||
1419 | &_global_env.inactive_reservations, | ||
1420 | struct reservation, list); | ||
1421 | list_del(&res->list); | ||
1422 | kfree(res); | ||
1423 | } | ||
1424 | |||
1425 | while (!list_empty(&_global_env.depleted_reservations)) { | ||
1426 | TRACE("RES FOUND!!!\n"); | ||
1427 | res = list_first_entry( | ||
1428 | &_global_env.depleted_reservations, | ||
1429 | struct reservation, list); | ||
1430 | list_del(&res->list); | ||
1431 | kfree(res); | ||
1432 | } | ||
1433 | |||
1434 | while (!list_empty(&_global_env.next_events)) { | ||
1435 | TRACE("EVENT FOUND!!!\n"); | ||
1436 | event = list_first_entry( | ||
1437 | &_global_env.next_events, | ||
1438 | struct next_timer_event, list); | ||
1439 | list_del(&event->list); | ||
1440 | kfree(event); | ||
1441 | } | ||
1442 | |||
1443 | raw_spin_unlock(&_global_env.lock); | ||
1444 | |||
957 | destroy_domain_proc_info(&mc2_domain_proc_info); | 1445 | destroy_domain_proc_info(&mc2_domain_proc_info); |
958 | return 0; | 1446 | return 0; |
959 | } | 1447 | } |
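Deactivation drains the four global lists (active, inactive, and depleted reservations, plus pending timer events) under _global_env.lock, popping and freeing entries until each list is empty. The four loops differ only in list head and element type; a hypothetical condensation:

    /* Caller holds _global_env.lock; `type` must embed a `list` member. */
    #define drain_list(head, type)                              \
        while (!list_empty(head)) {                             \
            type *e = list_first_entry(head, type, list);       \
            list_del(&e->list);                                 \
            kfree(e);                                           \
        }

    /* e.g. drain_list(&_global_env.active_reservations, struct reservation);
     *      drain_list(&_global_env.next_events, struct next_timer_event); */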
@@ -961,6 +1449,7 @@ static long mc2_deactivate_plugin(void) | |||
961 | static struct sched_plugin mc2_plugin = { | 1449 | static struct sched_plugin mc2_plugin = { |
962 | .plugin_name = "MC2", | 1450 | .plugin_name = "MC2", |
963 | .schedule = mc2_schedule, | 1451 | .schedule = mc2_schedule, |
1452 | .finish_switch = mc2_finish_switch, | ||
964 | .task_wake_up = mc2_task_resume, | 1453 | .task_wake_up = mc2_task_resume, |
965 | .admit_task = mc2_admit_task, | 1454 | .admit_task = mc2_admit_task, |
966 | .task_new = mc2_task_new, | 1455 | .task_new = mc2_task_new, |