author     Namhoon Kim <namhoonk@cs.unc.edu>    2016-08-30 16:47:00 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>    2016-08-30 16:47:00 -0400
commit     d57b8f5a8e2d08fa972dad6b646a02a5dd931be4 (patch)
tree       ffe247f39ebed2a487762ef92d0ac473d2141989
parent     8c20bdbf935c54784e634af2039a5897581e65e8 (diff)
Initial commit for shared library (wip-shared-mem)
-rw-r--r--  litmus/Kconfig        9
-rw-r--r--  litmus/Makefile       1
-rw-r--r--  litmus/sched_mc2.c  281
3 files changed, 173 insertions, 118 deletions
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 6e0f77c3bb8f..babb43deffb5 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -372,13 +372,4 @@ config PREEMPT_STATE_TRACE
 
 endmenu
 
-config PGMRT_SUPPORT
-	bool "Support for PGM^RT API"
-	default y
-	depends on LITMUS_LOCKING && ALLOW_EARLY_RELEASE
-	help
-	  This option enables support for PGM^RT API. The source code of PGM^RT
-	  can be found https://github.com/GElliott/pgm . This option adds some
-	  variables in rt_param.h.
-
 endmenu
diff --git a/litmus/Makefile b/litmus/Makefile
index 1845dda0b905..e27440917ca1 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -32,7 +32,6 @@ obj-y = sched_plugin.o litmus.o \
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
-
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
 obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index e4b5e607c7cc..a2abda848cbf 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -27,6 +27,10 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+#ifdef CONFIG_PGMRT_SUPPORT
+#include <litmus/pgm.h>
+#endif
+
 //#define TRACE(fmt, args...) do {} while (false)
 //#define TRACE_TASK(fmt, args...) do {} while (false)
 
@@ -90,6 +94,7 @@ struct mc2_cpu_state {
 
 static int resched_cpu[NR_CPUS];
 static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
+static int level_a_priorities[NR_CPUS];
 
 #define cpu_state_for(cpu_id)	(&per_cpu(mc2_cpu_state, cpu_id))
 #define local_cpu_state()	(this_cpu_ptr(&mc2_cpu_state))
@@ -152,7 +157,7 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 
 	res->ops->client_departs(res, client, job_complete);
 	tinfo->has_departed = true;
-	TRACE_TASK(tsk, "CLIENT DEPART with budget %llu\n", res->cur_budget);
+	TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
 	/* 9/18/2015 fix start - no remaining budget
 	 *
 	if (job_complete && res->cur_budget) {
@@ -200,6 +205,7 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 	}
 
 	res->ops->client_arrives(res, client);
+	TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());
 
 	if (lv != NUM_CRIT_LEVELS) {
 		struct crit_entry *ce;
@@ -377,11 +383,13 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	raw_spin_unlock(&state->lock);
 
 	if (update <= now || reschedule[state->cpu]) {
-		//litmus_reschedule(state->cpu);
+		reschedule[state->cpu] = 0;
+		litmus_reschedule(state->cpu);
+		/*
 		raw_spin_lock(&state->lock);
 		preempt_if_preemptable(state->scheduled, state->cpu);
 		raw_spin_unlock(&state->lock);
-		reschedule[state->cpu] = 0;
+		*/
 	} else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
 		/* Reprogram only if not already set correctly. */
 		if (!hrtimer_active(&state->timer) ||
@@ -397,6 +405,15 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 					0 /* timer coalescing slack */,
 					HRTIMER_MODE_ABS_PINNED,
 					0 /* wakeup */);
+			if (update < litmus_clock()) {
+				/* uh oh, timer expired while trying to set it */
+				TRACE("timer expired during setting "
+				      "update:%llu now:%llu actual:%llu\n",
+				      update, now, litmus_clock());
+				/* The timer HW may not have been reprogrammed
+				 * correctly; force rescheduling now. */
+				litmus_reschedule(state->cpu);
+			}
 		}
 	} else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
 		/* Poke remote core only if timer needs to be set earlier than
@@ -424,13 +441,14 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	}
 	for (cpus = 0; cpus<NR_CPUS; cpus++) {
 		if (reschedule[cpus]) {
-			//litmus_reschedule(cpus);
+			litmus_reschedule(cpus);
+			/*
 			struct mc2_cpu_state *remote_state;
-
 			remote_state = cpu_state_for(cpus);
 			raw_spin_lock(&remote_state->lock);
 			preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
 			raw_spin_unlock(&remote_state->lock);
+			*/
 		}
 	}
 }
@@ -613,13 +631,15 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 
 	for (cpus = 0; cpus<NR_CPUS; cpus++) {
 		if (reschedule[cpus]) {
-			//litmus_reschedule(cpus);
+			litmus_reschedule(cpus);
+			/*
 			struct mc2_cpu_state *remote_state;
 
 			remote_state = cpu_state_for(cpus);
 			raw_spin_lock(&remote_state->lock);
 			preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
 			raw_spin_unlock(&remote_state->lock);
+			*/
 		}
 	}
 
@@ -627,6 +647,101 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	return restart;
 }
 
+/* mc2_complete_job - syscall backend for job completions
+ */
+static long mc2_complete_job(void)
+{
+	ktime_t next_release;
+	long err;
+
+	tsk_rt(current)->completed = 1;
+
+	/* If this the first job instance, we need to reset replenish
+	   time to the next release time */
+	if (tsk_rt(current)->sporadic_release) {
+		struct mc2_cpu_state *state;
+		struct reservation_environment *env;
+		struct mc2_task_state *tinfo;
+		struct reservation *res = NULL;
+		unsigned long flags;
+		enum crit_level lv;
+
+		preempt_disable();
+		local_irq_save(flags);
+
+		tinfo = get_mc2_state(current);
+		lv = get_task_crit_level(current);
+
+		if (lv < CRIT_LEVEL_C) {
+			state = cpu_state_for(tinfo->cpu);
+			raw_spin_lock(&state->lock);
+			env = &(state->sup_env.env);
+			res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+			env->time_zero = tsk_rt(current)->sporadic_release_time;
+		}
+		else if (lv == CRIT_LEVEL_C) {
+			state = local_cpu_state();
+			raw_spin_lock(&state->lock);
+			raw_spin_lock(&_global_env.lock);
+			res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
+			_global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
+		}
+		else
+			BUG();
+
+		/* set next_replenishtime to synchronous release time */
+		BUG_ON(!res);
+		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
+		/*
+		if (get_task_crit_level(current) == CRIT_LEVEL_A) {
+			struct table_driven_reservation *tdres;
+			tdres = container_of(res, struct table_driven_reservation, res);
+			tdres->next_interval = 0;
+			tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
+			res->next_replenishment += tdres->intervals[0].start;
+		}
+		*/
+		res->cur_budget = 0;
+		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
+
+		//TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+
+		//if (lv < CRIT_LEVEL_C)
+		//	raw_spin_unlock(&state->lock);
+		//else
+		if (lv == CRIT_LEVEL_C)
+			raw_spin_unlock(&_global_env.lock);
+
+		raw_spin_unlock(&state->lock);
+		local_irq_restore(flags);
+		preempt_enable();
+	}
+
+	sched_trace_task_completion(current, 0);
+	/* update the next release time and deadline */
+	prepare_for_next_period(current);
+	sched_trace_task_release(current);
+	next_release = ns_to_ktime(get_release(current));
+	preempt_disable();
+	TRACE_CUR("next_release=%llu\n", get_release(current));
+	if (get_release(current) > litmus_clock()) {
+		/* sleep until next_release */
+		set_current_state(TASK_INTERRUPTIBLE);
+		preempt_enable_no_resched();
+		err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
+	} else {
+		/* release the next job immediately */
+		err = 0;
+		TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
+		preempt_enable();
+	}
+
+	TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
+
+	tsk_rt(current)->completed = 0;
+	return err;
+}
+
 /* mc2_dispatch - Select the next task to schedule.
  */
 struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state)
@@ -766,7 +881,7 @@ static inline void post_schedule(struct task_struct *next, int cpu)
  */
 static struct task_struct* mc2_schedule(struct task_struct * prev)
 {
-	int np, blocks, exists;
+	int np, blocks, exists, preempt, to_schedule;
 	/* next == NULL means "schedule background work". */
 	lt_t now;
 	struct mc2_cpu_state *state = local_cpu_state();
@@ -781,34 +896,49 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	//BUG_ON(state->scheduled && state->scheduled != prev);
 	//BUG_ON(state->scheduled && !is_realtime(prev));
 	if (state->scheduled && state->scheduled != prev)
-		; //printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
+		printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
 	if (state->scheduled && !is_realtime(prev))
-		; //printk(KERN_ALERT "BUG2!!!!!!!! \n");
+		printk(KERN_ALERT "BUG2!!!!!!!! \n");
 
 	/* (0) Determine state */
 	exists = state->scheduled != NULL;
 	blocks = exists && !is_current_running();
 	np = exists && is_np(state->scheduled);
 
+	raw_spin_lock(&_global_env.lock);
+	preempt = resched_cpu[state->cpu];
+	resched_cpu[state->cpu] = 0;
+	raw_spin_unlock(&_global_env.lock);
+
 	/* update time */
 	state->sup_env.will_schedule = true;
 
 	now = litmus_clock();
 	sup_update_time(&state->sup_env, now);
-	/* 9/20/2015 fix
-	gmp_update_time(&_global_env, now);
-	*/
+	/* 9/20/2015 fix */
+	//raw_spin_lock(&_global_env.lock);
+	//to_schedule = gmp_update_time(&_global_env, now);
+	//raw_spin_unlock(&_global_env.lock);
+
 	/* 9/20/2015 fix
 	mc2_update_ghost_state(state);
 	*/
 
 	/* remove task from reservation if it blocks */
+	/*
 	if (is_realtime(prev) && !is_running(prev)) {
 		if (get_task_crit_level(prev) == CRIT_LEVEL_C)
 			raw_spin_lock(&_global_env.lock);
 		task_departs(prev, is_completed(prev));
 		if (get_task_crit_level(prev) == CRIT_LEVEL_C)
 			raw_spin_unlock(&_global_env.lock);
+	}*/
+	if (is_realtime(current) && blocks) {
+		if (get_task_crit_level(current) == CRIT_LEVEL_C)
+			raw_spin_lock(&_global_env.lock);
+		task_departs(current, is_completed(current));
+		if (get_task_crit_level(current) == CRIT_LEVEL_C)
+			raw_spin_unlock(&_global_env.lock);
 	}
 
 	/* figure out what to schedule next */
@@ -817,7 +947,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 
 	if (!state->scheduled) {
 		raw_spin_lock(&_global_env.lock);
-		gmp_update_time(&_global_env, now);
+		to_schedule = gmp_update_time(&_global_env, now);
 		state->scheduled = mc2_global_dispatch(state);
 		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 		update_cpu_prio(state);
@@ -854,7 +984,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		int cpu;
 		raw_spin_lock(&_global_env.lock);
 		cpu = get_lowest_prio_cpu(res?res->priority:0);
-		//TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
+		TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
 		if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
 			//raw_spin_lock(&_lowest_prio_cpu.lock);
 			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
@@ -864,6 +994,19 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 			raw_spin_unlock(&_global_env.lock);
 		}
 	}
+
+	if (to_schedule != 0) {
+		raw_spin_lock(&_global_env.lock);
+		while (to_schedule--) {
+			int cpu = get_lowest_prio_cpu(0);
+			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
+				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
+				resched_cpu[cpu] = 1;
+			}
+		}
+		raw_spin_unlock(&_global_env.lock);
+	}
+
 	if (state->scheduled) {
 		TRACE_TASK(state->scheduled, "scheduled.\n");
 	}
@@ -929,7 +1072,7 @@ static void mc2_task_resume(struct task_struct *tsk)
 			break;
 		}
 	}
-	
+
 	raw_spin_lock(&state->lock);
 	/* Assumption: litmus_clock() is synchronized across cores,
 	 * since we might not actually be executing on tinfo->cpu
@@ -962,99 +1105,6 @@ static void mc2_task_resume(struct task_struct *tsk)
 	resume_legacy_task_model_updates(tsk);
 }
 
-/* mc2_complete_job - syscall backend for job completions
- */
-static long mc2_complete_job(void)
-{
-	ktime_t next_release;
-	long err;
-
-	tsk_rt(current)->completed = 1;
-
-	/* If this the first job instance, we need to reset replenish
-	   time to the next release time */
-	if (tsk_rt(current)->sporadic_release) {
-		struct mc2_cpu_state *state;
-		struct reservation_environment *env;
-		struct mc2_task_state *tinfo;
-		struct reservation *res = NULL;
-		unsigned long flags;
-		enum crit_level lv;
-
-		preempt_disable();
-		local_irq_save(flags);
-
-		tinfo = get_mc2_state(current);
-		lv = get_task_crit_level(current);
-
-		if (lv < CRIT_LEVEL_C) {
-			state = cpu_state_for(tinfo->cpu);
-			raw_spin_lock(&state->lock);
-			env = &(state->sup_env.env);
-			res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
-			env->time_zero = tsk_rt(current)->sporadic_release_time;
-		}
-		else if (lv == CRIT_LEVEL_C) {
-			state = local_cpu_state();
-			raw_spin_lock(&state->lock);
-			raw_spin_lock(&_global_env.lock);
-			res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
-			_global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
-		}
-		else
-			BUG();
-
-		/* set next_replenishtime to synchronous release time */
-		BUG_ON(!res);
-		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
-		/*
-		if (get_task_crit_level(current) == CRIT_LEVEL_A) {
-			struct table_driven_reservation *tdres;
-			tdres = container_of(res, struct table_driven_reservation, res);
-			tdres->next_interval = 0;
-			tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
-			res->next_replenishment += tdres->intervals[0].start;
-		}
-		*/
-		res->cur_budget = 0;
-		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
-
-		//TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
-
-		//if (lv < CRIT_LEVEL_C)
-		//	raw_spin_unlock(&state->lock);
-		//else
-		if (lv == CRIT_LEVEL_C)
-			raw_spin_unlock(&_global_env.lock);
-
-		raw_spin_unlock(&state->lock);
-		local_irq_restore(flags);
-		preempt_enable();
-	}
-
-	sched_trace_task_completion(current, 0);
-	/* update the next release time and deadline */
-	prepare_for_next_period(current);
-	sched_trace_task_release(current);
-	next_release = ns_to_ktime(get_release(current));
-	preempt_disable();
-	TRACE_CUR("next_release=%llu\n", get_release(current));
-	if (get_release(current) > litmus_clock()) {
-		/* sleep until next_release */
-		set_current_state(TASK_INTERRUPTIBLE);
-		preempt_enable_no_resched();
-		err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
-	} else {
-		/* release the next job immediately */
-		err = 0;
-		TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
-		preempt_enable();
-	}
-
-	TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
-
-	return err;
-}
 
 /* mc2_admit_task - Setup mc2 task parameters
  */
@@ -1475,10 +1525,15 @@ static long create_polling_reservation(
 		pres->res.id = config->id;
 		pres->res.blocked_by_ghost = 0;
 		pres->res.is_ghost = NO_CPU;
+		/*if (config->priority == LITMUS_MAX_PRIORITY) {
+			level_a_priorities[config->cpu]++;
+			pres->res.priority = level_a_priorities[config->cpu];
+		}*/
 		if (!use_edf)
 			pres->res.priority = config->priority;
 		sup_add_new_reservation(&state->sup_env, &pres->res);
 		err = config->id;
+		TRACE_CUR("reservation created R%d priority : %llu\n", config->id, pres->res.priority);
 	} else {
 		err = -EEXIST;
 	}
@@ -1703,6 +1758,7 @@ static long mc2_activate_plugin(void)
 		TRACE("Initializing CPU%d...\n", cpu);
 
 		resched_cpu[cpu] = 0;
+		level_a_priorities[cpu] = 0;
 		state = cpu_state_for(cpu);
 		ce = &_lowest_prio_cpu.cpu_entries[cpu];
 
@@ -1733,9 +1789,18 @@ static long mc2_activate_plugin(void)
 
 static void mc2_finish_switch(struct task_struct *prev)
 {
+	int cpus;
+	enum crit_level lv = get_task_crit_level(prev);
 	struct mc2_cpu_state *state = local_cpu_state();
 
 	state->scheduled = is_realtime(current) ? current : NULL;
+	if (lv == CRIT_LEVEL_C) {
+		for (cpus = 0; cpus<NR_CPUS; cpus++) {
+			if (resched_cpu[cpus]) {
+				litmus_reschedule(cpus);
+			}
+		}
+	}
 }
 
 static long mc2_deactivate_plugin(void)