author    | Namhoon Kim <namhoonk@cs.unc.edu> | 2017-04-25 02:14:29 -0400
committer | Namhoon Kim <namhoonk@cs.unc.edu> | 2017-04-25 02:14:29 -0400
commit    | 6dc3e50fc1916dbc8b8e9cc863217e545ca4159c (patch)
tree      | e693a85beac6b5713a87afa5e8e3c5bb42df502d /litmus/sched_mc2.c
parent    | de945bf970cbb332c0540b2cd071ec3c7e4b7833 (diff)
sync bug fixed
Diffstat (limited to 'litmus/sched_mc2.c')
-rw-r--r-- | litmus/sched_mc2.c | 145
1 file changed, 90 insertions, 55 deletions
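
What the diff below repeatedly does is (1) turn the synchronous-release counter num_sync_released from a plain int that was only modified under raw_spin_lock(&global_lock) into an atomic_t, and (2) move the local_irq_save()/local_irq_restore() calls so that interrupts are disabled only while the scheduler locks are actually held. A minimal sketch of the counter conversion, using the standard <linux/atomic.h> operations the patch relies on; the two wrapper functions are hypothetical illustrations, not code taken from sched_mc2.c:

	#include <linux/atomic.h>

	/* Was: int num_sync_released, modified only under raw_spin_lock(&global_lock). */
	static atomic_t num_sync_released = ATOMIC_INIT(0);

	/* Hypothetical wrapper mirroring the admit path: one more task will take
	 * part in the synchronous release; no global_lock needed any more. */
	static void sync_release_get(void)
	{
		atomic_inc(&num_sync_released);
	}

	/* Hypothetical wrapper mirroring the resume/exit paths: drop one reference
	 * and test whether the init phase has ended.  As in the patch, the decrement
	 * and the zero-check are two separate atomic operations, so the check is not
	 * atomic with the decrement. */
	static void sync_release_put(void)
	{
		atomic_dec(&num_sync_released);
		if (atomic_read(&num_sync_released) == 0)
			; /* e.g. request the mode change, as mc2_task_resume() now does */
	}

If the zero transition had to be observed exactly once, atomic_dec_and_test() would be the usual primitive; the patch pairs atomic_dec() with a later atomic_read() instead.
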
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 415e5fe3bf12..6723e8a96141 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -38,7 +38,7 @@
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
-extern int num_sync_released;
+extern atomic_t num_sync_released;
 extern void do_partition(enum crit_level lv, int cpu);
 
 /* _global_env - reservation container for level-C tasks*/
@@ -137,8 +137,8 @@ asmlinkage long sys_enact_mode(void)
 TRACE_TASK(current, "ENACTING SYSCALL\n");
 if (state->cpu == 0){
 mode_changed = false;
-local_irq_save(flags);
-if (pending){ //MCR has entered
+if (pending){ //MCR has entered
+local_irq_save(flags);
 raw_spin_lock(&state->lock);
 raw_spin_lock(&global_lock);
 raw_spin_lock(&mode_lock);
@@ -223,10 +223,11 @@ asmlinkage long sys_enact_mode(void)
 raw_spin_unlock(&mode_lock);
 raw_spin_unlock(&global_lock);
 //raw_spin_unlock(&state->lock);
+local_irq_restore(flags);
 mc2_update_timer_and_unlock(state);
 }
 this_cpu_inc(mode_counter);
-local_irq_restore(flags);
+//local_irq_restore(flags);
 //cpu_0_spin_flag = !cpu_0_spin_flag;
 }
 else if (cpu_0_task_exist) {
@@ -248,13 +249,13 @@ asmlinkage long sys_enact_mode(void)
 while (*cpu0_counter == this_cpu_read(mode_counter))
 udelay(1);
 TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter));
-local_irq_save(flags);
+//local_irq_save(flags);
 if (mode_changed) {
 lt_t new_mode_basetime = get_release(current);
 //TRACE("CPU%d mode changed\n",state->cpu);
 hrtimer_cancel(&state->timer); //stop listening to old mode timers
 TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock());
-//local_irq_save(flags);
+local_irq_save(flags);
 raw_spin_lock(&state->lock);
 state->sup_env = &state->sup_env_modes[mode];
 list_for_each(pos, &state->sup_env->active_reservations){
@@ -271,12 +272,13 @@ asmlinkage long sys_enact_mode(void)
 }
 sup_update_time(state->sup_env, litmus_clock());
 //raw_spin_unlock(&state->lock);
+local_irq_restore(flags);
 mc2_update_timer_and_unlock(state);
 //local_irq_restore(flags);
 
 }
 this_cpu_write(mode_counter, *cpu0_counter);
-local_irq_restore(flags);
+//local_irq_restore(flags);
 //state->spin_flag = !state->spin_flag;
 }
 else {
@@ -304,18 +306,21 @@ asmlinkage long sys_enact_mode(void)
 asmlinkage long sys_request_mode(int new_mode){
 preempt_disable();
 raw_spin_lock(&mode_lock);
+TRACE("MCR received at %llu\n", litmus_clock());
 if (pending){
 raw_spin_unlock(&mode_lock);
 preempt_enable();
+TRACE("MCR rejected because the previous MCR is pedning.\n");
 return -EAGAIN;
 }
 if (mode == new_mode){
 raw_spin_unlock(&mode_lock);
 preempt_enable();
+TRACE("MCR rejected because the system is already in the new mode = %d.\n", new_mode);
 return 0;
 }
 requested_mode = new_mode;
-TRACE("MCR received\n");
+TRACE("MCR to %d is accepted.\n", new_mode);
 res_reported = mode_sizes[mode];
 TRACE_CUR("RES_REPORTED = %d\n",res_reported);
 seen_once = false;
@@ -475,7 +480,7 @@ static int get_lowest_prio_cpu(lt_t priority)
 
 ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
 if (!ce->will_schedule && !ce->scheduled) {
-TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
+TRACE("CPU %d (local) is the lowest (Idle)!\n", ce->cpu);
 return ce->cpu;
 } else {
 TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
@@ -485,14 +490,16 @@ static int get_lowest_prio_cpu(lt_t priority)
 ce = &_lowest_prio_cpu.cpu_entries[cpu];
 /* If a CPU will call schedule() in the near future, we don't
 return that CPU. */
+/*
 TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
 ce->scheduled ? (ce->scheduled)->comm : "null",
 ce->scheduled ? (ce->scheduled)->pid : 0,
 ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
+*/
 if (!ce->will_schedule) {
 if (!ce->scheduled) {
 /* Idle cpu, return this. */
-TRACE("CPU %d is the lowest!\n", ce->cpu);
+TRACE("CPU %d is the lowest (Idle)!\n", ce->cpu);
 return ce->cpu;
 } else if (ce->lv == CRIT_LEVEL_C &&
 ce->deadline > latest_deadline) {
@@ -502,10 +509,12 @@ static int get_lowest_prio_cpu(lt_t priority)
 }
 }
 
-if (priority >= latest_deadline)
-ret = NO_CPU;
+TRACE("CPU %d is the lowest! deadline = %llu, my priority = %llu\n", ret, latest_deadline, priority);
 
-TRACE("CPU %d is the lowest!\n", ret);
+if (priority >= latest_deadline) {
+TRACE("CPU %d is running a higher-priority task. return NO_CPU\n", ret);
+ret = NO_CPU;
+}
 
 return ret;
 }
@@ -559,9 +568,10 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 //raw_spin_lock(&_lowest_prio_cpu.lock);
 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 //raw_spin_unlock(&_lowest_prio_cpu.lock);
-if (cpu == local_cpu_state()->cpu)
-litmus_reschedule_local();
-else
+
+//if (cpu == local_cpu_state()->cpu)
+// litmus_reschedule_local();
+//else
 reschedule[cpu] = 1;
 }
 }
@@ -735,7 +745,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 //raw_spin_unlock(&_lowest_prio_cpu.lock);
 TRACE("LOWEST CPU = P%d\n", cpu);
 if (cpu == state->cpu && update > now)
-litmus_reschedule_local();
+;//litmus_reschedule_local();
 else
 reschedule[cpu] = 1;
 }
@@ -765,6 +775,8 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 return restart;
 }
 
+#define INIT_PHASE_LENGTH_NS (1000000000)
+
 /* mc2_complete_job - syscall backend for job completions
 */
 static long mc2_complete_job(void)
@@ -772,14 +784,17 @@
 ktime_t next_release;
 long err;
 
-enum crit_level lv;
+enum crit_level lv = get_task_crit_level(current);
 
 raw_spin_lock(&mode_lock);
 tsk_rt(current)->completed = 1;
 raw_spin_unlock(&mode_lock);
 
-lv = get_task_crit_level(current);
-
+if (atomic_read(&num_sync_released) == 0 && mode != 0) {
+tsk_rt(current)->sporadic_release = 0;
+TRACE_CUR("num_sync_released is 0\n");
+}
+
 /* If this the first job instance, we need to reset replenish
 time to the next release time */
 if (tsk_rt(current)->sporadic_release) {
@@ -798,7 +813,7 @@
 raw_spin_lock(&state->lock);
 for (i = 0; i<NR_MODES; i++) {
 if (in_mode(current,i) || i == 0) {
-state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time;
+state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 }
 }
 res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id);
@@ -811,7 +826,7 @@
 raw_spin_lock(&global_lock);
 for (i = 0; i < NR_MODES; i++) {
 if (in_mode(current,i) || i == 0) {
-_global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time;
+_global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 }
 }
 res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id);
@@ -821,7 +836,7 @@
 
 /* set next_replenish to synchronous release time */
 BUG_ON(!res);
-res->next_replenishment = tsk_rt(current)->sporadic_release_time;
+res->next_replenishment = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 /*
 if (get_task_crit_level(current) == CRIT_LEVEL_A) {
 struct table_driven_reservation *tdres;
@@ -834,7 +849,7 @@
 res->cur_budget = 0;
 res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-// TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
+TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
 if (lv == CRIT_LEVEL_C)
 raw_spin_unlock(&global_lock);
 
@@ -863,12 +878,12 @@
 res = gmp_find_by_id(_global_env, tsk_mc2_data(current)->res_id);
 if (res && !res->reported){
 res_reported--;
-TRACE_CUR("RES_REPORTED = %d\n",res_reported);
+TRACE_CUR("RES_REPORTED = %d\n", res_reported);
 res->reported = 1;
 //Current task doesn't exist in new mode
-if ( !in_mode(current, requested_mode) ){
-litmus_reschedule_local();
-}
+//if ( !in_mode(current, requested_mode) ){
+// litmus_reschedule_local();
+//}
 }
 raw_spin_unlock(&mode_lock);
 }
@@ -908,8 +923,10 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 
 
 list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
-if (res->state == RESERVATION_ACTIVE)
-TRACE_TASK(tsk, "ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu\n", res->id, mode, res->mode, res->cur_budget);
+if (res->state == RESERVATION_ACTIVE) {
+struct task_struct *t = res->ops->dispatch_client(res, &time_slice);
+TRACE_TASK(tsk, "CPU%d ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", state->cpu, res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t));
+}
 }
 
 list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
@@ -922,7 +939,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 return tsk;
 } else {
 //if (!is_init_finished(tsk)) {
-TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
+// TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
 // if (num_sync_released != 0 && mode == 0) {
 //ce = &state->crit_entries[lv];
 sup_scheduler_update_after(sup_env, res->cur_budget);
@@ -953,6 +970,13 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 enum crit_level lv;
 lt_t time_slice;
 
+list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
+if (res->state == RESERVATION_ACTIVE) {
+struct task_struct *t = res->ops->dispatch_client(res, &time_slice);
+TRACE_TASK(tsk, "GLOBAL ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t));
+}
+}
+
 raw_spin_lock(&mode_lock);
 list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
 BUG_ON(!res);
@@ -1089,9 +1113,15 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 if (is_realtime(prev))
 gmp_update_time(_global_env, now);
 state->scheduled = mc2_global_dispatch(state);
+_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+update_cpu_prio(state);
+raw_spin_unlock(&global_lock);
+} else {
+raw_spin_lock(&global_lock);
+_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+update_cpu_prio(state);
 raw_spin_unlock(&global_lock);
 }
-
 /*
 if (!state->scheduled) {
 raw_spin_lock(&global_lock);
@@ -1161,12 +1191,12 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 }
 */
 post_schedule(state->scheduled, state->cpu);
-
+/*
 raw_spin_lock(&global_lock);
 _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 update_cpu_prio(state);
 raw_spin_unlock(&global_lock);
-
+*/
 raw_spin_unlock(&state->lock);
 if (state->scheduled) {
 TRACE_TASK(state->scheduled, "scheduled.\n");
@@ -1204,7 +1234,6 @@ static void mc2_task_resume(struct task_struct *tsk)
 TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
 //local_irq_save(flags);
-TRACE_TASK(tsk, "preemptible?? %d\n", preemptible());
 preempt_disable();
 tinfo = get_mc2_state(tsk);
 if (tinfo->cpu != -1)
@@ -1219,7 +1248,8 @@ static void mc2_task_resume(struct task_struct *tsk)
 /* Requeue only if self-suspension was already processed. */
 if (tinfo->has_departed)
 {
-
+raw_spin_lock(&state->lock);
+local_irq_save(flags);
 /* We don't want to consider jobs before synchronous releases */
 if (tsk_rt(tsk)->job_params.job_no == 2) {
 /*
@@ -1239,14 +1269,19 @@ static void mc2_task_resume(struct task_struct *tsk)
 */
 TRACE_TASK(tsk, "INIT_FINISHED is SET\n");
 tsk_mc2_data(tsk)->init_finished = 1;
-raw_spin_lock(&global_lock);
-num_sync_released--;
-raw_spin_unlock(&global_lock);
-TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", num_sync_released);
+atomic_dec(&num_sync_released);
+//raw_spin_unlock(&global_lock);
+if (atomic_read(&num_sync_released) == 0) {
+lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no);
+TRACE("INIT_PHASE FINISHED. CHANGE TO MODE 1\n");
+sys_request_mode(1);
+sched_trace_sys_start(&start);
+}
+TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", atomic_read(&num_sync_released));
 }
 
-raw_spin_lock(&state->lock);
-local_irq_save(flags);
+// raw_spin_lock(&state->lock);
+// local_irq_save(flags);
 /* Assumption: litmus_clock() is synchronized across cores,
 * since we might not actually be executing on tinfo->cpu
 * at the moment. */
@@ -1363,9 +1398,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
 cpu_0_task_exist = true;
 }
-raw_spin_lock(&global_lock);
-num_sync_released++;
-raw_spin_unlock(&global_lock);
+atomic_inc(&num_sync_released);
 local_irq_restore(flags);
 raw_spin_unlock(&state->lock);
 //raw_spin_unlock_irqrestore(&state->lock, flags);
@@ -1425,15 +1458,16 @@ static long mc2_admit_task(struct task_struct *tsk)
 tsk_rt(tsk)->plugin_state = tinfo;
 tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 raw_spin_lock(&mode_lock);
-for(i = 0; i < NR_MODES; i++){
+for(i = 1; i < NR_MODES; i++){
 if (in_mode(tsk, i)){
 mode_sizes[i]++;
 }
 }
+mode_sizes[0]++;
 raw_spin_unlock(&mode_lock);
 
 }
-num_sync_released++;
+atomic_inc(&num_sync_released);
 raw_spin_unlock(&global_lock);
 //raw_spin_unlock_irqrestore(&state->lock, flags);
 local_irq_restore(flags);
@@ -1728,9 +1762,8 @@ static void mc2_task_exit(struct task_struct *tsk)
 /* NOTE: drops state->lock */
 TRACE("mc2_exit()\n");
 
-raw_spin_lock(&global_lock);
-num_sync_released--;
-raw_spin_unlock(&global_lock);
+atomic_dec(&num_sync_released);
+
 mc2_update_timer_and_unlock(state);
 } else {
 raw_spin_unlock(&state->lock);
@@ -1738,15 +1771,16 @@ static void mc2_task_exit(struct task_struct *tsk)
 }
 
 if (lv == CRIT_LEVEL_C) {
-raw_spin_lock(&global_lock);
+//raw_spin_lock(&global_lock);
 raw_spin_lock(&mode_lock);
-for(i = 0; i < NR_MODES; i++){
+for(i = 1; i < NR_MODES; i++){
 if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) )
 continue;
 mode_sizes[i]--;
 }
+mode_sizes[0]--;
 raw_spin_unlock(&mode_lock);
-raw_spin_unlock(&global_lock);
+//raw_spin_unlock(&global_lock);
 
 for_each_online_cpu(cpu) {
 state = cpu_state_for(cpu);
@@ -2340,8 +2374,9 @@ static long mc2_deactivate_plugin(void)
 }
 
 }
-num_sync_released = 0;
 raw_spin_unlock(&global_lock);
+
+atomic_set(&num_sync_released, 0);
 destroy_domain_proc_info(&mc2_domain_proc_info);
 return 0;
 }
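
For reference, a condensed sketch of the locking and IRQ ordering that the sys_enact_mode() hunks above establish on the CPU-0 path. The lock variables are stand-ins so the fragment is self-contained; the real code operates on state->lock, global_lock, and mode_lock, and mc2_update_timer_and_unlock() is what actually drops the state lock:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static DEFINE_RAW_SPINLOCK(state_lock);   /* stand-in for state->lock */
	static DEFINE_RAW_SPINLOCK(global_lock);
	static DEFINE_RAW_SPINLOCK(mode_lock);

	/* Hypothetical condensation of the CPU-0 branch of sys_enact_mode() after
	 * this patch: IRQs are disabled only inside the pending branch, and they
	 * are re-enabled before the timer update instead of after this_cpu_inc(). */
	static void enact_mode_cpu0_sketch(bool pending)
	{
		unsigned long flags;

		if (pending) {                  /* MCR has entered */
			local_irq_save(flags);  /* moved inside the if() by this patch */
			raw_spin_lock(&state_lock);
			raw_spin_lock(&global_lock);
			raw_spin_lock(&mode_lock);

			/* ... perform the mode change ... */

			raw_spin_unlock(&mode_lock);
			raw_spin_unlock(&global_lock);
			local_irq_restore(flags);       /* now restored before the timer update */
			raw_spin_unlock(&state_lock);   /* done by mc2_update_timer_and_unlock() in the real code */
		}
		/* mode_counter is bumped here; the old local_irq_restore() at this
		 * point is commented out by the patch */
	}
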