path: root/litmus/sched_mc2.c
Diffstat (limited to 'litmus/sched_mc2.c')
-rw-r--r--	litmus/sched_mc2.c	153
1 file changed, 84 insertions(+), 69 deletions(-)
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 3b407bd780d0..0c9bb1812367 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -38,7 +38,7 @@
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
-extern int num_sync_released;
+extern atomic_t num_sync_released;
 extern void do_partition(enum crit_level lv, int cpu);
 
 /* _global_env - reservation container for level-C tasks*/
@@ -147,8 +147,8 @@ asmlinkage long sys_enact_mode(void)
 			}
 		}
 		mode_changed = false;
-		local_irq_save(flags);
 		if (pending){ //MCR has entered
+			local_irq_save(flags);
 			raw_spin_lock(&state->lock);
 			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 			raw_spin_lock(&global_lock);
@@ -256,10 +256,11 @@ asmlinkage long sys_enact_mode(void)
 			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
 			//raw_spin_unlock(&state->lock);
+			local_irq_restore(flags);
 			mc2_update_timer_and_unlock(state);
 		}
 		this_cpu_inc(mode_counter);
-		local_irq_restore(flags);
+		//local_irq_restore(flags);
 		//cpu_0_spin_flag = !cpu_0_spin_flag;
 	}
 	else if (!mode_poll_exited) {
@@ -292,13 +293,13 @@ asmlinkage long sys_enact_mode(void)
 		//}
 		}
 		TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter));
-		local_irq_save(flags);
+		//local_irq_save(flags);
 		if (mode_changed) {
 			lt_t new_mode_basetime = get_release(current);
 			//TRACE("CPU%d mode changed\n",state->cpu);
 			hrtimer_cancel(&state->timer); //stop listening to old mode timers
 			TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock());
-			//local_irq_save(flags);
+			local_irq_save(flags);
 			raw_spin_lock(&state->lock);
 			state->sup_env = &state->sup_env_modes[mode];
 			list_for_each(pos, &state->sup_env->active_reservations){
@@ -315,12 +316,11 @@ asmlinkage long sys_enact_mode(void)
 			}
 			sup_update_time(state->sup_env, litmus_clock());
 			//raw_spin_unlock(&state->lock);
+			local_irq_restore(flags);
 			mc2_update_timer_and_unlock(state);
 			//local_irq_restore(flags);
 
 		}
-		//this_cpu_write(mode_counter, *cpu0_counter);
-		local_irq_restore(flags);
 		//state->spin_flag = !state->spin_flag;
 	}
 	else {
@@ -376,6 +376,7 @@ asmlinkage long sys_request_mode(int new_mode){
 		TRACE("Request to %d denied due to pending to %d\n", new_mode, requested_mode);
 		raw_spin_unlock(&mode_lock);
 		preempt_enable();
+		TRACE("MCR rejected because the previous MCR is pending.\n");
 		return -EAGAIN;
 	}
 	if (mode == new_mode){
@@ -545,7 +546,7 @@ static int get_lowest_prio_cpu(lt_t priority)
 
 	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
 	if (!ce->will_schedule && !ce->scheduled) {
-		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
+		TRACE("CPU %d (local) is the lowest (Idle)!\n", ce->cpu);
 		return ce->cpu;
 	} else {
 		TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
@@ -555,14 +556,16 @@ static int get_lowest_prio_cpu(lt_t priority)
 		ce = &_lowest_prio_cpu.cpu_entries[cpu];
 		/* If a CPU will call schedule() in the near future, we don't
 		   return that CPU. */
+/*
 		TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
 			ce->scheduled ? (ce->scheduled)->comm : "null",
 			ce->scheduled ? (ce->scheduled)->pid : 0,
 			ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
+*/
 		if (!ce->will_schedule) {
 			if (!ce->scheduled) {
 				/* Idle cpu, return this. */
-				TRACE("CPU %d is the lowest!\n", ce->cpu);
+				TRACE("CPU %d is the lowest (Idle)!\n", ce->cpu);
 				return ce->cpu;
 			} else if (ce->lv == CRIT_LEVEL_C &&
 				   ce->deadline > latest_deadline) {
@@ -572,10 +575,12 @@ static int get_lowest_prio_cpu(lt_t priority)
 			}
 		}
 
-	if (priority >= latest_deadline)
-		ret = NO_CPU;
+	TRACE("CPU %d is the lowest! deadline = %llu, my priority = %llu\n", ret, latest_deadline, priority);
 
-	TRACE("CPU %d is the lowest!\n", ret);
+	if (priority >= latest_deadline) {
+		TRACE("CPU %d is running a higher-priority task. return NO_CPU\n", ret);
+		ret = NO_CPU;
+	}
 
 	return ret;
 }
@@ -630,9 +635,10 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			//raw_spin_lock(&_lowest_prio_cpu.lock);
 			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 			//raw_spin_unlock(&_lowest_prio_cpu.lock);
-			if (cpu == local_cpu_state()->cpu)
-				litmus_reschedule_local();
-			else
+
+			//if (cpu == local_cpu_state()->cpu)
+			//	litmus_reschedule_local();
+			//else
 				reschedule[cpu] = 1;
 		}
 	}
@@ -808,7 +814,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 			//raw_spin_unlock(&_lowest_prio_cpu.lock);
 			TRACE("LOWEST CPU = P%d\n", cpu);
 			if (cpu == state->cpu && update > now)
-				litmus_reschedule_local();
+				;//litmus_reschedule_local();
 			else
 				reschedule[cpu] = 1;
 		}
@@ -839,6 +845,8 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	return restart;
 }
 
+#define INIT_PHASE_LENGTH_NS (1000000000)
+
 /* mc2_complete_job - syscall backend for job completions
  */
 static long mc2_complete_job(void)
@@ -846,14 +854,17 @@ static long mc2_complete_job(void)
 	ktime_t next_release;
 	long err;
 
-	enum crit_level lv;
+	enum crit_level lv = get_task_crit_level(current);
 
 	raw_spin_lock(&mode_lock);
 	tsk_rt(current)->completed = 1;
 	raw_spin_unlock(&mode_lock);
 
-	lv = get_task_crit_level(current);
-
+	if (atomic_read(&num_sync_released) == 0 && mode != 0) {
+		tsk_rt(current)->sporadic_release = 0;
+		TRACE_CUR("num_sync_released is 0\n");
+	}
+
 	/* If this the first job instance, we need to reset replenish
 	   time to the next release time */
 	if (tsk_rt(current)->sporadic_release) {
@@ -872,7 +883,7 @@ static long mc2_complete_job(void)
 		raw_spin_lock(&state->lock);
 		for (i = 0; i<NR_MODES; i++) {
 			if (in_mode(current,i) || i == 0) {
-				state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time;
+				state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 			}
 		}
 		res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id);
@@ -886,7 +897,7 @@ static long mc2_complete_job(void)
 		raw_spin_lock(&global_lock);
 		for (i = 0; i < NR_MODES; i++) {
 			if (in_mode(current,i) || i == 0) {
-				_global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time;
+				_global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 			}
 		}
 		res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id);
@@ -896,7 +907,7 @@ static long mc2_complete_job(void)
 
 	/* set next_replenish to synchronous release time */
 	BUG_ON(!res);
-	res->next_replenishment = tsk_rt(current)->sporadic_release_time;
+	res->next_replenishment = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 /*
 	if (get_task_crit_level(current) == CRIT_LEVEL_A) {
 		struct table_driven_reservation *tdres;
@@ -909,9 +920,8 @@ static long mc2_complete_job(void)
 	res->cur_budget = 0;
 	res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-	// TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
+	TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
 	if (lv == CRIT_LEVEL_C){
-		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 		raw_spin_unlock(&global_lock);
 	}
 	raw_spin_unlock(&state->lock);
@@ -939,12 +949,12 @@ static long mc2_complete_job(void)
 		res = gmp_find_by_id(_global_env, tsk_mc2_data(current)->res_id);
 		if (res && !res->reported){
 			res_reported--;
-			TRACE_CUR("RES_REPORTED = %d\n",res_reported);
+			TRACE_CUR("RES_REPORTED = %d\n", res_reported);
 			res->reported = 1;
 			//Current task doesn't exist in new mode
-			if ( !in_mode(current, requested_mode) ){
-				litmus_reschedule_local();
-			}
+			//if ( !in_mode(current, requested_mode) ){
+			//	litmus_reschedule_local();
+			//}
 		}
 		raw_spin_unlock(&mode_lock);
 	}
@@ -984,8 +994,10 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 
 
 	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
-		if (res->state == RESERVATION_ACTIVE)
-			TRACE_TASK(tsk, "ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu\n", res->id, mode, res->mode, res->cur_budget);
+		if (res->state == RESERVATION_ACTIVE) {
+			struct task_struct *t = res->ops->dispatch_client(res, &time_slice);
+			TRACE_TASK(tsk, "CPU%d ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", state->cpu, res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t));
+		}
 	}
 
 	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
@@ -998,9 +1010,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 			return tsk;
 		} else {
 			//if (!is_init_finished(tsk)) {
-			TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
-			if (mode != res->mode)
-				TRACE_CUR("Mode does nto match res mode %d\n", res->mode);
+//			TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
 //			if (num_sync_released != 0 && mode == 0) {
 				//ce = &state->crit_entries[lv];
 				sup_scheduler_update_after(sup_env, res->cur_budget);
@@ -1031,6 +1041,13 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 	enum crit_level lv;
 	lt_t time_slice;
 
+	list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
+		if (res->state == RESERVATION_ACTIVE) {
+			struct task_struct *t = res->ops->dispatch_client(res, &time_slice);
+			TRACE_TASK(tsk, "GLOBAL ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t));
+		}
+	}
+
 	raw_spin_lock(&mode_lock);
 	list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
 		BUG_ON(!res);
@@ -1176,10 +1193,15 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		if (is_realtime(prev))
 			gmp_update_time(_global_env, now);
 		state->scheduled = mc2_global_dispatch(state);
-		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
+		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+		update_cpu_prio(state);
+		raw_spin_unlock(&global_lock);
+	} else {
+		raw_spin_lock(&global_lock);
+		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+		update_cpu_prio(state);
 		raw_spin_unlock(&global_lock);
 	}
-
 /*
 	if (!state->scheduled) {
 		TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock);
@@ -1251,14 +1273,13 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	}
 */
 	post_schedule(state->scheduled, state->cpu);
-
-	//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
+/*
 	raw_spin_lock(&global_lock);
 	_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 	update_cpu_prio(state);
 	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 	raw_spin_unlock(&global_lock);
-
+*/
 	raw_spin_unlock(&state->lock);
 	if (state->scheduled) {
 		TRACE_TASK(state->scheduled, "scheduled.\n");
@@ -1296,7 +1317,6 @@ static void mc2_task_resume(struct task_struct *tsk)
 	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
 	//local_irq_save(flags);
-	TRACE_TASK(tsk, "preemptible?? %d\n", preemptible());
 	preempt_disable();
 	tinfo = get_mc2_state(tsk);
 	if (tinfo->cpu != -1)
@@ -1311,7 +1331,8 @@ static void mc2_task_resume(struct task_struct *tsk)
 	/* Requeue only if self-suspension was already processed. */
 	if (tinfo->has_departed)
 	{
-
+		raw_spin_lock(&state->lock);
+		local_irq_save(flags);
 		/* We don't want to consider jobs before synchronous releases */
 		if (tsk_rt(tsk)->job_params.job_no == 2) {
 /*
@@ -1331,16 +1352,19 @@ static void mc2_task_resume(struct task_struct *tsk)
 */
 			TRACE_TASK(tsk, "INIT_FINISHED is SET\n");
 			tsk_mc2_data(tsk)->init_finished = 1;
-			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-			raw_spin_lock(&global_lock);
-			num_sync_released--;
-			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-			raw_spin_unlock(&global_lock);
-			TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", num_sync_released);
+			atomic_dec(&num_sync_released);
+			//raw_spin_unlock(&global_lock);
+			if (atomic_read(&num_sync_released) == 0) {
+				lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no);
+				TRACE("INIT_PHASE FINISHED. CHANGE TO MODE 1\n");
+				sys_request_mode(1);
+				sched_trace_sys_start(&start);
+			}
+			TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", atomic_read(&num_sync_released));
 		}
 
-		raw_spin_lock(&state->lock);
-		local_irq_save(flags);
+//		raw_spin_lock(&state->lock);
+//		local_irq_save(flags);
 		/* Assumption: litmus_clock() is synchronized across cores,
 		 * since we might not actually be executing on tinfo->cpu
 		 * at the moment. */
@@ -1459,11 +1483,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 	if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
 		cpu_0_task_exist = true;
 	}
-	//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-	raw_spin_lock(&global_lock);
-	num_sync_released++;
-	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-	raw_spin_unlock(&global_lock);
+	atomic_inc(&num_sync_released);
 	local_irq_restore(flags);
 	raw_spin_unlock(&state->lock);
 	//raw_spin_unlock_irqrestore(&state->lock, flags);
@@ -1533,8 +1553,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 		raw_spin_unlock(&mode_lock);
 
 	}
-	num_sync_released++;
-	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
+	atomic_inc(&num_sync_released);
 	raw_spin_unlock(&global_lock);
 	//raw_spin_unlock_irqrestore(&state->lock, flags);
 	local_irq_restore(flags);
@@ -1838,11 +1857,8 @@ static void mc2_task_exit(struct task_struct *tsk)
 		/* NOTE: drops state->lock */
 		TRACE("mc2_exit()\n");
 
-		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-		raw_spin_lock(&global_lock);
-		num_sync_released--;
-		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-		raw_spin_unlock(&global_lock);
+		atomic_dec(&num_sync_released);
+
 		mc2_update_timer_and_unlock(state);
 	} else {
 		raw_spin_unlock(&state->lock);
@@ -1850,17 +1866,16 @@ static void mc2_task_exit(struct task_struct *tsk)
 	}
 
 	if (lv == CRIT_LEVEL_C) {
-		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-		raw_spin_lock(&global_lock);
+		//raw_spin_lock(&global_lock);
 		raw_spin_lock(&mode_lock);
-		for(i = 0; i < NR_MODES; i++){
+		for(i = 1; i < NR_MODES; i++){
 			if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) )
 				continue;
 			mode_sizes[i]--;
 		}
+		mode_sizes[0]--;
 		raw_spin_unlock(&mode_lock);
-		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-		raw_spin_unlock(&global_lock);
+		//raw_spin_unlock(&global_lock);
 
 		for_each_online_cpu(cpu) {
 			state = cpu_state_for(cpu);
@@ -2460,9 +2475,9 @@ static long mc2_deactivate_plugin(void)
 		}
 
 	}
-	num_sync_released = 0;
-	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 	raw_spin_unlock(&global_lock);
+
+	atomic_set(&num_sync_released, 0);
 	destroy_domain_proc_info(&mc2_domain_proc_info);
 	return 0;
 }