author    Namhoon Kim <namhoonk@cs.unc.edu>    2017-04-14 22:12:18 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>    2017-04-14 22:12:18 -0400
commit    9a6b47139fbbdc9a4d2c4c943517c6c2568da9f1 (patch)
tree      0e1cc138167de0aa47c7ce7a1b015da71431d89b
parent    41eb3cb41a8d703927ccc336a9b41345155bbb8d (diff)
4/14/2017 end of the day
-rw-r--r--  litmus/sched_mc2.c | 34
1 file changed, 22 insertions, 12 deletions
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index ced00ed2ba37..16c5c4af0e13 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -116,6 +116,9 @@ bool cpu_0_spin_flag;
 bool seen_once;
 bool cpu_0_task_exist;
 bool mode_changed;
+static DEFINE_PER_CPU(unsigned long, mode_counter);
+#define local_mode_counter() (this_cpu_ptr(&mode_counter))
+#define cpu_0_mode_counter() (&per_cpu(mode_counter, 0))
 #define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum))
 #define pending mode != requested_mode
 #define ready !res_reported
@@ -228,25 +231,29 @@ asmlinkage long sys_enact_mode(void)
 			//raw_spin_unlock(&state->lock);
 			//mc2_update_timer_and_unlock(state);
 		}
+		this_cpu_inc(mode_counter);
 		local_irq_restore(flags);
-		cpu_0_spin_flag = !cpu_0_spin_flag;
+		//cpu_0_spin_flag = !cpu_0_spin_flag;
 	}
 	else if (cpu_0_task_exist) {
 		//spin, wait for CPU 0 to stabilize mode decision
 		//before scheduling next hyperperiod
 		//TRACE("CPU%d start spinning. %d\n",state->cpu, mode_changed);
+		unsigned long *cpu0_counter = cpu_0_mode_counter();
+		/*
 		if (state->spin_flag) {
-			//TRACE_CUR("state->spin_flag %d\n",state->spin_flag);
 			while(cpu_0_spin_flag)
-				udelay(1); //TRACE_CUR("state->spin_flag %d cpu_0_spin_flag %d\n",state->spin_flag, cpu_0_spin_flag);
+				udelay(1);
 		}
 		else {
-			//TRACE_CUR("state->spin_flag %d\n",state->spin_flag);
 			while(!cpu_0_spin_flag)
-				//TRACE_CUR("state->spin_flag %d cpu_0_spin_flag %d\n",state->spin_flag, cpu_0_spin_flag);
 				udelay(1);
 		}
-		TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed);
+		*/
+		//TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed);
+		while (*cpu0_counter == this_cpu_read(mode_counter))
+			udelay(1);
+		TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter));
 		local_irq_save(flags);
 		if (mode_changed) {
 			lt_t new_mode_basetime = get_release(current);
@@ -277,8 +284,9 @@ asmlinkage long sys_enact_mode(void)
 
 			//preempt_enable();
 		}
+		this_cpu_write(mode_counter, *cpu0_counter);
 		local_irq_restore(flags);
-		state->spin_flag = !state->spin_flag;
+		//state->spin_flag = !state->spin_flag;
 	}
 	else {
 		//TRACE("CPU%d no cpu_0_task_exist.%d\n",state->cpu, mode_changed);
@@ -908,12 +916,12 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 	enum crit_level lv;
 	lt_t time_slice;
 
-	/*
+
 	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
 		if (res->state == RESERVATION_ACTIVE)
 			TRACE_TASK(tsk, "ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu\n", res->id, mode, res->mode, res->cur_budget);
 	}
-	*/
+
 	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
 		if (res->state == RESERVATION_ACTIVE) {
 			tsk = res->ops->dispatch_client(res, &time_slice);
@@ -924,7 +932,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 				return tsk;
 			} else {
 			//if (!is_init_finished(tsk)) {
-				//TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
+				TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
 				if (num_sync_released != 0 && mode == 0) {
 					//ce = &state->crit_entries[lv];
 					sup_scheduler_update_after(sup_env, res->cur_budget);
@@ -1263,12 +1271,12 @@ static void mc2_task_resume(struct task_struct *tsk)
 			break;
 		}
 		*/
-		TRACE_CUR("INIT_FINISHED is SET\n");
+		TRACE_TASK(tsk, "INIT_FINISHED is SET\n");
 		tsk_mc2_data(tsk)->init_finished = 1;
 		raw_spin_lock(&global_lock);
 		num_sync_released--;
 		raw_spin_unlock(&global_lock);
-		TRACE_CUR("INIT_FINISHED is SET, num_sync_released decreased to %d\n", num_sync_released);
+		TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", num_sync_released);
 	}
 
 	raw_spin_lock(&state->lock);
@@ -2212,6 +2220,8 @@ static long mc2_activate_plugin(void)
 
 		resched_cpu[cpu] = 0;
 		//level_a_priorities[cpu] = 0;
+		this_cpu_write(mode_counter, 0);
+
 		state = cpu_state_for(cpu);
 		ce = &_lowest_prio_cpu.cpu_entries[cpu];
 
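Note on the change above: the patch retires the cpu_0_spin_flag / state->spin_flag toggle in sys_enact_mode and synchronizes the mode change through a per-CPU mode_counter instead. CPU 0 increments its counter once it has enacted the new mode; every other CPU spins until CPU 0's counter differs from its own last-seen value and then copies it. The userspace sketch below only illustrates that generation-counter handshake, assuming one thread per CPU; the names NCPU, worker, cpu0_enact_mode, and cpuN_enact_mode are invented for the sketch, and the kernel code itself uses DEFINE_PER_CPU, this_cpu_inc()/this_cpu_read()/this_cpu_write(), and udelay() with interrupts and locks handled around the wait.

/* Standalone userspace sketch (NOT the kernel code) of the mode_counter
 * handshake: CPU 0 publishes a new mode generation by bumping its counter,
 * and the other CPUs spin until they observe a generation they have not
 * seen yet, then record it. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPU 4                            /* illustrative CPU count */

/* Stand-in for DEFINE_PER_CPU(unsigned long, mode_counter). */
static atomic_ulong mode_counter[NCPU];

/* CPU 0: after enacting the mode change, announce it
 * (this_cpu_inc(mode_counter) in the patch). */
static void cpu0_enact_mode(void)
{
	atomic_fetch_add(&mode_counter[0], 1);
}

/* Other CPUs: wait until CPU 0's counter differs from our last-seen value,
 * then adopt it (the while/udelay loop plus this_cpu_write in the patch). */
static void cpuN_enact_mode(int cpu)
{
	while (atomic_load(&mode_counter[0]) == atomic_load(&mode_counter[cpu]))
		usleep(1);                /* udelay(1) in the kernel */
	atomic_store(&mode_counter[cpu], atomic_load(&mode_counter[0]));
}

static void *worker(void *arg)
{
	int cpu = (int)(long)arg;

	if (cpu == 0)
		cpu0_enact_mode();
	else
		cpuN_enact_mode(cpu);

	printf("cpu %d saw mode generation %lu\n", cpu,
	       atomic_load(&mode_counter[cpu]));
	return NULL;
}

int main(void)
{
	pthread_t t[NCPU];
	long i;

	for (i = 0; i < NCPU; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (i = 0; i < NCPU; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Compared with toggling a boolean, a per-CPU counter lets a late CPU resynchronize even if CPU 0 publishes further mode changes in the meantime, because each waiter adopts CPU 0's current value rather than flipping its own flag.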