author     Namhoon Kim <namhoonk@cs.unc.edu>  2017-04-06 22:21:08 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>  2017-04-06 22:21:08 -0400
commit     ced743383a62769c9cb267a929d1841b73fae6e7 (patch)
tree       d872408363571696f743c99fbbc62c5a9dce86e9
parent     225f5556e1d48163a5b06f1ded4b9811d573e63b (diff)
sys_enact_mode fix
-rw-r--r--  litmus/sched_mc2.c | 104 ++++++++++++++++++++++++++++---------------
1 file changed, 74 insertions(+), 30 deletions(-)
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 0a42497ef9ff..b2259d0310e1 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -112,6 +112,7 @@ unsigned int mode_sizes[NR_MODES];
 unsigned int res_reported;
 bool cpu_0_spin_flag;
 bool seen_once;
+bool cpu_0_task_exist;
 #define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum))
 #define pending mode != requested_mode
 #define ready !res_reported
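The new cpu_0_task_exist flag tracks whether the CPU-0 mode-poll task has been admitted: later hunks set it in mc2_admit_task(), clear it in mc2_task_exit() and mc2_activate_plugin(), and consult it in sys_enact_mode() before spinning.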
@@ -165,6 +166,7 @@ asmlinkage long sys_enact_mode(void)
         TRACE("Timer canceled\n");
         hrtimer_cancel(&state->timer); //stop listening to old mode timers
         mode = requested_mode;
+        TRACE("Mode has been changed.\n");
         _global_env = &_global_env_modes[mode];
         //set res->reported for new global tasks
         list_for_each(pos, &_global_env->active_reservations){
@@ -190,7 +192,7 @@ asmlinkage long sys_enact_mode(void)
         //release other CPUs
         cpu_0_spin_flag = !cpu_0_spin_flag;
     }
-    else{
+    else if (cpu_0_task_exist) {
         //spin, wait for CPU 0 to stabilize mode decision
         //before scheduling next hyperperiod
         if (state->spin_flag)
@@ -199,6 +201,8 @@ asmlinkage long sys_enact_mode(void)
             while(!cpu_0_spin_flag);
         state->spin_flag = !state->spin_flag;
     }
+    else
+        return 0;
     //if mode didn't change this has no effect on what's being scheduled
     state->sup_env = &state->sup_env_modes[mode];
     //sup_update_time(state->sup_env, now);
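Taken together, the three sys_enact_mode() hunks above rework the mode-change barrier: CPUs other than CPU 0 spin on cpu_0_spin_flag only while the CPU-0 mode-poll task actually exists, and bail out otherwise. A condensed sketch of the resulting control flow; the CPU-0 branch body and the first spin loop fall between the hunks, so their exact wording is paraphrased rather than quoted:

    if (state->cpu == 0) {
        /* ... enact the requested mode, cancel old timers ... */
        //release other CPUs
        cpu_0_spin_flag = !cpu_0_spin_flag;
    }
    else if (cpu_0_task_exist) {
        /* Spin until CPU 0 has stabilized the mode decision. The flag
         * alternates every round, so each CPU remembers its phase in
         * state->spin_flag. */
        if (state->spin_flag)
            while (cpu_0_spin_flag);
        else
            while (!cpu_0_spin_flag);
        state->spin_flag = !state->spin_flag;
    }
    else
        /* No mode-poll task on CPU 0 means nobody will ever flip the
         * flag; spinning would hang this CPU, so give up immediately. */
        return 0;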
@@ -282,14 +286,16 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 
         if (job_complete)
             res->cur_budget = 0;
 
         res->ops->client_departs(res, client, job_complete);
     }
+
     /* 9/18/2015 fix start - no ghost job handling, empty remaining budget */
+    /*
     if (job_complete) {
         //res->cur_budget = 0;
-        sched_trace_task_completion(tsk, 0);
     }
+    */
     /* fix end */
 
     tinfo->has_departed = true;
@@ -307,8 +313,6 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
     enum crit_level lv = get_task_crit_level(tsk);
     int i;
 
-    tinfo->has_departed = false;
-
     switch(lv) {
     case CRIT_LEVEL_A:
     case CRIT_LEVEL_B:
@@ -321,6 +325,8 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
         break;
     }
 
+    tinfo->has_departed = false;
+
     TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());
 
     for(i = 0; i < NR_MODES; i++){
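The two task_arrives() hunks are a reordering: tinfo->has_departed = false used to run at the top of the function and now runs only after the per-criticality bookkeeping, so the task does not become visible as present until its level-specific state is consistent. The resulting order of operations, condensed from the hunks (case bodies elided):

    switch (lv) {
    case CRIT_LEVEL_A:
    case CRIT_LEVEL_B:
        /* ... partitioned (per-CPU) arrival bookkeeping ... */
        break;
    default:
        break;
    }

    /* Moved here by this patch: mark the task present only once the
     * bookkeeping above has completed. */
    tinfo->has_departed = false;

    TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());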
@@ -331,7 +337,7 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 
         res->ops->client_arrives(res, client);
     }
-
+
     switch(lv) {
     case CRIT_LEVEL_A:
     case CRIT_LEVEL_B:
@@ -342,7 +348,7 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
         break;
     default:
         break;
     }
 }
-
+
 /* get_lowest_prio_cpu - return the lowest priority cpu
@@ -882,11 +888,16 @@ static inline void post_schedule(struct task_struct *next, int cpu)
         return;
 
     lev = get_task_crit_level(next);
+    if (is_mode_poll_task(next)) {
+        lev = MODE_POLL_TASK;
+    }
+
     do_partition(lev, cpu);
 
     switch(lev) {
     case CRIT_LEVEL_A:
     case CRIT_LEVEL_B:
+    case MODE_POLL_TASK:
         TS_SCHED_A_END(next);
         break;
     case CRIT_LEVEL_C:
@@ -904,35 +915,33 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 {
     int np, blocks, exists, preempt, to_schedule;
     /* next == NULL means "schedule background work". */
-    lt_t now;
+    lt_t now = litmus_clock();
     struct mc2_cpu_state *state = local_cpu_state();
 
+    raw_spin_lock(&state->lock);
+
+    raw_spin_lock(&global_lock);
+    preempt = resched_cpu[state->cpu];
+    resched_cpu[state->cpu] = 0;
+    raw_spin_unlock(&global_lock);
+
     pre_schedule(prev, state->cpu);
 
-    raw_spin_lock(&state->lock);
+    BUG_ON(state->scheduled && state->scheduled != prev);
+    BUG_ON(state->scheduled && !is_realtime(prev));
 
-    //BUG_ON(state->scheduled && state->scheduled != prev);
-    //BUG_ON(state->scheduled && !is_realtime(prev));
-    if (state->scheduled && state->scheduled != prev)
-        printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
-    if (state->scheduled && !is_realtime(prev))
-        printk(KERN_ALERT "BUG2!!!!!!!! \n");
+    //if (state->scheduled && state->scheduled != prev)
+    //    printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
+    //if (state->scheduled && !is_realtime(prev))
+    //    printk(KERN_ALERT "BUG2!!!!!!!! \n");
 
     /* (0) Determine state */
     exists = state->scheduled != NULL;
     blocks = exists && !is_current_running();
     np = exists && is_np(state->scheduled);
-
-    raw_spin_lock(&global_lock);
-    preempt = resched_cpu[state->cpu];
-    resched_cpu[state->cpu] = 0;
-    raw_spin_unlock(&global_lock);
 
     /* update time */
     state->sup_env->will_schedule = true;
-
-    now = litmus_clock();
-
     sup_update_time(state->sup_env, now);
     /* 9/20/2015 fix */
     //raw_spin_lock(&_global_env.lock);
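This is the core of the mc2_schedule() rework: state->lock is now taken before pre_schedule() rather than after it, the per-CPU resched_cpu flag is sampled and cleared under global_lock at the very top, now is initialized once at its declaration instead of mid-function, and the two printk debug checks are promoted back to hard BUG_ON() assertions. A condensed sketch of the resulting entry sequence:

    lt_t now = litmus_clock();          /* sampled once, up front */
    struct mc2_cpu_state *state = local_cpu_state();

    raw_spin_lock(&state->lock);        /* now held across pre_schedule() */

    raw_spin_lock(&global_lock);        /* atomically consume any remote */
    preempt = resched_cpu[state->cpu];  /* reschedule request */
    resched_cpu[state->cpu] = 0;
    raw_spin_unlock(&global_lock);

    pre_schedule(prev, state->cpu);

    /* Re-enabled invariants, previously demoted to printk warnings. */
    BUG_ON(state->scheduled && state->scheduled != prev);
    BUG_ON(state->scheduled && !is_realtime(prev));

    exists = state->scheduled != NULL;
    blocks = exists && !is_current_running();
    np = exists && is_np(state->scheduled);

    state->sup_env->will_schedule = true;
    sup_update_time(state->sup_env, now);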
@@ -966,6 +975,13 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 
     if (!state->scheduled) {
         raw_spin_lock(&global_lock);
+        state->scheduled = mc2_global_dispatch(state);
+        raw_spin_unlock(&global_lock);
+    }
+
+    /*
+    if (!state->scheduled) {
+        raw_spin_lock(&global_lock);
         //to_schedule = gmp_update_time(_global_env, now);
         state->scheduled = mc2_global_dispatch(state);
         _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
@@ -977,6 +993,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
         update_cpu_prio(state);
         raw_spin_unlock(&global_lock);
     }
+    */
 
     //raw_spin_lock(&_lowest_prio_cpu.lock);
     //_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
@@ -998,7 +1015,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
         struct reservation* res = tinfo->res_info[mode].client.reservation;
         TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
         TRACE_TASK(prev, "PREEPT_COUNT %d\n", preempt_count());
-        res->scheduled_on = NO_CPU;
+        if (res)
+            res->scheduled_on = NO_CPU;
         TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock());
         /* if prev is preempted and a global task, find the lowest cpu and reschedule */
         if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
@@ -1015,6 +1033,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
             raw_spin_unlock(&global_lock);
         }
     }
+
 
     if (to_schedule != 0) {
         raw_spin_lock(&global_lock);
@@ -1028,12 +1047,18 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
         raw_spin_unlock(&global_lock);
     }
 
+    post_schedule(state->scheduled, state->cpu);
+
+    raw_spin_lock(&global_lock);
+    _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+    update_cpu_prio(state);
+    raw_spin_unlock(&global_lock);
+
     raw_spin_unlock(&state->lock);
-    if (state->scheduled) {
+    /* if (state->scheduled) {
         TRACE_TASK(state->scheduled, "scheduled.\n");
     }
-
-    post_schedule(state->scheduled, state->cpu);
+    */
 
     return state->scheduled;
 }
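The tail of mc2_schedule() changes in the same spirit: post_schedule() now runs while state->lock is still held (it used to run after the unlock), and the CPU's entry in _lowest_prio_cpu is refreshed under global_lock on every scheduling pass rather than only on the global-dispatch path, which the earlier hunk commented out. The resulting exit sequence, condensed:

    post_schedule(state->scheduled, state->cpu);    /* moved under state->lock */

    raw_spin_lock(&global_lock);
    _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
    update_cpu_prio(state);             /* publish this CPU's new priority */
    raw_spin_unlock(&global_lock);

    raw_spin_unlock(&state->lock);
    return state->scheduled;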
@@ -1191,7 +1216,8 @@ static long mc2_admit_task(struct task_struct *tsk)
             tsk_rt(tsk)->plugin_state = tinfo;
             tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
         }
-
+        if (is_mode_poll_task(tsk) && tinfo->cpu == 0)
+            cpu_0_task_exist = true;
 
         raw_spin_unlock_irqrestore(&state->lock, flags);
     } else if (lv == CRIT_LEVEL_C) {
@@ -1294,7 +1320,16 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
         res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id);
     }
 
-    BUG_ON(!res);
+    //BUG_ON(!res);
+    // the current mode doesn't have this task.
+    // do not update timer and set the next release time.
+    if (!res) {
+        task_departs(tsk, 0);
+        if (lv == CRIT_LEVEL_C)
+            raw_spin_unlock(&global_lock);
+        raw_spin_unlock(&state->lock);
+        return;
+    }
 
     //res = res_find_by_id(state, tinfo->mc2_param.res_id);
     release = res->next_replenishment;
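Here the hard BUG_ON(!res) becomes graceful handling: a newly created task need not have a reservation in the currently active mode (its mode_mask may exclude that mode, per the in_mode() macro added earlier), so instead of crashing, the task is parked via task_departs(tsk, 0) and the function backs out of whichever locks it holds, skipping the release-time setup that follows.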
@@ -1492,6 +1527,9 @@ static void mc2_task_exit(struct task_struct *tsk)
     /* update both global and partitioned */
     if (lv < CRIT_LEVEL_C) {
         sup_update_time(state->sup_env, litmus_clock());
+        raw_spin_lock(&global_lock);
+        gmp_update_time(_global_env, litmus_clock());
+        raw_spin_unlock(&global_lock);
     }
     else if (lv == CRIT_LEVEL_C) {
         raw_spin_lock(&global_lock);
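Exiting level-A/B tasks previously advanced only the per-CPU sup_env clock; with this hunk they also advance the global budget-tracking clock under global_lock, so the level-C environment's notion of time does not go stale when only partitioned tasks exit.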
@@ -1542,6 +1580,11 @@ static void mc2_task_exit(struct task_struct *tsk)
     }
 
     local_irq_restore(flags);
+
+    if (is_mode_poll_task(tsk) && (tinfo->cpu == 0)) {
+        cpu_0_spin_flag = !cpu_0_spin_flag; // release other cpu before exit.
+        cpu_0_task_exist = false;
+    }
 
     kfree(tsk_rt(tsk)->plugin_state);
     tsk_rt(tsk)->plugin_state = NULL;
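Because other CPUs may be parked in sys_enact_mode() waiting on cpu_0_spin_flag, the exiting CPU-0 mode-poll task flips the flag one last time to release them, then clears cpu_0_task_exist so that later callers take the new early-return path instead of spinning on a flag nobody will toggle again.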
@@ -1919,6 +1962,7 @@ static long mc2_activate_plugin(void)
     }
     res_reported = 0;
     cpu_0_spin_flag = false;
+    cpu_0_task_exist = false;
 
     return 0;
 }