author    Namhoon Kim <namhoonk@cs.unc.edu>    2017-04-07 12:23:08 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>    2017-04-07 12:23:08 -0400
commit    3f82a33ca2fd95e3c1165d4dff979d29b77aac34 (patch)
tree      070ffb3abb0dfeb6157c2820c096652241cc980c
parent    ab5ac836a257786daa953edb99435b19024ca971 (diff)
TODO: Fix ctrl_page bug
 -rw-r--r--  include/litmus/mc2_common.h |  1
 -rw-r--r--  litmus/mc2_common.c         |  1
 -rw-r--r--  litmus/reservation.c        |  9
 -rw-r--r--  litmus/sched_mc2.c          | 96
 4 files changed, 96 insertions, 11 deletions
diff --git a/include/litmus/mc2_common.h b/include/litmus/mc2_common.h
index 7524ea79a1e9..118ff129cac9 100644
--- a/include/litmus/mc2_common.h
+++ b/include/litmus/mc2_common.h
@@ -19,6 +19,7 @@ struct mc2_task {
     enum crit_level crit;
     unsigned int res_id;
     uint32_t mode_mask;
+    int init_finished;
 };
 
 #ifdef __KERNEL__
diff --git a/litmus/mc2_common.c b/litmus/mc2_common.c
index cb6127ad57eb..730f99bcab54 100644
--- a/litmus/mc2_common.c
+++ b/litmus/mc2_common.c
@@ -67,6 +67,7 @@ asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param
         goto out_unlock;
     }
 
+    mp->init_finished = 0;
     //target->rt_param.plugin_state = mp;
     target->rt_param.mc2_data = mp;
 
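The two hunks above add the per-task init_finished flag and clear it whenever task parameters are set; later hunks in this patch also clear it in mc2_admit_task(), latch it to 1 in mc2_task_resume() once job_no exceeds 3, and consult it in mc2_dispatch(). The stand-alone sketch below models only that lifecycle; the struct and function names are simplified stand-ins for illustration, not the LITMUS^RT kernel API.

/* Stand-alone model of the init_finished lifecycle introduced by this patch.
 * The type and function names are simplified stand-ins for the kernel code
 * (struct mc2_task, sys_set_mc2_task_param, mc2_task_resume). */
#include <stdio.h>

struct mc2_task_model {
    unsigned int job_no;   /* how many jobs the task has released */
    int init_finished;     /* 0 until the task passes its warm-up jobs */
};

/* Mirrors sys_set_mc2_task_param() / mc2_admit_task(): the flag starts cleared. */
static void set_param_model(struct mc2_task_model *mp)
{
    mp->init_finished = 0;
}

/* Mirrors the new code in mc2_task_resume(): once the job number exceeds 3
 * (previously 5), the task is considered past the synchronous-release
 * warm-up phase and the flag is latched to 1. */
static void resume_model(struct mc2_task_model *mp)
{
    if (mp->job_no > 3)
        mp->init_finished = 1;
}

int main(void)
{
    struct mc2_task_model t;

    set_param_model(&t);
    for (t.job_no = 1; t.job_no <= 6; t.job_no++) {
        resume_model(&t);
        printf("job %u: init_finished = %d\n", t.job_no, t.init_finished);
    }
    return 0;
}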
diff --git a/litmus/reservation.c b/litmus/reservation.c
index d288178b9b5f..0d0c4930b872 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -199,9 +199,9 @@ static void sup_charge_budget(
         /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
         res = list_entry(pos, struct reservation, list);
         if (res->state == RESERVATION_ACTIVE) {
-            TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
+            //TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
             if (encountered_active == 0 && res->blocked_by_ghost == 0) {
-                TRACE("DRAIN !!\n");
+                //TRACE("DRAIN !!\n");
                 res->ops->drain_budget(res, delta);
                 encountered_active = 1;
             }
@@ -215,8 +215,7 @@ static void sup_charge_budget(
         {
             /* make sure scheduler is invoked when this reservation expires
              * its remaining budget */
-            TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n",
-                res->id, res->cur_budget);
+            //TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
             sup_scheduler_update_after(sup_env, res->cur_budget);
         }
         //if (encountered_active == 2)
@@ -625,7 +624,7 @@ int gmp_update_time(
     /* If the time didn't advance, there is nothing to do.
      * This check makes it safe to call sup_advance_time() potentially
      * multiple times (e.g., via different code paths. */
-    TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
+    //TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
     if (unlikely(now <= gmp_env->env.current_time + EPSILON))
         return 0;
 
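The reservation.c hunks above only comment out per-invocation TRACE calls on the budget-charging and time-update paths, which run on every scheduler tick. For context, here is a rough stand-alone model of the branch those traces sit in: drain the first ACTIVE reservation that is not blocked by a ghost job, then request a scheduler update for when its remaining budget runs out. All names are simplified stand-ins, the two visible branches are collapsed into one loop, and the surrounding ACTIVE_IDLE handling is omitted.

/* Rough user-space sketch of the control flow around the silenced TRACE
 * calls in sup_charge_budget(); not the LITMUS^RT reservation API. */
#include <stdio.h>

enum res_state { RES_INACTIVE, RES_ACTIVE, RES_ACTIVE_IDLE };

struct res_model {
    unsigned int id;
    enum res_state state;
    int blocked_by_ghost;
    unsigned long long cur_budget;  /* nanoseconds */
};

static void charge_budget(struct res_model *rs, int n, unsigned long long delta)
{
    int encountered_active = 0;
    int i;

    for (i = 0; i < n; i++) {
        struct res_model *res = &rs[i];

        if (res->state == RES_ACTIVE &&
            !encountered_active && !res->blocked_by_ghost) {
            /* drain_budget() stand-in */
            res->cur_budget = (res->cur_budget > delta) ?
                              res->cur_budget - delta : 0;
            encountered_active = 1;
            /* sup_scheduler_update_after() stand-in */
            printf("R%u: request scheduler update in %llu ns\n",
                   res->id, res->cur_budget);
        }
    }
}

int main(void)
{
    struct res_model rs[] = {
        { 1, RES_ACTIVE_IDLE, 0,  500000 },
        { 2, RES_ACTIVE,      0, 2000000 },
    };

    charge_budget(rs, 2, 750000);
    return 0;
}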
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index b7813ae7d85e..497990090b7a 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -112,6 +112,7 @@ unsigned int res_reported;
 bool cpu_0_spin_flag;
 bool seen_once;
 bool cpu_0_task_exist;
+bool mode_changed;
 #define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum))
 #define pending mode != requested_mode
 #define ready !res_reported
@@ -129,8 +130,10 @@ asmlinkage long sys_enact_mode(void)
     //lt_t now = litmus_clock();
     TRACE_TASK(current, "ENACTING MODE TASK\n");
     if (state->cpu == 0){
+        preempt_disable();
         raw_spin_lock(&global_lock);
         raw_spin_lock(&mode_lock);
+        mode_changed = false;
         if (pending){ //MCR has entered
             if (!seen_once){
                 TRACE_TASK(current, "NOTICED MCR\n");
@@ -139,6 +142,7 @@ asmlinkage long sys_enact_mode(void)
                 //after this jobs report themselves
                 list_for_each(pos, &_global_env->active_reservations){
                     res = list_entry(pos, struct reservation, list);
+
                     if (tsk_rt(res->tsk)->completed){
                         res->reported = 1;
                         res_reported--;
@@ -162,25 +166,48 @@ asmlinkage long sys_enact_mode(void)
                 seen_once = true;
             }
             if( ready ){ //C is throttled
+                lt_t new_mode_basetime = get_release(current);
+
                 TRACE("Timer canceled\n");
                 hrtimer_cancel(&state->timer);//stop listening to old mode timers
                 mode = requested_mode;
                 TRACE("Mode has been changed.\n");
+                mode_changed = true;
                 _global_env = &_global_env_modes[mode];
                 //set res->reported for new global tasks
                 list_for_each(pos, &_global_env->active_reservations){
                     res = list_entry(pos, struct reservation, list);
+                    release_at(res->tsk, new_mode_basetime);
                     res->reported = 0;
                 }
                 list_for_each(pos, &_global_env->depleted_reservations){
                     res = list_entry(pos, struct reservation, list);
+                    release_at(res->tsk, new_mode_basetime);
                     res->reported = 0;
                 }
                 list_for_each(pos, &_global_env->inactive_reservations){
                     res = list_entry(pos, struct reservation, list);
+                    release_at(res->tsk, new_mode_basetime);
                     res->reported = 0;
                 }
                 //gmp_update_time(_global_env, now);
+                raw_spin_lock(&state->lock);
+
+                state->sup_env = &state->sup_env_modes[mode];
+                list_for_each(pos, &state->sup_env->active_reservations){
+                    res = list_entry(pos, struct reservation, list);
+                    release_at(res->tsk, new_mode_basetime);
+                }
+                list_for_each(pos, &state->sup_env->depleted_reservations){
+                    res = list_entry(pos, struct reservation, list);
+                    release_at(res->tsk, new_mode_basetime);
+                }
+                list_for_each(pos, &state->sup_env->inactive_reservations){
+                    res = list_entry(pos, struct reservation, list);
+                    release_at(res->tsk, new_mode_basetime);
+                }
+                raw_spin_unlock(&state->lock);
+
                 sched_trace_enact_mode(current);
             }
 
@@ -188,16 +215,42 @@ asmlinkage long sys_enact_mode(void)
         }
         raw_spin_unlock(&mode_lock);
         raw_spin_unlock(&global_lock);
+        preempt_enable();
         //release other CPUs
+
         cpu_0_spin_flag = !cpu_0_spin_flag;
     }
     else if (cpu_0_task_exist) {
         //spin, wait for CPU 0 to stabilize mode decision
         //before scheduling next hyperperiod
+        TRACE("CPU%d start spinning. %d\n",state->cpu, mode_changed);
         if (state->spin_flag)
             while(cpu_0_spin_flag);
         else
             while(!cpu_0_spin_flag);
+        TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed);
+        if (mode_changed) {
+            lt_t new_mode_basetime = get_release(current);
+            TRACE("CPU%d mode changed\n",state->cpu);
+            hrtimer_cancel(&state->timer); //stop listening to old mode timers
+            //preempt_disable();
+            raw_spin_lock(&state->lock);
+            state->sup_env = &state->sup_env_modes[mode];
+            list_for_each(pos, &state->sup_env->active_reservations){
+                res = list_entry(pos, struct reservation, list);
+                release_at(res->tsk, new_mode_basetime);
+            }
+            list_for_each(pos, &state->sup_env->depleted_reservations){
+                res = list_entry(pos, struct reservation, list);
+                release_at(res->tsk, new_mode_basetime);
+            }
+            list_for_each(pos, &state->sup_env->inactive_reservations){
+                res = list_entry(pos, struct reservation, list);
+                release_at(res->tsk, new_mode_basetime);
+            }
+            raw_spin_unlock(&state->lock);
+            //preempt_enable();
+        }
         state->spin_flag = !state->spin_flag;
     }
     else
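Together, the two sys_enact_mode() hunks above implement a handshake: CPU 0 enacts the mode switch with preemption disabled and under global_lock/mode_lock, records it in the new mode_changed flag, re-releases the global and its own per-CPU reservations at the new mode's base time, and then toggles cpu_0_spin_flag; every other CPU spins on cpu_0_spin_flag and, if mode_changed is set, switches its own sup_env and re-releases its local reservations. A minimal user-space model of just that flag handshake follows; the flag names match the patch, but the threads, atomics, and printf are stand-ins for per-CPU scheduler state and are not the kernel code.

/* Minimal model of the cpu_0_spin_flag / mode_changed handshake in
 * sys_enact_mode(): CPU 0 publishes the mode decision and toggles
 * cpu_0_spin_flag; the other CPUs spin until the toggle, then act on
 * mode_changed. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool cpu_0_spin_flag;
static atomic_bool mode_changed;
static atomic_int  mode;
static int         requested_mode = 1;

static void *cpu0(void *arg)
{
    (void)arg;
    /* pending/ready checks and reservation re-release elided */
    atomic_store(&mode, requested_mode);
    atomic_store(&mode_changed, true);  /* publish before releasing the others */
    atomic_store(&cpu_0_spin_flag, !atomic_load(&cpu_0_spin_flag));
    return NULL;
}

static void *other_cpu(void *arg)
{
    long cpu = (long)arg;
    bool spin_flag = false;  /* models state->spin_flag */

    /* spin until CPU 0 toggles the shared flag */
    if (spin_flag)
        while (atomic_load(&cpu_0_spin_flag))
            ;
    else
        while (!atomic_load(&cpu_0_spin_flag))
            ;

    if (atomic_load(&mode_changed))
        printf("CPU%ld: switch sup_env to mode %d and re-release reservations\n",
               cpu, atomic_load(&mode));
    return NULL;
}

int main(void)
{
    pthread_t t0, t1, t2;

    pthread_create(&t1, NULL, other_cpu, (void *)1L);
    pthread_create(&t2, NULL, other_cpu, (void *)2L);
    pthread_create(&t0, NULL, cpu0, NULL);

    pthread_join(t0, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}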
@@ -224,6 +277,7 @@ asmlinkage long sys_request_mode(int new_mode){
         return 0;
     }
     requested_mode = new_mode;
+    TRACE("MCR received\n");
     res_reported = mode_sizes[mode];
     seen_once = false;
     raw_spin_unlock(&mode_lock);
@@ -260,6 +314,21 @@ static enum crit_level get_task_crit_level(struct task_struct *tsk)
         return mp->crit;
 }
 
+static int is_init_finished(struct task_struct *tsk)
+{
+    struct mc2_task *mp;
+
+    if (!tsk || !is_realtime(tsk))
+        return 0;
+
+    mp = tsk_rt(tsk)->mc2_data;
+
+    if (!mp)
+        return 0;
+    else
+        return mp->init_finished;
+}
+
 /* task_depart - remove a task from its reservation
  * If the job has remaining budget, convert it to a ghost job
  * and update crit_entries[]
@@ -801,11 +870,19 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
             sup_scheduler_update_after(sup_env, res->cur_budget);
             return tsk;
         } else {
-            //ce = &state->crit_entries[lv];
-            sup_scheduler_update_after(sup_env, res->cur_budget);
-            res->blocked_by_ghost = 0;
-            res->is_ghost = NO_CPU;
-            return tsk;
+            TRACE_TASK(tsk, "@@@@@DISPATCH@@@@@@@ init_finished? %s\n", is_init_finished(tsk)?"true":"false");
+            if (!is_init_finished(tsk)) {
+                //ce = &state->crit_entries[lv];
+                sup_scheduler_update_after(sup_env, res->cur_budget);
+                res->blocked_by_ghost = 0;
+                res->is_ghost = NO_CPU;
+                return tsk;
+            } else if (res->mode == mode) {
+                sup_scheduler_update_after(sup_env, res->cur_budget);
+                res->blocked_by_ghost = 0;
+                res->is_ghost = NO_CPU;
+                return tsk;
+            }
         }
     }
 }
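The restructured else-branch above keeps the old unconditional dispatch only while a task is still in its initialization phase; once is_init_finished() reports true, the task is dispatched only when its reservation's mode matches the currently enacted mode, and otherwise the branch falls through without returning it. A compact stand-alone sketch of that decision (simplified stand-in names, not the kernel code):

/* Sketch of the new dispatch gate in mc2_dispatch(): before a task finishes
 * its warm-up jobs it is eligible regardless of mode; afterwards it is
 * eligible only if its reservation's mode matches the current mode. */
#include <stdbool.h>
#include <stdio.h>

struct task_model {
    const char *name;
    int init_finished;  /* set once warm-up jobs are done */
    int res_mode;       /* mode of the task's reservation */
};

static bool may_dispatch(const struct task_model *t, int current_mode)
{
    if (!t->init_finished)
        return true;                     /* still initializing: always eligible */
    return t->res_mode == current_mode;  /* initialized: mode must match */
}

int main(void)
{
    struct task_model a = { "warmup", 0, 1 };
    struct task_model b = { "mode1",  1, 1 };
    struct task_model c = { "mode2",  1, 2 };
    int current_mode = 1;

    printf("%s -> %d\n", a.name, may_dispatch(&a, current_mode));
    printf("%s -> %d\n", b.name, may_dispatch(&b, current_mode));
    printf("%s -> %d\n", c.name, may_dispatch(&c, current_mode));
    return 0;
}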
@@ -1106,7 +1183,7 @@ static void mc2_task_resume(struct task_struct *tsk)
     if (tinfo->has_departed)
     {
         /* We don't want to consider jobs before synchronous releases */
-        if (tsk_rt(tsk)->job_params.job_no > 5) {
+        if (tsk_rt(tsk)->job_params.job_no > 3) {
             switch(get_task_crit_level(tsk)) {
             case CRIT_LEVEL_A:
                 TS_RELEASE_LATENCY_A(get_release(tsk));
@@ -1120,6 +1197,8 @@ static void mc2_task_resume(struct task_struct *tsk)
             default:
                 break;
             }
+            TRACE_CUR("INIT_FINISHED is SET\n");
+            tsk_mc2_data(tsk)->init_finished = 1;
         }
 
         raw_spin_lock(&state->lock);
@@ -1191,6 +1270,7 @@ static long mc2_admit_task(struct task_struct *tsk)
         tinfo->has_departed = true;
         tinfo->mc2_param.res_id = mp->res_id;
         tinfo->mc2_param.mode_mask = mp->mode_mask;
+        tinfo->mc2_param.init_finished = 0;
         TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
 
         TRACE_TASK(tsk, "Mode 0\n");
@@ -1234,7 +1314,9 @@ static long mc2_admit_task(struct task_struct *tsk)
         tsk_rt(tsk)->plugin_state = tinfo;
         tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
     }
+    TRACE_CUR("ctrl_page mode_poll_task %d, cpu = %d, tsk_rt->ctrl_page = %x\n", is_mode_poll_task(tsk), tinfo->cpu, tsk_rt(tsk)->ctrl_page);
     if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
+        TRACE_CUR("CPU0_TASK_EXIST set\n");
         cpu_0_task_exist = true;
     }
 
@@ -1253,6 +1335,8 @@ static long mc2_admit_task(struct task_struct *tsk)
         tinfo->has_departed = true;
         tinfo->mc2_param.res_id = mp->res_id;
         tinfo->mc2_param.mode_mask = mp->mode_mask;
+        tinfo->mc2_param.init_finished = 0;
+
         TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
 
         TRACE_TASK(tsk, "Mode 0\n");