path: root/litmus/sched_mc2.c
author		Namhoon Kim <namhoonk@cs.unc.edu>	2017-04-11 00:17:56 -0400
committer	Namhoon Kim <namhoonk@cs.unc.edu>	2017-04-11 00:17:56 -0400
commit		84fe6787c0a251593d1ab5f311a203d114f9bf94 (patch)
tree		dbb4e429824b064f765853d7ac598f61f6000afc	/litmus/sched_mc2.c
parent		9309774d024934b71816efa41171f439b007f983 (diff)
Fix mode 0 overutilization bug
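
The reporting loops in sys_enact_mode() could count a completed
reservation more than once while a mode-change request was pending,
which plausibly explains the observed mode 0 overutilization:
res_reported drifted out of sync with the set of jobs that still had
to report. Each loop now skips reservations already flagged. A
condensed sketch of the guard on the active list, taken from this
patch (the depleted and inactive lists get the same test; tracing
omitted):

    list_for_each(pos, &_global_env->active_reservations) {
        res = list_entry(pos, struct reservation, list);
        /* count each completed reservation at most once per mode change */
        if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported) {
            res->reported = 1;
            res_reported--;
        }
    }

The patch also takes state->lock, global_lock, and mode_lock only once
a request is actually pending (in that order, released in reverse),
advances local budget accounting with sup_update_time() right after
switching sup_env, and balances the synchronous-release count by
decrementing num_sync_released when a task finishes initialization.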
Diffstat (limited to 'litmus/sched_mc2.c')
-rw-r--r--	litmus/sched_mc2.c	| 85
1 file changed, 48 insertions(+), 37 deletions(-)
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index ec8a92440f2b..654380d0f104 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -38,6 +38,7 @@
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
+extern int num_sync_released;
 extern void do_partition(enum crit_level lv, int cpu);
 
 /* _global_env - reservation container for level-C tasks*/
@@ -130,24 +131,25 @@ asmlinkage long sys_enact_mode(void)
 	struct list_head *pos;
 	unsigned long flags;
 	//lt_t now = litmus_clock();
-	TRACE_TASK(current, "ENACTING MODE TASK\n");
+	TRACE_TASK(current, "ENACTING SYSCALL\n");
 	if (state->cpu == 0){
 		//preempt_disable();
 		mode_changed = false;
 		local_irq_save(flags);
-
-		raw_spin_lock(&global_lock);
-		raw_spin_lock(&mode_lock);
 		if (pending){ //MCR has entered
+			raw_spin_lock(&state->lock);
+			raw_spin_lock(&global_lock);
+			raw_spin_lock(&mode_lock);
+
 			if (!seen_once){
-				TRACE_TASK(current, "REQUEST = %llu\n", litmus_clock());
+				TRACE_TASK(current, "REQUEST\n");
 				sched_trace_request_mode(current);
-				TS_MODE_CHANGE_START;
+				//TS_MODE_CHANGE_START;
 				//clean up jobs that are already done
 				//after this jobs report themselves
 				list_for_each(pos, &_global_env->active_reservations){
 					res = list_entry(pos, struct reservation, list);
-					if (tsk_rt(res->tsk)->completed && res->mode == mode){
+					if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
 						res->reported = 1;
 						TRACE_CUR("R%d RES_REPORTED_ACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
 						res_reported--;
@@ -155,7 +157,7 @@ asmlinkage long sys_enact_mode(void)
 				}
 				list_for_each(pos, &_global_env->depleted_reservations){
 					res = list_entry(pos, struct reservation, list);
-					if (tsk_rt(res->tsk)->completed && res->mode == mode){
+					if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
 						res->reported = 1;
 						TRACE_CUR("R%d RES_REPORTED_DEPLETED = %d mode %d\n",res->id, res_reported, res->mode);
 						res_reported--;
@@ -164,7 +166,7 @@ asmlinkage long sys_enact_mode(void)
 				}
 				list_for_each(pos, &_global_env->inactive_reservations){
 					res = list_entry(pos, struct reservation, list);
-					if (tsk_rt(res->tsk)->completed && res->mode == mode){
+					if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
 						res->reported = 1;
 						//TRACE_CUR("R%d RES_REPORTED_INACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
 						res_reported--;
@@ -174,7 +176,6 @@ asmlinkage long sys_enact_mode(void)
 			}
 			if( ready ){ //C is throttled
 				lt_t new_mode_basetime = get_release(current);
-				lt_t t;
 				//TRACE("Timer canceled\n");
 				hrtimer_cancel(&state->timer);//stop listening to old mode timers
 				mode = requested_mode;
@@ -213,17 +214,17 @@ asmlinkage long sys_enact_mode(void)
 					res = list_entry(pos, struct reservation, list);
 					release_at(res->tsk, new_mode_basetime);
 				}
+				sup_update_time(state->sup_env, litmus_clock());
 				//raw_spin_unlock(&state->lock);
-				t=litmus_clock();
+				//t=litmus_clock();
 				sched_trace_enact_mode(current);
-				TS_MODE_CHANGE_END;
-				TRACE(KERN_ALERT "ENACT = %llu\n", t);
+				//TS_MODE_CHANGE_END;
+				TRACE("ENACT\n");
 			}
-
-
+			raw_spin_unlock(&mode_lock);
+			raw_spin_unlock(&global_lock);
+			raw_spin_unlock(&state->lock);
 		}
-		raw_spin_unlock(&mode_lock);
-		raw_spin_unlock(&global_lock);
 		local_irq_restore(flags);
 		cpu_0_spin_flag = !cpu_0_spin_flag;
 	}
@@ -243,13 +244,13 @@ asmlinkage long sys_enact_mode(void)
 			udelay(1);
 		}
 		//TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed);
+		local_irq_save(flags);
 		if (mode_changed) {
 			lt_t new_mode_basetime = get_release(current);
 			//TRACE("CPU%d mode changed\n",state->cpu);
 			hrtimer_cancel(&state->timer); //stop listening to old mode timers
 			//preempt_disable();
-			local_irq_save(flags);
-
+			//local_irq_save(flags);
 			raw_spin_lock(&state->lock);
 			state->sup_env = &state->sup_env_modes[mode];
 			list_for_each(pos, &state->sup_env->active_reservations){
@@ -264,22 +265,23 @@ asmlinkage long sys_enact_mode(void)
 				res = list_entry(pos, struct reservation, list);
 				release_at(res->tsk, new_mode_basetime);
 			}
+			sup_update_time(state->sup_env, litmus_clock());
 			raw_spin_unlock(&state->lock);
-			local_irq_restore(flags);
+			//local_irq_restore(flags);
 
 			//preempt_enable();
 		}
+		local_irq_restore(flags);
 		state->spin_flag = !state->spin_flag;
 	}
 	else {
 		//TRACE("CPU%d no cpu_0_task_exist.%d\n",state->cpu, mode_changed);
-		local_irq_restore(flags);
 		return 0;
 	}
-	TRACE("CPU%d enact syscall ends m_c? %d\n",state->cpu, mode_changed);
+	TRACE("CPU%d enact syscall ends m_c? %d new_mode %d\n",state->cpu, mode_changed, mode);
 	//if mode didn't change this has no effect on what's being scheduled
 	//raw_spin_lock(&state->lock);
-	state->sup_env = &state->sup_env_modes[mode];
+	//state->sup_env = &state->sup_env_modes[mode];
 	//raw_spin_unlock(&state->lock);
 	//sup_update_time(state->sup_env, litmus_clock());
 
@@ -753,8 +755,9 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 static long mc2_complete_job(void)
 {
 	ktime_t next_release;
+	lt_t next_release_ns;
 	long err;
 
 	enum crit_level lv;
 
 	raw_spin_lock(&mode_lock);
@@ -767,12 +770,10 @@ static long mc2_complete_job(void)
 	   time to the next release time */
 	if (tsk_rt(current)->sporadic_release) {
 		struct mc2_cpu_state *state;
-		//struct reservation_environment *env;
 		struct mc2_task_state *tinfo;
 		struct reservation *res = NULL;
 		unsigned long flags;
 
-//		preempt_disable();
 		local_irq_save(flags);
 
 		tinfo = get_mc2_state(current);
@@ -804,7 +805,7 @@ static long mc2_complete_job(void)
 		else
 			BUG();
 
-		/* set next_replenishtime to synchronous release time */
+		/* set next_replenish to synchronous release time */
 		BUG_ON(!res);
 		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
 /*
@@ -821,15 +822,11 @@ static long mc2_complete_job(void)
 
 		// TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
 
-		//if (lv < CRIT_LEVEL_C)
-//			raw_spin_unlock(&state->lock);
-		//else
 		if (lv == CRIT_LEVEL_C)
 			raw_spin_unlock(&global_lock);
 
 		raw_spin_unlock(&state->lock);
 		local_irq_restore(flags);
-//		preempt_enable();
 	}
 
 	sched_trace_task_completion(current, 0);
@@ -874,6 +871,7 @@ static long mc2_complete_job(void)
 		/* sleep until next_release */
 		set_current_state(TASK_INTERRUPTIBLE);
 		preempt_enable_no_resched();
+		TRACE_CUR("Sleep until %llu\n", next_release);
 		err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
 	} else {
 		/* release the next job immediately */
@@ -901,6 +899,11 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 	lt_t time_slice;
 
 	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
+		if (res->state == RESERVATION_ACTIVE)
+			TRACE_TASK(tsk, "ACT_LIST R%d mode = %d budget = %llu\n", res->id, res->mode, res->cur_budget);
+	}
+
+	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
 		if (res->state == RESERVATION_ACTIVE) {
 			tsk = res->ops->dispatch_client(res, &time_slice);
 			if (likely(tsk)) {
@@ -1229,7 +1232,8 @@ static void mc2_task_resume(struct task_struct *tsk)
 	if (tinfo->has_departed)
 	{
 		/* We don't want to consider jobs before synchronous releases */
-		if (tsk_rt(tsk)->job_params.job_no > 3) {
+		if (tsk_rt(tsk)->job_params.job_no == 2) {
+/*
 			switch(get_task_crit_level(tsk)) {
 			case CRIT_LEVEL_A:
 				TS_RELEASE_LATENCY_A(get_release(tsk));
@@ -1243,8 +1247,12 @@ static void mc2_task_resume(struct task_struct *tsk)
 			default:
 				break;
 			}
+*/
 			// TRACE_CUR("INIT_FINISHED is SET\n");
 			tsk_mc2_data(tsk)->init_finished = 1;
+			raw_spin_lock(&global_lock);
+			num_sync_released--;
+			raw_spin_unlock(&global_lock);
 		}
 
 		raw_spin_lock(&state->lock);
@@ -1434,7 +1442,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 	if (err)
 		kfree(tinfo);
 
-	//TRACE_TASK(tsk, "MC2 task admitted %d\n", err);
+	TRACE_TASK(tsk, "MC2 task admitted %d\n", err);
 	return err;
 }
 
@@ -1489,8 +1497,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	//res = res_find_by_id(state, tinfo->mc2_param.res_id);
 	BUG_ON(!res);
 
-	release = res->next_replenishment;
-
 	if (on_runqueue || is_running) {
 		/* Assumption: litmus_clock() is synchronized across cores
 		 * [see comment in pres_task_resume()] */
@@ -1517,14 +1523,19 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 		raw_spin_unlock(&state->lock);
 		//raw_spin_unlock(&_global_env.lock);
 	}
+	release = res->next_replenishment;
 	local_irq_restore(flags);
 
 	if (!release) {
-		TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
+		/*TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
+		release = res->next_replenishment;
+		TRACE_TASK(tsk, "mc2_task_new() next_release SET! = %llu\n", release);
 		release_at(tsk, release);
+		*/
+		BUG();
 	}
 	else
-		TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
+		TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
 }
 
 /* mc2_reservation_destroy - reservation_destroy system call backend