author    Namhoon Kim <namhoonk@cs.unc.edu>  2017-04-04 20:29:39 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>  2017-04-04 20:29:39 -0400
commit    81362ac2173b72b74b068e169e3055c090734048 (patch)
tree      f238065a9ed6bf122ca4868e4b1e2960c2e63ae0
parent    55e43094e3f27bfe2df817359175e7b3111f4dd7 (diff)

Fixes some bugs

-rw-r--r--	litmus/bank_proc.c	4
-rw-r--r--	litmus/litmus.c	3
-rw-r--r--	litmus/reservation.c	2
-rw-r--r--	litmus/sched_mc2.c	60
4 files changed, 39 insertions(+), 30 deletions(-)
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index 6103611211ce..8ac7e3208a8f 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -230,8 +230,8 @@ static int do_add_pages(void)
 
 	// until all the page lists contain enough pages
 	//for (i =0; i<5; i++) {
-	for (i=0; i< 1024*100;i++) {
-	//while (smallest_nr_pages() < PAGES_PER_COLOR) {
+	//for (i=0; i< 1024*100;i++) {
+	while (smallest_nr_pages() < PAGES_PER_COLOR) {
 	//	printk("smallest = %d\n", smallest_nr_pages());
 		page = alloc_page(GFP_HIGHUSER_MOVABLE);
 	//	page = alloc_pages_exact_node(0, GFP_HIGHUSER_MOVABLE, 0);
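The substantive change above swaps a fixed-iteration refill loop for one that terminates only when the emptiest per-color page pool holds PAGES_PER_COLOR pages. A minimal userspace sketch of that termination condition follows; NR_COLORS, the pool array, and alloc_one_page() are invented stand-ins for the kernel-side state that bank_proc.c maintains around alloc_page():

#include <stdio.h>

/* Hypothetical stand-ins; the real bank_proc.c keeps one free-page
 * list per (bank, color) partition. */
#define NR_COLORS       16
#define PAGES_PER_COLOR 1024

static int nr_pages_in_color[NR_COLORS];

/* Mirrors smallest_nr_pages(): fill level of the emptiest pool. */
static int smallest_nr_pages(void)
{
	int i, min = nr_pages_in_color[0];
	for (i = 1; i < NR_COLORS; i++)
		if (nr_pages_in_color[i] < min)
			min = nr_pages_in_color[i];
	return min;
}

/* Model of one allocation landing in some color (round-robin here;
 * the kernel derives the color from the page's physical address). */
static void alloc_one_page(int i)
{
	nr_pages_in_color[i % NR_COLORS]++;
}

int main(void)
{
	int i = 0;
	/* The fixed-count loop the patch removes could stop before every
	 * pool is full (or run far longer than needed); looping on the
	 * smallest pool terminates exactly when each color has enough. */
	while (smallest_nr_pages() < PAGES_PER_COLOR)
		alloc_one_page(i++);
	printf("allocated %d pages total\n", i);
	return 0;
}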
diff --git a/litmus/litmus.c b/litmus/litmus.c
index ddb80e1aae12..3cf784b19034 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -474,8 +474,7 @@ asmlinkage long sys_set_page_color(int cpu)
 	TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed);
 	printk(KERN_INFO "node = %ld, nr_migrated_pages = %d, nr_shared_pages = %d, nr_failed = %d\n", node, nr_pages-nr_not_migrated, nr_failed-2, nr_failed);
 	//printk(KERN_INFO "node = %d\n", cpu_to_node(smp_processor_id()));
-	flush_cache(1);
-
+	//flush_cache(1);
 	return ret;
 }
 
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 05cad35e0314..a17343efdbae 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -625,7 +625,7 @@ int gmp_update_time(
 	/* If the time didn't advance, there is nothing to do.
 	 * This check makes it safe to call sup_advance_time() potentially
 	 * multiple times (e.g., via different code paths. */
-	//TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
+	TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
 	if (unlikely(now <= gmp_env->env.current_time + EPSILON))
 		return 0;
 
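The TRACE line re-enabled above sits just before the guard that makes gmp_update_time() idempotent: if the clock has not advanced past current_time plus a small slack, the call is a no-op, so multiple code paths may invoke it safely. A standalone sketch of that guard, with an assumed EPSILON value (the real constant comes from the LITMUS^RT headers):

#include <stdio.h>

typedef unsigned long long lt_t; /* litmus time type, nanoseconds */

#define EPSILON 30 /* assumed slack; real value lives in the headers */

static lt_t current_time;

/* Advance the environment's notion of time; calling it twice with the
 * same 'now' (e.g., from two code paths) is harmless because the guard
 * bails out when time has not advanced past current_time + EPSILON. */
static int update_time(lt_t now)
{
	if (now <= current_time + EPSILON)
		return 0;       /* nothing to do */
	current_time = now;
	return 1;               /* time advanced; process expirations here */
}

int main(void)
{
	printf("%d\n", update_time(1000)); /* 1: advances */
	printf("%d\n", update_time(1000)); /* 0: duplicate call, no-op */
	printf("%d\n", update_time(1020)); /* 0: within EPSILON slack */
	return 0;
}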
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 9949791e2d7a..da91158fc75c 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -126,13 +126,14 @@ asmlinkage long sys_enact_mode(void)
 	struct mc2_cpu_state *state = local_cpu_state();
 	struct reservation *res;
 	struct list_head *pos;
-	hrtimer_cancel(&state->timer);//stop listening to old mode timers
 	//lt_t now = litmus_clock();
+	TRACE_TASK(current, "ENACTING MODE TASK\n");
 	if (state->cpu == 0){
 		raw_spin_lock(&global_lock);
 		raw_spin_lock(&mode_lock);
 		if (pending){ //MCR has entered
 			if (!seen_once){
+				TRACE_TASK(current, "NOTICED MCR\n");
 				sched_trace_request_mode(current);
 				//clean up jobs that are already done
 				//after this jobs report themselves
@@ -161,6 +162,8 @@ asmlinkage long sys_enact_mode(void)
 			seen_once = true;
 		}
 		if( ready ){ //C is throttled
+			TRACE("Timer canceled\n");
+			hrtimer_cancel(&state->timer);//stop listening to old mode timers
 			mode = requested_mode;
 			_global_env = &_global_env_modes[mode];
 			//set res->reported for new global tasks
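The fix in these two hunks moves hrtimer_cancel() from the top of sys_enact_mode() into the branch that actually enacts the mode change, so a CPU keeps its current-mode timer armed whenever a request is pending but not yet ready. A toy model of that control flow, with invented names (enact_mode(), timer_armed) standing in for the scheduler state:

#include <stdbool.h>
#include <stdio.h>

static bool timer_armed;

/* Toy model: the old-mode timer must stay armed unless the request is
 * actually enacted; in the patch the cancel is hrtimer_cancel(). */
static int enact_mode(bool pending, bool ready, int requested_mode)
{
	int mode = 0;
	timer_armed = true;            /* armed for the current mode */
	/* pre-patch: the cancel happened here, even when !ready */
	if (pending && ready) {
		timer_armed = false;   /* cancel only when really switching */
		mode = requested_mode;
	}
	return mode;
}

int main(void)
{
	enact_mode(true, false, 2);    /* request seen but C not throttled */
	printf("not ready: timer_armed=%d\n", timer_armed); /* stays 1 */
	enact_mode(true, true, 2);     /* switch enacted */
	printf("ready:     timer_armed=%d\n", timer_armed); /* now 0 */
	return 0;
}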
@@ -290,7 +293,7 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 /* fix end */
 
 	tinfo->has_departed = true;
-	//TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
+	TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
 }
 
 /* task_arrive - put a task into its reservation
@@ -318,7 +321,6 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 		break;
 	}
 
-
 	TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());
 
 	for(i = 0; i < NR_MODES; i++){
@@ -407,7 +409,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	//enum crit_level lv = get_task_crit_level(state->scheduled);
 	struct next_timer_event *event, *next;
 	int reschedule[NR_CPUS];
 
 	for (cpus = 0; cpus<NR_CPUS; cpus++)
 		reschedule[cpus] = 0;
 
@@ -509,24 +511,21 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			hrtimer_active(&state->timer),
 			ktime_to_ns(hrtimer_get_expires(&state->timer)));
 		//litmus_reschedule(state->cpu);
+/*
 		raw_spin_lock(&state->lock);
 		preempt_if_preemptable(state->scheduled, state->cpu);
 		raw_spin_unlock(&state->lock);
 		reschedule[state->cpu] = 0;
+*/
 		}
 	}
+/*
 	for (cpus = 0; cpus<NR_CPUS; cpus++) {
 		if (reschedule[cpus]) {
 			litmus_reschedule(cpus);
-			/*
-			struct mc2_cpu_state *remote_state;
-			remote_state = cpu_state_for(cpus);
-			raw_spin_lock(&remote_state->lock);
-			preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
-			raw_spin_unlock(&remote_state->lock);
-			*/
 		}
 	}
+*/
 }
 
 /* update_cpu_prio - Update cpu's priority
@@ -606,7 +605,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 
 	raw_spin_lock(&global_lock);
 	global_schedule_now = gmp_update_time(_global_env, now);
-
 	BUG_ON(global_schedule_now < 0 || global_schedule_now > 4);
 
 	/* Find the lowest cpu, and call reschedule */
@@ -624,7 +622,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 		}
 	}
 	raw_spin_unlock(&global_lock);
-
 	raw_spin_unlock_irqrestore(&state->lock, flags);
 	//raw_spin_unlock_irqrestore(&_global_env.lock, flags);
 
@@ -709,7 +706,7 @@ static long mc2_complete_job(void)
 		res->cur_budget = 0;
 		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-		//TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
+		TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
 
 		//if (lv < CRIT_LEVEL_C)
 //		raw_spin_unlock(&state->lock);
@@ -911,10 +908,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	struct mc2_cpu_state *state = local_cpu_state();
 
 	pre_schedule(prev, state->cpu);
 
-	/* 9/20/2015 fix
-	raw_spin_lock(&_global_env.lock);
-	*/
 	raw_spin_lock(&state->lock);
 
 	//BUG_ON(state->scheduled && state->scheduled != prev);
@@ -938,6 +932,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	state->sup_env->will_schedule = true;
 
 	now = litmus_clock();
+
 	sup_update_time(state->sup_env, now);
 	/* 9/20/2015 fix */
 	//raw_spin_lock(&_global_env.lock);
@@ -971,7 +966,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 
 	if (!state->scheduled) {
 		raw_spin_lock(&global_lock);
-		to_schedule = gmp_update_time(_global_env, now);
+		//to_schedule = gmp_update_time(_global_env, now);
 		state->scheduled = mc2_global_dispatch(state);
 		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 		update_cpu_prio(state);
@@ -997,12 +992,14 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	/* NOTE: drops state->lock */
 	mc2_update_timer_and_unlock(state);
 
+	raw_spin_lock(&state->lock);
 	if (prev != state->scheduled && is_realtime(prev)) {
 		struct mc2_task_state* tinfo = get_mc2_state(prev);
 		struct reservation* res = tinfo->res_info[mode].client.reservation;
 		TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
+		TRACE_TASK(prev, "PREEPT_COUNT %d\n", preempt_count());
 		res->scheduled_on = NO_CPU;
-		TRACE_TASK(prev, "descheduled.\n");
+		TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock());
 		/* if prev is preempted and a global task, find the lowest cpu and reschedule */
 		if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
 			int cpu;
@@ -1031,6 +1028,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		raw_spin_unlock(&global_lock);
 	}
 
+	raw_spin_unlock(&state->lock);
 	if (state->scheduled) {
 		TRACE_TASK(state->scheduled, "scheduled.\n");
 	}
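The pairing across these two mc2_schedule() hunks is the point: mc2_update_timer_and_unlock() drops state->lock as a documented side effect, and the patch re-takes the lock before the descheduling bookkeeping (res->scheduled_on = NO_CPU) and releases it again afterwards. A userspace sketch of that drop-then-reacquire discipline, with a pthread mutex standing in for the raw spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int scheduled_on;
#define NO_CPU (-1)

/* NOTE: drops state_lock, mirroring mc2_update_timer_and_unlock() */
static void update_timer_and_unlock(void)
{
	/* ... reprogram timers while still holding the lock ... */
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	pthread_mutex_lock(&state_lock);
	update_timer_and_unlock();         /* returns with the lock released */

	pthread_mutex_lock(&state_lock);   /* re-acquire before shared writes */
	scheduled_on = NO_CPU;             /* bookkeeping done under the lock */
	pthread_mutex_unlock(&state_lock); /* release before tracing/return */

	printf("scheduled_on=%d\n", scheduled_on);
	return 0;
}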
@@ -1051,7 +1049,7 @@ static void resume_legacy_task_model_updates(struct task_struct *tsk)
 
 	now = litmus_clock();
 	if (is_tardy(tsk, now)) {
-		//release_at(tsk, now);
+		release_at(tsk, now);
 		//sched_trace_task_release(tsk);
 	}
 }
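Re-enabling release_at(tsk, now) means a task found tardy on resume has its timeline re-anchored at the current time instead of keeping a deadline in the past. A small sketch of that adjustment; the struct and period arithmetic are simplified stand-ins for the real rt_param bookkeeping:

#include <stdio.h>

typedef unsigned long long lt_t;

/* Minimal task timing record; the real rt_param carries much more. */
struct task {
	lt_t release;
	lt_t deadline;
	lt_t period;
};

/* A task is tardy when the clock has passed its current deadline. */
static int is_tardy(struct task *t, lt_t now)
{
	return now >= t->deadline;
}

/* Model of release_at(): restart the task's timeline at 'now'. */
static void release_at(struct task *t, lt_t now)
{
	t->release = now;
	t->deadline = now + t->period;
}

int main(void)
{
	struct task t = { .release = 0, .deadline = 100, .period = 100 };
	lt_t now = 250; /* resumed long after its deadline */
	if (is_tardy(&t, now))
		release_at(&t, now); /* the call this patch re-enables */
	printf("release=%llu deadline=%llu\n", t.release, t.deadline);
	return 0;
}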
@@ -1143,17 +1141,20 @@ static long mc2_admit_task(struct task_struct *tsk)
 	enum crit_level lv;
 	int i;
 
+	TRACE_TASK(tsk, "MC2 admitting task\n");
 	if (!tinfo)
 		return -ENOMEM;
 
 	if (!mp) {
 		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
+		TRACE("mc2_admit_task: criticality level has not been set\n");
 		return -ESRCH;
 	}
 
 	lv = mp->crit;
 	preempt_disable();
 
+
 	if (lv < CRIT_LEVEL_C) {
 		state = cpu_state_for(task_cpu(tsk));
 		raw_spin_lock_irqsave(&state->lock, flags);
@@ -1162,13 +1163,14 @@ static long mc2_admit_task(struct task_struct *tsk)
 		tinfo->cpu = task_cpu(tsk);
 		tinfo->has_departed = true;
 		tinfo->mc2_param.res_id = mp->res_id;
-
+		tinfo->mc2_param.mode_mask = mp->mode_mask;
+		TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
 		for(i = 0; i < NR_MODES; i++){
 			if (!in_mode(tsk, i)){
 				//task not present in mode
 				continue;
 			}
-
+			TRACE_TASK(tsk, "Mode %d\n",i);
 			res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id);
 
 			/* found the appropriate reservation */
@@ -1193,6 +1195,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	} else if (lv == CRIT_LEVEL_C) {
+		TRACE_TASK(tsk, "Task being admitted is Level C\n");
 		state = local_cpu_state();
 		raw_spin_lock_irqsave(&state->lock, flags);
 		raw_spin_lock(&global_lock);
@@ -1242,6 +1245,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 	if (err)
 		kfree(tinfo);
 
+	TRACE_TASK(tsk, "MC2 task admitted %d\n", err);
 	return err;
 }
 
@@ -1258,6 +1262,8 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	enum crit_level lv = get_task_crit_level(tsk);
 	lt_t release = 0;
 
+	BUG_ON(lv < CRIT_LEVEL_A || lv > CRIT_LEVEL_C);
+
 	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
 		litmus_clock(), on_runqueue, is_running);
 
@@ -1286,6 +1292,9 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	else {
 		res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id);
 	}
+
+	BUG_ON(!res);
+
 	//res = res_find_by_id(state, tinfo->mc2_param.res_id);
 	release = res->next_replenishment;
 
@@ -1319,7 +1328,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 
 	if (!release) {
 		TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
-		//release_at(tsk, release);
+		release_at(tsk, release);
 	}
 	else
 		TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
@@ -1614,7 +1623,7 @@ static long create_polling_reservation(
 		pres->res.priority = config->priority;
 		sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &pres->res);
 		err = config->id;
-		TRACE_CUR("reservation created R%d priority : %llu\n", config->id, pres->res.priority);
+		TRACE_CUR("reservation created R%d for mode %d priority : %llu\n", config->id, config->mode, pres->res.priority);
 	} else {
 		err = -EEXIST;
 	}
@@ -1862,6 +1871,7 @@ static long mc2_activate_plugin(void)
 
 	raw_spin_lock_init(&_lowest_prio_cpu.lock);
 	raw_spin_lock_init(&mode_lock);
+	raw_spin_lock_init(&global_lock);
 
 	seen_once = false;
 
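The one-line addition initializes global_lock alongside the other locks when the plugin is activated, so it is in a known state before any of the hot paths above take it. A userspace analogue of that init-before-use pattern, with a POSIX spinlock standing in for the kernel's raw spinlock:

#include <pthread.h>
#include <stdio.h>

/* A lock shared across the whole plugin must be (re)initialized at
 * activation time, before any CPU can contend on it. */
static pthread_spinlock_t global_lock;

static long activate_plugin(void)
{
	/* counterpart of raw_spin_lock_init(&global_lock) */
	return pthread_spin_init(&global_lock, PTHREAD_PROCESS_PRIVATE);
}

int main(void)
{
	if (activate_plugin() == 0)
		puts("global_lock initialized");
	pthread_spin_lock(&global_lock);   /* safe: lock is initialized */
	pthread_spin_unlock(&global_lock);
	pthread_spin_destroy(&global_lock);
	return 0;
}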