author    Namhoon Kim <namhoonk@cs.unc.edu>  2016-10-12 19:13:30 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>  2016-10-12 19:13:30 -0400
commit    a53078ec5cc167413bad6dd7ce3c1fc8ec97d39e (patch)
tree      00d403a486ddbdb6425fd5fe7141c98c08169417 /litmus/sched_mc2.c
parent    2bed3116318647479e14aa22ff762bed16c066b4 (diff)

RTAS 2017 Submission ver. (wip-shared-lib2)
Diffstat (limited to 'litmus/sched_mc2.c')
-rw-r--r--  litmus/sched_mc2.c | 362
1 file changed, 12 insertions(+), 350 deletions(-)
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 588f78e2107f..6c02a56959b5 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -3,7 +3,7 @@
  *
  * Implementation of the Mixed-Criticality on MultiCore scheduler
  *
- * Thus plugin implements a scheduling algorithm proposed in
+ * This plugin implements a scheduling algorithm proposed in
  * "Mixed-Criticality Real-Time Scheduling for Multicore System" paper.
  */
 
@@ -27,9 +27,6 @@
27#include <litmus/reservation.h> 27#include <litmus/reservation.h>
28#include <litmus/polling_reservations.h> 28#include <litmus/polling_reservations.h>
29 29
30#define TRACE(fmt, args...) do {} while (false)
31#define TRACE_TASK(fmt, args...) do {} while (false)
32
33#define BUDGET_ENFORCEMENT_AT_C 0 30#define BUDGET_ENFORCEMENT_AT_C 0
34 31
35extern void do_partition(enum crit_level lv, int cpu); 32extern void do_partition(enum crit_level lv, int cpu);
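The two no-op macro stubs deleted in the hunk above had been masking every TRACE()/TRACE_TASK() call in this file; dropping them re-enables the real LITMUS^RT trace macros. Below is a minimal userspace sketch of that stubbing pattern, with a hypothetical MY_TRACE macro standing in for the kernel's TRACE:

/* Sketch only: MY_TRACE and ENABLE_TRACING are made-up names, not from
 * sched_mc2.c. The do {} while (0) body keeps the disabled macro a single
 * statement, so "if (x) MY_TRACE(...);" still parses correctly. */
#include <stdio.h>

#if ENABLE_TRACING
#define MY_TRACE(fmt, ...) printf(fmt, ##__VA_ARGS__) /* GNU ## extension */
#else
#define MY_TRACE(fmt, ...) do {} while (0) /* arguments are swallowed */
#endif

int main(void)
{
    MY_TRACE("value = %d\n", 42); /* no-op unless ENABLE_TRACING is set */
    return 0;
}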
@@ -133,7 +130,7 @@ static enum crit_level get_task_crit_level(struct task_struct *tsk)
133static void task_departs(struct task_struct *tsk, int job_complete) 130static void task_departs(struct task_struct *tsk, int job_complete)
134{ 131{
135 struct mc2_task_state* tinfo = get_mc2_state(tsk); 132 struct mc2_task_state* tinfo = get_mc2_state(tsk);
136 //struct mc2_cpu_state* state = local_cpu_state(); 133
137 struct reservation* res = NULL; 134 struct reservation* res = NULL;
138 struct reservation_client *client = NULL; 135 struct reservation_client *client = NULL;
139 136
@@ -144,33 +141,15 @@ static void task_departs(struct task_struct *tsk, int job_complete)
     BUG_ON(!res);
     BUG_ON(!client);
 
-/* 9/18/2015 fix start - no ghost job handling, empty remaining budget */
+    /* No ghost job handling, empty remaining budget */
     if (job_complete) {
         res->cur_budget = 0;
         sched_trace_task_completion(tsk, 0);
     }
-/* fix end */
 
     res->ops->client_departs(res, client, job_complete);
     tinfo->has_departed = true;
-    TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
-/* 9/18/2015 fix start - no remaining budget
- *
- if (job_complete && res->cur_budget) {
-     struct crit_entry* ce;
-     enum crit_level lv = tinfo->mc2_param.crit;
-
-     ce = &state->crit_entries[lv];
-     ce->running = tsk;
-     res->is_ghost = state->cpu;
-#if BUDGET_ENFORCEMENT_AT_C
-     gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
-#endif
-     TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
- }
- * fix -end
- */
-
+    TRACE_TASK(tsk, "Client departs with budget %llu at %llu\n", res->cur_budget, litmus_clock());
 }
 
 /* task_arrive - put a task into its reservation
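With this hunk, a completing job simply forfeits any remaining budget before its client departs the reservation; the old ghost-job bookkeeping is gone for good. A stripped-down userspace model of the surviving departure path (the struct fields here are simplifications, not the kernel's types):

/* Minimal model of the new departure path: assumed, simplified types. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct reservation {
    uint64_t cur_budget;
};

struct task_state {
    bool has_departed;
};

static void task_departs(struct reservation *res, struct task_state *t,
                         int job_complete)
{
    if (job_complete)
        res->cur_budget = 0; /* discard leftover budget: no ghost jobs */
    t->has_departed = true;
}

int main(void)
{
    struct reservation res = { .cur_budget = 500 };
    struct task_state t = { .has_departed = false };

    task_departs(&res, &t, 1);
    printf("budget=%llu departed=%d\n",
           (unsigned long long)res.cur_budget, t.has_departed);
    return 0;
}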
@@ -188,20 +167,8 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 
     tinfo->has_departed = false;
 
-    switch(lv) {
-    case CRIT_LEVEL_A:
-    case CRIT_LEVEL_B:
-        TS_RELEASE_START;
-        break;
-    case CRIT_LEVEL_C:
-        TS_RELEASE_C_START;
-        break;
-    default:
-        break;
-    }
-
     res->ops->client_arrives(res, client);
-    TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());
+    TRACE_TASK(tsk, "Client arrives at %llu\n", litmus_clock());
 
     if (lv != NUM_CRIT_LEVELS) {
         struct crit_entry *ce;
@@ -210,22 +177,6 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
         if (ce->running == tsk)
             ce->running = NULL;
     }
-    /* do we need this??
-    if (resched_cpu[state->cpu])
-        litmus_reschedule(state->cpu);
-    */
-
-    switch(lv) {
-    case CRIT_LEVEL_A:
-    case CRIT_LEVEL_B:
-        TS_RELEASE_END;
-        break;
-    case CRIT_LEVEL_C:
-        TS_RELEASE_C_END;
-        break;
-    default:
-        break;
-    }
 }
 
 /* get_lowest_prio_cpu - return the lowest priority cpu
@@ -239,10 +190,8 @@ static int get_lowest_prio_cpu(lt_t priority)
     int cpu, ret = NO_CPU;
     lt_t latest_deadline = 0;
 
-    //raw_spin_lock(&_lowest_prio_cpu.lock);
     ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
     if (!ce->will_schedule && !ce->scheduled) {
-        //raw_spin_unlock(&_lowest_prio_cpu.lock);
         TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
         return ce->cpu;
     } else {
@@ -260,7 +209,6 @@
         if (!ce->will_schedule) {
             if (!ce->scheduled) {
                 /* Idle cpu, return this. */
-                //raw_spin_unlock(&_lowest_prio_cpu.lock);
                 TRACE("CPU %d is the lowest!\n", ce->cpu);
                 return ce->cpu;
             } else if (ce->lv == CRIT_LEVEL_C &&
@@ -270,8 +218,6 @@
             }
         }
     }
-
-    //raw_spin_unlock(&_lowest_prio_cpu.lock);
 
     if (priority >= latest_deadline)
         ret = NO_CPU;
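The logic that remains after these hunks: the local CPU wins if it is idle, any idle remote CPU wins next, and otherwise the CPU running the level-C job with the latest deadline is picked, unless the incoming priority value (smaller = earlier deadline = higher priority) is not strictly higher, in which case NO_CPU is returned. A self-contained sketch under those assumptions:

/* Userspace sketch of the selection rule above; simplified, assumed types. */
#include <stdint.h>
#include <stdio.h>

#define NCPU 4
#define NO_CPU (-1)

struct cpu_entry {
    int busy;          /* 0 = idle */
    uint64_t deadline; /* deadline of the level-C job it runs */
};

static int lowest_prio_cpu(struct cpu_entry e[], uint64_t prio)
{
    uint64_t latest = 0;
    int cpu, ret = NO_CPU;

    for (cpu = 0; cpu < NCPU; cpu++) {
        if (!e[cpu].busy)
            return cpu; /* an idle CPU wins immediately */
        if (e[cpu].deadline > latest) {
            latest = e[cpu].deadline;
            ret = cpu;
        }
    }
    /* preempt only if the incoming deadline is strictly earlier */
    return (prio >= latest) ? NO_CPU : ret;
}

int main(void)
{
    struct cpu_entry e[NCPU] = {
        {1, 100}, {1, 400}, {1, 250}, {1, 300}
    };
    printf("victim=%d\n", lowest_prio_cpu(e, 200)); /* -> CPU 1 (deadline 400) */
    return 0;
}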
@@ -281,36 +227,6 @@
     return ret;
 }
 
-/* mc2_update_time - update time for a given criticality level.
- * caller must hold a proper lock
- * (cpu_state lock or global lock)
- */
-/* 9/24/2015 temporally not using
-static void mc2_update_time(enum crit_level lv,
-    struct mc2_cpu_state *state, lt_t time)
-{
-    int global_schedule_now;
-
-    if (lv < CRIT_LEVEL_C)
-        sup_update_time(&state->sup_env, time);
-    else if (lv == CRIT_LEVEL_C) {
-        global_schedule_now = gmp_update_time(&_global_env, time);
-        while (global_schedule_now--) {
-            int cpu = get_lowest_prio_cpu(0);
-            if (cpu != NO_CPU) {
-                raw_spin_lock(&_lowest_prio_cpu.lock);
-                _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-                raw_spin_unlock(&_lowest_prio_cpu.lock);
-                TRACE("LOWEST CPU = P%d\n", cpu);
-                litmus_reschedule(cpu);
-            }
-        }
-    }
-    else
-        TRACE("update_time(): Criticality level error!!!!\n");
-}
-*/
-
 /* NOTE: drops state->lock */
 /* mc2_update_timer_and_unlock - set a timer and g_timer and unlock
  * Whenever res_env.current_time is updated,
@@ -324,7 +240,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
324{ 240{
325 int local, cpus; 241 int local, cpus;
326 lt_t update, now; 242 lt_t update, now;
327 //enum crit_level lv = get_task_crit_level(state->scheduled);
328 struct next_timer_event *event, *next; 243 struct next_timer_event *event, *next;
329 int reschedule[NR_CPUS]; 244 int reschedule[NR_CPUS];
330 245
@@ -357,9 +272,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
             list_del(&event->list);
             kfree(event);
             if (cpu != NO_CPU) {
-                //raw_spin_lock(&_lowest_prio_cpu.lock);
                 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-                //raw_spin_unlock(&_lowest_prio_cpu.lock);
                 if (cpu == local_cpu_state()->cpu)
                     litmus_reschedule_local();
                 else
@@ -381,11 +294,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
     if (update <= now || reschedule[state->cpu]) {
         reschedule[state->cpu] = 0;
         litmus_reschedule(state->cpu);
-        /*
-        raw_spin_lock(&state->lock);
-        preempt_if_preemptable(state->scheduled, state->cpu);
-        raw_spin_unlock(&state->lock);
-        */
     } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
         /* Reprogram only if not already set correctly. */
         if (!hrtimer_active(&state->timer) ||
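The surviving else-branch above reprograms the per-CPU hrtimer only when it is inactive or armed for the wrong expiry, since a redundant cancel/rearm costs latency on a hot path. A toy model of that guard (a fake timer struct, not the hrtimer API):

/* Sketch of the reprogram-only-if-needed pattern; hypothetical names. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_timer {
    bool armed;
    uint64_t expires_ns;
};

static void program_timer(struct fake_timer *t, uint64_t update_ns)
{
    /* rearm only if idle or set to the wrong expiry */
    if (!t->armed || t->expires_ns != update_ns) {
        t->armed = true;
        t->expires_ns = update_ns;
        printf("armed timer for %llu\n", (unsigned long long)update_ns);
    }
}

int main(void)
{
    struct fake_timer t = { false, 0 };
    program_timer(&t, 1000); /* arms */
    program_timer(&t, 1000); /* no-op: already correct */
    program_timer(&t, 2000); /* rearms */
    return 0;
}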
@@ -428,7 +336,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
428 state->cpu, 336 state->cpu,
429 hrtimer_active(&state->timer), 337 hrtimer_active(&state->timer),
430 ktime_to_ns(hrtimer_get_expires(&state->timer))); 338 ktime_to_ns(hrtimer_get_expires(&state->timer)));
431 //litmus_reschedule(state->cpu);
432 raw_spin_lock(&state->lock); 339 raw_spin_lock(&state->lock);
433 preempt_if_preemptable(state->scheduled, state->cpu); 340 preempt_if_preemptable(state->scheduled, state->cpu);
434 raw_spin_unlock(&state->lock); 341 raw_spin_unlock(&state->lock);
@@ -438,85 +345,10 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
     for (cpus = 0; cpus<NR_CPUS; cpus++) {
         if (reschedule[cpus]) {
             litmus_reschedule(cpus);
-            /*
-            struct mc2_cpu_state *remote_state;
-            remote_state = cpu_state_for(cpus);
-            raw_spin_lock(&remote_state->lock);
-            preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
-            raw_spin_unlock(&remote_state->lock);
-            */
         }
     }
 }
 
-/* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs
- * If the budget of a ghost is exhausted,
- * clear is_ghost and reschedule
- */
-/*
-static lt_t mc2_update_ghost_state(struct mc2_cpu_state *state)
-{
-    int lv = 0;
-    struct crit_entry* ce;
-    struct reservation *res;
-    struct mc2_task_state *tinfo;
-    lt_t ret = ULLONG_MAX;
-
-    BUG_ON(!state);
-
-    for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
-        ce = &state->crit_entries[lv];
-        if (ce->running != NULL) {
-//printk(KERN_ALERT "P%d ce->running : %s/%d\n", state->cpu, ce->running ? (ce->running)->comm : "null", ce->running ? (ce->running)->pid : 0);
-            tinfo = get_mc2_state(ce->running);
-            if (!tinfo)
-                continue;
-
-            res = res_find_by_id(state, tinfo->mc2_param.res_id);
-            //BUG_ON(!res);
-            if (!res) {
-                printk(KERN_ALERT "mc2_update_ghost_state(): R%d not found!\n", tinfo->mc2_param.res_id);
-                return 0;
-            }
-
-            TRACE("LV %d running id %d budget %llu\n",
-                lv, tinfo->mc2_param.res_id, res->cur_budget);
-            // If the budget is exhausted, clear is_ghost and reschedule
-            if (!res->cur_budget) {
-                struct sup_reservation_environment* sup_env = &state->sup_env;
-
-                TRACE("GHOST FINISH id %d at %llu\n",
-                    tinfo->mc2_param.res_id, litmus_clock());
-                ce->running = NULL;
-                res->is_ghost = NO_CPU;
-
-                if (lv < CRIT_LEVEL_C) {
-                    res = list_first_entry_or_null(
-                        &sup_env->active_reservations,
-                        struct reservation, list);
-                    if (res)
-                        litmus_reschedule_local();
-                } else if (lv == CRIT_LEVEL_C) {
-                    res = list_first_entry_or_null(
-                        &_global_env.active_reservations,
-                        struct reservation, list);
-                    if (res)
-                        litmus_reschedule(state->cpu);
-                }
-            } else {
-                //TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget);
-                //gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
-                if (ret > res->cur_budget) {
-                    ret = res->cur_budget;
-                }
-            }
-        }
-    }
-
-    return ret;
-}
-*/
-
 /* update_cpu_prio - Update cpu's priority
  * When a cpu picks a new task, call this function
  * to update cpu priorities.
@@ -553,7 +385,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
553 struct mc2_cpu_state *state; 385 struct mc2_cpu_state *state;
554 lt_t update, now; 386 lt_t update, now;
555 int global_schedule_now; 387 int global_schedule_now;
556 //lt_t remain_budget; // no ghost jobs
557 int reschedule[NR_CPUS]; 388 int reschedule[NR_CPUS];
558 int cpus; 389 int cpus;
559 390
@@ -573,25 +404,13 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
     TS_ISR_START;
 
     TRACE("Timer fired at %llu\n", litmus_clock());
-    //raw_spin_lock_irqsave(&_global_env.lock, flags);
     raw_spin_lock_irqsave(&state->lock, flags);
     now = litmus_clock();
     sup_update_time(&state->sup_env, now);
 
-/* 9/20/2015 fix - no ghost job
-    remain_budget = mc2_update_ghost_state(state);
-*/
     update = state->sup_env.next_scheduler_update;
     now = state->sup_env.env.current_time;
 
-/* 9/20/2015 fix - no ghost job
-    if (remain_budget != ULLONG_MAX && update > now + remain_budget) {
-        update = now + remain_budget;
-    }
-
-    TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d remain_budget:%llu\n", now, update, state->cpu, global_schedule_now, remain_budget);
-*/
-
     if (update <= now) {
         litmus_reschedule_local();
     } else if (update != SUP_NO_SCHEDULER_UPDATE) {
@@ -608,9 +427,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
         while (global_schedule_now--) {
             int cpu = get_lowest_prio_cpu(0);
             if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
-                //raw_spin_lock(&_lowest_prio_cpu.lock);
                 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-                //raw_spin_unlock(&_lowest_prio_cpu.lock);
                 TRACE("LOWEST CPU = P%d\n", cpu);
                 if (cpu == state->cpu && update > now)
                     litmus_reschedule_local();
@@ -621,21 +438,12 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
     raw_spin_unlock(&_global_env.lock);
 
     raw_spin_unlock_irqrestore(&state->lock, flags);
-    //raw_spin_unlock_irqrestore(&_global_env.lock, flags);
 
     TS_ISR_END;
 
     for (cpus = 0; cpus<NR_CPUS; cpus++) {
         if (reschedule[cpus]) {
             litmus_reschedule(cpus);
-            /*
-            struct mc2_cpu_state *remote_state;
-
-            remote_state = cpu_state_for(cpus);
-            raw_spin_lock(&remote_state->lock);
-            preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
-            raw_spin_unlock(&remote_state->lock);
-            */
         }
     }
 
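A pattern worth noting across this patch: CPUs that need to be preempted are recorded in the on-stack reschedule[] array while the state and global locks are held, and litmus_reschedule() is only called after both locks are dropped. A compact model of the defer-then-poke idea (toy stand-ins, not the kernel API):

/* Collect targets under the lock, poke them after releasing it. */
#include <stdio.h>

#define NR_CPUS 4

static int reschedule[NR_CPUS];

static void poke(int cpu) { printf("reschedule CPU %d\n", cpu); }

static void timer_body(void)
{
    int cpus;

    /* ... locked section marks CPUs that must re-run the scheduler ... */
    reschedule[1] = 1;
    reschedule[3] = 1;
    /* ... locks dropped here ... */

    for (cpus = 0; cpus < NR_CPUS; cpus++)
        if (reschedule[cpus])
            poke(cpus);
}

int main(void) { timer_body(); return 0; }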
@@ -688,23 +496,9 @@ static long mc2_complete_job(void)
         /* set next_replenishtime to synchronous release time */
         BUG_ON(!res);
         res->next_replenishment = tsk_rt(current)->sporadic_release_time;
-/*
-        if (get_task_crit_level(current) == CRIT_LEVEL_A) {
-            struct table_driven_reservation *tdres;
-            tdres = container_of(res, struct table_driven_reservation, res);
-            tdres->next_interval = 0;
-            tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
-            res->next_replenishment += tdres->intervals[0].start;
-        }
-*/
         res->cur_budget = 0;
         res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-        //TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
-
-        //if (lv < CRIT_LEVEL_C)
-//            raw_spin_unlock(&state->lock);
-        //else
         if (lv == CRIT_LEVEL_C)
             raw_spin_unlock(&_global_env.lock);
 
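mc2_complete_job() now just drains the budget and moves the reservation to RESERVATION_DEPLETED so that replenishment happens at the synchronous release point; the commented-out table-driven (level-A) special case is deleted. A userspace model of the surviving path (simplified, assumed types):

/* Model of the completion path above; not the kernel structs. */
#include <stdint.h>
#include <stdio.h>

enum res_state { RES_ACTIVE, RES_DEPLETED };

struct reservation {
    enum res_state state;
    uint64_t cur_budget;
    uint64_t next_replenishment;
};

static void complete_job(struct reservation *res, uint64_t sync_release)
{
    res->next_replenishment = sync_release;
    res->cur_budget = 0;       /* force depletion now */
    res->state = RES_DEPLETED; /* replenished at sync_release */
}

int main(void)
{
    struct reservation r = { RES_ACTIVE, 300, 0 };

    complete_job(&r, 10000);
    printf("state=%d budget=%llu next=%llu\n", r.state,
           (unsigned long long)r.cur_budget,
           (unsigned long long)r.next_replenishment);
    return 0;
}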
@@ -762,17 +556,6 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
                 res->blocked_by_ghost = 0;
                 res->is_ghost = NO_CPU;
                 return tsk;
-/* no ghost jobs
-                if (likely(!ce->running)) {
-                    sup_scheduler_update_after(sup_env, res->cur_budget);
-                    res->blocked_by_ghost = 0;
-                    res->is_ghost = NO_CPU;
-                    return tsk;
-                } else {
-                    res->blocked_by_ghost = 1;
-                    TRACE_TASK(ce->running, " is GHOST\n");
-                }
-*/
             }
         }
     }
@@ -785,19 +568,10 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 {
     struct reservation *res, *next;
     struct task_struct *tsk = NULL;
-    //struct crit_entry *ce;
+
     enum crit_level lv;
     lt_t time_slice;
 
-    /* no eligible level A or B tasks exists */
-    /* check the ghost job */
-    /*
-    ce = &state->crit_entries[CRIT_LEVEL_C];
-    if (ce->running) {
-        TRACE_TASK(ce->running," is GHOST\n");
-        return NULL;
-    }
-    */
     list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
         BUG_ON(!res);
         if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
@@ -814,8 +588,6 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
                 res->scheduled_on = state->cpu;
                 return tsk;
             } else if (lv == CRIT_LEVEL_C) {
-                //ce = &state->crit_entries[lv];
-                //if (likely(!ce->running)) {
 #if BUDGET_ENFORCEMENT_AT_C
                 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
 #endif
@@ -824,11 +596,6 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
                 res->is_ghost = NO_CPU;
                 res->scheduled_on = state->cpu;
                 return tsk;
-                //} else {
-                //    res->blocked_by_ghost = 1;
-                //    TRACE_TASK(ce->running, " is GHOST\n");
-                //    return NULL;
-                //}
             } else {
                 BUG();
             }
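With the ghost-job branches gone, mc2_global_dispatch() reduces to claiming the first ACTIVE reservation that is not scheduled on any CPU. A simplified model of that scan, using an array where the kernel walks a linked list:

/* Sketch of the global dispatch scan; assumed, simplified types. */
#include <stdio.h>

#define NO_CPU (-1)
#define NRES 3

enum res_state { RES_INACTIVE, RES_ACTIVE };

struct reservation {
    enum res_state state;
    int scheduled_on;
    int id;
};

static int global_dispatch(struct reservation res[], int this_cpu)
{
    int i;

    for (i = 0; i < NRES; i++) {
        if (res[i].state == RES_ACTIVE && res[i].scheduled_on == NO_CPU) {
            res[i].scheduled_on = this_cpu; /* claim it for this CPU */
            return res[i].id;
        }
    }
    return -1; /* nothing eligible */
}

int main(void)
{
    struct reservation res[NRES] = {
        { RES_INACTIVE, NO_CPU, 1 },
        { RES_ACTIVE, 2, 2 },      /* already running on CPU 2 */
        { RES_ACTIVE, NO_CPU, 3 },
    };
    printf("dispatched id=%d\n", global_dispatch(res, 0)); /* -> 3 */
    return 0;
}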
@@ -884,13 +651,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 
     pre_schedule(prev, state->cpu);
 
-    /* 9/20/2015 fix
-    raw_spin_lock(&_global_env.lock);
-    */
     raw_spin_lock(&state->lock);
 
-    //BUG_ON(state->scheduled && state->scheduled != prev);
-    //BUG_ON(state->scheduled && !is_realtime(prev));
     if (state->scheduled && state->scheduled != prev)
         printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
     if (state->scheduled && !is_realtime(prev))
@@ -911,24 +673,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 
     now = litmus_clock();
     sup_update_time(&state->sup_env, now);
-    /* 9/20/2015 fix */
-    //raw_spin_lock(&_global_env.lock);
-    //to_schedule = gmp_update_time(&_global_env, now);
-    //raw_spin_unlock(&_global_env.lock);
-
-    /* 9/20/2015 fix
-    mc2_update_ghost_state(state);
-    */
-
-    /* remove task from reservation if it blocks */
-    /*
-    if (is_realtime(prev) && !is_running(prev)) {
-        if (get_task_crit_level(prev) == CRIT_LEVEL_C)
-            raw_spin_lock(&_global_env.lock);
-        task_departs(prev, is_completed(prev));
-        if (get_task_crit_level(prev) == CRIT_LEVEL_C)
-            raw_spin_unlock(&_global_env.lock);
-    }*/
+
     if (is_realtime(current) && blocks) {
         if (get_task_crit_level(current) == CRIT_LEVEL_C)
             raw_spin_lock(&_global_env.lock);
@@ -955,11 +700,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
             raw_spin_unlock(&_global_env.lock);
     }
 
-    //raw_spin_lock(&_lowest_prio_cpu.lock);
-    //_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
-    //update_cpu_prio(state);
-    //raw_spin_unlock(&_lowest_prio_cpu.lock);
-
     /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
     sched_state_task_picked();
 
@@ -982,10 +722,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
         cpu = get_lowest_prio_cpu(res?res->priority:0);
         TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
         if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
-            //raw_spin_lock(&_lowest_prio_cpu.lock);
             _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
             resched_cpu[cpu] = 1;
-            //raw_spin_unlock(&_lowest_prio_cpu.lock);
         }
         raw_spin_unlock(&_global_env.lock);
     }
@@ -1012,23 +750,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     return state->scheduled;
 }
 
-static void resume_legacy_task_model_updates(struct task_struct *tsk)
-{
-    lt_t now;
-    if (is_sporadic(tsk)) {
-        /* If this sporadic task was gone for a "long" time and woke up past
-         * its deadline, then give it a new budget by triggering a job
-         * release. This is purely cosmetic and has no effect on the
-         * MC2 scheduler. */
-
-        now = litmus_clock();
-        if (is_tardy(tsk, now)) {
-            //release_at(tsk, now);
-            //sched_trace_task_release(tsk);
-        }
-    }
-}
-
 /* mc2_task_resume - Called when the state of tsk changes back to
  * TASK_RUNNING. We need to requeue the task.
  */
@@ -1046,9 +767,6 @@ static void mc2_task_resume(struct task_struct *tsk)
     else
         state = local_cpu_state();
 
-    /* 9/20/2015 fix
-    raw_spin_lock(&_global_env.lock);
-    */
     /* Requeue only if self-suspension was already processed. */
     if (tinfo->has_departed)
     {
@@ -1083,22 +801,14 @@
             raw_spin_unlock(&_global_env.lock);
         }
 
-        /* 9/20/2015 fix
-        mc2_update_ghost_state(state);
-        */
-        //task_arrives(state, tsk);
         /* NOTE: drops state->lock */
         TRACE_TASK(tsk, "mc2_resume()\n");
         mc2_update_timer_and_unlock(state);
     } else {
         TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
-        //raw_spin_unlock(&_global_env.lock);
     }
 
     local_irq_restore(flags);
-
-    //gmp_free_passed_event();
-    resume_legacy_task_model_updates(tsk);
 }
 
 
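mc2_task_resume() keeps its guard on tinfo->has_departed: a wakeup becomes a requeue only if the earlier self-suspension was already processed by task_departs(); otherwise it is ignored. A minimal model of the guard:

/* Sketch of the resume guard; simplified, assumed types. */
#include <stdbool.h>
#include <stdio.h>

struct task_state { bool has_departed; };

static void resume(struct task_state *t)
{
    if (t->has_departed) {
        t->has_departed = false;
        printf("requeue task\n");
    } else {
        printf("resume ignored, still scheduled\n");
    }
}

int main(void)
{
    struct task_state t = { .has_departed = true };

    resume(&t); /* requeues */
    resume(&t); /* ignored */
    return 0;
}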
@@ -1152,9 +862,6 @@ static long mc2_admit_task(struct task_struct *tsk)
         state = local_cpu_state();
         raw_spin_lock_irqsave(&state->lock, flags);
         raw_spin_lock(&_global_env.lock);
-        //state = local_cpu_state();
-
-        //raw_spin_lock(&state->lock);
 
         res = gmp_find_by_id(&_global_env, mp->res_id);
 
@@ -1206,18 +913,16 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
         state = local_cpu_state();
     else
         state = cpu_state_for(tinfo->cpu);
+
 
-    local_irq_save(flags);
-
-    /* acquire the lock protecting the state and disable interrupts */
-    //raw_spin_lock(&_global_env.lock);
-    //raw_spin_lock(&state->lock);
     if (is_running) {
         state->scheduled = tsk;
         /* make sure this task should actually be running */
         litmus_reschedule_local();
     }
 
+    /* acquire the lock protecting the state and disable interrupts */
+    local_irq_save(flags);
     raw_spin_lock(&state->lock);
 
     if (lv == CRIT_LEVEL_C) {
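The reordering above lets the unlocked work in mc2_task_new() (recording state->scheduled and poking the local scheduler) run before interrupts are disabled, so local_irq_save() now sits directly in front of raw_spin_lock() and the interrupts-off window stays tight. A toy illustration with stand-in functions (not the kernel primitives):

/* Keep the interrupt-disabled window short: setup first, then lock. */
#include <stdio.h>

static int irqs_disabled;

static void do_setup(void)    { printf("setup outside critical section\n"); }
static void irq_save(void)    { irqs_disabled = 1; }
static void irq_restore(void) { irqs_disabled = 0; }
static void spin_lock(void)   { /* lock acquired */ }
static void spin_unlock(void) { /* lock released */ }

int main(void)
{
    do_setup();  /* was previously done with interrupts off */
    irq_save();  /* disable interrupts only around the locked region */
    spin_lock();
    printf("critical section (irqs off: %d)\n", irqs_disabled);
    spin_unlock();
    irq_restore();
    return 0;
}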
@@ -1227,7 +932,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
     else {
         res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
     }
-    //res = res_find_by_id(state, tinfo->mc2_param.res_id);
     release = res->next_replenishment;
 
     if (on_runqueue || is_running) {
@@ -1235,14 +939,9 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
          * [see comment in pres_task_resume()] */
         if (lv == CRIT_LEVEL_C) {
             gmp_update_time(&_global_env, litmus_clock());
-            //raw_spin_unlock(&_global_env.lock);
         }
         else
             sup_update_time(&state->sup_env, litmus_clock());
-        //mc2_update_time(lv, state, litmus_clock());
-        /* 9/20/2015 fix
-        mc2_update_ghost_state(state);
-        */
         task_arrives(state, tsk);
         if (lv == CRIT_LEVEL_C)
             raw_spin_unlock(&_global_env.lock);
@@ -1254,13 +953,11 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
         if (lv == CRIT_LEVEL_C)
             raw_spin_unlock(&_global_env.lock);
         raw_spin_unlock(&state->lock);
-        //raw_spin_unlock(&_global_env.lock);
     }
     local_irq_restore(flags);
 
     if (!release) {
         TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
-        //release_at(tsk, release);
     }
     else
         TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
@@ -1275,15 +972,12 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
     struct reservation *res = NULL, *next;
     struct sup_reservation_environment *sup_env;
     int found = 0;
-    //enum crit_level lv = get_task_crit_level(current);
     unsigned long flags;
 
     if (cpu == -1) {
         /* if the reservation is global reservation */
         local_irq_save(flags);
-        //state = local_cpu_state();
         raw_spin_lock(&_global_env.lock);
-        //raw_spin_lock(&state->lock);
 
         list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
             if (res->id == reservation_id) {
@@ -1314,7 +1008,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
             }
         }
 
-        //raw_spin_unlock(&state->lock);
         raw_spin_unlock(&_global_env.lock);
         local_irq_restore(flags);
     } else {
@@ -1323,17 +1016,9 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
         local_irq_save(flags);
         raw_spin_lock(&state->lock);
 
-        // res = sup_find_by_id(&state->sup_env, reservation_id);
         sup_env = &state->sup_env;
         list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
             if (res->id == reservation_id) {
-/*
-                if (lv == CRIT_LEVEL_A) {
-                    struct table_driven_reservation *tdres;
-                    tdres = container_of(res, struct table_driven_reservation, res);
-                    kfree(tdres->intervals);
-                }
-*/
                 list_del(&res->list);
                 kfree(res);
                 found = 1;
@@ -1343,12 +1028,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
         if (!found) {
             list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
                 if (res->id == reservation_id) {
-/*                    if (lv == CRIT_LEVEL_A) {
-                        struct table_driven_reservation *tdres;
-                        tdres = container_of(res, struct table_driven_reservation, res);
-                        kfree(tdres->intervals);
-                    }
-*/
                     list_del(&res->list);
                     kfree(res);
                     found = 1;
@@ -1359,12 +1038,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
         if (!found) {
             list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
                 if (res->id == reservation_id) {
-/*                    if (lv == CRIT_LEVEL_A) {
-                        struct table_driven_reservation *tdres;
-                        tdres = container_of(res, struct table_driven_reservation, res);
-                        kfree(tdres->intervals);
-                    }
-*/
                     list_del(&res->list);
                     kfree(res);
                     found = 1;
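mc2_reservation_destroy() probes the depleted, inactive, and active lists in turn and unlinks and frees the first reservation with a matching id; the dead level-A cleanup block is dropped from all three loops. A self-contained sketch with a plain singly-linked list standing in for the kernel's list_for_each_entry_safe():

/* Unlink-and-free by id; userspace model, not the kernel list API. */
#include <stdio.h>
#include <stdlib.h>

struct res {
    unsigned id;
    struct res *next;
};

static int destroy_res(struct res **head, unsigned id)
{
    struct res **pp = head, *cur;

    while ((cur = *pp)) {
        if (cur->id == id) {
            *pp = cur->next; /* unlink before freeing */
            free(cur);
            return 1; /* found */
        }
        pp = &cur->next;
    }
    return 0;
}

int main(void)
{
    struct res *c = malloc(sizeof *c), *b = malloc(sizeof *b), *a = malloc(sizeof *a);
    struct res *head;

    c->id = 3; c->next = NULL;
    b->id = 2; b->next = c;
    a->id = 1; a->next = b;
    head = a;

    printf("found=%d\n", destroy_res(&head, 2));
    while (head) { struct res *n = head->next; free(head); head = n; }
    return 0;
}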
@@ -1419,11 +1092,7 @@ static void mc2_task_exit(struct task_struct *tsk)
     else if (lv == CRIT_LEVEL_C) {
         raw_spin_lock(&_global_env.lock);
         gmp_update_time(&_global_env, litmus_clock());
-        //raw_spin_unlock(&_global_env.lock);
     }
-    /* 9/20/2015 fix
-    mc2_update_ghost_state(state);
-    */
     task_departs(tsk, 0);
     if (lv == CRIT_LEVEL_C)
         raw_spin_unlock(&_global_env.lock);
@@ -1506,8 +1175,6 @@ static long create_polling_reservation(
         return -ENOMEM;
 
     if (config->cpu != -1) {
-
-        //raw_spin_lock_irqsave(&_global_env.lock, flags);
         state = cpu_state_for(config->cpu);
         raw_spin_lock_irqsave(&state->lock, flags);
 
@@ -1521,10 +1188,6 @@ static long create_polling_reservation(
             pres->res.id = config->id;
             pres->res.blocked_by_ghost = 0;
             pres->res.is_ghost = NO_CPU;
-            /*if (config->priority == LITMUS_MAX_PRIORITY) {
-                level_a_priorities[config->cpu]++;
-                pres->res.priority = level_a_priorities[config->cpu];
-            }*/
             if (!use_edf)
                 pres->res.priority = config->priority;
             sup_add_new_reservation(&state->sup_env, &pres->res);
@@ -1535,7 +1198,6 @@ static long create_polling_reservation(
         }
 
         raw_spin_unlock_irqrestore(&state->lock, flags);
-        //raw_spin_unlock_irqrestore(&_global_env.lock, flags);
 
     } else {
         raw_spin_lock_irqsave(&_global_env.lock, flags);