path: root/litmus/sched_mc2.c
author    Stephen Tang <sytang@cs.unc.edu>  2017-04-03 09:30:13 -0400
committer Stephen Tang <sytang@cs.unc.edu>  2017-04-03 09:30:13 -0400
commit    cae5ba2badcf13937b83564a5ab2231cc554b42c (patch)
tree      a311ddf0a4a1944e8d215695d83cac4150fd1ce7 /litmus/sched_mc2.c
parent    d57b8f5a8e2d08fa972dad6b646a02a5dd931be4 (diff)
Mode change implementation
Diffstat (limited to 'litmus/sched_mc2.c')
-rw-r--r--  litmus/sched_mc2.c  932
1 file changed, 510 insertions, 422 deletions
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index a2abda848cbf..31d3019d9fce 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -9,6 +9,7 @@
 
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <linux/rwlock.h>
 #include <asm/uaccess.h>
 
 #include <litmus/sched_plugin.h>
@@ -39,7 +40,10 @@
 extern void do_partition(enum crit_level lv, int cpu);
 
 /* _global_env - reservation container for level-C tasks*/
-struct gmp_reservation_environment _global_env;
+struct gmp_reservation_environment _global_env_modes[NR_MODES];
+struct gmp_reservation_environment *_global_env;
+raw_spinlock_t global_lock;
+
 
 /* cpu_entry - keep track of a running task on a cpu
  * This state is used to decide the lowest priority cpu
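Note (illustration, not part of the commit): the hunk above replaces the single level-C reservation container with one container per mode plus a pointer to the one currently in effect, so a mode switch is a pointer swap under global_lock rather than a data migration. A minimal user-space sketch of that pattern follows; NR_MODES, the env contents, and the lock type are illustrative stand-ins for the kernel's.

    #include <pthread.h>
    #include <stdio.h>

    #define NR_MODES 4

    struct env { int active_reservations; };

    static struct env env_modes[NR_MODES];        /* one container per mode */
    static struct env *cur_env = &env_modes[0];   /* the mode in effect */
    static pthread_mutex_t env_lock = PTHREAD_MUTEX_INITIALIZER;

    static void switch_mode(int new_mode)
    {
        pthread_mutex_lock(&env_lock);
        cur_env = &env_modes[new_mode]; /* O(1): swap the pointer, not the data */
        pthread_mutex_unlock(&env_lock);
    }

    int main(void)
    {
        switch_mode(2);
        printf("active env: %ld\n", (long)(cur_env - env_modes));
        return 0;
    }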
@@ -64,19 +68,13 @@ struct cpu_priority _lowest_prio_cpu;
 
 /* mc2_task_state - a task state structure */
 struct mc2_task_state {
-	struct task_client res_info;
+	struct task_client res_info[NR_MODES];
 	/* if cpu == -1, this task is a global task (level C) */
 	int cpu;
 	bool has_departed;
 	struct mc2_task mc2_param;
 };
 
-/* crit_entry - maintain the logically running job (ghost job) */
-struct crit_entry {
-	enum crit_level level;
-	struct task_struct *running;
-};
-
 /* mc2_cpu_state - maintain the scheduled state and ghost jobs
  * timer : timer for partitioned tasks (level A and B)
  * g_timer : timer for global tasks (level C)
@@ -84,21 +82,107 @@ struct crit_entry {
 struct mc2_cpu_state {
 	raw_spinlock_t lock;
 
-	struct sup_reservation_environment sup_env;
+	struct sup_reservation_environment sup_env_modes[NR_MODES];
+	struct sup_reservation_environment *sup_env;
 	struct hrtimer timer;
 
 	int cpu;
 	struct task_struct* scheduled;
-	struct crit_entry crit_entries[NUM_CRIT_LEVELS];
+	//struct crit_entry crit_entries[NUM_CRIT_LEVELS];
+	bool spin_flag; //not used on cpu 0
 };
 
+
 static int resched_cpu[NR_CPUS];
 static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
-static int level_a_priorities[NR_CPUS];
+//level_a_priorities unused
+//static int level_a_priorities[NR_CPUS];
 
 #define cpu_state_for(cpu_id)	(&per_cpu(mc2_cpu_state, cpu_id))
 #define local_cpu_state()	(this_cpu_ptr(&mc2_cpu_state))
 
+
+unsigned int mode; //currently executing mode, from 0 to NR_MODES-1
+unsigned int requested_mode; //The pending mode
+/* Prevent multiple requests from entering and prevent request from entering while old
+ * is being enacted */
+raw_spinlock_t mode_lock;
+
+unsigned int mode_sizes[NR_MODES];
+unsigned int res_reported;
+bool cpu_0_spin_flag;
+#define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum))
+/*
+ * To be called from level A task's with period equal to
+ * A and B hyperperiod
+ */
+
+asmlinkage int sys_enact_mode(void)
+{
+	struct mc2_cpu_state *state = local_cpu_state();
+	struct list_head *pos;
+	struct reservation *res;
+	hrtimer_cancel(&state->timer); //stop listening to old mode timers
+	//lt_t now = litmus_clock();
+	if (state->cpu == 0){
+		raw_spin_lock(&global_lock);
+		raw_spin_lock(&mode_lock);
+		if (mode != requested_mode //MCR has entered
+		    && !res_reported){ //and C is throttled
+			mode = requested_mode;
+			_global_env = &_global_env_modes[mode];
+			list_for_each(pos, &_global_env->active_reservations){
+				res = list_entry(pos, struct reservation, list);
+				res->reported = 0;
+			}
+			list_for_each(pos, &_global_env->depleted_reservations){
+				res = list_entry(pos, struct reservation, list);
+				res->reported = 0;
+			}
+			list_for_each(pos, &_global_env->inactive_reservations){
+				res = list_entry(pos, struct reservation, list);
+				res->reported = 0;
+			}
+			//gmp_update_time(_global_env, now);
+		}
+		raw_spin_unlock(&mode_lock);
+		raw_spin_unlock(&global_lock);
+		//release other CPUs
+		cpu_0_spin_flag = !cpu_0_spin_flag;
+	}
+	else{
+		//spin, wait for CPU 0 to stabilize mode decision
+		//before scheduling next hyperperiod
+		if (state->spin_flag)
+			while(cpu_0_spin_flag);
+		else
+			while(!cpu_0_spin_flag);
+		state->spin_flag = !state->spin_flag;
+	}
+	//if mode didn't change this has no effect on what's being scheduled
+	state->sup_env = &state->sup_env_modes[mode];
+	//sup_update_time(state->sup_env, now);
+	return 0;
+}
+
+
+/*
+ * Called from non-real time program
+ * Protect by exclusive lock to prevent from occuring while mode change is enacted
+ */
+asmlinkage int sys_request_mode(int new_mode){
+	raw_spin_lock(&mode_lock);
+	if (mode != requested_mode){
+		raw_spin_unlock(&mode_lock);
+		return -EAGAIN;
+	}
+	requested_mode = new_mode;
+	res_reported = mode_sizes[mode];
+	raw_spin_unlock(&mode_lock);
+	return 0;
+}
+
+
 /* get_mc2_state - get the task's state */
 static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
 {
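Note (illustration, not part of the commit): the two new syscalls above split a mode change into a request phase and an enact phase. sys_request_mode records the pending mode and arms res_reported with the current mode's reservation count; sys_enact_mode is then called by level-A tasks at each A/B hyperperiod boundary, and CPU 0 commits the switch only once every level-C reservation has reported, while the other CPUs rendezvous on the spin flags. A hypothetical user-space driver is sketched below; the syscall numbers are placeholders, since the real slots depend on how the LITMUS^RT syscall table is patched.

    #include <unistd.h>
    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>

    #define __NR_request_mode 510 /* assumed slot */
    #define __NR_enact_mode   511 /* assumed slot */

    int main(void)
    {
        /* A non-real-time controller asks for mode 1... */
        if (syscall(__NR_request_mode, 1) < 0 && errno == EAGAIN)
            fprintf(stderr, "previous mode-change request still pending\n");

        /* ...and each level-A task would call this once per A/B
         * hyperperiod; CPU 0 commits the switch only when a request is
         * pending and all level-C reservations have reported. */
        syscall(__NR_enact_mode);
        return 0;
    }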
@@ -140,41 +224,31 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 	//struct mc2_cpu_state* state = local_cpu_state();
 	struct reservation* res = NULL;
 	struct reservation_client *client = NULL;
-
+	int i;
 	BUG_ON(!is_realtime(tsk));
-
-	res = tinfo->res_info.client.reservation;
-	client = &tinfo->res_info.client;
-	BUG_ON(!res);
-	BUG_ON(!client);
 
+	for(i = 0; i < NR_MODES; i++){
+		if (! in_mode(tsk, i) )
+			continue;
+		res = tinfo->res_info[i].client.reservation;
+		client = &tinfo->res_info[i].client;
+		BUG_ON(!res);
+		BUG_ON(!client);
+
+		if (job_complete)
+			res->cur_budget = 0;
+
+		res->ops->client_departs(res, client, job_complete);
+	}
 /* 9/18/2015 fix start - no ghost job handling, empty remaining budget */
 	if (job_complete) {
-		res->cur_budget = 0;
+		//res->cur_budget = 0;
 		sched_trace_task_completion(tsk, 0);
 	}
 /* fix end */
 
-	res->ops->client_departs(res, client, job_complete);
 	tinfo->has_departed = true;
-	TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
+	//TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
-/* 9/18/2015 fix start - no remaining budget
- *
- if (job_complete && res->cur_budget) {
-	struct crit_entry* ce;
-	enum crit_level lv = tinfo->mc2_param.crit;
-
-	ce = &state->crit_entries[lv];
-	ce->running = tsk;
-	res->is_ghost = state->cpu;
-#if BUDGET_ENFORCEMENT_AT_C
-	gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
-#endif
-	TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
- }
- * fix -end
- */
-
 }
 
 /* task_arrive - put a task into its reservation
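Note (illustration, not part of the commit): after this change a task owns one task_client per mode it participates in (res_info[NR_MODES]), and departure deregisters it from every such mode, gated by the in_mode() bitmask test defined earlier. A stand-alone sketch of that membership test, with illustrative values:

    #include <stdio.h>

    #define NR_MODES 4
    #define in_mode(mask, m) ((mask) & (1u << (m)))

    int main(void)
    {
        unsigned int mode_mask = 0x5; /* member of modes 0 and 2 */
        for (int i = 0; i < NR_MODES; i++)
            printf("mode %d: %s\n", i,
                   in_mode(mode_mask, i) ? "member" : "skip");
        return 0;
    }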
@@ -186,9 +260,7 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 	struct reservation* res;
 	struct reservation_client *client;
 	enum crit_level lv = get_task_crit_level(tsk);
-
-	res = tinfo->res_info.client.reservation;
-	client = &tinfo->res_info.client;
+	int i;
 
 	tinfo->has_departed = false;
 
@@ -203,22 +275,19 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 	default:
 		break;
 	}
+
 
-	res->ops->client_arrives(res, client);
 	TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());
 
-	if (lv != NUM_CRIT_LEVELS) {
-		struct crit_entry *ce;
-		ce = &state->crit_entries[lv];
-		/* if the currrent task is a ghost job, remove it */
-		if (ce->running == tsk)
-			ce->running = NULL;
-	}
-	/* do we need this??
-	if (resched_cpu[state->cpu])
-		litmus_reschedule(state->cpu);
-	*/
-
+	for(i = 0; i < NR_MODES; i++){
+		if (! in_mode(tsk, i) )
+			continue;
+		res = tinfo->res_info[i].client.reservation;
+		client = &tinfo->res_info[i].client;
+
+		res->ops->client_arrives(res, client);
+	}
+
 	switch(lv) {
 	case CRIT_LEVEL_A:
 	case CRIT_LEVEL_B:
@@ -243,10 +312,8 @@ static int get_lowest_prio_cpu(lt_t priority)
 	int cpu, ret = NO_CPU;
 	lt_t latest_deadline = 0;
 
-	//raw_spin_lock(&_lowest_prio_cpu.lock);
 	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
 	if (!ce->will_schedule && !ce->scheduled) {
-		//raw_spin_unlock(&_lowest_prio_cpu.lock);
 		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
 		return ce->cpu;
 	} else {
@@ -264,7 +331,6 @@ static int get_lowest_prio_cpu(lt_t priority)
 		if (!ce->will_schedule) {
 			if (!ce->scheduled) {
 				/* Idle cpu, return this. */
-				//raw_spin_unlock(&_lowest_prio_cpu.lock);
 				TRACE("CPU %d is the lowest!\n", ce->cpu);
 				return ce->cpu;
 			} else if (ce->lv == CRIT_LEVEL_C &&
@@ -275,8 +341,6 @@ static int get_lowest_prio_cpu(lt_t priority)
 		}
 	}
 
-	//raw_spin_unlock(&_lowest_prio_cpu.lock);
-
 	if (priority >= latest_deadline)
 		ret = NO_CPU;
 
@@ -285,36 +349,6 @@ static int get_lowest_prio_cpu(lt_t priority)
 	return ret;
 }
 
-/* mc2_update_time - update time for a given criticality level.
- * caller must hold a proper lock
- * (cpu_state lock or global lock)
- */
-/* 9/24/2015 temporally not using
-static void mc2_update_time(enum crit_level lv,
-	struct mc2_cpu_state *state, lt_t time)
-{
-	int global_schedule_now;
-
-	if (lv < CRIT_LEVEL_C)
-		sup_update_time(&state->sup_env, time);
-	else if (lv == CRIT_LEVEL_C) {
-		global_schedule_now = gmp_update_time(&_global_env, time);
-		while (global_schedule_now--) {
-			int cpu = get_lowest_prio_cpu(0);
-			if (cpu != NO_CPU) {
-				raw_spin_lock(&_lowest_prio_cpu.lock);
-				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-				raw_spin_unlock(&_lowest_prio_cpu.lock);
-				TRACE("LOWEST CPU = P%d\n", cpu);
-				litmus_reschedule(cpu);
-			}
-		}
-	}
-	else
-		TRACE("update_time(): Criticality level error!!!!\n");
-}
-*/
-
 /* NOTE: drops state->lock */
 /* mc2_update_timer_and_unlock - set a timer and g_timer and unlock
  * Whenever res_env.current_time is updated,
@@ -335,8 +369,8 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	for (cpus = 0; cpus<NR_CPUS; cpus++)
 		reschedule[cpus] = 0;
 
-	update = state->sup_env.next_scheduler_update;
-	now = state->sup_env.env.current_time;
+	update = state->sup_env->next_scheduler_update;
+	now = state->sup_env->env.current_time;
 
 	/* Be sure we're actually running on the right core,
 	 * as pres_update_timer() is also called from pres_task_resume(),
@@ -344,9 +378,9 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	 */
 	local = local_cpu_state() == state;
 
-	raw_spin_lock(&_global_env.lock);
+	raw_spin_lock(&global_lock);
 
-	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
+	list_for_each_entry_safe(event, next, &_global_env->next_events, list) {
 		/* If the event time is already passed, we call schedule() on
 		   the lowest priority cpu */
 		if (event->next_update >= update) {
@@ -355,7 +389,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 
 		if (event->next_update < litmus_clock()) {
 			if (event->timer_armed_on == NO_CPU) {
-				struct reservation *res = gmp_find_by_id(&_global_env, event->id);
+				struct reservation *res = gmp_find_by_id(_global_env, event->id);
 				int cpu = get_lowest_prio_cpu(res?res->priority:0);
 				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
 				list_del(&event->list);
@@ -379,7 +413,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 
 	/* Must drop state lock before calling into hrtimer_start(), which
 	 * may raise a softirq, which in turn may wake ksoftirqd. */
-	raw_spin_unlock(&_global_env.lock);
+	raw_spin_unlock(&global_lock);
 	raw_spin_unlock(&state->lock);
 
 	if (update <= now || reschedule[state->cpu]) {
@@ -453,74 +487,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	}
 }
 
-/* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs
- * If the budget of a ghost is exhausted,
- * clear is_ghost and reschedule
- */
-/*
-static lt_t mc2_update_ghost_state(struct mc2_cpu_state *state)
-{
-	int lv = 0;
-	struct crit_entry* ce;
-	struct reservation *res;
-	struct mc2_task_state *tinfo;
-	lt_t ret = ULLONG_MAX;
-
-	BUG_ON(!state);
-
-	for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
-		ce = &state->crit_entries[lv];
-		if (ce->running != NULL) {
-//printk(KERN_ALERT "P%d ce->running : %s/%d\n", state->cpu, ce->running ? (ce->running)->comm : "null", ce->running ? (ce->running)->pid : 0);
-			tinfo = get_mc2_state(ce->running);
-			if (!tinfo)
-				continue;
-
-			res = res_find_by_id(state, tinfo->mc2_param.res_id);
-			//BUG_ON(!res);
-			if (!res) {
-				printk(KERN_ALERT "mc2_update_ghost_state(): R%d not found!\n", tinfo->mc2_param.res_id);
-				return 0;
-			}
-
-			TRACE("LV %d running id %d budget %llu\n",
-			      lv, tinfo->mc2_param.res_id, res->cur_budget);
-			// If the budget is exhausted, clear is_ghost and reschedule
-			if (!res->cur_budget) {
-				struct sup_reservation_environment* sup_env = &state->sup_env;
-
-				TRACE("GHOST FINISH id %d at %llu\n",
-				      tinfo->mc2_param.res_id, litmus_clock());
-				ce->running = NULL;
-				res->is_ghost = NO_CPU;
-
-				if (lv < CRIT_LEVEL_C) {
-					res = list_first_entry_or_null(
-						&sup_env->active_reservations,
-						struct reservation, list);
-					if (res)
-						litmus_reschedule_local();
-				} else if (lv == CRIT_LEVEL_C) {
-					res = list_first_entry_or_null(
-						&_global_env.active_reservations,
-						struct reservation, list);
-					if (res)
-						litmus_reschedule(state->cpu);
-				}
-			} else {
-				//TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget);
-				//gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
-				if (ret > res->cur_budget) {
-					ret = res->cur_budget;
-				}
-			}
-		}
-	}
-
-	return ret;
-}
-*/
-
 /* update_cpu_prio - Update cpu's priority
  * When a cpu picks a new task, call this function
  * to update cpu priorities.
@@ -580,21 +546,14 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	//raw_spin_lock_irqsave(&_global_env.lock, flags);
 	raw_spin_lock_irqsave(&state->lock, flags);
 	now = litmus_clock();
-	sup_update_time(&state->sup_env, now);
+	sup_update_time(state->sup_env, now);
 
 /* 9/20/2015 fix - no ghost job
 	remain_budget = mc2_update_ghost_state(state);
 */
-	update = state->sup_env.next_scheduler_update;
-	now = state->sup_env.env.current_time;
+	update = state->sup_env->next_scheduler_update;
+	now = state->sup_env->env.current_time;
 
-/* 9/20/2015 fix - no ghost job
-	if (remain_budget != ULLONG_MAX && update > now + remain_budget) {
-		update = now + remain_budget;
-	}
-
-	TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d remain_budget:%llu\n", now, update, state->cpu, global_schedule_now, remain_budget);
-*/
 
 	if (update <= now) {
 		litmus_reschedule_local();
@@ -603,8 +562,8 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 		restart = HRTIMER_RESTART;
 	}
 
-	raw_spin_lock(&_global_env.lock);
-	global_schedule_now = gmp_update_time(&_global_env, now);
+	raw_spin_lock(&global_lock);
+	global_schedule_now = gmp_update_time(_global_env, now);
 
 	BUG_ON(global_schedule_now < 0 || global_schedule_now > 4);
 
@@ -622,7 +581,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 			reschedule[cpu] = 1;
 		}
 	}
-	raw_spin_unlock(&_global_env.lock);
+	raw_spin_unlock(&global_lock);
 
 	raw_spin_unlock_irqrestore(&state->lock, flags);
 	//raw_spin_unlock_irqrestore(&_global_env.lock, flags);
@@ -654,8 +613,12 @@ static long mc2_complete_job(void)
 	ktime_t next_release;
 	long err;
 
+	enum crit_level lv;
+
 	tsk_rt(current)->completed = 1;
 
+	lv = get_task_crit_level(current);
+
 	/* If this the first job instance, we need to reset replenish
 	   time to the next release time */
 	if (tsk_rt(current)->sporadic_release) {
@@ -664,27 +627,25 @@ static long mc2_complete_job(void)
 		struct mc2_task_state *tinfo;
 		struct reservation *res = NULL;
 		unsigned long flags;
-		enum crit_level lv;
 
 		preempt_disable();
 		local_irq_save(flags);
 
 		tinfo = get_mc2_state(current);
-		lv = get_task_crit_level(current);
 
 		if (lv < CRIT_LEVEL_C) {
 			state = cpu_state_for(tinfo->cpu);
 			raw_spin_lock(&state->lock);
-			env = &(state->sup_env.env);
-			res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+			env = &(state->sup_env->env);
+			res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id[mode]);
 			env->time_zero = tsk_rt(current)->sporadic_release_time;
 		}
 		else if (lv == CRIT_LEVEL_C) {
 			state = local_cpu_state();
 			raw_spin_lock(&state->lock);
-			raw_spin_lock(&_global_env.lock);
-			res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
-			_global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
+			raw_spin_lock(&global_lock);
+			res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id[mode]);
+			_global_env->env.time_zero = tsk_rt(current)->sporadic_release_time;
 		}
 		else
 			BUG();
@@ -704,13 +665,13 @@ static long mc2_complete_job(void)
 		res->cur_budget = 0;
 		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-		//TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+		//TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
 
 		//if (lv < CRIT_LEVEL_C)
 		//	raw_spin_unlock(&state->lock);
 		//else
 		if (lv == CRIT_LEVEL_C)
-			raw_spin_unlock(&_global_env.lock);
+			raw_spin_unlock(&global_lock);
 
 		raw_spin_unlock(&state->lock);
 		local_irq_restore(flags);
@@ -724,6 +685,38 @@ static long mc2_complete_job(void)
 	next_release = ns_to_ktime(get_release(current));
 	preempt_disable();
 	TRACE_CUR("next_release=%llu\n", get_release(current));
+
+	/*
+	 * Changed logic for mode switch case
+	 * In case of mode switch, do not want to release
+	 * new job even if release time has passed
+	 */
+
+	raw_spin_lock(&mode_lock);
+	if (lv == CRIT_LEVEL_C && mode != requested_mode){
+		struct reservation *res = NULL;
+		res = gmp_find_by_id(_global_env, tsk_mc2_data(current)->res_id[mode]);
+		if (res && !res->reported){
+			res_reported--;
+
+			res->reported = 1;
+			//Current task doesn't exist in new mode
+			if ( !in_mode(current, requested_mode) ){
+				res->env->change_state(res->env, res, RESERVATION_INACTIVE);
+				raw_spin_unlock(&mode_lock);
+				litmus_reschedule_local();
+			}
+			//Otherwise schedule normally
+			else
+				raw_spin_unlock(&mode_lock);
+		}
+		else
+			raw_spin_unlock(&mode_lock);
+
+	}
+	else
+		raw_spin_unlock(&mode_lock);
+
 	if (get_release(current) > litmus_clock()) {
 		/* sleep until next_release */
 		set_current_state(TASK_INTERRUPTIBLE);
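Note (illustration, not part of the commit): the hunk above throttles level C during a pending mode change: each level-C reservation "reports" at most one job completion after sys_request_mode, decrementing res_reported from its initial value mode_sizes[mode]; CPU 0 enacts the switch only once the counter hits zero, and a task with no reservation in the new mode is deactivated instead of being re-released. A stand-alone sketch of this report-once countdown, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int res_reported;
    static bool reported[3]; /* per-reservation flag, cleared on each request */

    static void request(unsigned int nr_reservations)
    {
        res_reported = nr_reservations;
        for (unsigned int i = 0; i < nr_reservations; i++)
            reported[i] = false;
    }

    static void complete_job(unsigned int id)
    {
        if (!reported[id]) {   /* report at most once per reservation */
            reported[id] = true;
            res_reported--;
        }
    }

    int main(void)
    {
        request(3);
        complete_job(0); complete_job(0); complete_job(2); complete_job(1);
        printf("safe to switch: %s\n", res_reported == 0 ? "yes" : "no");
        return 0;
    }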
@@ -748,7 +741,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 {
 	struct reservation *res, *next;
 	struct task_struct *tsk = NULL;
-	struct crit_entry *ce;
+	//struct crit_entry *ce;
 	enum crit_level lv;
 	lt_t time_slice;
 
@@ -761,22 +754,11 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 			sup_scheduler_update_after(sup_env, res->cur_budget);
 			return tsk;
 		} else {
-			ce = &state->crit_entries[lv];
+			//ce = &state->crit_entries[lv];
 			sup_scheduler_update_after(sup_env, res->cur_budget);
 			res->blocked_by_ghost = 0;
 			res->is_ghost = NO_CPU;
 			return tsk;
-/* no ghost jobs
-			if (likely(!ce->running)) {
-				sup_scheduler_update_after(sup_env, res->cur_budget);
-				res->blocked_by_ghost = 0;
-				res->is_ghost = NO_CPU;
-				return tsk;
-			} else {
-				res->blocked_by_ghost = 1;
-				TRACE_TASK(ce->running, " is GHOST\n");
-			}
-*/
 		}
 	}
 }
@@ -793,16 +775,7 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 	enum crit_level lv;
 	lt_t time_slice;
 
-	/* no eligible level A or B tasks exists */
-	/* check the ghost job */
-	/*
-	ce = &state->crit_entries[CRIT_LEVEL_C];
-	if (ce->running) {
-		TRACE_TASK(ce->running," is GHOST\n");
-		return NULL;
-	}
-	*/
-	list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
+	list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
 		BUG_ON(!res);
 		if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
 			tsk = res->ops->dispatch_client(res, &time_slice);
@@ -810,7 +783,7 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 			lv = get_task_crit_level(tsk);
 			if (lv == NUM_CRIT_LEVELS) {
 #if BUDGET_ENFORCEMENT_AT_C
-				gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
+				gmp_add_event_after(_global_env, res->cur_budget, res->id, EVENT_DRAIN);
 #endif
 				res->event_added = 1;
 				res->blocked_by_ghost = 0;
@@ -821,7 +794,7 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 				//ce = &state->crit_entries[lv];
 				//if (likely(!ce->running)) {
 #if BUDGET_ENFORCEMENT_AT_C
-					gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
+					gmp_add_event_after(_global_env, res->cur_budget, res->id, EVENT_DRAIN);
 #endif
 					res->event_added = 1;
 					res->blocked_by_ghost = 0;
@@ -905,16 +878,16 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	blocks = exists && !is_current_running();
 	np = exists && is_np(state->scheduled);
 
-	raw_spin_lock(&_global_env.lock);
+	raw_spin_lock(&global_lock);
 	preempt = resched_cpu[state->cpu];
 	resched_cpu[state->cpu] = 0;
-	raw_spin_unlock(&_global_env.lock);
+	raw_spin_unlock(&global_lock);
 
 	/* update time */
-	state->sup_env.will_schedule = true;
+	state->sup_env->will_schedule = true;
 
 	now = litmus_clock();
-	sup_update_time(&state->sup_env, now);
+	sup_update_time(state->sup_env, now);
 	/* 9/20/2015 fix */
 	//raw_spin_lock(&_global_env.lock);
 	//to_schedule = gmp_update_time(&_global_env, now);
@@ -935,28 +908,28 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	}*/
 	if (is_realtime(current) && blocks) {
 		if (get_task_crit_level(current) == CRIT_LEVEL_C)
-			raw_spin_lock(&_global_env.lock);
+			raw_spin_lock(&global_lock);
 		task_departs(current, is_completed(current));
 		if (get_task_crit_level(current) == CRIT_LEVEL_C)
-			raw_spin_unlock(&_global_env.lock);
+			raw_spin_unlock(&global_lock);
 	}
 
 	/* figure out what to schedule next */
 	if (!np)
-		state->scheduled = mc2_dispatch(&state->sup_env, state);
+		state->scheduled = mc2_dispatch(state->sup_env, state);
 
 	if (!state->scheduled) {
-		raw_spin_lock(&_global_env.lock);
-		to_schedule = gmp_update_time(&_global_env, now);
+		raw_spin_lock(&global_lock);
+		to_schedule = gmp_update_time(_global_env, now);
 		state->scheduled = mc2_global_dispatch(state);
 		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 		update_cpu_prio(state);
-		raw_spin_unlock(&_global_env.lock);
+		raw_spin_unlock(&global_lock);
 	} else {
-		raw_spin_lock(&_global_env.lock);
+		raw_spin_lock(&global_lock);
 		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 		update_cpu_prio(state);
-		raw_spin_unlock(&_global_env.lock);
+		raw_spin_unlock(&global_lock);
 	}
 
 	//raw_spin_lock(&_lowest_prio_cpu.lock);
@@ -968,21 +941,21 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	sched_state_task_picked();
 
 	/* program scheduler timer */
-	state->sup_env.will_schedule = false;
+	state->sup_env->will_schedule = false;
 
 	/* NOTE: drops state->lock */
 	mc2_update_timer_and_unlock(state);
 
 	if (prev != state->scheduled && is_realtime(prev)) {
 		struct mc2_task_state* tinfo = get_mc2_state(prev);
-		struct reservation* res = tinfo->res_info.client.reservation;
+		struct reservation* res = tinfo->res_info[mode].client.reservation;
 		TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
 		res->scheduled_on = NO_CPU;
 		TRACE_TASK(prev, "descheduled.\n");
 		/* if prev is preempted and a global task, find the lowest cpu and reschedule */
 		if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
 			int cpu;
-			raw_spin_lock(&_global_env.lock);
+			raw_spin_lock(&global_lock);
 			cpu = get_lowest_prio_cpu(res?res->priority:0);
 			TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
 			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
@@ -991,12 +964,12 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 				resched_cpu[cpu] = 1;
 				//raw_spin_unlock(&_lowest_prio_cpu.lock);
 			}
-			raw_spin_unlock(&_global_env.lock);
+			raw_spin_unlock(&global_lock);
 		}
 	}
 
 	if (to_schedule != 0) {
-		raw_spin_lock(&_global_env.lock);
+		raw_spin_lock(&global_lock);
 		while (to_schedule--) {
 			int cpu = get_lowest_prio_cpu(0);
 			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
@@ -1004,7 +977,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 				resched_cpu[cpu] = 1;
 			}
 		}
-		raw_spin_unlock(&_global_env.lock);
+		raw_spin_unlock(&global_lock);
 	}
 
 	if (state->scheduled) {
@@ -1078,13 +1051,13 @@ static void mc2_task_resume(struct task_struct *tsk)
 	 * since we might not actually be executing on tinfo->cpu
 	 * at the moment. */
 	if (tinfo->cpu != -1) {
-		sup_update_time(&state->sup_env, litmus_clock());
+		sup_update_time(state->sup_env, litmus_clock());
 		task_arrives(state, tsk);
 	} else {
-		raw_spin_lock(&_global_env.lock);
-		gmp_update_time(&_global_env, litmus_clock());
+		raw_spin_lock(&global_lock);
+		gmp_update_time(_global_env, litmus_clock());
 		task_arrives(state, tsk);
-		raw_spin_unlock(&_global_env.lock);
+		raw_spin_unlock(&global_lock);
 	}
 
 	/* 9/20/2015 fix
@@ -1110,20 +1083,21 @@ static void mc2_task_resume(struct task_struct *tsk)
  */
 static long mc2_admit_task(struct task_struct *tsk)
 {
-	long err = -ESRCH;
+	long err = 0;
 	unsigned long flags;
 	struct reservation *res;
 	struct mc2_cpu_state *state;
 	struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
 	struct mc2_task *mp = tsk_rt(tsk)->mc2_data;
 	enum crit_level lv;
-
+	int i;
+
 	if (!tinfo)
 		return -ENOMEM;
 
 	if (!mp) {
 		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
-		return err;
+		return -ESRCH;
 	}
 
 	lv = mp->crit;
@@ -1132,53 +1106,83 @@ static long mc2_admit_task(struct task_struct *tsk)
 	if (lv < CRIT_LEVEL_C) {
 		state = cpu_state_for(task_cpu(tsk));
 		raw_spin_lock_irqsave(&state->lock, flags);
+
+		tinfo->mc2_param.crit = mp->crit;
+		tinfo->cpu = task_cpu(tsk);
+		tinfo->has_departed = true;
 
-		res = sup_find_by_id(&state->sup_env, mp->res_id);
+		for(i = 0; i < NR_MODES; i++){
+			if (!in_mode(tsk, i)){
+				//task not present in mode
+				continue;
+			}
 
-		/* found the appropriate reservation */
-		if (res) {
-			TRACE_TASK(tsk, "SUP FOUND RES ID\n");
-			tinfo->mc2_param.crit = mp->crit;
-			tinfo->mc2_param.res_id = mp->res_id;
-
-			/* initial values */
-			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
-			tinfo->cpu = task_cpu(tsk);
-			tinfo->has_departed = true;
-			tsk_rt(tsk)->plugin_state = tinfo;
+			res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id[i]);
+
+			/* found the appropriate reservation */
+			if (res) {
+				TRACE_TASK(tsk, "SUP FOUND RES ID\n");
+				tinfo->mc2_param.res_id[i] = mp->res_id[i];
+
+				/* initial values */
+				err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res);
+			}
+			else{
+				//failed to find an expected reservation
+				err = -ESRCH;
+			}
+		}
 
+		if (!err){
 			/* disable LITMUS^RT's per-thread budget enforcement */
+			tsk_rt(tsk)->plugin_state = tinfo;
 			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 		}
 
+
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	} else if (lv == CRIT_LEVEL_C) {
 		state = local_cpu_state();
 		raw_spin_lock_irqsave(&state->lock, flags);
-		raw_spin_lock(&_global_env.lock);
+		raw_spin_lock(&global_lock);
 		//state = local_cpu_state();
 
 		//raw_spin_lock(&state->lock);
+
+		tinfo->mc2_param.crit = mp->crit;
+		tinfo->cpu = -1;
+		tinfo->has_departed = true;
 
-		res = gmp_find_by_id(&_global_env, mp->res_id);
-
-		/* found the appropriate reservation (or vCPU) */
-		if (res) {
-			TRACE_TASK(tsk, "GMP FOUND RES ID\n");
-			tinfo->mc2_param.crit = mp->crit;
-			tinfo->mc2_param.res_id = mp->res_id;
-
-			/* initial values */
-			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
-			tinfo->cpu = -1;
-			tinfo->has_departed = true;
-			tsk_rt(tsk)->plugin_state = tinfo;
-
+		for(i = 0; i < NR_MODES; i++){
+			if (!in_mode(tsk, i)) continue;
+			res = gmp_find_by_id(_global_env, mp->res_id[mode]);
+
+			/* found the appropriate reservation (or vCPU) */
+			if (res) {
+				TRACE_TASK(tsk, "GMP FOUND RES ID\n");
+				tinfo->mc2_param.res_id[i] = mp->res_id[i];
+
+				/* initial values */
+				err = err? err:mc2_task_client_init(&tinfo->res_info[mode], &tinfo->mc2_param, tsk, res);
+
+			}
+		}
+
+		if (!err){
 			/* disable LITMUS^RT's per-thread budget enforcement */
+			tsk_rt(tsk)->plugin_state = tinfo;
 			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+			raw_spin_lock(&mode_lock);
+			for(i = 0; i < NR_MODES; i++){
+				if (in_mode(tsk, i)){
+					mode_sizes[i]++;
+				}
+			}
+			raw_spin_unlock(&mode_lock);
+
 		}
 
-		raw_spin_unlock(&_global_env.lock);
+		raw_spin_unlock(&global_lock);
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	}
 
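Note (illustration, not part of the commit): admission now initializes one client per mode the task belongs to, and for level-C tasks it also increments mode_sizes[i] for each such mode; task exit decrements the same counters, so sys_request_mode always knows how many level-C reservations must report. A stand-alone sketch of that population accounting, with illustrative values:

    #include <stdio.h>

    #define NR_MODES 4
    static unsigned int mode_sizes[NR_MODES];

    static void admit(unsigned int mode_mask)
    {
        for (int i = 0; i < NR_MODES; i++)
            if (mode_mask & (1u << i))
                mode_sizes[i]++;
    }

    static void depart(unsigned int mode_mask)
    {
        for (int i = 0; i < NR_MODES; i++)
            if (mode_mask & (1u << i))
                mode_sizes[i]--;
    }

    int main(void)
    {
        admit(0x3);  /* task in modes 0 and 1 */
        admit(0x2);  /* task in mode 1 only   */
        depart(0x3);
        printf("mode 1 population: %u\n", mode_sizes[1]);
        return 0;
    }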
@@ -1225,11 +1229,11 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	raw_spin_lock(&state->lock);
 
 	if (lv == CRIT_LEVEL_C) {
-		raw_spin_lock(&_global_env.lock);
-		res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
+		raw_spin_lock(&global_lock);
+		res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id[mode]);
 	}
 	else {
-		res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+		res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id[mode]);
 	}
 	//res = res_find_by_id(state, tinfo->mc2_param.res_id);
 	release = res->next_replenishment;
@@ -1238,25 +1242,25 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	/* Assumption: litmus_clock() is synchronized across cores
 	 * [see comment in pres_task_resume()] */
 	if (lv == CRIT_LEVEL_C) {
-		gmp_update_time(&_global_env, litmus_clock());
+		gmp_update_time(_global_env, litmus_clock());
 		//raw_spin_unlock(&_global_env.lock);
 	}
 	else
-		sup_update_time(&state->sup_env, litmus_clock());
+		sup_update_time(state->sup_env, litmus_clock());
 	//mc2_update_time(lv, state, litmus_clock());
 	/* 9/20/2015 fix
 	mc2_update_ghost_state(state);
 	*/
 	task_arrives(state, tsk);
 	if (lv == CRIT_LEVEL_C)
-		raw_spin_unlock(&_global_env.lock);
+		raw_spin_unlock(&global_lock);
 	/* NOTE: drops state->lock */
 	TRACE("mc2_new()\n");
 
 	mc2_update_timer_and_unlock(state);
 	} else {
 		if (lv == CRIT_LEVEL_C)
-			raw_spin_unlock(&_global_env.lock);
+			raw_spin_unlock(&global_lock);
 		raw_spin_unlock(&state->lock);
 		//raw_spin_unlock(&_global_env.lock);
 	}
@@ -1281,24 +1285,18 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 	int found = 0;
 	//enum crit_level lv = get_task_crit_level(current);
 	unsigned long flags;
+	int i;
 
 	if (cpu == -1) {
-		/* if the reservation is global reservation */
 		local_irq_save(flags);
+		raw_spin_lock(&global_lock);
+
+		/* if the reservation is global reservation */
 		//state = local_cpu_state();
-		raw_spin_lock(&_global_env.lock);
+		for(i = 0; i < NR_MODES; i++){
 		//raw_spin_lock(&state->lock);
 
-		list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
-			if (res->id == reservation_id) {
-				list_del(&res->list);
-				kfree(res);
-				found = 1;
-				ret = 0;
-			}
-		}
-		if (!found) {
-			list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) {
+			list_for_each_entry_safe(res, next, &_global_env->depleted_reservations, list) {
 				if (res->id == reservation_id) {
 					list_del(&res->list);
 					kfree(res);
@@ -1306,51 +1304,48 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 					found = 1;
 					ret = 0;
 				}
 			}
-		}
-		if (!found) {
-			list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
-				if (res->id == reservation_id) {
-					list_del(&res->list);
-					kfree(res);
-					found = 1;
-					ret = 0;
+			if (!found) {
+				list_for_each_entry_safe(res, next, &_global_env->inactive_reservations, list) {
+					if (res->id == reservation_id) {
+						list_del(&res->list);
+						kfree(res);
+						found = 1;
+						ret = 0;
+					}
+				}
+			}
+			if (!found) {
+				list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
+					if (res->id == reservation_id) {
+						list_del(&res->list);
+						kfree(res);
+						found = 1;
+						ret = 0;
+					}
 				}
 			}
-		}
 
 		//raw_spin_unlock(&state->lock);
-		raw_spin_unlock(&_global_env.lock);
+			if (found) break;
+		}
+		raw_spin_unlock(&global_lock);
 		local_irq_restore(flags);
 	} else {
 		/* if the reservation is partitioned reservation */
 		state = cpu_state_for(cpu);
-		local_irq_save(flags);
-		raw_spin_lock(&state->lock);
-
-		// res = sup_find_by_id(&state->sup_env, reservation_id);
-		sup_env = &state->sup_env;
-		list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
-			if (res->id == reservation_id) {
-/*
-				if (lv == CRIT_LEVEL_A) {
-					struct table_driven_reservation *tdres;
-					tdres = container_of(res, struct table_driven_reservation, res);
-					kfree(tdres->intervals);
-				}
-*/
-				list_del(&res->list);
-				kfree(res);
-				found = 1;
-				ret = 0;
-			}
-		}
-		if (!found) {
-			list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
+		for (i = 0; i < NR_MODES; i++){
+			local_irq_save(flags);
+			raw_spin_lock(&state->lock);
+
+			// res = sup_find_by_id(state->sup_env, reservation_id);
+			sup_env = state->sup_env;
+			list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
 				if (res->id == reservation_id) {
-/*				if (lv == CRIT_LEVEL_A) {
-					struct table_driven_reservation *tdres;
-					tdres = container_of(res, struct table_driven_reservation, res);
-					kfree(tdres->intervals);
+/*
+					if (lv == CRIT_LEVEL_A) {
+						struct table_driven_reservation *tdres;
+						tdres = container_of(res, struct table_driven_reservation, res);
+						kfree(tdres->intervals);
 					}
 */
 					list_del(&res->list);
@@ -1359,26 +1354,43 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 					kfree(res);
 					found = 1;
 					ret = 0;
-				}
-			}
-		}
-		if (!found) {
-			list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
-				if (res->id == reservation_id) {
-/*					if (lv == CRIT_LEVEL_A) {
-					struct table_driven_reservation *tdres;
-					tdres = container_of(res, struct table_driven_reservation, res);
-					kfree(tdres->intervals);
-					}
-*/
-					list_del(&res->list);
-					kfree(res);
-					found = 1;
-					ret = 0;
+				}
+			}
+			if (!found) {
+				list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
+					if (res->id == reservation_id) {
+/*					if (lv == CRIT_LEVEL_A) {
+						struct table_driven_reservation *tdres;
+						tdres = container_of(res, struct table_driven_reservation, res);
+						kfree(tdres->intervals);
+					}
+*/
+						list_del(&res->list);
+						kfree(res);
+						found = 1;
+						ret = 0;
+					}
+				}
+			}
+			if (!found) {
+				list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
+					if (res->id == reservation_id) {
+/*					if (lv == CRIT_LEVEL_A) {
+						struct table_driven_reservation *tdres;
+						tdres = container_of(res, struct table_driven_reservation, res);
+						kfree(tdres->intervals);
+					}
+*/
+						list_del(&res->list);
+						kfree(res);
+						found = 1;
+						ret = 0;
+					}
 				}
 			}
-		}
 
-		raw_spin_unlock(&state->lock);
-		local_irq_restore(flags);
+			raw_spin_unlock(&state->lock);
+			local_irq_restore(flags);
+			if (found) break;
+		}
 	}
 
 	TRACE("Rerservation destroyed ret = %d\n", ret);
@@ -1393,8 +1405,9 @@ static void mc2_task_exit(struct task_struct *tsk)
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
 	struct mc2_cpu_state *state;
 	enum crit_level lv = tinfo->mc2_param.crit;
-	struct crit_entry* ce;
+	//struct crit_entry* ce;
 	int cpu;
+	int i;
 
 	local_irq_save(flags);
 	if (tinfo->cpu != -1)
@@ -1407,9 +1420,9 @@ static void mc2_task_exit(struct task_struct *tsk)
 	if (state->scheduled == tsk)
 		state->scheduled = NULL;
 
-	ce = &state->crit_entries[lv];
-	if (ce->running == tsk)
-		ce->running = NULL;
+	//ce = &state->crit_entries[lv];
+	//if (ce->running == tsk)
+	//	ce->running = NULL;
 
 	/* remove from queues */
 	if (is_running(tsk)) {
@@ -1418,11 +1431,11 @@ static void mc2_task_exit(struct task_struct *tsk)
 
 	/* update both global and partitioned */
 	if (lv < CRIT_LEVEL_C) {
-		sup_update_time(&state->sup_env, litmus_clock());
+		sup_update_time(state->sup_env, litmus_clock());
 	}
 	else if (lv == CRIT_LEVEL_C) {
-		raw_spin_lock(&_global_env.lock);
-		gmp_update_time(&_global_env, litmus_clock());
+		raw_spin_lock(&global_lock);
+		gmp_update_time(_global_env, litmus_clock());
 		//raw_spin_unlock(&_global_env.lock);
 	}
 	/* 9/20/2015 fix
@@ -1430,7 +1443,7 @@ static void mc2_task_exit(struct task_struct *tsk)
 	*/
 	task_departs(tsk, 0);
 	if (lv == CRIT_LEVEL_C)
-		raw_spin_unlock(&_global_env.lock);
+		raw_spin_unlock(&global_lock);
 
 	/* NOTE: drops state->lock */
 	TRACE("mc2_exit()\n");
@@ -1442,6 +1455,15 @@ static void mc2_task_exit(struct task_struct *tsk)
 	}
 
 	if (lv == CRIT_LEVEL_C) {
+
+		raw_spin_lock(&mode_lock);
+		for(i = 0; i < NR_MODES; i++){
+			if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) )
+				continue;
+			mode_sizes[i]--;
+		}
+		raw_spin_unlock(&mode_lock);
+
 		for_each_online_cpu(cpu) {
 			state = cpu_state_for(cpu);
 			if (state == local_cpu_state())
@@ -1451,9 +1473,9 @@ static void mc2_task_exit(struct task_struct *tsk)
 			if (state->scheduled == tsk)
 				state->scheduled = NULL;
 
-			ce = &state->crit_entries[lv];
-			if (ce->running == tsk)
-				ce->running = NULL;
+			//ce = &state->crit_entries[lv];
+			//if (ce->running == tsk)
+			//	ce->running = NULL;
 
 			raw_spin_unlock(&state->lock);
 		}
@@ -1474,12 +1496,14 @@ static long create_polling_reservation(
 	struct reservation_config *config)
 {
 	struct mc2_cpu_state *state;
-	struct reservation* res;
+	int i;
+	//struct reservation* res = NULL;
 	struct polling_reservation *pres;
 	unsigned long flags;
 	int use_edf = config->priority == LITMUS_NO_PRIORITY;
 	int periodic = res_type == PERIODIC_POLLING;
 	long err = -EINVAL;
+	bool resExist = false;
 
 	/* sanity checks */
 	if (config->polling_params.budget >
@@ -1501,6 +1525,12 @@ static long create_polling_reservation(
1501 "offset > period\n", config->id); 1525 "offset > period\n", config->id);
1502 return -EINVAL; 1526 return -EINVAL;
1503 } 1527 }
1528 //Added sanity check for mode
1529 if (config->mode < 0 || config->mode >= NR_MODES){
1530 printk(KERN_ERR "invalid polling reservation (%u): "
1531 "Mode outside range\n", config->id);
1532 return -EINVAL;
1533 }
1504 1534
1505 /* Allocate before we grab a spin lock. 1535 /* Allocate before we grab a spin lock.
1506 * Todo: would be nice to use a core-local allocation. 1536 * Todo: would be nice to use a core-local allocation.
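Note (illustration, not part of the commit): reservation_config now carries a mode field selecting which per-mode environment receives the new reservation, and ids are forced unique across all modes so that destroy-by-id stays unambiguous. A stand-alone sketch of the two checks this implies, with an illustrative per-mode id table standing in for sup_find_by_id()/gmp_find_by_id():

    #include <stdbool.h>

    #define NR_MODES 4
    #define MAX_RES  8

    static unsigned int ids[NR_MODES][MAX_RES]; /* 0 = empty slot */

    static bool id_in_mode(int mode, unsigned int id)
    {
        for (int j = 0; j < MAX_RES; j++)
            if (ids[mode][j] == id)
                return true;
        return false;
    }

    static bool config_ok(int mode, unsigned int id)
    {
        if (mode < 0 || mode >= NR_MODES)
            return false;          /* mode outside range */
        for (int i = 0; i < NR_MODES; i++)
            if (id_in_mode(i, id))
                return false;      /* id must be unique across modes */
        return true;
    }

    int main(void)
    {
        ids[2][0] = 42;                   /* id 42 already lives in mode 2 */
        return config_ok(1, 42) ? 1 : 0;  /* rejected: id not unique */
    }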
@@ -1514,9 +1544,15 @@ static long create_polling_reservation(
 	//raw_spin_lock_irqsave(&_global_env.lock, flags);
 	state = cpu_state_for(config->cpu);
 	raw_spin_lock_irqsave(&state->lock, flags);
-
-	res = sup_find_by_id(&state->sup_env, config->id);
-	if (!res) {
+	//for the sake of reservation_destroy format, force id's unique across
+	//all modes
+	for(i = 0; i < NR_MODES; i++){
+		if( sup_find_by_id(&(state->sup_env_modes[i]), config->id) ){
+			resExist = true;
+			break;
+		}
+	}
+	if (!resExist) {
 		polling_reservation_init(pres, use_edf, periodic,
 			config->polling_params.budget,
 			config->polling_params.period,
@@ -1531,7 +1567,7 @@ static long create_polling_reservation(
 		}*/
 		if (!use_edf)
 			pres->res.priority = config->priority;
-		sup_add_new_reservation(&state->sup_env, &pres->res);
+		sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &pres->res);
 		err = config->id;
 		TRACE_CUR("reservation created R%d priority : %llu\n", config->id, pres->res.priority);
 	} else {
@@ -1542,10 +1578,16 @@ static long create_polling_reservation(
 		//raw_spin_unlock_irqrestore(&_global_env.lock, flags);
 
 	} else {
-		raw_spin_lock_irqsave(&_global_env.lock, flags);
+		raw_spin_lock_irqsave(&global_lock, flags);
 
-		res = gmp_find_by_id(&_global_env, config->id);
-		if (!res) {
+		//force id's unique across all modes
+		for(i = 0; i < NR_MODES; i++){
+			if (gmp_find_by_id(&(_global_env_modes[i]), config->id)){
+				resExist = true;
+				break;
+			}
+		}
+		if (!resExist) {
 			polling_reservation_init(pres, use_edf, periodic,
 				config->polling_params.budget,
 				config->polling_params.period,
@@ -1557,14 +1599,18 @@ static long create_polling_reservation(
1557 pres->res.is_ghost = NO_CPU; 1599 pres->res.is_ghost = NO_CPU;
1558 if (!use_edf) 1600 if (!use_edf)
1559 pres->res.priority = config->priority; 1601 pres->res.priority = config->priority;
1560 gmp_add_new_reservation(&_global_env, &pres->res); 1602 gmp_add_new_reservation(&(_global_env_modes[config->mode]), &pres->res);
1561 err = config->id; 1603 err = config->id;
1562 } else { 1604 } else {
1563 err = -EEXIST; 1605 err = -EEXIST;
1564 } 1606 }
1565 raw_spin_unlock_irqrestore(&_global_env.lock, flags); 1607 raw_spin_unlock_irqrestore(&global_lock, flags);
1566 } 1608 }
1567 1609
1610
1611 pres->res.reported = 0;
1612 pres->res.tsk = config->tsk;
1613
1568 if (err < 0) 1614 if (err < 0)
1569 kfree(pres); 1615 kfree(pres);
1570 1616
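Both creation paths above repeat the same cross-mode duplicate scan, differing only in the environment array they walk. Below is a minimal sketch of the partitioned variant as a helper, assuming the patch's sup_env_modes layout; the name mc2_sup_id_in_use is hypothetical, not part of the patch:

/* hypothetical helper: returns true if some mode already uses this ID;
 * caller must hold state->lock */
static bool mc2_sup_id_in_use(struct mc2_cpu_state *state, unsigned int id)
{
	int i;

	for (i = 0; i < NR_MODES; i++) {
		if (sup_find_by_id(&state->sup_env_modes[i], id))
			return true;
	}
	return false;
}

The global branch is the same scan with gmp_find_by_id() over _global_env_modes[], taken under global_lock instead of the per-CPU lock.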
@@ -1579,14 +1625,14 @@ static long create_table_driven_reservation(
1579 struct reservation_config *config) 1625 struct reservation_config *config)
1580{ 1626{
1581 struct mc2_cpu_state *state; 1627 struct mc2_cpu_state *state;
1582 struct reservation* res; 1628 //struct reservation* res = NULL;
1583 struct table_driven_reservation *td_res = NULL; 1629 struct table_driven_reservation *td_res = NULL;
1584 struct lt_interval *slots = NULL; 1630 struct lt_interval *slots = NULL;
1585 size_t slots_size; 1631 size_t slots_size;
1586 unsigned int i, num_slots; 1632 unsigned int i, num_slots;
1587 unsigned long flags; 1633 unsigned long flags;
1588 long err = -EINVAL; 1634 long err = -EINVAL;
1589 1635 bool resExist = false;
1590 1636
1591 if (!config->table_driven_params.num_intervals) { 1637 if (!config->table_driven_params.num_intervals) {
1592 printk(KERN_ERR "invalid table-driven reservation (%u): " 1638 printk(KERN_ERR "invalid table-driven reservation (%u): "
@@ -1600,6 +1646,12 @@ static long create_table_driven_reservation(
1600 return -EINVAL; 1646 return -EINVAL;
1601 } 1647 }
1602 1648
1649 if (config->mode < 0 || config->mode >= NR_MODES) {
1650 printk(KERN_ERR "invalid table-driven reservation (%u): "
1651 "mode out of range\n", config->id);
1652 return -EINVAL;
1653 }
1654
1603 num_slots = config->table_driven_params.num_intervals; 1655 num_slots = config->table_driven_params.num_intervals;
1604 slots_size = sizeof(slots[0]) * num_slots; 1656 slots_size = sizeof(slots[0]) * num_slots;
1605 slots = kzalloc(slots_size, GFP_KERNEL); 1657 slots = kzalloc(slots_size, GFP_KERNEL);
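This mode-range check duplicates the one in create_polling_reservation(); a one-line validator would keep the two call sites in sync. A sketch, where mc2_valid_mode is an illustrative name only:

/* hypothetical: true iff m indexes a configured mode */
static inline bool mc2_valid_mode(int m)
{
	return m >= 0 && m < NR_MODES;
}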
@@ -1649,16 +1701,22 @@ static long create_table_driven_reservation(
1649 if (!err) { 1701 if (!err) {
1650 state = cpu_state_for(config->cpu); 1702 state = cpu_state_for(config->cpu);
1651 raw_spin_lock_irqsave(&state->lock, flags); 1703 raw_spin_lock_irqsave(&state->lock, flags);
1652 1704
1653 res = sup_find_by_id(&state->sup_env, config->id); 1705 //force unique IDs across all modes
1654 if (!res) { 1706 for (i = 0; i < NR_MODES; i++) {
1707 if (sup_find_by_id(&(state->sup_env_modes[i]), config->id)) {
1708 resExist = true;
1709 break;
1710 }
1711 }
1712 if (!resExist) {
1655 table_driven_reservation_init(td_res, 1713 table_driven_reservation_init(td_res,
1656 config->table_driven_params.major_cycle_length, 1714 config->table_driven_params.major_cycle_length,
1657 slots, num_slots); 1715 slots, num_slots);
1658 td_res->res.id = config->id; 1716 td_res->res.id = config->id;
1659 td_res->res.priority = config->priority; 1717 td_res->res.priority = config->priority;
1660 td_res->res.blocked_by_ghost = 0; 1718 td_res->res.blocked_by_ghost = 0;
1661 sup_add_new_reservation(&state->sup_env, &td_res->res); 1719 sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &td_res->res);
1662 err = config->id; 1720 err = config->id;
1663 } else { 1721 } else {
1664 err = -EEXIST; 1722 err = -EEXIST;
@@ -1667,6 +1725,9 @@ static long create_table_driven_reservation(
1667 raw_spin_unlock_irqrestore(&state->lock, flags); 1725 raw_spin_unlock_irqrestore(&state->lock, flags);
1668 } 1726 }
1669 1727
1728 td_res->res.reported = 0;
1729 td_res->res.tsk = config->tsk;
1730
1670 if (err < 0) { 1731 if (err < 0) {
1671 kfree(slots); 1732 kfree(slots);
1672 kfree(td_res); 1733 kfree(td_res);
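Note that td_res->res.reported and td_res->res.tsk are assigned only after state->lock has been dropped, even on the -EEXIST path where the reservation was never added; create_polling_reservation() above has the same pattern. One possible reordering, sketched from the patch's own code, initializes the fields inside the success branch before the reservation is published:

if (!resExist) {
	table_driven_reservation_init(td_res,
		config->table_driven_params.major_cycle_length,
		slots, num_slots);
	td_res->res.id = config->id;
	td_res->res.priority = config->priority;
	td_res->res.blocked_by_ghost = 0;
	td_res->res.reported = 0;	/* not yet counted toward a mode change */
	td_res->res.tsk = config->tsk;
	sup_add_new_reservation(&state->sup_env_modes[config->mode],
				&td_res->res);
	err = config->id;
} else {
	err = -EEXIST;
}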
@@ -1747,18 +1808,24 @@ static void mc2_setup_domain_proc(void)
1747 1808
1748static long mc2_activate_plugin(void) 1809static long mc2_activate_plugin(void)
1749{ 1810{
1750 int cpu, lv; 1811 int cpu;//, lv;
1751 struct mc2_cpu_state *state; 1812 struct mc2_cpu_state *state;
1752 struct cpu_entry *ce; 1813 struct cpu_entry *ce;
1753 1814 int i;
1754 gmp_init(&_global_env);
1755 raw_spin_lock_init(&_lowest_prio_cpu.lock);
1756 1815
1816 for (i = 0; i < NR_MODES; i++) {
1817 gmp_init(&(_global_env_modes[i]));
1818 }
1819 _global_env = &_global_env_modes[0];
1820
1821 raw_spin_lock_init(&_lowest_prio_cpu.lock);
1822 raw_spin_lock_init(&mode_lock);
1823
1757 for_each_online_cpu(cpu) { 1824 for_each_online_cpu(cpu) {
1758 TRACE("Initializing CPU%d...\n", cpu); 1825 TRACE("Initializing CPU%d...\n", cpu);
1759 1826
1760 resched_cpu[cpu] = 0; 1827 resched_cpu[cpu] = 0;
1761 level_a_priorities[cpu] = 0; 1828 //level_a_priorities[cpu] = 0;
1762 state = cpu_state_for(cpu); 1829 state = cpu_state_for(cpu);
1763 ce = &_lowest_prio_cpu.cpu_entries[cpu]; 1830 ce = &_lowest_prio_cpu.cpu_entries[cpu];
1764 1831
@@ -1771,19 +1838,34 @@ static long mc2_activate_plugin(void)
1771 raw_spin_lock_init(&state->lock); 1838 raw_spin_lock_init(&state->lock);
1772 state->cpu = cpu; 1839 state->cpu = cpu;
1773 state->scheduled = NULL; 1840 state->scheduled = NULL;
1774 for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { 1841 //for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
1775 struct crit_entry *cr_entry = &state->crit_entries[lv]; 1842 // struct crit_entry *cr_entry = &state->crit_entries[lv];
1776 cr_entry->level = lv; 1843 // cr_entry->level = lv;
1777 cr_entry->running = NULL; 1844 // cr_entry->running = NULL;
1845 //}
1846
1847 for (i = 0; i < NR_MODES; i++) {
1848 sup_init(&(state->sup_env_modes[i]));
1778 } 1849 }
1779 sup_init(&state->sup_env); 1850 state->sup_env = &(state->sup_env_modes[0]);
1780 1851
1781 hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 1852 hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1782 state->timer.function = on_scheduling_timer; 1853 state->timer.function = on_scheduling_timer;
1854 state->spin_flag = false;
1783 } 1855 }
1784 1856
1785 mc2_setup_domain_proc(); 1857 mc2_setup_domain_proc();
1786 1858
1859 mode = 0;
1860 requested_mode = 0;
1861 /* mode_lock is declared at file scope and initialized above */
1862
1863 for (i = 0; i < NR_MODES; i++) {
1864 mode_sizes[i] = 0;
1865 }
1866 res_reported = 0;
1867 cpu_0_spin_flag = false;
1868
1787 return 0; 1869 return 0;
1788} 1870}
1789 1871
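The per-mode arrays initialized above exist so that a mode change only has to repoint the active environment pointers. A hypothetical sketch of that switch (the function name is illustrative, and the required locking via global_lock, mode_lock, and the per-CPU state locks is elided):

static void mc2_switch_to_mode(int new_mode)
{
	int cpu;

	/* level-C (global) reservations for the new mode */
	_global_env = &_global_env_modes[new_mode];

	/* level-A/B (partitioned) reservations on every CPU */
	for_each_online_cpu(cpu) {
		struct mc2_cpu_state *state = cpu_state_for(cpu);

		state->sup_env = &state->sup_env_modes[new_mode];
	}
	mode = new_mode;
}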
@@ -1810,7 +1892,8 @@ static long mc2_deactivate_plugin(void)
1810 struct reservation *res; 1892 struct reservation *res;
1811 struct next_timer_event *event; 1893 struct next_timer_event *event;
1812 struct cpu_entry *ce; 1894 struct cpu_entry *ce;
1813 1895 int i;
1896
1814 for_each_online_cpu(cpu) { 1897 for_each_online_cpu(cpu) {
1815 state = cpu_state_for(cpu); 1898 state = cpu_state_for(cpu);
1816 raw_spin_lock(&state->lock); 1899 raw_spin_lock(&state->lock);
@@ -1825,72 +1908,77 @@ static long mc2_deactivate_plugin(void)
1825 ce->lv = NUM_CRIT_LEVELS; 1908 ce->lv = NUM_CRIT_LEVELS;
1826 ce->will_schedule = false; 1909 ce->will_schedule = false;
1827 1910
1828 /* Delete all reservations --- assumes struct reservation
1829 * is prefix of containing struct. */
1830 1911
1831 while (!list_empty(&state->sup_env.active_reservations)) { 1912 for (i = 0; i < NR_MODES; i++) {
1913 /* Delete all reservations --- assumes struct reservation
1914 * is prefix of containing struct. */
1915 state->sup_env = &state->sup_env_modes[i];
1916 while (!list_empty(&state->sup_env->active_reservations)) {
1917 res = list_first_entry(
1918 &state->sup_env->active_reservations,
1919 struct reservation, list);
1920 list_del(&res->list);
1921 kfree(res);
1922 }
1923
1924 while (!list_empty(&state->sup_env->inactive_reservations)) {
1925 res = list_first_entry(
1926 &state->sup_env->inactive_reservations,
1927 struct reservation, list);
1928 list_del(&res->list);
1929 kfree(res);
1930 }
1931
1932 while (!list_empty(&state->sup_env->depleted_reservations)) {
1933 res = list_first_entry(
1934 &state->sup_env->depleted_reservations,
1935 struct reservation, list);
1936 list_del(&res->list);
1937 kfree(res);
1938 }
1939 }
1940
1941 raw_spin_unlock(&state->lock);
1942 }
1943
1944 raw_spin_lock(&global_lock);
1945 for (i = 0; i < NR_MODES; i++) {
1946 _global_env = &_global_env_modes[i];
1947 while (!list_empty(&_global_env->active_reservations)) {
1832 res = list_first_entry( 1948 res = list_first_entry(
1833 &state->sup_env.active_reservations, 1949 &_global_env->active_reservations,
1834 struct reservation, list); 1950 struct reservation, list);
1835 list_del(&res->list); 1951 list_del(&res->list);
1836 kfree(res); 1952 kfree(res);
1837 } 1953 }
1838 1954
1839 while (!list_empty(&state->sup_env.inactive_reservations)) { 1955 while (!list_empty(&_global_env->inactive_reservations)) {
1840 res = list_first_entry( 1956 res = list_first_entry(
1841 &state->sup_env.inactive_reservations, 1957 &_global_env->inactive_reservations,
1842 struct reservation, list); 1958 struct reservation, list);
1843 list_del(&res->list); 1959 list_del(&res->list);
1844 kfree(res); 1960 kfree(res);
1845 } 1961 }
1846 1962
1847 while (!list_empty(&state->sup_env.depleted_reservations)) { 1963 while (!list_empty(&_global_env->depleted_reservations)) {
1848 res = list_first_entry( 1964 res = list_first_entry(
1849 &state->sup_env.depleted_reservations, 1965 &_global_env->depleted_reservations,
1850 struct reservation, list); 1966 struct reservation, list);
1851 list_del(&res->list); 1967 list_del(&res->list);
1852 kfree(res); 1968 kfree(res);
1853 } 1969 }
1854
1855 raw_spin_unlock(&state->lock);
1856 }
1857
1858 raw_spin_lock(&_global_env.lock);
1859 1970
1860 while (!list_empty(&_global_env.active_reservations)) { 1971 while (!list_empty(&_global_env->next_events)) {
1861 res = list_first_entry( 1972 event = list_first_entry(
1862 &_global_env.active_reservations, 1973 &_global_env->next_events,
1863 struct reservation, list); 1974 struct next_timer_event, list);
1864 list_del(&res->list); 1975 list_del(&event->list);
1865 kfree(res); 1976 kfree(event);
1866 } 1977 }
1867
1868 while (!list_empty(&_global_env.inactive_reservations)) {
1869 res = list_first_entry(
1870 &_global_env.inactive_reservations,
1871 struct reservation, list);
1872 list_del(&res->list);
1873 kfree(res);
1874 }
1875
1876 while (!list_empty(&_global_env.depleted_reservations)) {
1877 res = list_first_entry(
1878 &_global_env.depleted_reservations,
1879 struct reservation, list);
1880 list_del(&res->list);
1881 kfree(res);
1882 }
1883 1978
1884 while (!list_empty(&_global_env.next_events)) {
1885 event = list_first_entry(
1886 &_global_env.next_events,
1887 struct next_timer_event, list);
1888 list_del(&event->list);
1889 kfree(event);
1890 } 1979 }
1891 1980
1892 raw_spin_unlock(&_global_env.lock); 1981 raw_spin_unlock(&global_lock);
1893
1894 destroy_domain_proc_info(&mc2_domain_proc_info); 1982 destroy_domain_proc_info(&mc2_domain_proc_info);
1895 return 0; 1983 return 0;
1896} 1984}
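The teardown above repeats the same drain-and-free loop nine times across the sup and gmp environments. A compact sketch of that loop as a helper; free_reservation_list() is hypothetical and relies, as the original comment notes, on struct reservation being the prefix of its containing struct:

static void free_reservation_list(struct list_head *head)
{
	struct reservation *res;

	while (!list_empty(head)) {
		res = list_first_entry(head, struct reservation, list);
		list_del(&res->list);
		kfree(res);	/* frees the containing struct via its prefix */
	}
}

/* usage, per mode and under the appropriate lock:
 *	free_reservation_list(&state->sup_env->active_reservations);
 *	free_reservation_list(&state->sup_env->inactive_reservations);
 *	free_reservation_list(&state->sup_env->depleted_reservations);
 */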