author    Namhoon Kim <namhoonk@cs.unc.edu>  2017-04-10 11:10:37 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>  2017-04-10 11:10:37 -0400
commit    9309774d024934b71816efa41171f439b007f983 (patch)
tree      2f86c57db4c2059c399a8850f480d0898253fa0b
parent    0c2ed78df17cc5a41f977f70e1e4ab3b142ffa14 (diff)
patches
-rw-r--r--  include/litmus/trace.h           3
-rw-r--r--  litmus/bank_proc.c              23
-rw-r--r--  litmus/litmus.c                  3
-rw-r--r--  litmus/polling_reservations.c    2
-rw-r--r--  litmus/sched_mc2.c             113
-rw-r--r--  litmus/sync.c                    2
6 files changed, 84 insertions, 62 deletions
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index 24ca412e1184..eb0a07f4ba04 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -143,6 +143,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 #define TS_ISR_START			CPU_TIMESTAMP_CUR(192)
 #define TS_ISR_END			CPU_TIMESTAMP_CUR(193)
 
+#define TS_MODE_CHANGE_START		CPU_TIMESTAMP(194)
+#define TS_MODE_CHANGE_END		CPU_TIMESTAMP(195)
+
 #define TS_RELEASE_LATENCY(when)	CPU_LTIMESTAMP(208, &(when))
 #define TS_RELEASE_LATENCY_A(when)	CPU_LTIMESTAMP(209, &(when))
 #define TS_RELEASE_LATENCY_B(when)	CPU_LTIMESTAMP(210, &(when))
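The two new event IDs extend the Feather-Trace timestamp table so that mode-change overhead can be measured the same way ISR and release latency already are. A minimal sketch of how the pair brackets the measured region (sys_enact_mode() in litmus/sched_mc2.c, changed below, is the one real call site in this patch):

	/* Sketch: bracketing a mode change with the new timestamps.
	 * Event 194 marks the start, 195 the end; the tracing
	 * toolchain pairs them to compute the mode-change cost. */
	TS_MODE_CHANGE_START;
	/* ... tear down old-mode reservations, install new mode ... */
	TS_MODE_CHANGE_END;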
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index 9bf3dfa99cb2..6f54b770079f 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -19,6 +19,9 @@
 #include <litmus/sched_trace.h>
 #include <litmus/litmus.h>
 
+//#define TRACE(fmt, args...) do {} while (false)
+//#define TRACE_TASK(fmt, args...) do {} while (false)
+
 #define LITMUS_LOCKDEP_NAME_MAX_LEN 50
 
 // This Address Decoding is used in imx6-sabredsd platform
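The commented-out stubs added above (and the identical ones in litmus/litmus.c and litmus/polling_reservations.c below) are a per-file kill switch: because they come after the <litmus/litmus.h> include, uncommenting them would shadow the real TRACE()/TRACE_TASK() macros for this translation unit only, compiling the trace calls down to nothing. A sketch of the technique, with the usual do/while(0) no-op body; the #undef lines are an addition of mine to avoid redefinition warnings:

/* Hypothetical: silence tracing in one file without touching the
 * shared header. The variadic parameters swallow all arguments,
 * so existing call sites compile unchanged. */
#undef TRACE
#undef TRACE_TASK
#define TRACE(fmt, args...)      do { } while (0)
#define TRACE_TASK(fmt, args...) do { } while (0)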
@@ -27,7 +30,7 @@
 #define CACHE_MASK  0x0000f000
 #define CACHE_SHIFT 12
 
-#define PAGES_PER_COLOR 1024
+#define PAGES_PER_COLOR 1000
 unsigned int NUM_PAGE_LIST;  //8*16
 
 unsigned int number_banks;
@@ -245,8 +248,8 @@ static int do_add_pages(void)
 		counter[color]++;
 //		printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
 		//show_nr_pages();
-		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
-		//if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
+		//if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
+		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
 		//if ( PAGES_PER_COLOR && color>=16*2) {
 			add_page_to_color_list(page);
 //			printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
@@ -364,6 +367,7 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
 	struct color_group *cgroup;
 	struct page *rPage = NULL;
 	unsigned int color;
+	int try = 0;
 
 
 	unsigned int idx = 0;
@@ -373,9 +377,20 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
 
 	rPage = new_alloc_page_color(idx);
 
-
 	set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
 	bank_index[node] = (bank_index[node]+1) % counting_one_set(bank_partition[node]);
+	while (!rPage) {
+		try++;
+		if (try>=16)
+			break;
+		idx = 0;
+		idx += num_by_bitmask_index(set_partition[node], set_index[node]);
+		idx += number_cachecolors* num_by_bitmask_index(bank_partition[node], bank_index[node]);
+		rPage = new_alloc_page_color(idx);
+		set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
+		bank_index[node] = (bank_index[node]+1) % counting_one_set(bank_partition[node]);
+	}
+
 	return rPage;
 }
 
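The loop added above keeps new_alloc_page() from failing as soon as the preferred color/bank pool is empty: it advances the per-node round-robin cursors and retries the next allowed (set, bank) combination, giving up after 16 attempts. Condensed into a single loop for clarity (same helpers and globals as in bank_proc.c; a sketch, not the patch's exact control flow, which makes one attempt before entering the loop):

/* Sketch: round-robin retry over the node's allowed colors/banks. */
static struct page *alloc_page_with_retry(unsigned long node)
{
	struct page *rpage = NULL;
	int try;

	for (try = 0; !rpage && try < 16; try++) {
		unsigned int idx =
			num_by_bitmask_index(set_partition[node], set_index[node]) +
			number_cachecolors *
			num_by_bitmask_index(bank_partition[node], bank_index[node]);

		rpage = new_alloc_page_color(idx);

		/* advance the round-robin cursors even on failure */
		set_index[node] = (set_index[node] + 1)
			% counting_one_set(set_partition[node]);
		bank_index[node] = (bank_index[node] + 1)
			% counting_one_set(bank_partition[node]);
	}
	return rpage;	/* NULL if all 16 attempts failed */
}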
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 7fccc585dedd..f9ad1f405518 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -37,6 +37,9 @@
 #include <trace/events/litmus.h>
 #endif
 
+//#define TRACE(fmt, args...) do {} while (false)
+//#define TRACE_TASK(fmt, args...) do {} while (false)
+
 extern void l2c310_flush_all(void);
 
 /* Number of RT tasks that exist in the system */
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
index 1cfd09169959..c6e10eb681c9 100644
--- a/litmus/polling_reservations.c
+++ b/litmus/polling_reservations.c
@@ -4,6 +4,8 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+//#define TRACE(fmt, args...) do {} while (false)
+//#define TRACE_TASK(fmt, args...) do {} while (false)
 
 static void periodic_polling_client_arrives(
 	struct reservation* res,
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 3ccee282ffdf..ec8a92440f2b 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -128,17 +128,21 @@ asmlinkage long sys_enact_mode(void)
 	struct mc2_cpu_state *state = local_cpu_state();
 	struct reservation *res;
 	struct list_head *pos;
+	unsigned long flags;
 	//lt_t now = litmus_clock();
 	TRACE_TASK(current, "ENACTING MODE TASK\n");
 	if (state->cpu == 0){
-		preempt_disable();
-		raw_spin_lock(&global_lock);
-		raw_spin_lock(&mode_lock);
+		//preempt_disable();
 		mode_changed = false;
+		local_irq_save(flags);
+
+		raw_spin_lock(&global_lock);
+		raw_spin_lock(&mode_lock);
 		if (pending){ //MCR has entered
 			if (!seen_once){
-				TRACE_TASK(current, "NOTICED MCR in mode %d\n", mode);
+				TRACE_TASK(current, "REQUEST = %llu\n", litmus_clock());
 				sched_trace_request_mode(current);
+				TS_MODE_CHANGE_START;
 				//clean up jobs that are already done
 				//after this jobs report themselves
 				list_for_each(pos, &_global_env->active_reservations){
@@ -162,7 +166,7 @@ asmlinkage long sys_enact_mode(void)
 			res = list_entry(pos, struct reservation, list);
 			if (tsk_rt(res->tsk)->completed && res->mode == mode){
 				res->reported = 1;
-				TRACE_CUR("R%d RES_REPORTED_INACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
+				//TRACE_CUR("R%d RES_REPORTED_INACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
 				res_reported--;
 			}
 		}
@@ -170,8 +174,8 @@ asmlinkage long sys_enact_mode(void)
 		}
 		if( ready ){ //C is throttled
 			lt_t new_mode_basetime = get_release(current);
-
-			TRACE("Timer canceled\n");
+			lt_t t;
+			//TRACE("Timer canceled\n");
 			hrtimer_cancel(&state->timer);//stop listening to old mode timers
 			mode = requested_mode;
 			TRACE("Mode has been changed.\n");
@@ -210,19 +214,18 @@ asmlinkage long sys_enact_mode(void)
 				release_at(res->tsk, new_mode_basetime);
 			}
 			//raw_spin_unlock(&state->lock);
-
+			t=litmus_clock();
 			sched_trace_enact_mode(current);
+			TS_MODE_CHANGE_END;
+			TRACE(KERN_ALERT "ENACT = %llu\n", t);
 		}
 
 
 		}
 		raw_spin_unlock(&mode_lock);
 		raw_spin_unlock(&global_lock);
-		//release other CPUs
+		local_irq_restore(flags);
 		cpu_0_spin_flag = !cpu_0_spin_flag;
-
-		preempt_enable();
-		TRACE_CUR("flag = %d\n",cpu_0_spin_flag);
 	}
 	else if (cpu_0_task_exist) {
 		//spin, wait for CPU 0 to stabilize mode decision
@@ -242,9 +245,11 @@ asmlinkage long sys_enact_mode(void)
242 //TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed); 245 //TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed);
243 if (mode_changed) { 246 if (mode_changed) {
244 lt_t new_mode_basetime = get_release(current); 247 lt_t new_mode_basetime = get_release(current);
245 TRACE("CPU%d mode changed\n",state->cpu); 248 //TRACE("CPU%d mode changed\n",state->cpu);
246 hrtimer_cancel(&state->timer); //stop listening to old mode timers 249 hrtimer_cancel(&state->timer); //stop listening to old mode timers
247 //preempt_disable(); 250 //preempt_disable();
251 local_irq_save(flags);
252
248 raw_spin_lock(&state->lock); 253 raw_spin_lock(&state->lock);
249 state->sup_env = &state->sup_env_modes[mode]; 254 state->sup_env = &state->sup_env_modes[mode];
250 list_for_each(pos, &state->sup_env->active_reservations){ 255 list_for_each(pos, &state->sup_env->active_reservations){
@@ -260,20 +265,24 @@ asmlinkage long sys_enact_mode(void)
 				release_at(res->tsk, new_mode_basetime);
 			}
 			raw_spin_unlock(&state->lock);
+			local_irq_restore(flags);
+
 			//preempt_enable();
 		}
 		state->spin_flag = !state->spin_flag;
 	}
 	else {
-		TRACE("CPU%d no cpu_0_task_exist.%d\n",state->cpu, mode_changed);
+		//TRACE("CPU%d no cpu_0_task_exist.%d\n",state->cpu, mode_changed);
+		local_irq_restore(flags);
 		return 0;
 	}
-	TRACE("CPU%d everyone should get this.%d\n",state->cpu, mode_changed);
+	TRACE("CPU%d enact syscall ends m_c? %d\n",state->cpu, mode_changed);
 	//if mode didn't change this has no effect on what's being scheduled
-	raw_spin_lock(&state->lock);
+	//raw_spin_lock(&state->lock);
 	state->sup_env = &state->sup_env_modes[mode];
-	raw_spin_unlock(&state->lock);
+	//raw_spin_unlock(&state->lock);
 	//sup_update_time(state->sup_env, litmus_clock());
+
 	return 0;
 }
 
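The reworked critical section in sys_enact_mode() swaps preempt_disable()/preempt_enable() for local_irq_save()/local_irq_restore() and only then takes the spinlocks. Since on_scheduling_timer() below acquires state->lock with irqsave, disabling interrupts before locking plausibly prevents the scheduling-timer ISR from firing on the same CPU midway through the mode change and deadlocking on the locks. The resulting CPU-0 pattern, sketched:

	/* Sketch of the new CPU-0 mode-change locking discipline:
	 * interrupts off before any lock, locks released in reverse
	 * order, interrupts back on last. */
	unsigned long flags;

	local_irq_save(flags);
	raw_spin_lock(&global_lock);
	raw_spin_lock(&mode_lock);

	/* ... detect a pending MCR, re-release reservations for the
	 *     new mode, stamp TS_MODE_CHANGE_START/END ... */

	raw_spin_unlock(&mode_lock);
	raw_spin_unlock(&global_lock);
	local_irq_restore(flags);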
@@ -451,6 +460,9 @@ static int get_lowest_prio_cpu(lt_t priority)
 	int cpu, ret = NO_CPU;
 	lt_t latest_deadline = 0;
 
+	if (priority == LITMUS_NO_PRIORITY)
+		return ret;
+
 	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
 	if (!ce->will_schedule && !ce->scheduled) {
 		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
@@ -529,8 +541,8 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 		if (event->next_update < litmus_clock()) {
 			if (event->timer_armed_on == NO_CPU) {
 				struct reservation *res = gmp_find_by_id(_global_env, event->id);
-				int cpu = get_lowest_prio_cpu(res?res->priority:0);
-				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
+				int cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY);
+				//TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
 				list_del(&event->list);
 				kfree(event);
 				if (cpu != NO_CPU) {
@@ -594,17 +606,15 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 		 */
 		TRACE("mc2_update_timer for remote CPU %d (update=%llu, "
 		      "active:%d, set:%llu)\n",
-		      state->cpu,
-		      update,
-		      hrtimer_active(&state->timer),
+		      state->cpu, update, hrtimer_active(&state->timer),
 		      ktime_to_ns(hrtimer_get_expires(&state->timer)));
 		if (!hrtimer_active(&state->timer) ||
 		    ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) {
 			TRACE("poking CPU %d so that it can update its "
 			      "scheduling timer (active:%d, set:%llu)\n",
 			      state->cpu,
 			      hrtimer_active(&state->timer),
 			      ktime_to_ns(hrtimer_get_expires(&state->timer)));
 			//litmus_reschedule(state->cpu);
 /*
 			raw_spin_lock(&state->lock);
@@ -679,7 +689,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	TS_ISR_START;
 
 	TRACE("Timer fired at %llu\n", litmus_clock());
-	//raw_spin_lock_irqsave(&_global_env.lock, flags);
 	raw_spin_lock_irqsave(&state->lock, flags);
 	now = litmus_clock();
 	sup_update_time(state->sup_env, now);
@@ -718,7 +727,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	}
 	raw_spin_unlock(&global_lock);
 	raw_spin_unlock_irqrestore(&state->lock, flags);
-	//raw_spin_unlock_irqrestore(&_global_env.lock, flags);
 
 	TS_ISR_END;
 
@@ -811,7 +819,7 @@ static long mc2_complete_job(void)
 	res->cur_budget = 0;
 	res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-	TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
+	// TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
 
 	//if (lv < CRIT_LEVEL_C)
 	//	raw_spin_unlock(&state->lock);
@@ -901,7 +909,6 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 			sup_scheduler_update_after(sup_env, res->cur_budget);
 			return tsk;
 		} else {
-			TRACE_TASK(tsk, "@@@@@DISPATCH@@@@@@@ init_finished? %s\n", is_init_finished(tsk)?"true":"false");
 			if (!is_init_finished(tsk)) {
 				//ce = &state->crit_entries[lv];
 				sup_scheduler_update_after(sup_env, res->cur_budget);
@@ -1132,7 +1139,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	if (prev != state->scheduled && is_realtime(prev)) {
 		struct mc2_task_state* tinfo = get_mc2_state(prev);
 		struct reservation* res = tinfo->res_info[mode].client.reservation;
-		TRACE_TASK(prev, "PREEPT_COUNT %d\n", preempt_count());
 		if (res) {
 			TRACE_TASK(prev, "PREV JOB was scheduled_on = P%d\n", res->scheduled_on);
 			res->scheduled_on = NO_CPU;
@@ -1142,7 +1148,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
 		int cpu;
 		raw_spin_lock(&global_lock);
-		cpu = get_lowest_prio_cpu(res?res->priority:0);
+		cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY);
 		if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
 			//raw_spin_lock(&_lowest_prio_cpu.lock);
 			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
@@ -1195,7 +1201,6 @@ static void resume_legacy_task_model_updates(struct task_struct *tsk)
 		now = litmus_clock();
 		if (is_tardy(tsk, now)) {
 			release_at(tsk, now);
-			//sched_trace_task_release(tsk);
 		}
 	}
 }
@@ -1224,7 +1229,7 @@ static void mc2_task_resume(struct task_struct *tsk)
 	if (tinfo->has_departed)
 	{
 		/* We don't want to consider jobs before synchronous releases */
-		if (tsk_rt(tsk)->job_params.job_no > 4) {
+		if (tsk_rt(tsk)->job_params.job_no > 3) {
 			switch(get_task_crit_level(tsk)) {
 				case CRIT_LEVEL_A:
 					TS_RELEASE_LATENCY_A(get_release(tsk));
@@ -1238,7 +1243,7 @@ static void mc2_task_resume(struct task_struct *tsk)
 				default:
 					break;
 			}
-			TRACE_CUR("INIT_FINISHED is SET\n");
+			// TRACE_CUR("INIT_FINISHED is SET\n");
 			tsk_mc2_data(tsk)->init_finished = 1;
 		}
 
@@ -1293,14 +1298,11 @@ static long mc2_admit_task(struct task_struct *tsk)
 		return -ENOMEM;
 
 	if (!mp) {
-		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
 		TRACE("mc2_admit_task: criticality level has not been set\n");
 		return -ESRCH;
 	}
 
 	lv = mp->crit;
-	preempt_disable();
-
 
 	if (lv < CRIT_LEVEL_C) {
 		state = cpu_state_for(task_cpu(tsk));
@@ -1312,14 +1314,14 @@ static long mc2_admit_task(struct task_struct *tsk)
 		tinfo->mc2_param.res_id = mp->res_id;
 		tinfo->mc2_param.mode_mask = mp->mode_mask;
 		tinfo->mc2_param.init_finished = 0;
-		TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
+//		TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
 
-		TRACE_TASK(tsk, "Mode 0\n");
+//		TRACE_TASK(tsk, "Mode 0\n");
 		res = sup_find_by_id(&(state->sup_env_modes[0]), mp->res_id);
 
 		/* found the appropriate reservation */
 		if (res) {
-			TRACE_TASK(tsk, "SUP FOUND RES ID in mode 0\n");
+//			TRACE_TASK(tsk, "SUP FOUND RES ID in mode 0\n");
 
 			/* initial values */
 			err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res);
@@ -1334,12 +1336,12 @@ static long mc2_admit_task(struct task_struct *tsk)
 				//task not present in mode
 				continue;
 			}
-			TRACE_TASK(tsk, "Mode %d\n",i);
+//			TRACE_TASK(tsk, "Mode %d\n",i);
 			res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id);
 
 			/* found the appropriate reservation */
 			if (res) {
-				TRACE_TASK(tsk, "SUP FOUND RES ID in mode %d\n", i);
+	//			TRACE_TASK(tsk, "SUP FOUND RES ID in mode %d\n", i);
 
 				/* initial values */
 				err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res);
@@ -1355,18 +1357,16 @@ static long mc2_admit_task(struct task_struct *tsk)
 		tsk_rt(tsk)->plugin_state = tinfo;
 		tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 	}
-	TRACE_CUR("ctrl_page mode_poll_task %d, cpu = %d, tsk_rt->ctrl_page = %x\n", tsk_rt(tsk)->ctrl_page->mode_poll_task, tinfo->cpu, tsk_rt(tsk)->ctrl_page);
+
 	if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
-		TRACE_CUR("CPU0_TASK_EXIST set\n");
 		cpu_0_task_exist = true;
 	}
 
 	raw_spin_unlock_irqrestore(&state->lock, flags);
 	} else if (lv == CRIT_LEVEL_C) {
-		TRACE_TASK(tsk, "Task being admitted is Level C\n");
+//		TRACE_TASK(tsk, "Task being admitted is Level C\n");
 		state = local_cpu_state();
 		raw_spin_lock_irqsave(&state->lock, flags);
-		raw_spin_lock(&global_lock);
 		//state = local_cpu_state();
 
 		//raw_spin_lock(&state->lock);
@@ -1378,14 +1378,16 @@ static long mc2_admit_task(struct task_struct *tsk)
 		tinfo->mc2_param.mode_mask = mp->mode_mask;
 		tinfo->mc2_param.init_finished = 0;
 
-		TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
+		// TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
 
-		TRACE_TASK(tsk, "Mode 0\n");
+//		TRACE_TASK(tsk, "Mode 0\n");
+
+		raw_spin_lock(&global_lock);
 		res = gmp_find_by_id(&(_global_env_modes[0]), mp->res_id);
 
 		/* found the appropriate reservation */
 		if (res) {
-			TRACE_TASK(tsk, "GMP FOUND RES ID in mode 0\n");
+			// TRACE_TASK(tsk, "GMP FOUND RES ID in mode 0\n");
 
 			/* initial values */
 			err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res);
@@ -1428,12 +1430,11 @@ static long mc2_admit_task(struct task_struct *tsk)
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	}
 
-	preempt_enable();
 
 	if (err)
 		kfree(tinfo);
 
-	TRACE_TASK(tsk, "MC2 task admitted %d\n", err);
+	//TRACE_TASK(tsk, "MC2 task admitted %d\n", err);
 	return err;
 }
 
@@ -1460,7 +1461,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	else
 		state = cpu_state_for(tinfo->cpu);
 
-	local_irq_save(flags);
 
 	/* acquire the lock protecting the state and disable interrupts */
 	//raw_spin_lock(&_global_env.lock);
@@ -1471,6 +1471,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 		litmus_reschedule_local();
 	}
 
+	local_irq_save(flags);
 	raw_spin_lock(&state->lock);
 
 	if (lv == CRIT_LEVEL_C) {
@@ -1778,8 +1779,7 @@ static long create_polling_reservation(
 	/* sanity checks */
 	if (config->polling_params.budget >
 	    config->polling_params.period) {
-		printk(KERN_ERR "invalid polling reservation (%u): "
-		       "budget > period\n", config->id);
+		printk(KERN_ERR "invalid polling reservation (%u): " "budget > period\n", config->id);
 		return -EINVAL;
 	}
 	if (config->polling_params.budget >
@@ -2160,7 +2160,7 @@ static long mc2_activate_plugin(void)
 	}
 	_global_env = &_global_env_modes[0];
 
-	raw_spin_lock_init(&_lowest_prio_cpu.lock);
+	//raw_spin_lock_init(&_lowest_prio_cpu.lock);
 	raw_spin_lock_init(&mode_lock);
 	raw_spin_lock_init(&global_lock);
 
@@ -2188,7 +2188,6 @@ static long mc2_activate_plugin(void)
 	//	cr_entry->level = lv;
 	//	cr_entry->running = NULL;
 	//}
-
 	for(i = 0; i < NR_MODES; i++){
 		sup_init(&(state->sup_env_modes[i]));
 	}
diff --git a/litmus/sync.c b/litmus/sync.c
index 5d180603f46b..5955c5777786 100644
--- a/litmus/sync.c
+++ b/litmus/sync.c
@@ -52,7 +52,7 @@ static long do_wait_for_ts_release(void)
 	if (!ret) {
 		/* Completion succeeded, setup release time. */
 		ret = litmus->wait_for_release_at(
-			wait.ts_release_time + get_rt_phase(current));
+			wait.ts_release_time + get_rt_phase(current)+1000000000);
 	} else {
 		/* We were interrupted, must cleanup list. */
 		mutex_lock(&task_release_lock);
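The one-line change above pads the synchronous release point by a constant 1,000,000,000 ns: every task's first release now happens a full second after the agreed release time plus its phase, presumably so that all tasks complete admission and initialization before the first job fires. In effect:

	/* Effective first release after this change (all times in ns):
	 *   release = ts_release_time + phase(task) + 1 s            */
	lt_t release = wait.ts_release_time
	             + get_rt_phase(current)
	             + 1000000000ULL;	/* fixed one-second margin */
	ret = litmus->wait_for_release_at(release);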