author	Namhoon Kim <namhoonk@cs.unc.edu>	2017-04-25 02:14:29 -0400
committer	Namhoon Kim <namhoonk@cs.unc.edu>	2017-04-25 02:14:29 -0400
commit	6dc3e50fc1916dbc8b8e9cc863217e545ca4159c (patch)
tree	e693a85beac6b5713a87afa5e8e3c5bb42df502d
parent	de945bf970cbb332c0540b2cd071ec3c7e4b7833 (diff)
sync bug fixed: convert num_sync_released to an atomic_t, stagger init-phase releases by INIT_PHASE_LENGTH_NS per job, request mode 1 once every task has finished the init phase, and add an ST_SYS_START trace event
-rw-r--r--	include/litmus/sched_trace.h	21
-rw-r--r--	include/trace/events/litmus.h	22
-rw-r--r--	litmus/Kconfig	2
-rw-r--r--	litmus/jobs.c	16
-rw-r--r--	litmus/sched_mc2.c	145
-rw-r--r--	litmus/sched_task_trace.c	12
-rw-r--r--	litmus/sync.c	4
7 files changed, 157 insertions, 65 deletions
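The core of the fix is the handling of num_sync_released: the patch turns the lock-protected int into an atomic_t, decrements it as each task finishes its staggered init phase, and the task that observes the count reach zero calls sys_request_mode(1) and emits the new ST_SYS_START record. Below is a minimal user-space sketch of that countdown pattern, offered only as an illustration: it uses C11 <stdatomic.h> rather than the kernel's atomic_t, collapses the patch's dec-then-read (which happens under the per-CPU state lock) into a single atomic_fetch_sub, and request_mode()/task_init_done() are made-up stand-ins, not functions from this code base.

/* Illustrative sketch only, NOT the kernel code from this patch. */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define NUM_TASKS 4

/* the kernel patch uses atomic_t with atomic_inc/atomic_dec/atomic_read */
static atomic_int num_sync_released = NUM_TASKS;

static void request_mode(int new_mode)
{
	/* stand-in for sys_request_mode(1) in the patch */
	printf("INIT_PHASE FINISHED. CHANGE TO MODE %d\n", new_mode);
}

static void *task_init_done(void *arg)
{
	/* atomic_fetch_sub returns the previous value, so exactly one
	 * task observes the transition to zero and requests the change */
	if (atomic_fetch_sub(&num_sync_released, 1) == 1)
		request_mode(1);
	return NULL;
}

int main(void)
{
	pthread_t th[NUM_TASKS];
	int i;

	for (i = 0; i < NUM_TASKS; i++)
		pthread_create(&th[i], NULL, task_init_done, NULL);
	for (i = 0; i < NUM_TASKS; i++)
		pthread_join(th[i], NULL);
	return 0;
}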
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index fb5d8f33aff5..80d952374f7a 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -89,6 +89,11 @@ struct st_request_mode_data {
 	u8	__unused[8];
 };
 
+struct st_sys_start_data {
+	u64	when;
+	u64	start;
+};
+
 #define DATA(x) struct st_ ## x ## _data x;
 
 typedef enum {
@@ -104,7 +109,8 @@ typedef enum {
 	ST_ACTION,
 	ST_SYS_RELEASE,
 	ST_ENACT_MODE,
-	ST_REQUEST_MODE
+	ST_REQUEST_MODE,
+	ST_SYS_START,
 } st_event_record_type_t;
 
 struct st_event_record {
@@ -124,6 +130,7 @@ struct st_event_record {
 		DATA(sys_release);
 		DATA(enact_mode);
 		DATA(request_mode);
+		DATA(sys_start);
 	} data;
 };
 
@@ -171,6 +178,8 @@ feather_callback void do_sched_trace_enact_mode(unsigned long id,
 
 feather_callback void do_sched_trace_request_mode(unsigned long id,
 					struct task_struct* task);
+feather_callback void do_sched_trace_sys_start(unsigned long id,
+					lt_t* start);
 
 #endif
 
@@ -198,6 +207,7 @@ feather_callback void do_sched_trace_request_mode(unsigned long id,
 #define trace_litmus_sys_release(start)
 #define trace_litmus_enact_mode(t)
 #define trace_litmus_request_mode(t)
+#define trace_litmus_sys_start(start)
 
 #endif
 
@@ -286,6 +296,15 @@ feather_callback void do_sched_trace_request_mode(unsigned long id,
 			    do_sched_trace_request_mode, t); \
 		trace_litmus_request_mode(t); \
 	} while (0)
+
+/* when is a pointer, it does not need an explicit cast to unsigned long */
+#define sched_trace_sys_start(when) \
+	do { \
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 13, \
+			    do_sched_trace_sys_start, when); \
+		trace_litmus_sys_start(when); \
+	} while (0)
+
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
index 3094ddd2210b..ef8d60f2df25 100644
--- a/include/trace/events/litmus.h
+++ b/include/trace/events/litmus.h
@@ -273,6 +273,28 @@ TRACE_EVENT(litmus_request_mode,
 	TP_printk("Mode request at %Lu\n", __entry->when)
 );
 
+/*
+ * Trace synchronous start
+ */
+TRACE_EVENT(litmus_sys_start,
+
+	TP_PROTO(lt_t *start),
+
+	TP_ARGS(start),
+
+	TP_STRUCT__entry(
+		__field( lt_t,	rel	)
+		__field( lt_t,	when	)
+	),
+
+	TP_fast_assign(
+		__entry->rel = *start;
+		__entry->when = litmus_clock();
+	),
+
+	TP_printk("SynStart(%Lu) at %Lu\n", __entry->rel, __entry->when)
+);
+
 #endif /* _SCHED_TASK_TRACEPOINT_H */
 
 /* Must stay outside the protection */
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 2253be5e74eb..603a28107a74 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -243,7 +243,7 @@ config SCHED_TASK_TRACE
 config SCHED_TASK_TRACE_SHIFT
 	int "Buffer size for sched_trace_xxx() events"
 	depends on SCHED_TASK_TRACE
-	range 8 14
+	range 8 18
 	default 9
 	help
 
diff --git a/litmus/jobs.c b/litmus/jobs.c
index f075d8ed674a..898c09335b9f 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -36,13 +36,17 @@ void prepare_for_next_period(struct task_struct *t)
 
 	if (tsk_rt(t)->sporadic_release) {
 		TRACE_TASK(t, "sporadic release at %llu\n",
-			   tsk_rt(t)->sporadic_release_time);
+			   tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no));
 		/* sporadic release */
-		setup_release(t, tsk_rt(t)->sporadic_release_time);
-		tsk_rt(t)->sporadic_release = 0;
-	} else if (num_sync_released > 0) {
-		TRACE_TASK(t, "num_sync_released = %d\n", num_sync_released);
-		setup_release(t, tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS);
+		setup_release(t, tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no));
+/*		if (num_sync_released == 0) {
+			tsk_rt(t)->sporadic_release = 0;
+			TRACE("num_sync_released is 0\n");
+		}
+*/
+//	} else if (num_sync_released > 0) {
+//		TRACE_TASK(t, "num_sync_released = %d and job_no = %d\n", num_sync_released, tsk_rt(t)->job_params.job_no);
+//		setup_release(t, tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no));
 	} else {
 		/* periodic release => add period */
 		setup_release(t, get_release(t) + get_rt_period(t));
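For reference, INIT_PHASE_LENGTH_NS is defined later in this patch (in sched_mc2.c) as 1000000000 ns, i.e. one second, so the staggered release above pushes job k of the init phase one second further out per job. A worked instance with made-up numbers (sporadic_release_time = 5 s, job_no = 3):

	release = 5,000,000,000 ns + 1,000,000,000 ns * 3 = 8,000,000,000 ns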
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 415e5fe3bf12..6723e8a96141 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -38,7 +38,7 @@
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
-extern int num_sync_released;
+extern atomic_t num_sync_released;
 extern void do_partition(enum crit_level lv, int cpu);
 
 /* _global_env - reservation container for level-C tasks*/
@@ -137,8 +137,8 @@ asmlinkage long sys_enact_mode(void)
 	TRACE_TASK(current, "ENACTING SYSCALL\n");
 	if (state->cpu == 0){
 		mode_changed = false;
-		local_irq_save(flags);
 		if (pending){ //MCR has entered
+			local_irq_save(flags);
 			raw_spin_lock(&state->lock);
 			raw_spin_lock(&global_lock);
 			raw_spin_lock(&mode_lock);
@@ -223,10 +223,11 @@ asmlinkage long sys_enact_mode(void)
 			raw_spin_unlock(&mode_lock);
 			raw_spin_unlock(&global_lock);
 			//raw_spin_unlock(&state->lock);
+			local_irq_restore(flags);
 			mc2_update_timer_and_unlock(state);
 		}
 		this_cpu_inc(mode_counter);
-		local_irq_restore(flags);
+		//local_irq_restore(flags);
 		//cpu_0_spin_flag = !cpu_0_spin_flag;
 	}
 	else if (cpu_0_task_exist) {
@@ -248,13 +249,13 @@ asmlinkage long sys_enact_mode(void)
 			while (*cpu0_counter == this_cpu_read(mode_counter))
 				udelay(1);
 			TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter));
-			local_irq_save(flags);
+			//local_irq_save(flags);
 			if (mode_changed) {
 				lt_t new_mode_basetime = get_release(current);
 				//TRACE("CPU%d mode changed\n",state->cpu);
 				hrtimer_cancel(&state->timer); //stop listening to old mode timers
 				TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock());
-				//local_irq_save(flags);
+				local_irq_save(flags);
 				raw_spin_lock(&state->lock);
 				state->sup_env = &state->sup_env_modes[mode];
 				list_for_each(pos, &state->sup_env->active_reservations){
@@ -271,12 +272,13 @@ asmlinkage long sys_enact_mode(void)
 				}
 				sup_update_time(state->sup_env, litmus_clock());
 				//raw_spin_unlock(&state->lock);
+				local_irq_restore(flags);
 				mc2_update_timer_and_unlock(state);
 				//local_irq_restore(flags);
 
 			}
 			this_cpu_write(mode_counter, *cpu0_counter);
-			local_irq_restore(flags);
+			//local_irq_restore(flags);
 			//state->spin_flag = !state->spin_flag;
 		}
 		else {
@@ -304,18 +306,21 @@ asmlinkage long sys_enact_mode(void)
 asmlinkage long sys_request_mode(int new_mode){
 	preempt_disable();
 	raw_spin_lock(&mode_lock);
+	TRACE("MCR received at %llu\n", litmus_clock());
 	if (pending){
 		raw_spin_unlock(&mode_lock);
 		preempt_enable();
+		TRACE("MCR rejected because the previous MCR is pending.\n");
 		return -EAGAIN;
 	}
 	if (mode == new_mode){
 		raw_spin_unlock(&mode_lock);
 		preempt_enable();
+		TRACE("MCR rejected because the system is already in the new mode = %d.\n", new_mode);
 		return 0;
 	}
 	requested_mode = new_mode;
-	TRACE("MCR received\n");
+	TRACE("MCR to %d is accepted.\n", new_mode);
 	res_reported = mode_sizes[mode];
 	TRACE_CUR("RES_REPORTED = %d\n",res_reported);
 	seen_once = false;
@@ -475,7 +480,7 @@ static int get_lowest_prio_cpu(lt_t priority)
 
 	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
 	if (!ce->will_schedule && !ce->scheduled) {
-		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
+		TRACE("CPU %d (local) is the lowest (Idle)!\n", ce->cpu);
 		return ce->cpu;
 	} else {
 		TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
@@ -485,14 +490,16 @@ static int get_lowest_prio_cpu(lt_t priority)
 		ce = &_lowest_prio_cpu.cpu_entries[cpu];
 		/* If a CPU will call schedule() in the near future, we don't
 		return that CPU. */
+/*
 		TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
 			ce->scheduled ? (ce->scheduled)->comm : "null",
 			ce->scheduled ? (ce->scheduled)->pid : 0,
 			ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
+*/
 		if (!ce->will_schedule) {
 			if (!ce->scheduled) {
 				/* Idle cpu, return this. */
-				TRACE("CPU %d is the lowest!\n", ce->cpu);
+				TRACE("CPU %d is the lowest (Idle)!\n", ce->cpu);
 				return ce->cpu;
 			} else if (ce->lv == CRIT_LEVEL_C &&
 				   ce->deadline > latest_deadline) {
@@ -502,10 +509,12 @@ static int get_lowest_prio_cpu(lt_t priority)
 		}
 	}
 
-	if (priority >= latest_deadline)
-		ret = NO_CPU;
+	TRACE("CPU %d is the lowest! deadline = %llu, my priority = %llu\n", ret, latest_deadline, priority);
 
-	TRACE("CPU %d is the lowest!\n", ret);
+	if (priority >= latest_deadline) {
+		TRACE("CPU %d is running a higher-priority task. return NO_CPU\n", ret);
+		ret = NO_CPU;
+	}
 
 	return ret;
 }
@@ -559,9 +568,10 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			//raw_spin_lock(&_lowest_prio_cpu.lock);
 			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 			//raw_spin_unlock(&_lowest_prio_cpu.lock);
-			if (cpu == local_cpu_state()->cpu)
-				litmus_reschedule_local();
-			else
+
+			//if (cpu == local_cpu_state()->cpu)
+			//	litmus_reschedule_local();
+			//else
 				reschedule[cpu] = 1;
 		}
 	}
@@ -735,7 +745,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 			//raw_spin_unlock(&_lowest_prio_cpu.lock);
 			TRACE("LOWEST CPU = P%d\n", cpu);
 			if (cpu == state->cpu && update > now)
-				litmus_reschedule_local();
+				;//litmus_reschedule_local();
 			else
 				reschedule[cpu] = 1;
 		}
@@ -765,6 +775,8 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	return restart;
 }
 
+#define INIT_PHASE_LENGTH_NS (1000000000)
+
 /* mc2_complete_job - syscall backend for job completions
  */
 static long mc2_complete_job(void)
@@ -772,14 +784,17 @@ static long mc2_complete_job(void)
 	ktime_t next_release;
 	long err;
 
-	enum crit_level lv;
+	enum crit_level lv = get_task_crit_level(current);
 
 	raw_spin_lock(&mode_lock);
 	tsk_rt(current)->completed = 1;
 	raw_spin_unlock(&mode_lock);
 
-	lv = get_task_crit_level(current);
-
+	if (atomic_read(&num_sync_released) == 0 && mode != 0) {
+		tsk_rt(current)->sporadic_release = 0;
+		TRACE_CUR("num_sync_released is 0\n");
+	}
+
 	/* If this the first job instance, we need to reset replenish
 	   time to the next release time */
 	if (tsk_rt(current)->sporadic_release) {
@@ -798,7 +813,7 @@ static long mc2_complete_job(void)
 		raw_spin_lock(&state->lock);
 		for (i = 0; i<NR_MODES; i++) {
 			if (in_mode(current,i) || i == 0) {
-				state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time;
+				state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 			}
 		}
 		res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id);
@@ -811,7 +826,7 @@ static long mc2_complete_job(void)
 		raw_spin_lock(&global_lock);
 		for (i = 0; i < NR_MODES; i++) {
 			if (in_mode(current,i) || i == 0) {
-				_global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time;
+				_global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 			}
 		}
 		res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id);
@@ -821,7 +836,7 @@ static long mc2_complete_job(void)
 
 		/* set next_replenish to synchronous release time */
 		BUG_ON(!res);
-		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
+		res->next_replenishment = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
 /*
 		if (get_task_crit_level(current) == CRIT_LEVEL_A) {
 			struct table_driven_reservation *tdres;
@@ -834,7 +849,7 @@ static long mc2_complete_job(void)
 		res->cur_budget = 0;
 		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-		// TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
+		TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
 		if (lv == CRIT_LEVEL_C)
 			raw_spin_unlock(&global_lock);
 
@@ -863,12 +878,12 @@ static long mc2_complete_job(void)
 		res = gmp_find_by_id(_global_env, tsk_mc2_data(current)->res_id);
 		if (res && !res->reported){
 			res_reported--;
-			TRACE_CUR("RES_REPORTED = %d\n",res_reported);
+			TRACE_CUR("RES_REPORTED = %d\n", res_reported);
 			res->reported = 1;
 			//Current task doesn't exist in new mode
-			if ( !in_mode(current, requested_mode) ){
-				litmus_reschedule_local();
-			}
+			//if ( !in_mode(current, requested_mode) ){
+			//	litmus_reschedule_local();
+			//}
 		}
 		raw_spin_unlock(&mode_lock);
 	}
@@ -908,8 +923,10 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 
 
 	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
-		if (res->state == RESERVATION_ACTIVE)
-			TRACE_TASK(tsk, "ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu\n", res->id, mode, res->mode, res->cur_budget);
+		if (res->state == RESERVATION_ACTIVE) {
+			struct task_struct *t = res->ops->dispatch_client(res, &time_slice);
+			TRACE_TASK(tsk, "CPU%d ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", state->cpu, res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t));
+		}
 	}
 
 	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
@@ -922,7 +939,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 				return tsk;
 			} else {
 				//if (!is_init_finished(tsk)) {
-				TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
+//				TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
 //				if (num_sync_released != 0 && mode == 0) {
 					//ce = &state->crit_entries[lv];
 					sup_scheduler_update_after(sup_env, res->cur_budget);
@@ -953,6 +970,13 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 	enum crit_level lv;
 	lt_t time_slice;
 
+	list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
+		if (res->state == RESERVATION_ACTIVE) {
+			struct task_struct *t = res->ops->dispatch_client(res, &time_slice);
+			TRACE_TASK(tsk, "GLOBAL ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t));
+		}
+	}
+
 	raw_spin_lock(&mode_lock);
 	list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
 		BUG_ON(!res);
@@ -1089,9 +1113,15 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		if (is_realtime(prev))
 			gmp_update_time(_global_env, now);
 		state->scheduled = mc2_global_dispatch(state);
+		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+		update_cpu_prio(state);
+		raw_spin_unlock(&global_lock);
+	} else {
+		raw_spin_lock(&global_lock);
+		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+		update_cpu_prio(state);
 		raw_spin_unlock(&global_lock);
 	}
-
 /*
 	if (!state->scheduled) {
 		raw_spin_lock(&global_lock);
@@ -1161,12 +1191,12 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	}
 */
 	post_schedule(state->scheduled, state->cpu);
-
+/*
 	raw_spin_lock(&global_lock);
 	_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 	update_cpu_prio(state);
 	raw_spin_unlock(&global_lock);
-
+*/
 	raw_spin_unlock(&state->lock);
 	if (state->scheduled) {
 		TRACE_TASK(state->scheduled, "scheduled.\n");
@@ -1204,7 +1234,6 @@ static void mc2_task_resume(struct task_struct *tsk)
 	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
 	//local_irq_save(flags);
-	TRACE_TASK(tsk, "preemptible?? %d\n", preemptible());
 	preempt_disable();
 	tinfo = get_mc2_state(tsk);
 	if (tinfo->cpu != -1)
@@ -1219,7 +1248,8 @@ static void mc2_task_resume(struct task_struct *tsk)
 	/* Requeue only if self-suspension was already processed. */
 	if (tinfo->has_departed)
 	{
-
+		raw_spin_lock(&state->lock);
+		local_irq_save(flags);
 		/* We don't want to consider jobs before synchronous releases */
 		if (tsk_rt(tsk)->job_params.job_no == 2) {
 /*
@@ -1239,14 +1269,19 @@ static void mc2_task_resume(struct task_struct *tsk)
 */
 			TRACE_TASK(tsk, "INIT_FINISHED is SET\n");
 			tsk_mc2_data(tsk)->init_finished = 1;
-			raw_spin_lock(&global_lock);
-			num_sync_released--;
-			raw_spin_unlock(&global_lock);
-			TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", num_sync_released);
+			atomic_dec(&num_sync_released);
+			//raw_spin_unlock(&global_lock);
+			if (atomic_read(&num_sync_released) == 0) {
+				lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no);
+				TRACE("INIT_PHASE FINISHED. CHANGE TO MODE 1\n");
+				sys_request_mode(1);
+				sched_trace_sys_start(&start);
+			}
+			TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", atomic_read(&num_sync_released));
 		}
 
-		raw_spin_lock(&state->lock);
-		local_irq_save(flags);
+//		raw_spin_lock(&state->lock);
+//		local_irq_save(flags);
 		/* Assumption: litmus_clock() is synchronized across cores,
 		 * since we might not actually be executing on tinfo->cpu
 		 * at the moment. */
@@ -1363,9 +1398,7 @@ static long mc2_admit_task(struct task_struct *tsk)
 		if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
 			cpu_0_task_exist = true;
 		}
-		raw_spin_lock(&global_lock);
-		num_sync_released++;
-		raw_spin_unlock(&global_lock);
+		atomic_inc(&num_sync_released);
 		local_irq_restore(flags);
 		raw_spin_unlock(&state->lock);
 		//raw_spin_unlock_irqrestore(&state->lock, flags);
@@ -1425,15 +1458,16 @@ static long mc2_admit_task(struct task_struct *tsk)
 		tsk_rt(tsk)->plugin_state = tinfo;
 		tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 		raw_spin_lock(&mode_lock);
-		for(i = 0; i < NR_MODES; i++){
+		for(i = 1; i < NR_MODES; i++){
 			if (in_mode(tsk, i)){
 				mode_sizes[i]++;
 			}
 		}
+		mode_sizes[0]++;
 		raw_spin_unlock(&mode_lock);
 
 	}
-	num_sync_released++;
+	atomic_inc(&num_sync_released);
 	raw_spin_unlock(&global_lock);
 	//raw_spin_unlock_irqrestore(&state->lock, flags);
 	local_irq_restore(flags);
@@ -1728,9 +1762,8 @@ static void mc2_task_exit(struct task_struct *tsk)
 		/* NOTE: drops state->lock */
 		TRACE("mc2_exit()\n");
 
-		raw_spin_lock(&global_lock);
-		num_sync_released--;
-		raw_spin_unlock(&global_lock);
+		atomic_dec(&num_sync_released);
+
 		mc2_update_timer_and_unlock(state);
 	} else {
 		raw_spin_unlock(&state->lock);
@@ -1738,15 +1771,16 @@ static void mc2_task_exit(struct task_struct *tsk)
 	}
 
 	if (lv == CRIT_LEVEL_C) {
-		raw_spin_lock(&global_lock);
+		//raw_spin_lock(&global_lock);
 		raw_spin_lock(&mode_lock);
-		for(i = 0; i < NR_MODES; i++){
+		for(i = 1; i < NR_MODES; i++){
 			if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) )
 				continue;
 			mode_sizes[i]--;
 		}
+		mode_sizes[0]--;
 		raw_spin_unlock(&mode_lock);
-		raw_spin_unlock(&global_lock);
+		//raw_spin_unlock(&global_lock);
 
 		for_each_online_cpu(cpu) {
 			state = cpu_state_for(cpu);
@@ -2340,8 +2374,9 @@ static long mc2_deactivate_plugin(void)
 		}
 
 	}
-	num_sync_released = 0;
 	raw_spin_unlock(&global_lock);
+
+	atomic_set(&num_sync_released, 0);
 	destroy_domain_proc_info(&mc2_domain_proc_info);
 	return 0;
 }
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index acf2b7dc0219..d844180afa28 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -265,3 +265,15 @@ feather_callback void do_sched_trace_request_mode(unsigned long id,
 		put_record(rec);
 	}
 }
+
+feather_callback void do_sched_trace_sys_start(unsigned long id,
+			unsigned long _start)
+{
+	lt_t *start = (lt_t*) _start;
+	struct st_event_record* rec = get_record(ST_SYS_START, NULL);
+	if (rec) {
+		rec->data.sys_start.when = now();
+		rec->data.sys_start.start = *start;
+		put_record(rec);
+	}
+}
\ No newline at end of file
diff --git a/litmus/sync.c b/litmus/sync.c
index f066ea4219a8..7733f6760c52 100644
--- a/litmus/sync.c
+++ b/litmus/sync.c
@@ -16,7 +16,7 @@
 
 #include <litmus/sched_trace.h>
 
-int num_sync_released;
+atomic_t num_sync_released;
 
 struct ts_release_wait {
 	struct list_head list;
@@ -149,6 +149,6 @@ asmlinkage long sys_release_ts(lt_t __user *__delay)
 		start_time *= ONE_MS;
 		ret = do_release_ts(start_time + delay);
 	}
-	num_sync_released = ret;
+	atomic_set(&num_sync_released, ret);
 	return ret;
 }