-rw-r--r--   include/litmus/sched_trace.h    21
-rw-r--r--   include/trace/events/litmus.h   22
-rw-r--r--   litmus/Kconfig                   2
-rw-r--r--   litmus/jobs.c                   16
-rw-r--r--   litmus/sched_mc2.c             153
-rw-r--r--   litmus/sched_task_trace.c       12
-rw-r--r--   litmus/sync.c                    4
7 files changed, 151 insertions, 79 deletions
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index fb5d8f33aff5..80d952374f7a 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -89,6 +89,11 @@ struct st_request_mode_data {
     u8 __unused[8];
 };
 
+struct st_sys_start_data {
+    u64 when;
+    u64 start;
+};
+
 #define DATA(x) struct st_ ## x ## _data x;
 
 typedef enum {
@@ -104,7 +109,8 @@ typedef enum {
     ST_ACTION,
     ST_SYS_RELEASE,
     ST_ENACT_MODE,
-    ST_REQUEST_MODE
+    ST_REQUEST_MODE,
+    ST_SYS_START,
 } st_event_record_type_t;
 
 struct st_event_record {
@@ -124,6 +130,7 @@ struct st_event_record {
         DATA(sys_release);
         DATA(enact_mode);
         DATA(request_mode);
+        DATA(sys_start);
     } data;
 };
 
@@ -171,6 +178,8 @@ feather_callback void do_sched_trace_enact_mode(unsigned long id,
 
 feather_callback void do_sched_trace_request_mode(unsigned long id,
             struct task_struct* task);
+feather_callback void do_sched_trace_sys_start(unsigned long id,
+            lt_t* start);
 
 #endif
 
@@ -198,6 +207,7 @@ feather_callback void do_sched_trace_request_mode(unsigned long id,
 #define trace_litmus_sys_release(start)
 #define trace_litmus_enact_mode(t)
 #define trace_litmus_request_mode(t)
+#define trace_litmus_sys_start(start)
 
 #endif
 
@@ -286,6 +296,15 @@ feather_callback void do_sched_trace_request_mode(unsigned long id,
             do_sched_trace_request_mode, t); \
         trace_litmus_request_mode(t); \
     } while (0)
+
+/* when is a pointer, it does not need an explicit cast to unsigned long */
+#define sched_trace_sys_start(when) \
+    do { \
+        SCHED_TRACE(SCHED_TRACE_BASE_ID + 13, \
+            do_sched_trace_sys_start, when); \
+        trace_litmus_sys_start(when); \
+    } while (0)
+
 #endif /* __KERNEL__ */
 
 #endif
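
Note: the new sched_trace_sys_start() wrapper takes the address of an lt_t rather than a value, which is why no cast to unsigned long is needed. A minimal usage sketch, mirroring the call this patch adds to mc2_task_resume() in litmus/sched_mc2.c (the local variable name is illustrative):

    lt_t start = tsk_rt(tsk)->sporadic_release_time;  /* time to report as the synchronous start */
    sched_trace_sys_start(&start);                    /* pass a pointer; the callback copies *start */
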
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
index 3094ddd2210b..ef8d60f2df25 100644
--- a/include/trace/events/litmus.h
+++ b/include/trace/events/litmus.h
@@ -273,6 +273,28 @@ TRACE_EVENT(litmus_request_mode,
     TP_printk("Mode request at %Lu\n", __entry->when)
 );
 
+/*
+ * Trace synchronous start
+ */
+TRACE_EVENT(litmus_sys_start,
+
+    TP_PROTO(lt_t *start),
+
+    TP_ARGS(start),
+
+    TP_STRUCT__entry(
+        __field( lt_t, rel )
+        __field( lt_t, when )
+    ),
+
+    TP_fast_assign(
+        __entry->rel = *start;
+        __entry->when = litmus_clock();
+    ),
+
+    TP_printk("SynStart(%Lu) at %Lu\n", __entry->rel, __entry->when)
+);
+
 #endif /* _SCHED_TASK_TRACEPOINT_H */
 
 /* Must stay outside the protection */
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 2253be5e74eb..603a28107a74 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -243,7 +243,7 @@ config SCHED_TASK_TRACE
 config SCHED_TASK_TRACE_SHIFT
     int "Buffer size for sched_trace_xxx() events"
     depends on SCHED_TASK_TRACE
-    range 8 14
+    range 8 18
     default 9
     help
 
diff --git a/litmus/jobs.c b/litmus/jobs.c
index f075d8ed674a..898c09335b9f 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -36,13 +36,17 @@ void prepare_for_next_period(struct task_struct *t)
 
     if (tsk_rt(t)->sporadic_release) {
         TRACE_TASK(t, "sporadic release at %llu\n",
-                tsk_rt(t)->sporadic_release_time);
+                tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no));
         /* sporadic release */
-        setup_release(t, tsk_rt(t)->sporadic_release_time);
-        tsk_rt(t)->sporadic_release = 0;
-    } else if (num_sync_released > 0) {
-        TRACE_TASK(t, "num_sync_released = %d\n", num_sync_released);
-        setup_release(t, tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS);
+        setup_release(t, tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no));
+        /* if (num_sync_released == 0) {
+            tsk_rt(t)->sporadic_release = 0;
+            TRACE("num_sync_released is 0\n");
+        }
+        */
+//    } else if (num_sync_released > 0) {
+//        TRACE_TASK(t, "num_sync_released = %d and job_no = %d\n", num_sync_released, tsk_rt(t)->job_params.job_no);
+//        setup_release(t, tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no));
     } else {
         /* periodic release => add period */
         setup_release(t, get_release(t) + get_rt_period(t));
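
The jobs.c change above staggers early (pre-synchronous-release) jobs by one init-phase length per job. A hedged restatement of the arithmetic only (INIT_PHASE_LENGTH_NS is defined as 1000000000 ns in sched_mc2.c below, so job_no = 3 yields sporadic_release_time + 3 s):

    /* illustrative only, equivalent to the added setup_release() call */
    lt_t rel = tsk_rt(t)->sporadic_release_time
             + INIT_PHASE_LENGTH_NS * (lt_t) tsk_rt(t)->job_params.job_no;
    setup_release(t, rel);
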
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 3b407bd780d0..0c9bb1812367 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -38,7 +38,7 @@
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
-extern int num_sync_released;
+extern atomic_t num_sync_released;
 extern void do_partition(enum crit_level lv, int cpu);
 
 /* _global_env - reservation container for level-C tasks*/
@@ -147,8 +147,8 @@ asmlinkage long sys_enact_mode(void)
            }
        }
        mode_changed = false;
-       local_irq_save(flags);
        if (pending){ //MCR has entered
+           local_irq_save(flags);
            raw_spin_lock(&state->lock);
            //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
            raw_spin_lock(&global_lock);
@@ -256,10 +256,11 @@ asmlinkage long sys_enact_mode(void)
            //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
            raw_spin_unlock(&global_lock);
            //raw_spin_unlock(&state->lock);
+           local_irq_restore(flags);
            mc2_update_timer_and_unlock(state);
        }
        this_cpu_inc(mode_counter);
-       local_irq_restore(flags);
+       //local_irq_restore(flags);
        //cpu_0_spin_flag = !cpu_0_spin_flag;
    }
    else if (!mode_poll_exited) {
@@ -292,13 +293,13 @@ asmlinkage long sys_enact_mode(void)
            //}
        }
        TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter));
-       local_irq_save(flags);
+       //local_irq_save(flags);
        if (mode_changed) {
            lt_t new_mode_basetime = get_release(current);
            //TRACE("CPU%d mode changed\n",state->cpu);
            hrtimer_cancel(&state->timer); //stop listening to old mode timers
            TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock());
-           //local_irq_save(flags);
+           local_irq_save(flags);
            raw_spin_lock(&state->lock);
            state->sup_env = &state->sup_env_modes[mode];
            list_for_each(pos, &state->sup_env->active_reservations){
@@ -315,12 +316,11 @@ asmlinkage long sys_enact_mode(void)
            }
            sup_update_time(state->sup_env, litmus_clock());
            //raw_spin_unlock(&state->lock);
+           local_irq_restore(flags);
            mc2_update_timer_and_unlock(state);
            //local_irq_restore(flags);
 
        }
-       //this_cpu_write(mode_counter, *cpu0_counter);
-       local_irq_restore(flags);
        //state->spin_flag = !state->spin_flag;
    }
    else {
@@ -376,6 +376,7 @@ asmlinkage long sys_request_mode(int new_mode){
        TRACE("Request to %d denied due to pending to %d\n", new_mode, requested_mode);
        raw_spin_unlock(&mode_lock);
        preempt_enable();
+       TRACE("MCR rejected because the previous MCR is pending.\n");
        return -EAGAIN;
    }
    if (mode == new_mode){
@@ -545,7 +546,7 @@ static int get_lowest_prio_cpu(lt_t priority)
 
    ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
    if (!ce->will_schedule && !ce->scheduled) {
-       TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
+       TRACE("CPU %d (local) is the lowest (Idle)!\n", ce->cpu);
        return ce->cpu;
    } else {
        TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
@@ -555,14 +556,16 @@ static int get_lowest_prio_cpu(lt_t priority)
        ce = &_lowest_prio_cpu.cpu_entries[cpu];
        /* If a CPU will call schedule() in the near future, we don't
        return that CPU. */
+       /*
        TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
            ce->scheduled ? (ce->scheduled)->comm : "null",
            ce->scheduled ? (ce->scheduled)->pid : 0,
            ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
+       */
        if (!ce->will_schedule) {
            if (!ce->scheduled) {
                /* Idle cpu, return this. */
-               TRACE("CPU %d is the lowest!\n", ce->cpu);
+               TRACE("CPU %d is the lowest (Idle)!\n", ce->cpu);
                return ce->cpu;
            } else if (ce->lv == CRIT_LEVEL_C &&
                ce->deadline > latest_deadline) {
@@ -572,10 +575,12 @@ static int get_lowest_prio_cpu(lt_t priority)
        }
    }
 
-   if (priority >= latest_deadline)
-       ret = NO_CPU;
+   TRACE("CPU %d is the lowest! deadline = %llu, my priority = %llu\n", ret, latest_deadline, priority);
 
-   TRACE("CPU %d is the lowest!\n", ret);
+   if (priority >= latest_deadline) {
+       TRACE("CPU %d is running a higher-priority task. return NO_CPU\n", ret);
+       ret = NO_CPU;
+   }
 
    return ret;
 }
@@ -630,9 +635,10 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
            //raw_spin_lock(&_lowest_prio_cpu.lock);
            _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
            //raw_spin_unlock(&_lowest_prio_cpu.lock);
-           if (cpu == local_cpu_state()->cpu)
-               litmus_reschedule_local();
-           else
+
+           //if (cpu == local_cpu_state()->cpu)
+           //  litmus_reschedule_local();
+           //else
                reschedule[cpu] = 1;
        }
    }
@@ -808,7 +814,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
        //raw_spin_unlock(&_lowest_prio_cpu.lock);
        TRACE("LOWEST CPU = P%d\n", cpu);
        if (cpu == state->cpu && update > now)
-           litmus_reschedule_local();
+           ;//litmus_reschedule_local();
        else
            reschedule[cpu] = 1;
    }
@@ -839,6 +845,8 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
    return restart;
 }
 
+#define INIT_PHASE_LENGTH_NS (1000000000)
+
 /* mc2_complete_job - syscall backend for job completions
  */
 static long mc2_complete_job(void)
@@ -846,14 +854,17 @@
    ktime_t next_release;
    long err;
 
-   enum crit_level lv;
+   enum crit_level lv = get_task_crit_level(current);
 
    raw_spin_lock(&mode_lock);
    tsk_rt(current)->completed = 1;
    raw_spin_unlock(&mode_lock);
 
-   lv = get_task_crit_level(current);
-
+   if (atomic_read(&num_sync_released) == 0 && mode != 0) {
+       tsk_rt(current)->sporadic_release = 0;
+       TRACE_CUR("num_sync_released is 0\n");
+   }
+
    /* If this the first job instance, we need to reset replenish
       time to the next release time */
    if (tsk_rt(current)->sporadic_release) {
@@ -872,7 +883,7 @@
        raw_spin_lock(&state->lock);
        for (i = 0; i<NR_MODES; i++) {
            if (in_mode(current,i) || i == 0) {
-               state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time;
+               state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
            }
        }
        res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id);
@@ -886,7 +897,7 @@
        raw_spin_lock(&global_lock);
        for (i = 0; i < NR_MODES; i++) {
            if (in_mode(current,i) || i == 0) {
-               _global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time;
+               _global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
            }
        }
        res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id);
@@ -896,7 +907,7 @@
 
    /* set next_replenish to synchronous release time */
    BUG_ON(!res);
-   res->next_replenishment = tsk_rt(current)->sporadic_release_time;
+   res->next_replenishment = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
    /*
    if (get_task_crit_level(current) == CRIT_LEVEL_A) {
        struct table_driven_reservation *tdres;
@@ -909,9 +920,8 @@
    res->cur_budget = 0;
    res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-   // TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
+   TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
    if (lv == CRIT_LEVEL_C){
-       //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
        raw_spin_unlock(&global_lock);
    }
    raw_spin_unlock(&state->lock);
@@ -939,12 +949,12 @@
        res = gmp_find_by_id(_global_env, tsk_mc2_data(current)->res_id);
        if (res && !res->reported){
            res_reported--;
-           TRACE_CUR("RES_REPORTED = %d\n",res_reported);
+           TRACE_CUR("RES_REPORTED = %d\n", res_reported);
            res->reported = 1;
            //Current task doesn't exist in new mode
-           if ( !in_mode(current, requested_mode) ){
-               litmus_reschedule_local();
-           }
+           //if ( !in_mode(current, requested_mode) ){
+           //  litmus_reschedule_local();
+           //}
        }
        raw_spin_unlock(&mode_lock);
    }
@@ -984,8 +994,10 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 
 
    list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
-       if (res->state == RESERVATION_ACTIVE)
-           TRACE_TASK(tsk, "ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu\n", res->id, mode, res->mode, res->cur_budget);
+       if (res->state == RESERVATION_ACTIVE) {
+           struct task_struct *t = res->ops->dispatch_client(res, &time_slice);
+           TRACE_TASK(tsk, "CPU%d ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", state->cpu, res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t));
+       }
    }
 
    list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
@@ -998,9 +1010,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
                return tsk;
            } else {
                //if (!is_init_finished(tsk)) {
-               TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
-               if (mode != res->mode)
-                   TRACE_CUR("Mode does nto match res mode %d\n", res->mode);
+               //  TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
                //  if (num_sync_released != 0 && mode == 0) {
                    //ce = &state->crit_entries[lv];
                    sup_scheduler_update_after(sup_env, res->cur_budget);
@@ -1031,6 +1041,13 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
    enum crit_level lv;
    lt_t time_slice;
 
+   list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
+       if (res->state == RESERVATION_ACTIVE) {
+           struct task_struct *t = res->ops->dispatch_client(res, &time_slice);
+           TRACE_TASK(tsk, "GLOBAL ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t));
+       }
+   }
+
    raw_spin_lock(&mode_lock);
    list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
        BUG_ON(!res);
@@ -1176,10 +1193,15 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
        if (is_realtime(prev))
            gmp_update_time(_global_env, now);
        state->scheduled = mc2_global_dispatch(state);
-       //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
+       _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+       update_cpu_prio(state);
+       raw_spin_unlock(&global_lock);
+   } else {
+       raw_spin_lock(&global_lock);
+       _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
+       update_cpu_prio(state);
        raw_spin_unlock(&global_lock);
    }
-
    /*
    if (!state->scheduled) {
        TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock);
@@ -1251,14 +1273,13 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
    }
    */
    post_schedule(state->scheduled, state->cpu);
-
-   //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
+   /*
    raw_spin_lock(&global_lock);
    _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
    update_cpu_prio(state);
    //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
    raw_spin_unlock(&global_lock);
-
+   */
    raw_spin_unlock(&state->lock);
    if (state->scheduled) {
        TRACE_TASK(state->scheduled, "scheduled.\n");
@@ -1296,7 +1317,6 @@ static void mc2_task_resume(struct task_struct *tsk)
    TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
    //local_irq_save(flags);
-   TRACE_TASK(tsk, "preemptible?? %d\n", preemptible());
    preempt_disable();
    tinfo = get_mc2_state(tsk);
    if (tinfo->cpu != -1)
@@ -1311,7 +1331,8 @@ static void mc2_task_resume(struct task_struct *tsk)
    /* Requeue only if self-suspension was already processed. */
    if (tinfo->has_departed)
    {
-
+       raw_spin_lock(&state->lock);
+       local_irq_save(flags);
        /* We don't want to consider jobs before synchronous releases */
        if (tsk_rt(tsk)->job_params.job_no == 2) {
 /*
@@ -1331,16 +1352,19 @@ static void mc2_task_resume(struct task_struct *tsk)
 */
            TRACE_TASK(tsk, "INIT_FINISHED is SET\n");
            tsk_mc2_data(tsk)->init_finished = 1;
-           //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-           raw_spin_lock(&global_lock);
-           num_sync_released--;
-           //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-           raw_spin_unlock(&global_lock);
-           TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", num_sync_released);
+           atomic_dec(&num_sync_released);
+           //raw_spin_unlock(&global_lock);
+           if (atomic_read(&num_sync_released) == 0) {
+               lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no);
+               TRACE("INIT_PHASE FINISHED. CHANGE TO MODE 1\n");
+               sys_request_mode(1);
+               sched_trace_sys_start(&start);
+           }
+           TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", atomic_read(&num_sync_released));
        }
 
-       raw_spin_lock(&state->lock);
-       local_irq_save(flags);
+       // raw_spin_lock(&state->lock);
+       // local_irq_save(flags);
        /* Assumption: litmus_clock() is synchronized across cores,
         * since we might not actually be executing on tinfo->cpu
         * at the moment. */
@@ -1459,11 +1483,7 @@ static long mc2_admit_task(struct task_struct *tsk)
        if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
            cpu_0_task_exist = true;
        }
-       //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-       raw_spin_lock(&global_lock);
-       num_sync_released++;
-       //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-       raw_spin_unlock(&global_lock);
+       atomic_inc(&num_sync_released);
        local_irq_restore(flags);
        raw_spin_unlock(&state->lock);
        //raw_spin_unlock_irqrestore(&state->lock, flags);
@@ -1533,8 +1553,7 @@ static long mc2_admit_task(struct task_struct *tsk)
        raw_spin_unlock(&mode_lock);
 
        }
-       num_sync_released++;
-       //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
+       atomic_inc(&num_sync_released);
        raw_spin_unlock(&global_lock);
        //raw_spin_unlock_irqrestore(&state->lock, flags);
        local_irq_restore(flags);
@@ -1838,11 +1857,8 @@ static void mc2_task_exit(struct task_struct *tsk)
        /* NOTE: drops state->lock */
        TRACE("mc2_exit()\n");
 
-       //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-       raw_spin_lock(&global_lock);
-       num_sync_released--;
-       //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-       raw_spin_unlock(&global_lock);
+       atomic_dec(&num_sync_released);
+
        mc2_update_timer_and_unlock(state);
    } else {
        raw_spin_unlock(&state->lock);
@@ -1850,17 +1866,16 @@ static void mc2_task_exit(struct task_struct *tsk)
    }
 
    if (lv == CRIT_LEVEL_C) {
-       //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
-       raw_spin_lock(&global_lock);
+       //raw_spin_lock(&global_lock);
        raw_spin_lock(&mode_lock);
-       for(i = 0; i < NR_MODES; i++){
+       for(i = 1; i < NR_MODES; i++){
            if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) )
                continue;
            mode_sizes[i]--;
        }
+       mode_sizes[0]--;
        raw_spin_unlock(&mode_lock);
-       //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
-       raw_spin_unlock(&global_lock);
+       //raw_spin_unlock(&global_lock);
 
        for_each_online_cpu(cpu) {
            state = cpu_state_for(cpu);
@@ -2460,9 +2475,9 @@ static long mc2_deactivate_plugin(void)
    }
 
    }
-   num_sync_released = 0;
-   //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
    raw_spin_unlock(&global_lock);
+
+   atomic_set(&num_sync_released, 0);
    destroy_domain_proc_info(&mc2_domain_proc_info);
    return 0;
 }
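
Taken together, the sched_mc2.c changes above implement the init-phase handshake with the new atomic counter: mc2_admit_task() increments num_sync_released for every admitted task, and mc2_task_resume() decrements it once a task finishes its initialization jobs. A condensed sketch of the completion path added in mc2_task_resume() (control flow only, not a verbatim excerpt):

    atomic_dec(&num_sync_released);
    if (atomic_read(&num_sync_released) == 0) {
        /* last task finished the init phase */
        lt_t start = tsk_rt(tsk)->sporadic_release_time
                   + INIT_PHASE_LENGTH_NS * (lt_t) tsk_rt(tsk)->job_params.job_no;
        sys_request_mode(1);            /* leave mode 0 */
        sched_trace_sys_start(&start);  /* record the synchronous start instant */
    }
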
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index acf2b7dc0219..d844180afa28 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -265,3 +265,15 @@ feather_callback void do_sched_trace_request_mode(unsigned long id,
        put_record(rec);
    }
 }
+
+feather_callback void do_sched_trace_sys_start(unsigned long id,
+           unsigned long _start)
+{
+   lt_t *start = (lt_t*) _start;
+   struct st_event_record* rec = get_record(ST_SYS_START, NULL);
+   if (rec) {
+       rec->data.sys_start.when = now();
+       rec->data.sys_start.start = *start;
+       put_record(rec);
+   }
+}
\ No newline at end of file
diff --git a/litmus/sync.c b/litmus/sync.c
index f066ea4219a8..7733f6760c52 100644
--- a/litmus/sync.c
+++ b/litmus/sync.c
@@ -16,7 +16,7 @@
 
 #include <litmus/sched_trace.h>
 
-int num_sync_released;
+atomic_t num_sync_released;
 
 struct ts_release_wait {
    struct list_head list;
@@ -149,6 +149,6 @@ asmlinkage long sys_release_ts(lt_t __user *__delay)
        start_time *= ONE_MS;
        ret = do_release_ts(start_time + delay);
    }
-   num_sync_released = ret;
+   atomic_set(&num_sync_released, ret);
    return ret;
 }
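
With num_sync_released converted from a lock-protected int to an atomic_t, the global_lock acquisitions that previously guarded it are dropped and all access goes through the atomic helpers. A summary of the access pattern used across this patch (assuming the standard Linux atomic_t API):

    atomic_set(&num_sync_released, ret);       /* sys_release_ts(): number of tasks released */
    atomic_inc(&num_sync_released);            /* mc2_admit_task(): one more task to wait for */
    atomic_dec(&num_sync_released);            /* mc2_task_resume() / mc2_task_exit() */
    if (atomic_read(&num_sync_released) == 0)  /* init phase complete, safe to change mode */
        ;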