From b2ecb9f8d20baa3edfb305d263a7f0902ac019f3 Mon Sep 17 00:00:00 2001 From: Jonathan Herman Date: Sat, 30 Mar 2013 11:21:59 -0400 Subject: Removed ARM-specific hacks which disabled less common mixed-criticality features. --- include/litmus/event_group.h | 1 + include/litmus/preempt.h | 2 +- include/litmus/rt_param.h | 11 +- include/litmus/sched_mc.h | 1 + include/litmus/sched_trace.h | 51 +-------- include/trace/events/litmus.h | 22 ---- litmus/ce_domain.c | 1 + litmus/jobs.c | 19 ---- litmus/litmus.c | 3 - litmus/lockdown.c | 8 ++ litmus/preempt.c | 28 ++--- litmus/sched_mc.c | 245 ++++++++++++++++++++++++++---------------- litmus/sched_mc_ce.c | 63 ++++++----- litmus/sched_task_trace.c | 38 +------ litmus/way_tracker.c | 2 + 15 files changed, 226 insertions(+), 269 deletions(-) diff --git a/include/litmus/event_group.h b/include/litmus/event_group.h index b2a6a3ff5627..7b15a7e0412d 100644 --- a/include/litmus/event_group.h +++ b/include/litmus/event_group.h @@ -25,6 +25,7 @@ struct event_list { /* For timer firing */ lt_t fire_time; struct hrtimer timer; + struct hrtimer_start_on_info info; struct list_head queue_node; /* For event_queue */ struct event_group* group; /* For callback */ diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h index cbf315aa01e9..beb4d480f21b 100644 --- a/include/litmus/preempt.h +++ b/include/litmus/preempt.h @@ -13,7 +13,7 @@ extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state); #ifdef CONFIG_PREEMPT_STATE_TRACE const char* sched_state_name(int s); -#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args) +#define TRACE_STATE(fmt, args...) STRACE("SCHED_STATE " fmt, args) #else #define TRACE_STATE(fmt, args...) /* ignore */ #endif diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index a8fe95b32c06..52a0116f7282 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h @@ -268,19 +268,14 @@ struct rt_param { /* Pointer to the page shared between userspace and kernel. */ struct control_page * ctrl_page; - lt_t total_tardy; - lt_t max_tardy; - unsigned int missed; - +#ifdef CONFIG_SCHED_TASK_TRACE lt_t load; lt_t flush; - int load_work; - int flush_work; +#endif - lt_t max_exec_time; - lt_t tot_exec_time; lt_t last_exec_time; lt_t orig_cost; + struct color_ctrl_page color_ctrl_page; struct dgl_group_req *req; enum server_state state; diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h index 70f57cfd2706..567603c5ffff 100644 --- a/include/litmus/sched_mc.h +++ b/include/litmus/sched_mc.h @@ -55,6 +55,7 @@ struct ce_dom_data { struct rt_event event; #else struct hrtimer timer; + struct hrtimer_start_on_info timer_info; #endif }; diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index 0580340d0170..40187826ef19 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h @@ -51,18 +51,9 @@ struct st_switch_away_data { /* A process was switched away from on a given CPU. u64 exec_time; }; -/* changed: like everything */ struct st_completion_data { /* A job completed. */ - u32 exec; - u16 flush_work; - u16 load_work; - u32 flush; - u32 load; - /* u8 forced:1; /\* Set to 1 if job overran and kernel advanced to the */ - /* * next task automatically; set to 0 otherwise. */ - /* *\/ */ - /* u8 __uflags:7; */ - /* u8 __unused[7]; */ + u64 when; + u64 load; }; struct st_block_data { /* A task blocks. 
*/ @@ -86,19 +77,6 @@ struct st_sys_release_data { u64 release; }; -/* changed: easy enough to remove */ -struct st_task_exit_data { - u64 avg_exec_time; - u64 max_exec_time; -}; - -/* changed: calculate yoself */ -struct st_task_tardy_data { - u64 total_tardy; - u32 max_tardy; - u32 missed; -}; - #define DATA(x) struct st_ ## x ## _data x; typedef enum { @@ -113,9 +91,7 @@ typedef enum { ST_BLOCK, ST_RESUME, ST_ACTION, - ST_SYS_RELEASE, - ST_TASK_EXIT, - ST_TASK_TARDY, + ST_SYS_RELEASE } st_event_record_type_t; struct st_event_record { @@ -134,8 +110,6 @@ struct st_event_record { DATA(resume); DATA(action); DATA(sys_release); - DATA(task_exit); - DATA(task_tardy); } data; }; @@ -177,11 +151,6 @@ feather_callback void do_sched_trace_action(unsigned long id, unsigned long action); feather_callback void do_sched_trace_sys_release(unsigned long id, lt_t* start); -feather_callback void do_sched_trace_task_exit(unsigned long id, - struct task_struct* task); -feather_callback void do_sched_trace_task_tardy(unsigned long id, - struct task_struct* task); - #endif #else @@ -306,20 +275,6 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id, trace_litmus_sys_release(when); \ } while (0) -#define sched_trace_task_exit(t) \ - do { \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, \ - do_sched_trace_task_exit, t); \ - trace_litmus_task_exit(t); \ - } while (0) - - -#define sched_trace_task_tardy(t) \ - do { \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, \ - do_sched_trace_task_tardy, t); \ - } while (0) - #define QT_START lt_t _qt_start = litmus_clock() #define QT_END \ sched_trace_log_message("%d P%d [%s@%s:%d]: Took %llu\n\n", \ diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h index 95aae9460cbc..c2dbdc34eb42 100644 --- a/include/trace/events/litmus.h +++ b/include/trace/events/litmus.h @@ -277,28 +277,6 @@ TRACE_EVENT(litmus_sys_release, TP_printk("SynRelease(%Lu)\n", __entry->rel) ); -/* - * Trace task exit - */ -TRACE_EVENT(litmus_task_exit, - - TP_PROTO(struct task_struct *t), - - TP_ARGS(t), - - TP_STRUCT__entry( - __field( pid_t, pid ) - __field( unsigned long long, max_exec_time ) - ), - - TP_fast_assign( - __entry->pid = t ? t->pid : 0; - __entry->max_exec_time = t ? 
t->rt_param.max_exec_time : 0; - ), - - TP_printk("(%u) exit\n", __entry->pid) -); - /* * Containers */ diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c index ca7e6ae67cf3..c750b95581b9 100644 --- a/litmus/ce_domain.c +++ b/litmus/ce_domain.c @@ -93,6 +93,7 @@ void ce_domain_init(domain_t *dom, init_event(&dom_data->event, CRIT_LEVEL_A, ce_timer_callback, event_list_alloc(GFP_ATOMIC)); #else + hrtimer_start_on_info_init(&dom_data->timer_info); hrtimer_init(&dom_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); dom_data->timer.function = ce_timer_callback; #endif diff --git a/litmus/jobs.c b/litmus/jobs.c index 097d7dd94d12..9e6de1b08982 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c @@ -116,25 +116,6 @@ void release_at(struct task_struct *t, lt_t start) */ long complete_job(void) { - lt_t amount; - lt_t now = litmus_clock(); - lt_t exec_time = tsk_rt(current)->job_params.exec_time; - - /* Task statistic summaries */ - tsk_rt(current)->tot_exec_time += exec_time; - if (lt_before(tsk_rt(current)->max_exec_time, exec_time)) - tsk_rt(current)->max_exec_time = exec_time; - - if (is_tardy(current, now)) { - TRACE_TASK(current, "is tardy, now: %llu, deadline: %llu\n", - now, get_deadline(current)); - amount = now - get_deadline(current); - if (lt_after(amount, tsk_rt(current)->max_tardy)) - tsk_rt(current)->max_tardy = amount; - tsk_rt(current)->total_tardy += amount; - ++tsk_rt(current)->missed; - } - TRACE_TASK(current, "user complete\n"); /* Mark that we do not execute anymore */ diff --git a/litmus/litmus.c b/litmus/litmus.c index 3de9252b3223..6dd631cfda4d 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -490,9 +490,6 @@ void litmus_exit_task(struct task_struct* tsk) { if (is_realtime(tsk)) { sched_trace_task_completion(tsk, 1); - sched_trace_task_exit(tsk); - sched_trace_task_tardy(tsk); - litmus->task_exit(tsk); BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); diff --git a/litmus/lockdown.c b/litmus/lockdown.c index 09712554c5b9..bc946f7464c0 100644 --- a/litmus/lockdown.c +++ b/litmus/lockdown.c @@ -63,6 +63,14 @@ u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end) return 0; } +u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end) +{ + TRACE_CUR("Dummy read_in_mem: lock_val: 0x%x unlock_val: 0x%x " + "start: 0x%p end: 0x%p\n", lock_val, unlock_val, + start, end); + return 0; +} + void set_lockdown(u32 lockdown_state) { TRACE_CUR("Dummy set_lockdown function lockdown_state: 0x%x\n", diff --git a/litmus/preempt.c b/litmus/preempt.c index 8f1304afea26..26c6c7a929d9 100644 --- a/litmus/preempt.c +++ b/litmus/preempt.c @@ -22,9 +22,10 @@ void sched_state_will_schedule(struct task_struct* tsk) */ if (likely(task_cpu(tsk) == smp_processor_id())) { VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE); - if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) + if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) { + TRACE_TASK(tsk, "Wrong task\n"); set_sched_state(PICKED_WRONG_TASK); - else + } else set_sched_state(WILL_SCHEDULE); } /* else */ /* /\* Litmus tasks should never be subject to a remote */ @@ -32,8 +33,8 @@ void sched_state_will_schedule(struct task_struct* tsk) /* BUG_ON(is_realtime(tsk)); */ #ifdef CONFIG_PREEMPT_STATE_TRACE - TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", - __builtin_return_address(0)); + STRACE("%d: set_tsk_need_resched() ret:%p\n", + tsk->pid, __builtin_return_address(0)); #endif } @@ -69,9 +70,11 @@ void litmus_reschedule(int cpu) * is not aware of the need to 
reschedule at this point. */ /* is a context switch in progress? */ - if (cpu_is_in_sched_state(cpu, TASK_PICKED)) + if (cpu_is_in_sched_state(cpu, TASK_PICKED)) { + STRACE("Transition onto wrong task\n"); picked_transition_ok = sched_state_transition_on( cpu, TASK_PICKED, PICKED_WRONG_TASK); + } if (!picked_transition_ok && cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) { @@ -90,17 +93,18 @@ void litmus_reschedule(int cpu) smp_send_reschedule(cpu); } - TRACE_STATE("%s picked-ok:%d sched-ok:%d\n", - __FUNCTION__, - picked_transition_ok, - scheduled_transition_ok); + STRACE("%s picked-ok:%d sched-ok:%d\n", + __FUNCTION__, + picked_transition_ok, + scheduled_transition_ok); } void litmus_reschedule_local(void) { - if (is_in_sched_state(TASK_PICKED)) + if (is_in_sched_state(TASK_PICKED)) { + STRACE("Rescheduling into wrong task\n"); set_sched_state(PICKED_WRONG_TASK); - else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) { + } else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) { set_sched_state(WILL_SCHEDULE); set_tsk_need_resched(current); } @@ -111,7 +115,7 @@ void litmus_reschedule_local(void) void sched_state_plugin_check(void) { if (!is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) { - TRACE("!!!! plugin did not call sched_state_task_picked()!" + STRACE("!!!! plugin did not call sched_state_task_picked()!" "Calling sched_state_task_picked() is mandatory---fix this.\n"); set_sched_state(TASK_PICKED); } diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c index 6edf86935a29..c8e50d30a483 100644 --- a/litmus/sched_mc.c +++ b/litmus/sched_mc.c @@ -30,7 +30,7 @@ #include #include #include -#warning "MUST ADD CHECK FOR MAX WAYS" + struct mc_signal { int update:1; int preempt:1; @@ -207,6 +207,7 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) static int mc_preempt_needed(struct domain *dom, struct task_struct* curr) { struct task_struct *next = dom->peek_ready(dom); + if (!next || !curr) { return next && !curr; } else { @@ -223,10 +224,13 @@ static int mc_preempt_needed(struct domain *dom, struct task_struct* curr) static void update_crit_position(struct crit_entry *ce) { struct bheap *heap; + if (is_global(ce->domain)) { heap = domain_data(ce->domain)->heap; + BUG_ON(!heap); BUG_ON(!bheap_node_in_heap(ce->node)); + bheap_delete(cpu_lower_prio, heap, ce->node); bheap_insert(cpu_lower_prio, heap, ce->node); } @@ -239,6 +243,7 @@ static void update_crit_position(struct crit_entry *ce) static void fix_crit_position(struct crit_entry *ce) { struct server *server = &ce->server; + if (is_global(ce->domain) && server->in_transit) { server_state_change(server, server->state, 0); update_crit_position(ce); @@ -368,7 +373,7 @@ static void link_task_to_crit(struct crit_entry *ce, server_state_change(ce_server, SS_ACTIVE, 0); } - TRACE_MC_TASK(ce->server.linked, "Unlinking\n"); + /* TRACE_MC_TASK(ce->server.linked, "Unlinking\n"); */ stop_crit(ce); tsk_rt(ce->server.linked)->server.parent = 0; @@ -552,9 +557,7 @@ static struct task_struct* preempt_crit(struct domain *dom, struct crit_entry *c /* Per-domain preemption */ link_task_to_crit(ce, task); - /* if (old && can_requeue(old)) { */ - /* dom->requeue(dom, old); */ - /* } */ + update_crit_position(ce); /* Preempt actual execution if this is a running task. @@ -574,6 +577,7 @@ static struct task_struct* preempt_crit(struct domain *dom, struct crit_entry *c /** * update_crit_levels() - Update criticality entries for the new cpu state. + * Disables criticality levels lower than @entry's currenly linked task. 
* This should be called after a new task has been linked to @entry. * The caller must hold the @entry->lock, but this method will release it. */ @@ -585,6 +589,8 @@ static void update_crit_levels(struct cpu_entry *entry) struct task_struct *readmit[NUM_CRIT_LEVELS]; enum crit_level level = entry_level(entry); + STRACE("Updating crit levels for cpu %d\n", entry->cpu); + /* Remove lower priority tasks from the entry */ for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { ce = &entry->crit_entries[i]; @@ -609,8 +615,10 @@ static void update_crit_levels(struct cpu_entry *entry) link_task_to_crit(ce, NULL); } TRACE_CRIT_ENTRY(ce, "Removing lower crit\n"); - server_state_change(server, SS_REMOVED, 1); - + server_state_change(server, SS_REMOVED, + is_global(ce->domain)?1:0); + } else { + TRACE_CRIT_ENTRY(ce, "Already removed!\n"); } } /* Need to unlock so we can access domains */ @@ -669,21 +677,26 @@ static void check_global_preempt(struct domain *dom) } } -static void check_partitioned_preempt(struct domain *dom) +static void check_partitioned_preempt(struct cpu_entry *entry, + struct domain *dom) { - struct cpu_entry *entry; - struct crit_entry *ce; + struct crit_entry *ce = domain_data(dom)->crit_entry; - ce = domain_data(dom)->crit_entry; - entry = crit_cpu(ce); + /* Cache next task */ + dom->peek_ready(dom); + + raw_spin_lock(&entry->lock); if (ce->server.state == SS_REMOVED || !mc_preempt_needed(dom, ce->server.linked)) { - return; + goto out_unlock; } entry->signal.preempt = 1; litmus_reschedule(entry->cpu); + + out_unlock: + raw_spin_unlock(&entry->lock); } /** @@ -701,12 +714,7 @@ static void check_for_preempt(struct domain *dom) ce = domain_data(dom)->crit_entry; entry = crit_cpu(ce); - /* Cache next task */ - dom->peek_ready(dom); - - raw_spin_lock(&entry->lock); - check_partitioned_preempt(dom); - raw_spin_unlock(&entry->lock); + check_partitioned_preempt(entry, dom); } } @@ -798,25 +806,24 @@ static void job_completion(struct task_struct *task, int forced) if (lt_before(get_user_release(task), litmus_clock()) || (release_server && tsk_rt(task)->completed)){ - TRACE_TASK(task, "Executable task going back to running\n"); + TRACE_MC_TASK(task, "Executable task going back to running\n"); tsk_rt(task)->completed = 0; } if (release_server || forced) { - /* TODO: Level A does this independently and should not */ - if (release_server && CRIT_LEVEL_A != tsk_mc_crit(task)) { - prepare_for_next_period(task); - } + prepare_for_next_period(task); - TRACE_TASK(task, "Is released: %d, now: %llu, rel: %llu\n", - is_released(task, litmus_clock()), litmus_clock(), - get_release(task)); + TRACE_MC_TASK(task, "Is released: %d, now: %llu, rel: %llu\n", + is_released(task, litmus_clock()), litmus_clock(), + get_release(task)); /* Requeue non-blocking tasks */ if (is_running(task)) { job_arrival(task); } } else if (is_ghost(task)) { + BUG_ON(tsk_rt(task)->linked_on == NO_CPU); + entry = &per_cpu(cpus, tsk_rt(task)->linked_on); ce = &entry->crit_entries[tsk_mc_crit(task)]; @@ -847,24 +854,27 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) #endif struct task_struct *tmp = NULL; struct cpu_entry *entry = crit_cpu(ce); - TRACE("Firing here at %llu\n", litmus_clock()); - TRACE_CRIT_ENTRY(ce, "For this\n"); + int resched = 0; + + TRACE_CRIT_ENTRY(ce, "Firing here at %llu\n", litmus_clock()); raw_spin_lock(&entry->lock); - if (is_ghost(ce->server.linked)) { + if (ce->server.linked && is_ghost(ce->server.linked)) { update_server_time(ce->server.linked); if (budget_exhausted(ce->server.linked)) 
{ tmp = ce->server.linked; } } else { - litmus_reschedule(crit_cpu(ce)->cpu); + resched = 1; } raw_spin_unlock(&entry->lock); if (tmp) job_completion(tmp, 1); + else if (resched) + litmus_reschedule(entry->cpu); #ifndef CONFIG_MERGE_TIMERS return HRTIMER_NORESTART; @@ -891,8 +901,6 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data) ce->server.linked == ce_data->should_schedule) { old_link = ce->server.linked; - link_task_to_crit(ce, NULL); - mc_ce_job_completion(dom, old_link); } raw_spin_unlock(&crit_cpu(ce)->lock); @@ -900,7 +908,7 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data) /* Job completion will check for preemptions by means of calling job * arrival if the task is not blocked */ - if (NULL != old_link) { + if (old_link) { STRACE("old_link " TS " so will call job completion\n", TA(old_link)); raw_spin_unlock(dom->lock); job_completion(old_link, 1); @@ -993,8 +1001,10 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running) entry = &per_cpu(cpus, task_cpu(t)); t->rt_param._domain = entry->crit_entries[level].domain; +#ifdef CONFIG_SCHED_TASK_TRACE tsk_rt(t)->flush = 0; tsk_rt(t)->load = 0; +#endif /* Userspace and kernelspace view of task state may differ. * Model kernel state as a budget enforced container @@ -1098,16 +1108,17 @@ static void mc_task_exit(struct task_struct *task) color_sched_out_task(task); } + /* TODO: restore. This was geting triggered by race conditions even when + * no level-A task was executing */ + if (CRIT_LEVEL_A == tsk_mc_crit(task)) + mc_ce_task_exit_common(task); + remove_from_all(task); if (tsk_rt(task)->scheduled_on != NO_CPU) { per_cpu(cpus, tsk_rt(task)->scheduled_on).scheduled = NULL; tsk_rt(task)->scheduled_on = NO_CPU; } - /* TODO: restore. This was geting triggered by race conditions even when - * no level-A task was executing */ - /* if (CRIT_LEVEL_A == tsk_mc_crit(task)) */ - /* mc_ce_task_exit_common(task); */ local_irq_restore(flags); } @@ -1259,30 +1270,30 @@ static void process_update_signal(struct cpu_entry *entry) stop_crit(ce); server_state_change(crit_server, SS_BLOCKED, 0); } - - } -static void process_signals(struct cpu_entry *entry) +static void process_preempt_signal(struct cpu_entry *entry) { + int i; struct domain *dom; struct crit_entry *ce; - struct mc_signal signal; - struct task_struct *preempted; + struct task_struct *preempted = NULL; + struct server *server; - ce = &entry->crit_entries[CRIT_LEVEL_B]; - dom = ce->domain; + STRACE("Reading preempt signal\n"); - /* Load signals */ - raw_spin_lock(&entry->signal_lock); - signal = entry->signal; - clear_signal(&entry->signal); - raw_spin_unlock(&entry->signal_lock); + for (i = 0; i < NUM_CRIT_LEVELS; i++) { + ce = &entry->crit_entries[i]; + dom = ce->domain; + server = &ce->server; + preempted = NULL; - if (signal.preempt) { + /* Swap locks. 
We cannot acquire a domain lock while + * holding an entry lock or deadlocks will happen + */ raw_spin_lock(dom->lock); - /* A higher-priority task may exist */ - STRACE("Reading preempt signal\n"); + + /* Do domain stuff before grabbing CPU locks */ dom->peek_ready(dom); raw_spin_lock(&entry->lock); @@ -1308,26 +1319,106 @@ static void process_signals(struct cpu_entry *entry) raw_spin_unlock(dom->lock); } - raw_spin_lock(&entry->lock); + break; } else { + raw_spin_unlock(&entry->lock); raw_spin_unlock(dom->lock); } - } else { - raw_spin_lock(&entry->lock); + } +} + +static void process_signals(struct cpu_entry *entry) +{ + struct mc_signal signal; + + /* Load signals */ + raw_spin_lock(&entry->signal_lock); + signal = entry->signal; + clear_signal(&entry->signal); + raw_spin_unlock(&entry->signal_lock); + + if (signal.preempt) { + process_preempt_signal(entry); } + raw_spin_lock(&entry->lock); + if (signal.update) { process_update_signal(entry); } } +static void reschedule_if_signaled(struct cpu_entry *entry) +{ + struct mc_signal signal; + + raw_spin_lock(&entry->signal_lock); + signal = entry->signal; + raw_spin_unlock(&entry->signal_lock); + + if (signal.update || signal.preempt) { + litmus_reschedule_local(); + } +} + +static void pre_schedule(struct task_struct *prev) +{ + lt_t exec, start = litmus_clock(); + + /* Update userspace exec time */ + if (prev && tsk_rt(prev)->last_exec_time) { + exec = start - tsk_rt(prev)->last_exec_time; + tsk_rt(prev)->user_job.exec_time += exec; + } + + /* Flush task pages */ + if (prev && tsk_mc_crit(prev) == CRIT_LEVEL_B && + is_realtime(prev) && get_rt_job(prev) > 1 && lock_cache) { + color_sched_out_task(prev); + +#ifdef CONFIG_SCHED_TASK_TRACE + tsk_rt(prev)->load += litmus_clock() - start; +#endif + } + + TS_LVLA_SCHED_START; + TS_LVLB_SCHED_START; + TS_LVLC_SCHED_START; +} + +static void post_schedule(struct task_struct *next) +{ + lt_t start; + + switch (tsk_mc_crit(next)) { + case CRIT_LEVEL_A: TS_LVLA_SCHED_END(next); break; + case CRIT_LEVEL_B: TS_LVLB_SCHED_END(next); break; + case CRIT_LEVEL_C: TS_LVLC_SCHED_END(next); break; + } + + /* Cache in task pages */ + if (tsk_mc_crit(next) == CRIT_LEVEL_B && lock_cache && + get_rt_job(next) > 1) { + start = litmus_clock(); + + color_sched_in_task(next); + +#ifdef CONFIG_SCHED_TASK_TRACE + BUG_ON(tsk_rt(next)->load); + tsk_rt(next)->load = litmus_clock() - start; +#endif + } + + tsk_rt(next)->last_exec_time = litmus_clock(); +} + /** * mc_schedule() - Return next task which should be scheduled. 
*/ static struct task_struct* mc_schedule(struct task_struct* prev) { - lt_t start, exec; - int out_of_time, sleep, preempt, exists, blocks, global, lower, work; + + int out_of_time, sleep, preempt, exists, blocks, global, lower; struct cpu_entry* entry = &__get_cpu_var(cpus); struct task_struct *next = NULL; @@ -1340,22 +1431,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev) low_prio_arrival(entry->will_schedule); } - if (prev && tsk_rt(prev)->last_exec_time) { - exec = litmus_clock() - tsk_rt(prev)->last_exec_time; - tsk_rt(prev)->user_job.exec_time += exec; - } - - if (prev && tsk_mc_crit(prev) == CRIT_LEVEL_B && - is_realtime(prev) && get_rt_job(prev) > 1 && lock_cache) { - start = litmus_clock(); - work = color_sched_out_task(prev); - tsk_rt(prev)->flush = litmus_clock() - start; - ++tsk_rt(prev)->flush_work; - } - - TS_LVLA_SCHED_START; - TS_LVLB_SCHED_START; - TS_LVLC_SCHED_START; + pre_schedule(prev); raw_spin_lock(&entry->lock); @@ -1406,9 +1482,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev) job_arrival(entry->scheduled); } - /* TODO: move this down somehow */ - sched_state_task_picked(); - + /* Acquires the entry lock */ process_signals(entry); /* Pick next task if none is linked */ @@ -1424,23 +1498,12 @@ static struct task_struct* mc_schedule(struct task_struct* prev) raw_spin_unlock(&entry->lock); - if (next) { - switch (tsk_mc_crit(next)) { - case CRIT_LEVEL_A: TS_LVLA_SCHED_END(next); break; - case CRIT_LEVEL_B: TS_LVLB_SCHED_END(next); break; - case CRIT_LEVEL_C: TS_LVLC_SCHED_END(next); break; - } - } + sched_state_task_picked(); - if (next && tsk_mc_crit(next) == CRIT_LEVEL_B && lock_cache && get_rt_job(next) > 1) { - start = litmus_clock(); - work = color_sched_in_task(next); - tsk_rt(next)->load = litmus_clock() - start; - tsk_rt(next)->load_work = work; - } + reschedule_if_signaled(entry); if (next) { - tsk_rt(next)->last_exec_time = litmus_clock(); + post_schedule(next); TRACE_MC_TASK(next, "Picked this task\n"); } else { STRACE("CPU %d idles at %llu\n", entry->cpu, litmus_clock()); diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c index c799738c0090..9f6929e55ab2 100644 --- a/litmus/sched_mc_ce.c +++ b/litmus/sched_mc_ce.c @@ -118,9 +118,9 @@ unsigned int mc_ce_get_expected_job(const int cpu, const int idx) static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time) { long long st = atomic64_read(&start_time); - //lt_t offset = (when - st) % cycle_time; - lt_t offset = 0; - TRACE("when: %llu cycle_time: %llu start_time: %lld offset %llu\n", + lt_t offset = (when - st) % cycle_time; + /* lt_t offset = 0; */ + STRACE("when: %llu cycle_time: %llu start_time: %lld offset %llu\n", when, cycle_time, st, offset); return offset; } @@ -138,7 +138,7 @@ void mc_ce_job_completion(struct domain *dom, struct task_struct *ts) const struct ce_pid_entry *pid_entry = get_pid_entry(cpu, idx); unsigned int just_finished; - TRACE_TASK(ts, "Completed\n"); + TRACE_MC_TASK(ts, "Completed\n"); /* sched_trace_task_completion(ts, 0); */ /* post-increment is important here */ @@ -152,13 +152,13 @@ void mc_ce_job_completion(struct domain *dom, struct task_struct *ts) if (just_finished < pid_entry->expected_job) { /* this job is already released because it's running behind */ set_rt_flags(ts, RT_F_RUNNING); - TRACE_TASK(ts, "appears behind: the expected job is %u but " + TRACE_MC_TASK(ts, "appears behind: the expected job is %u but " "job %u just completed\n", pid_entry->expected_job, just_finished); } else if 
(pid_entry->expected_job < just_finished) { - printk(KERN_CRIT "job %u completed in expected job %u which " - "seems too early\n", just_finished, - pid_entry->expected_job); + TRACE_MC_TASK(ts, "job %u completed in expected job %u which " + "seems too early\n", just_finished, + pid_entry->expected_job); } } @@ -189,7 +189,7 @@ static int mc_ce_schedule_at(const struct domain *dom, lt_t offset) } /* can only happen if cycle_time is not right */ BUG_ON(pid_entry->acc_time > pid_table->cycle_time); - TRACE("schedule at returning task %d for CPU %d\n", idx, ce_data->cpu); + STRACE("schedule at returning task %d for CPU %d\n", idx, ce_data->cpu); return idx; } @@ -211,7 +211,7 @@ static struct task_struct *mc_ce_schedule(struct task_struct *prev) exists = NULL != ce_data->scheduled; sleep = exists && RT_F_SLEEP == get_rt_flags(ce_data->scheduled); - TRACE("exists: %d, sleep: %d\n", exists, sleep); + STRACE("exists: %d, sleep: %d\n", exists, sleep); if (sleep) mc_ce_job_completion(dom, ce_data->scheduled); @@ -226,7 +226,7 @@ static struct task_struct *mc_ce_schedule(struct task_struct *prev) should_sched_asleep = should_sched_exists && RT_F_SLEEP == get_rt_flags(ce_data->should_schedule); - TRACE("should_sched_exists: %d, should_sched_blocked: %d, " + STRACE("should_sched_exists: %d, should_sched_blocked: %d, " "should_sched_asleep: %d\n", should_sched_exists, should_sched_blocked, should_sched_asleep); @@ -248,7 +248,7 @@ static void mc_ce_finish_switch(struct task_struct *prev) struct domain *dom = get_domain_for(smp_processor_id()); struct ce_dom_data *ce_data = dom->data; - TRACE("finish switch\n"); + STRACE("finish switch\n"); if (is_realtime(current) && CRIT_LEVEL_A == tsk_mc_crit(current)) ce_data->scheduled = current; @@ -495,19 +495,19 @@ lt_t mc_ce_timer_callback_common(struct domain *dom) } } - if (ce_data->should_schedule) { - get_deadline(should_schedule) = - cycle_start_abs + pid_entry->acc_time; - get_release(should_schedule) = tsk_rt(should_schedule)->job_params.deadline - - pid_entry->budget; - tsk_rt(should_schedule)->job_params.exec_time = 0; - - TRACE_MC_TASK(should_schedule, "Released!\n"); - set_rt_flags(ce_data->should_schedule, RT_F_RUNNING); - sched_trace_task_release(should_schedule); - sched_trace_server_release(-should_schedule->pid, get_rt_job(should_schedule), - tsk_rt(should_schedule)->job_params); - } + /* if (ce_data->should_schedule) { */ + /* get_deadline(should_schedule) = */ + /* cycle_start_abs + pid_entry->acc_time; */ + /* get_release(should_schedule) = tsk_rt(should_schedule)->job_params.deadline - */ + /* pid_entry->budget; */ + /* tsk_rt(should_schedule)->job_params.exec_time = 0; */ + + /* TRACE_MC_TASK(should_schedule, "Released!\n"); */ + /* set_rt_flags(ce_data->should_schedule, RT_F_RUNNING); */ + /* sched_trace_task_release(should_schedule); */ + /* sched_trace_server_release(-should_schedule->pid, get_rt_job(should_schedule), */ + /* tsk_rt(should_schedule)->job_params); */ + /* } */ return next_timer_abs; } @@ -535,7 +535,7 @@ static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer) #endif dom = get_domain_for(ce_data->cpu); - TRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu); + STRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu); raw_spin_lock_irqsave(dom->lock, flags); next_timer_abs = mc_ce_timer_callback_common(dom); @@ -569,7 +569,7 @@ static int cancel_all_timers(void) int cancel_res; #endif - TRACE("cancel all timers\n"); + STRACE("cancel all timers\n"); for_each_online_cpu(cpu) { dom = 
get_domain_for(cpu); @@ -579,6 +579,8 @@ static int cancel_all_timers(void) cancel_event(&ce_data->event); #else cancel_res = hrtimer_cancel(&ce_data->timer); + atomic_set(&ce_data->timer_info.state, + HRTIMER_START_ON_INACTIVE); ret = ret || cancel_res; #endif } @@ -598,7 +600,7 @@ static void arm_all_timers(void) int cpu, idx, cpu_for_timer; const lt_t start = atomic64_read(&start_time); - TRACE("arm all timers\n"); + STRACE("arm all timers\n"); for_each_online_cpu(cpu) { dom = get_domain_for(cpu); @@ -619,6 +621,9 @@ static void arm_all_timers(void) add_event(get_event_group_for(cpu_for_timer), &ce_data->event, start); #else + hrtimer_start_on(cpu_for_timer, &ce_data->timer_info, + &ce_data->timer, ns_to_ktime(start), + HRTIMER_MODE_ABS_PINNED); #endif } } @@ -630,7 +635,7 @@ static void arm_all_timers(void) */ void mc_ce_release_at_common(struct task_struct *ts, lt_t start) { - TRACE("release CE at %llu\n", start); + STRACE("release CE at %llu\n", start); if (atomic_inc_and_test(&start_time_set)) { /* in this case, we won the race */ cancel_all_timers(); diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 5a196e1b0fbd..c30fa1d74726 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c @@ -198,15 +198,9 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, struct task_struct *t = (struct task_struct*) _task; struct st_event_record* rec = get_record(ST_COMPLETION, t); if (rec) { - rec->data.completion.exec = tsk_rt(t)->user_job.exec_time; - rec->data.completion.flush = tsk_rt(t)->flush; - rec->data.completion.load = tsk_rt(t)->load; - rec->data.completion.flush_work = tsk_rt(t)->flush_work; - rec->data.completion.load_work = tsk_rt(t)->load_work; - tsk_rt(t)->flush = 0; + rec->data.completion.when = now(); + rec->data.completion.load = tsk_rt(t)->load; tsk_rt(t)->load = 0; - tsk_rt(t)->flush_work = 0; - tsk_rt(t)->load_work = 0; put_record(rec); } } @@ -245,34 +239,6 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, } } -feather_callback void do_sched_trace_task_exit(unsigned long id, - unsigned long _task) -{ - struct task_struct *t = (struct task_struct*) _task; - const lt_t max_exec_time = tsk_rt(t)->max_exec_time; - const lt_t avg_exec_time = div64_u64(tsk_rt(t)->tot_exec_time, (get_job_no(t) - 1)); - - struct st_event_record *rec = get_record(ST_TASK_EXIT, t); - if (rec) { - rec->data.task_exit.avg_exec_time = avg_exec_time; - rec->data.task_exit.max_exec_time = max_exec_time; - put_record(rec); - } -} - -feather_callback void do_sched_trace_task_tardy(unsigned long id, - unsigned long _task) -{ - struct task_struct *t = (struct task_struct*) _task; - struct st_event_record *rec = get_record(ST_TASK_TARDY, t); - if (rec) { - rec->data.task_tardy.max_tardy = tsk_rt(t)->max_tardy; - rec->data.task_tardy.total_tardy = tsk_rt(t)->total_tardy; - rec->data.task_tardy.missed = tsk_rt(t)->missed; - put_record(rec); - } -} - feather_callback void do_sched_trace_action(unsigned long id, unsigned long _task, unsigned long action) diff --git a/litmus/way_tracker.c b/litmus/way_tracker.c index ff392ab09c4d..9131c9658c2a 100644 --- a/litmus/way_tracker.c +++ b/litmus/way_tracker.c @@ -41,7 +41,9 @@ static int take_next_way(unsigned int color) } else { printk(KERN_WARNING "Vury bad\n"); /* Seriously bad. */ +#ifdef CONFIG_KGDB kgdb_breakpoint(); +#endif BUG(); } raw_spin_unlock(&lock); -- cgit v1.2.2
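For context, here is a minimal sketch of the remote timer-arming pattern that the ce_domain.c and sched_mc_ce.c hunks above re-enable. It uses only calls that appear in the patch itself (hrtimer_start_on_info_init(), hrtimer_start_on(), hrtimer_cancel(), and the HRTIMER_START_ON_INACTIVE state reset); the my_* identifiers are hypothetical placeholders standing in for the per-CPU ce_dom_data fields, and the hrtimer_start_on() API is assumed to be the LITMUS^RT extension referenced by the patch, not mainline Linux.

/* Sketch only: illustrates arming a CE cycle timer pinned on a remote CPU. */
#include <linux/hrtimer.h>
#include <litmus/rt_param.h>	/* lt_t (assumed location of the typedef) */

static struct hrtimer my_ce_timer;			/* hypothetical per-CPU cycle timer */
static struct hrtimer_start_on_info my_timer_info;	/* remote-arm bookkeeping, as in ce_dom_data */

static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
	/* The real callback would run mc_ce_timer_callback_common() and re-arm. */
	return HRTIMER_NORESTART;
}

static void my_arm(int cpu, lt_t start)
{
	/* Mirror ce_domain_init(): reset the start_on state, then set up the timer. */
	hrtimer_start_on_info_init(&my_timer_info);
	hrtimer_init(&my_ce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	my_ce_timer.function = my_timer_fn;

	/* Arm the timer pinned on a (possibly remote) CPU, as arm_all_timers()
	 * now does in its #else branch instead of silently skipping it. */
	hrtimer_start_on(cpu, &my_timer_info, &my_ce_timer,
			 ns_to_ktime(start), HRTIMER_MODE_ABS_PINNED);
}

static void my_cancel(void)
{
	/* Mirror cancel_all_timers(): cancel, then reset the start_on state so
	 * the timer can be re-armed on the next synchronous release. */
	hrtimer_cancel(&my_ce_timer);
	atomic_set(&my_timer_info.state, HRTIMER_START_ON_INACTIVE);
}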