author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-03-30 11:21:59 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-03-30 11:21:59 -0400
commit		b2ecb9f8d20baa3edfb305d263a7f0902ac019f3 (patch)
tree		77af90862b09542fa6fbcae7117b41b19086e552
parent		c31763ecf41cbcdb61e8960f0354d8b2e39a8645 (diff)
Removed ARM-specific hacks which disabled less common mixed-criticality features. (branch: wip-mc)
-rw-r--r--	include/litmus/event_group.h	1
-rw-r--r--	include/litmus/preempt.h	2
-rw-r--r--	include/litmus/rt_param.h	11
-rw-r--r--	include/litmus/sched_mc.h	1
-rw-r--r--	include/litmus/sched_trace.h	51
-rw-r--r--	include/trace/events/litmus.h	22
-rw-r--r--	litmus/ce_domain.c	1
-rw-r--r--	litmus/jobs.c	19
-rw-r--r--	litmus/litmus.c	3
-rw-r--r--	litmus/lockdown.c	8
-rw-r--r--	litmus/preempt.c	28
-rw-r--r--	litmus/sched_mc.c	245
-rw-r--r--	litmus/sched_mc_ce.c	63
-rw-r--r--	litmus/sched_task_trace.c	38
-rw-r--r--	litmus/way_tracker.c	2
15 files changed, 226 insertions, 269 deletions
diff --git a/include/litmus/event_group.h b/include/litmus/event_group.h
index b2a6a3ff5627..7b15a7e0412d 100644
--- a/include/litmus/event_group.h
+++ b/include/litmus/event_group.h
@@ -25,6 +25,7 @@ struct event_list {
 	/* For timer firing */
 	lt_t fire_time;
 	struct hrtimer timer;
+	struct hrtimer_start_on_info info;
 
 	struct list_head queue_node;	/* For event_queue */
 	struct event_group* group;	/* For callback */
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
index cbf315aa01e9..beb4d480f21b 100644
--- a/include/litmus/preempt.h
+++ b/include/litmus/preempt.h
@@ -13,7 +13,7 @@ extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
 
 #ifdef CONFIG_PREEMPT_STATE_TRACE
 const char* sched_state_name(int s);
-#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
+#define TRACE_STATE(fmt, args...) STRACE("SCHED_STATE " fmt, args)
 #else
 #define TRACE_STATE(fmt, args...) /* ignore */
 #endif
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index a8fe95b32c06..52a0116f7282 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -268,19 +268,14 @@ struct rt_param {
 	/* Pointer to the page shared between userspace and kernel. */
 	struct control_page * ctrl_page;
 
-	lt_t total_tardy;
-	lt_t max_tardy;
-	unsigned int missed;
-
+#ifdef CONFIG_SCHED_TASK_TRACE
 	lt_t load;
 	lt_t flush;
-	int load_work;
-	int flush_work;
+#endif
 
-	lt_t max_exec_time;
-	lt_t tot_exec_time;
 	lt_t last_exec_time;
 	lt_t orig_cost;
+
 	struct color_ctrl_page color_ctrl_page;
 	struct dgl_group_req *req;
 	enum server_state state;
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 70f57cfd2706..567603c5ffff 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -55,6 +55,7 @@ struct ce_dom_data {
 	struct rt_event event;
 #else
 	struct hrtimer timer;
+	struct hrtimer_start_on_info timer_info;
 #endif
 };
 
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 0580340d0170..40187826ef19 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -51,18 +51,9 @@ struct st_switch_away_data { /* A process was switched away from on a given CPU.
 	u64 exec_time;
 };
 
-/* changed: like everything */
 struct st_completion_data {	/* A job completed. */
-	u32 exec;
-	u16 flush_work;
-	u16 load_work;
-	u32 flush;
-	u32 load;
-	/* u8 forced:1; /\* Set to 1 if job overran and kernel advanced to the */
-	/* * next task automatically; set to 0 otherwise. */
-	/* *\/ */
-	/* u8 __uflags:7; */
-	/* u8 __unused[7]; */
+	u64 when;
+	u64 load;
 };
 
 struct st_block_data {	/* A task blocks. */
@@ -86,19 +77,6 @@ struct st_sys_release_data {
 	u64 release;
 };
 
-/* changed: easy enough to remove */
-struct st_task_exit_data {
-	u64 avg_exec_time;
-	u64 max_exec_time;
-};
-
-/* changed: calculate yoself */
-struct st_task_tardy_data {
-	u64 total_tardy;
-	u32 max_tardy;
-	u32 missed;
-};
-
 #define DATA(x) struct st_ ## x ## _data x;
 
 typedef enum {
@@ -113,9 +91,7 @@ typedef enum {
 	ST_BLOCK,
 	ST_RESUME,
 	ST_ACTION,
-	ST_SYS_RELEASE,
-	ST_TASK_EXIT,
-	ST_TASK_TARDY,
+	ST_SYS_RELEASE
 } st_event_record_type_t;
 
 struct st_event_record {
@@ -134,8 +110,6 @@ struct st_event_record {
 		DATA(resume);
 		DATA(action);
 		DATA(sys_release);
-		DATA(task_exit);
-		DATA(task_tardy);
 	} data;
 };
 
@@ -177,11 +151,6 @@ feather_callback void do_sched_trace_action(unsigned long id,
 					     unsigned long action);
 feather_callback void do_sched_trace_sys_release(unsigned long id,
 						 lt_t* start);
-feather_callback void do_sched_trace_task_exit(unsigned long id,
-					       struct task_struct* task);
-feather_callback void do_sched_trace_task_tardy(unsigned long id,
-						struct task_struct* task);
-
 #endif
 
 #else
@@ -306,20 +275,6 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id,
 		trace_litmus_sys_release(when);			\
 	} while (0)
 
-#define sched_trace_task_exit(t)				\
-	do {							\
-		SCHED_TRACE(SCHED_TRACE_BASE_ID + 11,		\
-			    do_sched_trace_task_exit, t);	\
-		trace_litmus_task_exit(t);			\
-	} while (0)
-
-
-#define sched_trace_task_tardy(t)				\
-	do {							\
-		SCHED_TRACE(SCHED_TRACE_BASE_ID + 12,		\
-			    do_sched_trace_task_tardy, t);	\
-	} while (0)
-
 #define QT_START lt_t _qt_start = litmus_clock()
 #define QT_END							\
 	sched_trace_log_message("%d P%d [%s@%s:%d]: Took %llu\n\n",	\
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
index 95aae9460cbc..c2dbdc34eb42 100644
--- a/include/trace/events/litmus.h
+++ b/include/trace/events/litmus.h
@@ -278,28 +278,6 @@ TRACE_EVENT(litmus_sys_release,
 );
 
 /*
- * Trace task exit
- */
-TRACE_EVENT(litmus_task_exit,
-
-	TP_PROTO(struct task_struct *t),
-
-	TP_ARGS(t),
-
-	TP_STRUCT__entry(
-		__field( pid_t, pid )
-		__field( unsigned long long, max_exec_time )
-	),
-
-	TP_fast_assign(
-		__entry->pid = t ? t->pid : 0;
-		__entry->max_exec_time = t ? t->rt_param.max_exec_time : 0;
-	),
-
-	TP_printk("(%u) exit\n", __entry->pid)
-);
-
-/*
  * Containers
  */
 TRACE_EVENT(litmus_container_param,
diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c
index ca7e6ae67cf3..c750b95581b9 100644
--- a/litmus/ce_domain.c
+++ b/litmus/ce_domain.c
@@ -93,6 +93,7 @@ void ce_domain_init(domain_t *dom,
 	init_event(&dom_data->event, CRIT_LEVEL_A, ce_timer_callback,
 		   event_list_alloc(GFP_ATOMIC));
 #else
+	hrtimer_start_on_info_init(&dom_data->timer_info);
 	hrtimer_init(&dom_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	dom_data->timer.function = ce_timer_callback;
 #endif
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 097d7dd94d12..9e6de1b08982 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -116,25 +116,6 @@ void release_at(struct task_struct *t, lt_t start)
  */
 long complete_job(void)
 {
-	lt_t amount;
-	lt_t now = litmus_clock();
-	lt_t exec_time = tsk_rt(current)->job_params.exec_time;
-
-	/* Task statistic summaries */
-	tsk_rt(current)->tot_exec_time += exec_time;
-	if (lt_before(tsk_rt(current)->max_exec_time, exec_time))
-		tsk_rt(current)->max_exec_time = exec_time;
-
-	if (is_tardy(current, now)) {
-		TRACE_TASK(current, "is tardy, now: %llu, deadline: %llu\n",
-			   now, get_deadline(current));
-		amount = now - get_deadline(current);
-		if (lt_after(amount, tsk_rt(current)->max_tardy))
-			tsk_rt(current)->max_tardy = amount;
-		tsk_rt(current)->total_tardy += amount;
-		++tsk_rt(current)->missed;
-	}
-
 	TRACE_TASK(current, "user complete\n");
 
 	/* Mark that we do not execute anymore */
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 3de9252b3223..6dd631cfda4d 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -490,9 +490,6 @@ void litmus_exit_task(struct task_struct* tsk)
 {
 	if (is_realtime(tsk)) {
 		sched_trace_task_completion(tsk, 1);
-		sched_trace_task_exit(tsk);
-		sched_trace_task_tardy(tsk);
-
 		litmus->task_exit(tsk);
 
 		BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
diff --git a/litmus/lockdown.c b/litmus/lockdown.c
index 09712554c5b9..bc946f7464c0 100644
--- a/litmus/lockdown.c
+++ b/litmus/lockdown.c
@@ -63,6 +63,14 @@ u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
 	return 0;
 }
 
+u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
+{
+	TRACE_CUR("Dummy read_in_mem: lock_val: 0x%x unlock_val: 0x%x "
+		  "start: 0x%p end: 0x%p\n", lock_val, unlock_val,
+		  start, end);
+	return 0;
+}
+
 void set_lockdown(u32 lockdown_state)
 {
 	TRACE_CUR("Dummy set_lockdown function lockdown_state: 0x%x\n",
diff --git a/litmus/preempt.c b/litmus/preempt.c
index 8f1304afea26..26c6c7a929d9 100644
--- a/litmus/preempt.c
+++ b/litmus/preempt.c
@@ -22,9 +22,10 @@ void sched_state_will_schedule(struct task_struct* tsk)
 	 */
 	if (likely(task_cpu(tsk) == smp_processor_id())) {
 		VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE);
-		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
+		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) {
+			TRACE_TASK(tsk, "Wrong task\n");
 			set_sched_state(PICKED_WRONG_TASK);
-		else
+		} else
 			set_sched_state(WILL_SCHEDULE);
 	} /* else */
 	/* /\* Litmus tasks should never be subject to a remote */
@@ -32,8 +33,8 @@ void sched_state_will_schedule(struct task_struct* tsk)
 	/* BUG_ON(is_realtime(tsk)); */
 
 #ifdef CONFIG_PREEMPT_STATE_TRACE
-	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
-		   __builtin_return_address(0));
+	STRACE("%d: set_tsk_need_resched() ret:%p\n",
+	       tsk->pid, __builtin_return_address(0));
 #endif
 }
 
@@ -69,9 +70,11 @@ void litmus_reschedule(int cpu)
 	 * is not aware of the need to reschedule at this point. */
 
 	/* is a context switch in progress? */
-	if (cpu_is_in_sched_state(cpu, TASK_PICKED))
+	if (cpu_is_in_sched_state(cpu, TASK_PICKED)) {
+		STRACE("Transition onto wrong task\n");
 		picked_transition_ok = sched_state_transition_on(
 			cpu, TASK_PICKED, PICKED_WRONG_TASK);
+	}
 
 	if (!picked_transition_ok &&
 	    cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) {
@@ -90,17 +93,18 @@ void litmus_reschedule(int cpu)
 		smp_send_reschedule(cpu);
 	}
 
-	TRACE_STATE("%s picked-ok:%d sched-ok:%d\n",
+	STRACE("%s picked-ok:%d sched-ok:%d\n",
 		    __FUNCTION__,
 		    picked_transition_ok,
 		    scheduled_transition_ok);
 }
 
 void litmus_reschedule_local(void)
 {
-	if (is_in_sched_state(TASK_PICKED))
+	if (is_in_sched_state(TASK_PICKED)) {
+		STRACE("Rescheduling into wrong task\n");
 		set_sched_state(PICKED_WRONG_TASK);
-	else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) {
+	} else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) {
 		set_sched_state(WILL_SCHEDULE);
 		set_tsk_need_resched(current);
 	}
@@ -111,7 +115,7 @@ void litmus_reschedule_local(void)
 void sched_state_plugin_check(void)
 {
 	if (!is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) {
-		TRACE("!!!! plugin did not call sched_state_task_picked()!"
+		STRACE("!!!! plugin did not call sched_state_task_picked()!"
 		      "Calling sched_state_task_picked() is mandatory---fix this.\n");
 		set_sched_state(TASK_PICKED);
 	}
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 6edf86935a29..c8e50d30a483 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -30,7 +30,7 @@
 #include <litmus/dgl.h>
 #include <litmus/color.h>
 #include <litmus/way_tracker.h>
-#warning "MUST ADD CHECK FOR MAX WAYS"
+
 struct mc_signal {
 	int update:1;
 	int preempt:1;
@@ -207,6 +207,7 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 static int mc_preempt_needed(struct domain *dom, struct task_struct* curr)
 {
 	struct task_struct *next = dom->peek_ready(dom);
+
 	if (!next || !curr) {
 		return next && !curr;
 	} else {
@@ -223,10 +224,13 @@ static int mc_preempt_needed(struct domain *dom, struct task_struct* curr)
 static void update_crit_position(struct crit_entry *ce)
 {
 	struct bheap *heap;
+
 	if (is_global(ce->domain)) {
 		heap = domain_data(ce->domain)->heap;
+
 		BUG_ON(!heap);
 		BUG_ON(!bheap_node_in_heap(ce->node));
+
 		bheap_delete(cpu_lower_prio, heap, ce->node);
 		bheap_insert(cpu_lower_prio, heap, ce->node);
 	}
@@ -239,6 +243,7 @@ static void update_crit_position(struct crit_entry *ce)
 static void fix_crit_position(struct crit_entry *ce)
 {
 	struct server *server = &ce->server;
+
 	if (is_global(ce->domain) && server->in_transit) {
 		server_state_change(server, server->state, 0);
 		update_crit_position(ce);
@@ -368,7 +373,7 @@ static void link_task_to_crit(struct crit_entry *ce,
 		server_state_change(ce_server, SS_ACTIVE, 0);
 	}
 
-	TRACE_MC_TASK(ce->server.linked, "Unlinking\n");
+	/* TRACE_MC_TASK(ce->server.linked, "Unlinking\n"); */
 
 	stop_crit(ce);
 	tsk_rt(ce->server.linked)->server.parent = 0;
@@ -552,9 +557,7 @@ static struct task_struct* preempt_crit(struct domain *dom, struct crit_entry *c
 
 	/* Per-domain preemption */
 	link_task_to_crit(ce, task);
-	/* if (old && can_requeue(old)) { */
-	/* 	dom->requeue(dom, old); */
-	/* } */
+
 	update_crit_position(ce);
 
 	/* Preempt actual execution if this is a running task.
@@ -574,6 +577,7 @@ static struct task_struct* preempt_crit(struct domain *dom, struct crit_entry *c
 
 /**
  * update_crit_levels() - Update criticality entries for the new cpu state.
+ * Disables criticality levels lower than @entry's currenly linked task.
  * This should be called after a new task has been linked to @entry.
  * The caller must hold the @entry->lock, but this method will release it.
  */
@@ -585,6 +589,8 @@ static void update_crit_levels(struct cpu_entry *entry)
 	struct task_struct *readmit[NUM_CRIT_LEVELS];
 	enum crit_level level = entry_level(entry);
 
+	STRACE("Updating crit levels for cpu %d\n", entry->cpu);
+
 	/* Remove lower priority tasks from the entry */
 	for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
 		ce = &entry->crit_entries[i];
@@ -609,8 +615,10 @@ static void update_crit_levels(struct cpu_entry *entry)
 				link_task_to_crit(ce, NULL);
 			}
 			TRACE_CRIT_ENTRY(ce, "Removing lower crit\n");
-			server_state_change(server, SS_REMOVED, 1);
-
+			server_state_change(server, SS_REMOVED,
+					    is_global(ce->domain)?1:0);
+		} else {
+			TRACE_CRIT_ENTRY(ce, "Already removed!\n");
 		}
 	}
 	/* Need to unlock so we can access domains */
@@ -669,21 +677,26 @@ static void check_global_preempt(struct domain *dom)
 	}
 }
 
-static void check_partitioned_preempt(struct domain *dom)
+static void check_partitioned_preempt(struct cpu_entry *entry,
+				      struct domain *dom)
 {
-	struct cpu_entry *entry;
-	struct crit_entry *ce;
+	struct crit_entry *ce = domain_data(dom)->crit_entry;
 
-	ce = domain_data(dom)->crit_entry;
-	entry = crit_cpu(ce);
+	/* Cache next task */
+	dom->peek_ready(dom);
+
+	raw_spin_lock(&entry->lock);
 
 	if (ce->server.state == SS_REMOVED ||
 	    !mc_preempt_needed(dom, ce->server.linked)) {
-		return;
+		goto out_unlock;
 	}
 
 	entry->signal.preempt = 1;
 	litmus_reschedule(entry->cpu);
+
+out_unlock:
+	raw_spin_unlock(&entry->lock);
 }
 
 /**
@@ -701,12 +714,7 @@ static void check_for_preempt(struct domain *dom)
 		ce = domain_data(dom)->crit_entry;
 		entry = crit_cpu(ce);
 
-		/* Cache next task */
-		dom->peek_ready(dom);
-
-		raw_spin_lock(&entry->lock);
-		check_partitioned_preempt(dom);
-		raw_spin_unlock(&entry->lock);
+		check_partitioned_preempt(entry, dom);
 	}
 }
 
@@ -798,25 +806,24 @@ static void job_completion(struct task_struct *task, int forced)
 
 	if (lt_before(get_user_release(task), litmus_clock()) ||
 	    (release_server && tsk_rt(task)->completed)){
-		TRACE_TASK(task, "Executable task going back to running\n");
+		TRACE_MC_TASK(task, "Executable task going back to running\n");
 		tsk_rt(task)->completed = 0;
 	}
 
 	if (release_server || forced) {
-		/* TODO: Level A does this independently and should not */
-		if (release_server && CRIT_LEVEL_A != tsk_mc_crit(task)) {
-			prepare_for_next_period(task);
-		}
+		prepare_for_next_period(task);
 
-		TRACE_TASK(task, "Is released: %d, now: %llu, rel: %llu\n",
+		TRACE_MC_TASK(task, "Is released: %d, now: %llu, rel: %llu\n",
 			   is_released(task, litmus_clock()), litmus_clock(),
 			   get_release(task));
 
 		/* Requeue non-blocking tasks */
 		if (is_running(task)) {
 			job_arrival(task);
 		}
 	} else if (is_ghost(task)) {
+		BUG_ON(tsk_rt(task)->linked_on == NO_CPU);
+
 		entry = &per_cpu(cpus, tsk_rt(task)->linked_on);
 		ce = &entry->crit_entries[tsk_mc_crit(task)];
 
@@ -847,24 +854,27 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 #endif
 	struct task_struct *tmp = NULL;
 	struct cpu_entry *entry = crit_cpu(ce);
-	TRACE("Firing here at %llu\n", litmus_clock());
-	TRACE_CRIT_ENTRY(ce, "For this\n");
+	int resched = 0;
+
+	TRACE_CRIT_ENTRY(ce, "Firing here at %llu\n", litmus_clock());
 
 	raw_spin_lock(&entry->lock);
 
-	if (is_ghost(ce->server.linked)) {
+	if (ce->server.linked && is_ghost(ce->server.linked)) {
 		update_server_time(ce->server.linked);
 		if (budget_exhausted(ce->server.linked)) {
 			tmp = ce->server.linked;
 		}
 	} else {
-		litmus_reschedule(crit_cpu(ce)->cpu);
+		resched = 1;
 	}
 
 	raw_spin_unlock(&entry->lock);
 
 	if (tmp)
 		job_completion(tmp, 1);
+	else if (resched)
+		litmus_reschedule(entry->cpu);
 
 #ifndef CONFIG_MERGE_TIMERS
 	return HRTIMER_NORESTART;
@@ -891,8 +901,6 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
 	    ce->server.linked == ce_data->should_schedule)
 	{
 		old_link = ce->server.linked;
-		link_task_to_crit(ce, NULL);
-		mc_ce_job_completion(dom, old_link);
 	}
 	raw_spin_unlock(&crit_cpu(ce)->lock);
 
@@ -900,7 +908,7 @@ static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
 
 	/* Job completion will check for preemptions by means of calling job
 	 * arrival if the task is not blocked */
-	if (NULL != old_link) {
+	if (old_link) {
 		STRACE("old_link " TS " so will call job completion\n", TA(old_link));
 		raw_spin_unlock(dom->lock);
 		job_completion(old_link, 1);
@@ -993,8 +1001,10 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 		entry = &per_cpu(cpus, task_cpu(t));
 	t->rt_param._domain = entry->crit_entries[level].domain;
 
+#ifdef CONFIG_SCHED_TASK_TRACE
 	tsk_rt(t)->flush = 0;
 	tsk_rt(t)->load = 0;
+#endif
 
 	/* Userspace and kernelspace view of task state may differ.
 	 * Model kernel state as a budget enforced container
@@ -1098,16 +1108,17 @@ static void mc_task_exit(struct task_struct *task)
 		color_sched_out_task(task);
 	}
 
+	/* TODO: restore. This was geting triggered by race conditions even when
+	 * no level-A task was executing */
+	if (CRIT_LEVEL_A == tsk_mc_crit(task))
+		mc_ce_task_exit_common(task);
+
 	remove_from_all(task);
 	if (tsk_rt(task)->scheduled_on != NO_CPU) {
 		per_cpu(cpus, tsk_rt(task)->scheduled_on).scheduled = NULL;
 		tsk_rt(task)->scheduled_on = NO_CPU;
 	}
 
-	/* TODO: restore. This was geting triggered by race conditions even when
-	 * no level-A task was executing */
-	/* if (CRIT_LEVEL_A == tsk_mc_crit(task)) */
-	/* 	mc_ce_task_exit_common(task); */
 
 	local_irq_restore(flags);
 }
@@ -1259,30 +1270,30 @@ static void process_update_signal(struct cpu_entry *entry)
 		stop_crit(ce);
 		server_state_change(crit_server, SS_BLOCKED, 0);
 	}
-
-
 }
 
-static void process_signals(struct cpu_entry *entry)
+static void process_preempt_signal(struct cpu_entry *entry)
 {
+	int i;
 	struct domain *dom;
 	struct crit_entry *ce;
-	struct mc_signal signal;
-	struct task_struct *preempted;
+	struct task_struct *preempted = NULL;
+	struct server *server;
 
-	ce = &entry->crit_entries[CRIT_LEVEL_B];
-	dom = ce->domain;
+	STRACE("Reading preempt signal\n");
 
-	/* Load signals */
-	raw_spin_lock(&entry->signal_lock);
-	signal = entry->signal;
-	clear_signal(&entry->signal);
-	raw_spin_unlock(&entry->signal_lock);
+	for (i = 0; i < NUM_CRIT_LEVELS; i++) {
+		ce = &entry->crit_entries[i];
+		dom = ce->domain;
+		server = &ce->server;
+		preempted = NULL;
 
-	if (signal.preempt) {
+		/* Swap locks. We cannot acquire a domain lock while
+		 * holding an entry lock or deadlocks will happen
+		 */
 		raw_spin_lock(dom->lock);
-		/* A higher-priority task may exist */
-		STRACE("Reading preempt signal\n");
+
+		/* Do domain stuff before grabbing CPU locks */
 		dom->peek_ready(dom);
 
 		raw_spin_lock(&entry->lock);
@@ -1308,26 +1319,106 @@ static void process_signals(struct cpu_entry *entry)
 			raw_spin_unlock(dom->lock);
 		}
 
-			raw_spin_lock(&entry->lock);
+			break;
 		} else {
+			raw_spin_unlock(&entry->lock);
 			raw_spin_unlock(dom->lock);
 		}
-	} else {
-		raw_spin_lock(&entry->lock);
+	}
+}
+
+static void process_signals(struct cpu_entry *entry)
+{
+	struct mc_signal signal;
+
+	/* Load signals */
+	raw_spin_lock(&entry->signal_lock);
+	signal = entry->signal;
+	clear_signal(&entry->signal);
+	raw_spin_unlock(&entry->signal_lock);
+
+	if (signal.preempt) {
+		process_preempt_signal(entry);
 	}
 
+	raw_spin_lock(&entry->lock);
+
 	if (signal.update) {
 		process_update_signal(entry);
 	}
 }
 
+static void reschedule_if_signaled(struct cpu_entry *entry)
+{
+	struct mc_signal signal;
+
+	raw_spin_lock(&entry->signal_lock);
+	signal = entry->signal;
+	raw_spin_unlock(&entry->signal_lock);
+
+	if (signal.update || signal.preempt) {
+		litmus_reschedule_local();
+	}
+}
+
+static void pre_schedule(struct task_struct *prev)
+{
+	lt_t exec, start = litmus_clock();
+
+	/* Update userspace exec time */
+	if (prev && tsk_rt(prev)->last_exec_time) {
+		exec = start - tsk_rt(prev)->last_exec_time;
+		tsk_rt(prev)->user_job.exec_time += exec;
+	}
+
+	/* Flush task pages */
+	if (prev && tsk_mc_crit(prev) == CRIT_LEVEL_B &&
+	    is_realtime(prev) && get_rt_job(prev) > 1 && lock_cache) {
+		color_sched_out_task(prev);
+
+#ifdef CONFIG_SCHED_TASK_TRACE
+		tsk_rt(prev)->load += litmus_clock() - start;
+#endif
+	}
+
+	TS_LVLA_SCHED_START;
+	TS_LVLB_SCHED_START;
+	TS_LVLC_SCHED_START;
+}
+
+static void post_schedule(struct task_struct *next)
+{
+	lt_t start;
+
+	switch (tsk_mc_crit(next)) {
+	case CRIT_LEVEL_A: TS_LVLA_SCHED_END(next); break;
+	case CRIT_LEVEL_B: TS_LVLB_SCHED_END(next); break;
+	case CRIT_LEVEL_C: TS_LVLC_SCHED_END(next); break;
+	}
+
+	/* Cache in task pages */
+	if (tsk_mc_crit(next) == CRIT_LEVEL_B && lock_cache &&
+	    get_rt_job(next) > 1) {
+		start = litmus_clock();
+
+		color_sched_in_task(next);
+
+#ifdef CONFIG_SCHED_TASK_TRACE
+		BUG_ON(tsk_rt(next)->load);
+		tsk_rt(next)->load = litmus_clock() - start;
+#endif
+	}
+
+	tsk_rt(next)->last_exec_time = litmus_clock();
+}
+
 /**
  * mc_schedule() - Return next task which should be scheduled.
  */
 static struct task_struct* mc_schedule(struct task_struct* prev)
 {
-	lt_t start, exec;
-	int out_of_time, sleep, preempt, exists, blocks, global, lower, work;
+
+	int out_of_time, sleep, preempt, exists, blocks, global, lower;
 	struct cpu_entry* entry = &__get_cpu_var(cpus);
 	struct task_struct *next = NULL;
 
@@ -1340,22 +1431,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 		low_prio_arrival(entry->will_schedule);
 	}
 
-	if (prev && tsk_rt(prev)->last_exec_time) {
-		exec = litmus_clock() - tsk_rt(prev)->last_exec_time;
-		tsk_rt(prev)->user_job.exec_time += exec;
-	}
-
-	if (prev && tsk_mc_crit(prev) == CRIT_LEVEL_B &&
-	    is_realtime(prev) && get_rt_job(prev) > 1 && lock_cache) {
-		start = litmus_clock();
-		work = color_sched_out_task(prev);
-		tsk_rt(prev)->flush = litmus_clock() - start;
-		++tsk_rt(prev)->flush_work;
-	}
-
-	TS_LVLA_SCHED_START;
-	TS_LVLB_SCHED_START;
-	TS_LVLC_SCHED_START;
+	pre_schedule(prev);
 
 	raw_spin_lock(&entry->lock);
 
@@ -1406,9 +1482,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 		job_arrival(entry->scheduled);
 	}
 
-	/* TODO: move this down somehow */
-	sched_state_task_picked();
-
+	/* Acquires the entry lock */
 	process_signals(entry);
 
 	/* Pick next task if none is linked */
@@ -1424,23 +1498,12 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 
 	raw_spin_unlock(&entry->lock);
 
-	if (next) {
-		switch (tsk_mc_crit(next)) {
-		case CRIT_LEVEL_A: TS_LVLA_SCHED_END(next); break;
-		case CRIT_LEVEL_B: TS_LVLB_SCHED_END(next); break;
-		case CRIT_LEVEL_C: TS_LVLC_SCHED_END(next); break;
-		}
-	}
+	sched_state_task_picked();
 
-	if (next && tsk_mc_crit(next) == CRIT_LEVEL_B && lock_cache && get_rt_job(next) > 1) {
-		start = litmus_clock();
-		work = color_sched_in_task(next);
-		tsk_rt(next)->load = litmus_clock() - start;
-		tsk_rt(next)->load_work = work;
-	}
+	reschedule_if_signaled(entry);
 
 	if (next) {
-		tsk_rt(next)->last_exec_time = litmus_clock();
+		post_schedule(next);
 		TRACE_MC_TASK(next, "Picked this task\n");
 	} else {
 		STRACE("CPU %d idles at %llu\n", entry->cpu, litmus_clock());
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index c799738c0090..9f6929e55ab2 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -118,9 +118,9 @@ unsigned int mc_ce_get_expected_job(const int cpu, const int idx)
 static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time)
 {
 	long long st = atomic64_read(&start_time);
-	//lt_t offset = (when - st) % cycle_time;
-	lt_t offset = 0;
-	TRACE("when: %llu cycle_time: %llu start_time: %lld offset %llu\n",
+	lt_t offset = (when - st) % cycle_time;
+	/* lt_t offset = 0; */
+	STRACE("when: %llu cycle_time: %llu start_time: %lld offset %llu\n",
 			when, cycle_time, st, offset);
 	return offset;
 }
@@ -138,7 +138,7 @@ void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
 	const struct ce_pid_entry *pid_entry = get_pid_entry(cpu, idx);
 	unsigned int just_finished;
 
-	TRACE_TASK(ts, "Completed\n");
+	TRACE_MC_TASK(ts, "Completed\n");
 
 	/* sched_trace_task_completion(ts, 0); */
 	/* post-increment is important here */
@@ -152,13 +152,13 @@ void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
 	if (just_finished < pid_entry->expected_job) {
 		/* this job is already released because it's running behind */
 		set_rt_flags(ts, RT_F_RUNNING);
-		TRACE_TASK(ts, "appears behind: the expected job is %u but "
+		TRACE_MC_TASK(ts, "appears behind: the expected job is %u but "
 			   "job %u just completed\n",
 			   pid_entry->expected_job, just_finished);
 	} else if (pid_entry->expected_job < just_finished) {
-		printk(KERN_CRIT "job %u completed in expected job %u which "
+		TRACE_MC_TASK(ts, "job %u completed in expected job %u which "
 			   "seems too early\n", just_finished,
 			   pid_entry->expected_job);
 	}
 }
 
@@ -189,7 +189,7 @@ static int mc_ce_schedule_at(const struct domain *dom, lt_t offset)
 	}
 	/* can only happen if cycle_time is not right */
 	BUG_ON(pid_entry->acc_time > pid_table->cycle_time);
-	TRACE("schedule at returning task %d for CPU %d\n", idx, ce_data->cpu);
+	STRACE("schedule at returning task %d for CPU %d\n", idx, ce_data->cpu);
 	return idx;
 }
 
@@ -211,7 +211,7 @@ static struct task_struct *mc_ce_schedule(struct task_struct *prev)
 	exists = NULL != ce_data->scheduled;
 	sleep = exists && RT_F_SLEEP == get_rt_flags(ce_data->scheduled);
 
-	TRACE("exists: %d, sleep: %d\n", exists, sleep);
+	STRACE("exists: %d, sleep: %d\n", exists, sleep);
 
 	if (sleep)
 		mc_ce_job_completion(dom, ce_data->scheduled);
@@ -226,7 +226,7 @@ static struct task_struct *mc_ce_schedule(struct task_struct *prev)
 	should_sched_asleep = should_sched_exists &&
 		RT_F_SLEEP == get_rt_flags(ce_data->should_schedule);
 
-	TRACE("should_sched_exists: %d, should_sched_blocked: %d, "
+	STRACE("should_sched_exists: %d, should_sched_blocked: %d, "
 			"should_sched_asleep: %d\n", should_sched_exists,
 			should_sched_blocked, should_sched_asleep);
 
@@ -248,7 +248,7 @@ static void mc_ce_finish_switch(struct task_struct *prev)
 	struct domain *dom = get_domain_for(smp_processor_id());
 	struct ce_dom_data *ce_data = dom->data;
 
-	TRACE("finish switch\n");
+	STRACE("finish switch\n");
 
 	if (is_realtime(current) && CRIT_LEVEL_A == tsk_mc_crit(current))
 		ce_data->scheduled = current;
@@ -495,19 +495,19 @@ lt_t mc_ce_timer_callback_common(struct domain *dom)
 		}
 	}
 
-	if (ce_data->should_schedule) {
-		get_deadline(should_schedule) =
-			cycle_start_abs + pid_entry->acc_time;
-		get_release(should_schedule) = tsk_rt(should_schedule)->job_params.deadline -
-			pid_entry->budget;
-		tsk_rt(should_schedule)->job_params.exec_time = 0;
+	/* if (ce_data->should_schedule) { */
+	/* 	get_deadline(should_schedule) = */
+	/* 		cycle_start_abs + pid_entry->acc_time; */
+	/* 	get_release(should_schedule) = tsk_rt(should_schedule)->job_params.deadline - */
+	/* 		pid_entry->budget; */
+	/* 	tsk_rt(should_schedule)->job_params.exec_time = 0; */
 
-		TRACE_MC_TASK(should_schedule, "Released!\n");
-		set_rt_flags(ce_data->should_schedule, RT_F_RUNNING);
-		sched_trace_task_release(should_schedule);
-		sched_trace_server_release(-should_schedule->pid, get_rt_job(should_schedule),
-				tsk_rt(should_schedule)->job_params);
-	}
+	/* 	TRACE_MC_TASK(should_schedule, "Released!\n"); */
+	/* 	set_rt_flags(ce_data->should_schedule, RT_F_RUNNING); */
+	/* 	sched_trace_task_release(should_schedule); */
+	/* 	sched_trace_server_release(-should_schedule->pid, get_rt_job(should_schedule), */
+	/* 			tsk_rt(should_schedule)->job_params); */
+	/* } */
 	return next_timer_abs;
 }
 
@@ -535,7 +535,7 @@ static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
 #endif
 	dom = get_domain_for(ce_data->cpu);
 
-	TRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu);
+	STRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu);
 
 	raw_spin_lock_irqsave(dom->lock, flags);
 	next_timer_abs = mc_ce_timer_callback_common(dom);
@@ -569,7 +569,7 @@ static int cancel_all_timers(void)
 	int cancel_res;
 #endif
 
-	TRACE("cancel all timers\n");
+	STRACE("cancel all timers\n");
 
 	for_each_online_cpu(cpu) {
 		dom = get_domain_for(cpu);
@@ -579,6 +579,8 @@ static int cancel_all_timers(void)
 		cancel_event(&ce_data->event);
 #else
 		cancel_res = hrtimer_cancel(&ce_data->timer);
+		atomic_set(&ce_data->timer_info.state,
+			   HRTIMER_START_ON_INACTIVE);
 		ret = ret || cancel_res;
 #endif
 	}
@@ -598,7 +600,7 @@ static void arm_all_timers(void)
 	int cpu, idx, cpu_for_timer;
 	const lt_t start = atomic64_read(&start_time);
 
-	TRACE("arm all timers\n");
+	STRACE("arm all timers\n");
 
 	for_each_online_cpu(cpu) {
 		dom = get_domain_for(cpu);
@@ -619,6 +621,9 @@ static void arm_all_timers(void)
 		add_event(get_event_group_for(cpu_for_timer),
 			  &ce_data->event, start);
 #else
+		hrtimer_start_on(cpu_for_timer, &ce_data->timer_info,
+				 &ce_data->timer, ns_to_ktime(start),
+				 HRTIMER_MODE_ABS_PINNED);
 #endif
 	}
 }
@@ -630,7 +635,7 @@ static void arm_all_timers(void)
  */
 void mc_ce_release_at_common(struct task_struct *ts, lt_t start)
 {
-	TRACE("release CE at %llu\n", start);
+	STRACE("release CE at %llu\n", start);
 	if (atomic_inc_and_test(&start_time_set)) {
 		/* in this case, we won the race */
 		cancel_all_timers();
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 5a196e1b0fbd..c30fa1d74726 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -198,15 +198,9 @@ feather_callback void do_sched_trace_task_completion(unsigned long id,
 	struct task_struct *t = (struct task_struct*) _task;
 	struct st_event_record* rec = get_record(ST_COMPLETION, t);
 	if (rec) {
-		rec->data.completion.exec = tsk_rt(t)->user_job.exec_time;
-		rec->data.completion.flush = tsk_rt(t)->flush;
-		rec->data.completion.load = tsk_rt(t)->load;
-		rec->data.completion.flush_work = tsk_rt(t)->flush_work;
-		rec->data.completion.load_work = tsk_rt(t)->load_work;
-		tsk_rt(t)->flush = 0;
+		rec->data.completion.when = now();
+		rec->data.completion.load = tsk_rt(t)->load;
 		tsk_rt(t)->load = 0;
-		tsk_rt(t)->flush_work = 0;
-		tsk_rt(t)->load_work = 0;
 		put_record(rec);
 	}
 }
@@ -245,34 +239,6 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 	}
 }
 
-feather_callback void do_sched_trace_task_exit(unsigned long id,
-					       unsigned long _task)
-{
-	struct task_struct *t = (struct task_struct*) _task;
-	const lt_t max_exec_time = tsk_rt(t)->max_exec_time;
-	const lt_t avg_exec_time = div64_u64(tsk_rt(t)->tot_exec_time, (get_job_no(t) - 1));
-
-	struct st_event_record *rec = get_record(ST_TASK_EXIT, t);
-	if (rec) {
-		rec->data.task_exit.avg_exec_time = avg_exec_time;
-		rec->data.task_exit.max_exec_time = max_exec_time;
-		put_record(rec);
-	}
-}
-
-feather_callback void do_sched_trace_task_tardy(unsigned long id,
-						unsigned long _task)
-{
-	struct task_struct *t = (struct task_struct*) _task;
-	struct st_event_record *rec = get_record(ST_TASK_TARDY, t);
-	if (rec) {
-		rec->data.task_tardy.max_tardy = tsk_rt(t)->max_tardy;
-		rec->data.task_tardy.total_tardy = tsk_rt(t)->total_tardy;
-		rec->data.task_tardy.missed = tsk_rt(t)->missed;
-		put_record(rec);
-	}
-}
-
 feather_callback void do_sched_trace_action(unsigned long id,
 					    unsigned long _task,
 					    unsigned long action)
diff --git a/litmus/way_tracker.c b/litmus/way_tracker.c
index ff392ab09c4d..9131c9658c2a 100644
--- a/litmus/way_tracker.c
+++ b/litmus/way_tracker.c
@@ -41,7 +41,9 @@ static int take_next_way(unsigned int color)
 	} else {
 		printk(KERN_WARNING "Vury bad\n");
 		/* Seriously bad. */
+#ifdef CONFIG_KGDB
 		kgdb_breakpoint();
+#endif
 		BUG();
 	}
 	raw_spin_unlock(&lock);