author     Glenn Elliott <gelliott@cs.unc.edu>   2013-02-20 09:58:37 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2013-02-20 09:58:37 -0500
commit     18aae5bfbead48ce3ae03da8932a69b641e18f98
tree       0585c32c60c5ffab00ef61a87b4c480b9cdfa8a0
parent     6da7825f25093758981c1436f6956569662f72b1
Fixed migration tracing. Added injection of ST_ACTION events.
-rw-r--r--  include/litmus/rt_param.h     | 14
-rw-r--r--  include/litmus/sched_trace.h  | 49
-rw-r--r--  litmus/sched_task_trace.c     | 51
3 files changed, 64 insertions, 50 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index c4cba8551c47..de20eff7cf71 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -59,14 +59,20 @@ typedef enum {
 	ST_INJECT_COMPLETION, /* supported */
 	ST_INJECT_BLOCK,
 	ST_INJECT_RESUME,
-	ST_INJECT_ACTION,
+	ST_INJECT_ACTION, /* supported */
 	ST_INJECT_SYS_RELEASE, /* supported */
 } sched_trace_injection_events_t;
 
 struct st_inject_args {
-	lt_t release;
-	lt_t deadline;
-	unsigned int job_no;
+	union
+	{
+		struct{
+			lt_t release;
+			lt_t deadline;
+			unsigned int job_no;
+		};
+		unsigned int action;
+	};
 };
 
 /* We use the common priority interpretation "lower index == higher priority",
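The union above overlays the new 'action' member on the same storage as the release/deadline/job_no triple, so st_inject_args does not grow. Below is a minimal sketch of how the two variants might be filled; inject_release_args(), inject_action_args() and the lt_t typedef are illustrative assumptions, only the struct layout and the event names come from this patch.

#include <string.h>

typedef unsigned long long lt_t;   /* assumed: LITMUS^RT time type (ns) */

struct st_inject_args {
	union {
		struct {
			lt_t release;
			lt_t deadline;
			unsigned int job_no;
		};
		unsigned int action;
	};
};

/* For a release-style injection the anonymous struct members are
 * meaningful and 'action' is not. */
static void inject_release_args(struct st_inject_args *a, lt_t release,
				lt_t deadline, unsigned int job_no)
{
	memset(a, 0, sizeof(*a));
	a->release  = release;
	a->deadline = deadline;
	a->job_no   = job_no;
}

/* For an ST_INJECT_ACTION record only 'action' is meaningful; it
 * overlays the same bytes as 'release'. */
static void inject_action_args(struct st_inject_args *a, unsigned int action)
{
	memset(a, 0, sizeof(*a));
	a->action = action;
}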
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 0d22c275dd72..0785db39b2fc 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -61,7 +61,8 @@ struct st_completion_data { /* A job completed. */
 
 struct st_block_data { /* A task blocks. */
 	u64	when;
-	u64	__unused;
+	u8	for_io;
+	u8	__unused[7];
 } __attribute__((packed));
 
 struct st_resume_data { /* A task resumes. */
@@ -71,8 +72,8 @@ struct st_resume_data { /* A task resumes. */
 
 struct st_action_data {
 	u64	when;
-	u8	action;
-	u8	__unused[7];
+	u32	action;
+	u8	__unused[4];
 } __attribute__((packed));
 
 struct st_sys_release_data {
@@ -138,20 +139,24 @@ struct st_nv_interrupt_end_data {
 	u32	serialNumber;
 } __attribute__((packed));
 
-struct st_prediction_err_data {
-	u64	distance;
-	u64	rel_err;
-} __attribute__((packed));
-
 struct st_migration_data {
 	u64	observed;
 	u64	estimated;
 } __attribute__((packed));
 
+
+/* passed as an argument to tracing for st_migration_data */
 struct migration_info {
 	u64	observed;
 	u64	estimated;
 	u8	distance;
+};
+
+struct st_lock_data{
+	u64	when;
+	u32	lock_id;
+	u8	acquired;
+	u8	__unused[3];
 } __attribute__((packed));
 
 #define DATA(x) struct st_ ## x ## _data x;
@@ -179,8 +184,8 @@ typedef enum {
 	ST_NV_INTERRUPT_BEGIN,
 	ST_NV_INTERRUPT_END,
 
-	ST_PREDICTION_ERR,
 	ST_MIGRATION,
+	ST_LOCK
 } st_event_record_type_t;
 
 struct st_event_record {
@@ -209,8 +214,8 @@ struct st_event_record {
 		DATA(nv_interrupt_begin);
 		DATA(nv_interrupt_end);
 
-		DATA(prediction_err);
 		DATA(migration);
+		DATA(lock);
 	} data;
 } __attribute__((packed));
 
@@ -284,19 +289,14 @@ feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
 feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id,
 			unsigned long unused);
 
-feather_callback void do_sched_trace_prediction_err(unsigned long id,
-			struct task_struct* task,
-			gpu_migration_dist_t* distance,
-			fp_t* rel_err);
-
-
-
-
-
 feather_callback void do_sched_trace_migration(unsigned long id,
 			struct task_struct* task,
 			struct migration_info* mig_info);
 
+feather_callback void do_sched_trace_lock(unsigned long id,
+			struct task_struct* task,
+			unsigned long lock_id,
+			unsigned long acquired);
 
 /* returns true if we're tracing an interrupt on current CPU */
 /* int is_interrupt_tracing_active(void); */
@@ -428,11 +428,14 @@ feather_callback void do_sched_trace_migration(unsigned long id,
 #define sched_trace_nv_interrupt_end(d) \
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d)
 
-#define sched_trace_prediction_err(t, dist, rel_err) \
-	SCHED_TRACE3(SCHED_TRACE_BASE_ID + 20, do_sched_trace_prediction_err, t, dist, rel_err)
-
 #define sched_trace_migration(t, mig_info) \
-	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 21, do_sched_trace_migration, t, mig_info)
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 20, do_sched_trace_migration, t, mig_info)
+
+
+#define sched_trace_lock(t, lock_id, acquired) \
+	SCHED_TRACE3(SCHED_TRACE_BASE_ID + 21, do_sched_trace_lock, t, lock_id, acquired)
+
+
 
 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 
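With ST_PREDICTION_ERR removed, sched_trace_migration() shifts from SCHED_TRACE_BASE_ID + 21 to + 20 and the new sched_trace_lock() takes + 21, so trace tooling that relies on the old numbering will need a matching update. The fragment below sketches how a locking protocol might call the new hook at its acquire/release points; my_lock_acquire() and my_lock_release() are placeholders, only sched_trace_lock() and its argument order come from this patch.

#include <linux/sched.h>
#include <litmus/sched_trace.h>

static void my_lock_acquire(struct task_struct *t, unsigned int lock_id)
{
	/* ... protocol-specific acquisition logic ... */

	/* record that 't' now holds 'lock_id' (acquired = 1) */
	sched_trace_lock(t, lock_id, 1);
}

static void my_lock_release(struct task_struct *t, unsigned int lock_id)
{
	/* record the hand-off before the release itself (acquired = 0) */
	sched_trace_lock(t, lock_id, 0);

	/* ... protocol-specific release logic ... */
}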
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index d1df0127cfa4..e863eaf41b96 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -200,8 +200,21 @@ feather_callback void do_sched_trace_task_block(unsigned long id,
 {
 	struct task_struct *t = (struct task_struct*) _task;
 	struct st_event_record* rec = get_record(ST_BLOCK, t);
+
 	if (rec) {
 		rec->data.block.when = now();
+
+		// hiding is turned on by locking protocols, so if there isn't any
+		// hiding, then we're blocking for some other reason. assume it's I/O.
+		rec->data.block.for_io = 0
+#ifdef CONFIG_REALTIME_AUX_TASKS
+			|| (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks)
+#endif
+#ifdef CONFIG_LITMUS_NVIDIA
+			|| (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
+#endif
+		;
+
 		put_record(rec);
 	}
 }
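The interleaved #ifdef expression above sets for_io whenever the task uses aux tasks or holds GPUs but the corresponding hiding flag is off, i.e. the block was not induced by a locking protocol and is therefore presumed to be I/O. If it lived next to the callback above, the same predicate could be factored into a helper for readability; blocked_for_io() below is a hypothetical name, and the tsk_rt() fields are exactly the ones the hunk reads.

static inline int blocked_for_io(struct task_struct *t)
{
	int for_io = 0;

#ifdef CONFIG_REALTIME_AUX_TASKS
	/* aux tasks in use, but aux-task hiding is off */
	for_io = for_io ||
		(tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks);
#endif
#ifdef CONFIG_LITMUS_NVIDIA
	/* GPUs held, but GPU hiding is off */
	for_io = for_io ||
		(tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu);
#endif

	return for_io;
}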
@@ -243,28 +256,6 @@ feather_callback void do_sched_trace_action(unsigned long id,
 	}
 }
 
-
-
-
-feather_callback void do_sched_trace_prediction_err(unsigned long id,
-						unsigned long _task,
-						unsigned long _distance,
-						unsigned long _rel_err)
-{
-	struct task_struct *t = (struct task_struct*) _task;
-	struct st_event_record *rec = get_record(ST_PREDICTION_ERR, t);
-
-	if (rec) {
-		gpu_migration_dist_t* distance = (gpu_migration_dist_t*) _distance;
-		fp_t* rel_err = (fp_t*) _rel_err;
-
-		rec->data.prediction_err.distance = *distance;
-		rec->data.prediction_err.rel_err = rel_err->val;
-		put_record(rec);
-	}
-}
-
-
 feather_callback void do_sched_trace_migration(unsigned long id,
 						unsigned long _task,
 						unsigned long _mig_info)
@@ -285,7 +276,21 @@ feather_callback void do_sched_trace_migration(unsigned long id,
 
 
 
+feather_callback void do_sched_trace_lock(unsigned long id,
+						unsigned long _task,
+						unsigned long _lock_id,
+						unsigned long _acquired)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record *rec = get_record(ST_LOCK, t);
 
+	if (rec) {
+		rec->data.lock.when = now();
+		rec->data.lock.lock_id = _lock_id;
+		rec->data.lock.acquired = _acquired;
+		put_record(rec);
+	}
+}
 
 
 
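On the reader side, the new and changed records can be decoded from a stream of struct st_event_record as laid out in sched_trace.h. The sketch below is a post-processing fragment under stated assumptions: print_new_events() is an illustrative helper, the hdr.type/hdr.pid field names are assumed to follow the record header in that file (not shown in this patch), and the kernel header is assumed to be usable from user space for the packed layouts.

#include <stdio.h>
#include <litmus/sched_trace.h>

static void print_new_events(const struct st_event_record *rec)
{
	switch (rec->hdr.type) {
	case ST_BLOCK:
		/* for_io now occupies the first byte of what used to be padding */
		printf("pid %u blocked at %llu (for_io=%u)\n",
		       (unsigned) rec->hdr.pid,
		       (unsigned long long) rec->data.block.when,
		       (unsigned) rec->data.block.for_io);
		break;
	case ST_LOCK:
		/* new in this patch: lock_id plus an acquired/released flag */
		printf("pid %u %s lock %u at %llu\n",
		       (unsigned) rec->hdr.pid,
		       rec->data.lock.acquired ? "acquired" : "released",
		       (unsigned) rec->data.lock.lock_id,
		       (unsigned long long) rec->data.lock.when);
		break;
	case ST_MIGRATION:
		printf("pid %u migration: observed=%llu estimated=%llu\n",
		       (unsigned) rec->hdr.pid,
		       (unsigned long long) rec->data.migration.observed,
		       (unsigned long long) rec->data.migration.estimated);
		break;
	default:
		break;
	}
}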